blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7c30c30dcc7cc854a841fbb8a6e3e7b45eb5bcf8 | 22aa900e70c8cc6005ecadbb2ae710526af8d3ba | /course/forms.py | 8da7a5b70959e28879bb8df24e4db2ededff90aa | [] | no_license | skafis/career_choice | f79ac3df223122a19a7718d9247ca4e2e72ee22e | 84d3ec752ba6da60e7130f132bd329ff72d66cae | refs/heads/master | 2021-01-13T02:50:44.580867 | 2016-12-22T16:32:40 | 2016-12-22T16:32:40 | 77,144,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | from django import forms
from .models import Courses
class add_coursesForm(forms.ModelForm):
    """ModelForm for creating Courses rows; exposes name, time and cost."""
    class Meta:
        # The model this form creates/edits.
        model = Courses
        fields = [
            'name',
            'time',
            'cost'
        ] | [
"franciskipchumba5@gmail.com"
] | franciskipchumba5@gmail.com |
1999644c558f0f3bf2fc69e88aea396932927a64 | f3ad39ebf9654c99edb33c0fee843a53f9b6c31a | /backend/wesmusicmedia_20833/settings.py | 66d308873bed16394e0b2159023b8a1dcdfc1907 | [] | no_license | crowdbotics-apps/wesmusicmedia-20833 | 306814d32b3acd43c446cd004351c9fb93009afa | 474162c36a486c6028cfec8214d93d83fda4e235 | refs/heads/master | 2022-12-25T19:45:48.328522 | 2020-09-29T17:51:09 | 2020-09-29T17:51:09 | 299,693,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,900 | py | """
Django settings for wesmusicmedia_20833 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# No default on purpose: startup fails fast if SECRET_KEY is not in the environment.
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
# Trust the proxy's forwarded-proto header so Django knows a request was HTTPS.
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "django.contrib.sites",
    "event",
]
LOCAL_APPS = [
    "home",
    "users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
    "rest_framework",
    "rest_framework.authtoken",
    "rest_auth",
    "rest_auth.registration",
    "bootstrap4",
    "allauth",
    "allauth.account",
    "allauth.socialaccount",
    "allauth.socialaccount.providers.google",
    "django_extensions",
    "drf_yasg",
    # start fcm_django push notifications
    "fcm_django",
    # end fcm_django push notifications
]
# Final app list: Django/contrib apps first, then local apps, then third party.
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "wesmusicmedia_20833.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]
WSGI_APPLICATION = "wesmusicmedia_20833.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# SQLite is the fallback; a DATABASE_URL env var overrides it just below.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": os.path.join(BASE_DIR, "db.sqlite3"),
    }
}
if env.str("DATABASE_URL", default=None):
    DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
# WhiteNoise serves collected static files; appended so it runs after the list above.
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
    "django.contrib.auth.backends.ModelBackend",
    "allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users: e-mail is the login identifier and must be verified.
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
    # Replace password reset serializer to fix 500 error
    "PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
    # Use custom serializer that has no username and matches web signup
    "REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
# Outgoing e-mail via SendGrid SMTP; credentials come from the environment.
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
    "DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG:
    # output email to console instead of sending
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
80243321d8683bd4650a0a54e8f33792991bf2dc | 6fca7f97a9c8ed83daf94acaf6ad610773046930 | /Vendas.py | 58ec5fe765b8337c09303baed16b22378d934530 | [] | no_license | RenanSantos89/AnaliseVendasShopping | a0fc06d613a0948afd5a416dccf141cf8a1fd36e | 703423652e3d69009c5d4c6e57e118ffda211878 | refs/heads/master | 2023-06-26T08:02:35.807826 | 2021-07-22T14:19:42 | 2021-07-22T14:19:42 | 388,424,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,565 | py | import pandas as pd
import win32com.client as win32
# Load the sales database from the Excel workbook.
tb_vendas = pd.read_excel('Vendas_Shopping.xlsx')
# Show every column when a dataframe is printed (debug aid).
pd.set_option('display.max_columns', None)
#print(tb_vendas)
# Revenue per store.
Faturamento_loja = tb_vendas[['ID Loja','Valor Final']].groupby('ID Loja').sum()
#print(Faturamento_loja)
# Quantity of products sold per store.
QTD_prod_loja = tb_vendas[['ID Loja','Quantidade']].groupby('ID Loja').sum()
#print(QTD_prod_loja)
# Average ticket per product in each store (revenue / quantity sold).
Tic_medio = (Faturamento_loja['Valor Final'] / QTD_prod_loja['Quantidade']).to_frame()
# The division produces an unnamed column (0); give it a readable name.
Tic_medio = Tic_medio.rename(columns={0: 'Ticket Medio'})
print(Tic_medio)
# Send the report by e-mail through the local Outlook client (Windows only).
outlook = win32.Dispatch('outlook.application')
# CreateItem(0) -> a new Outlook mail item.
mail = outlook.CreateItem(0)
# NOTE(review): placeholder recipients - must be replaced before running.
mail.To = '???@gmail.com;?????@gmail.com'
mail.Subject = 'Relatorio de Vendas por Loja'
# Number format 'R${:,.2f}': ',' thousands separator, '.' decimal mark, 2 decimal places.
mail.HTMLBody = f'''
<p>Prezados,</p>
<p>Segue o Relatorio de Vendas por cada loja.</p>
<p>Faturamento:</p>
{Faturamento_loja.to_html(formatters={'Valor Final': 'R${:,.2f}'.format})}
<p>Quantidade Vendida: </p>
{QTD_prod_loja.to_html()}
<p>Ticket medio dos produtos em cada Loja: </p>
{Tic_medio.to_html(formatters={'Ticket Medio': 'R${:,.2f}'.format})}
<p>Att, </p>
<p>Renan Silva Santos </p>
'''
mail.Send()
print('Email Enviado') | [
"renan.jobs13@gmail.com"
] | renan.jobs13@gmail.com |
9530f2161cb6209d9db71fbebc16805db550b037 | 7578ed7ba3d6cd8458f0c653bec0ea80cf9b7ecd | /make_csv_test.py | 80dfeaad233dc2e288e2fcee86cc6fad5e6dea72 | [] | no_license | Mtakenoko/endoscope_augmentation | 93e379f11cb43b2a94921400e5bb1f5739ab4a60 | 3b648ffa7dde21b16a7d73134275ec487ba74b32 | refs/heads/master | 2023-01-27T16:28:32.711485 | 2020-12-04T16:53:17 | 2020-12-04T16:53:17 | 308,393,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | import os
import csv
def csv_test():
    """Pair every <name>.jpg under eyemodel_test/<subdir>/ with its <name>.png
    and write the pairs, one per row, to eyemodel_test.csv.

    The .png with the same stem is assumed to exist alongside each .jpg -
    TODO confirm; missing masks are not checked here.
    """
    path = "eyemodel_test"
    files = os.listdir(path)
    # Keep only the sub-directories of the dataset root.
    files_dir = [f for f in files if os.path.isdir(os.path.join(path, f))]
    # newline='' so csv.writer controls line endings itself.
    with open(path + '.csv', 'w', newline='') as f:
        writer = csv.writer(f)
        for dir in files_dir:
            images = os.listdir(path+'/'+ dir)
            # Only regular files inside the sub-directory are candidates.
            images_files = [i for i in images if os.path.isfile(os.path.join(path +'/'+ dir, i))]
            for i in images_files:
                base, ext = os.path.splitext(i)
                # Only .jpg images drive the pairing (extension match is case-sensitive).
                if ext == '.jpg':
                    writer.writerow(['data/'+ path + '/' + dir +'/' + base +'.jpg', 'data/'+ path + '/' + dir +'/' + base +'.png']) | [
"takehaya.724@gmail.com"
] | takehaya.724@gmail.com |
944266199b8a3c3742d2561e20f9de6f0dbba96b | da3c44999e9f21dfdc7b66381cc219d6108cf5ac | /emergency-room.py | ffe37a95ff3cf94680d271576705a98e64ef4881 | [
"MIT"
] | permissive | josehenriqueroveda/emergency-room | 1528660a2c39fb10472cd2f11e2fb4d9047b5dcc | 238f08e78bf98709c5089f246d6624cac74a38ba | refs/heads/master | 2022-12-18T14:48:29.222089 | 2020-09-10T12:03:55 | 2020-09-10T12:03:55 | 290,506,509 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,347 | py | import simpy
import random
import pandas as pd
import matplotlib.pyplot as plt
logo = """
______ _____
| ____| | __ \
| |__ _ __ ___ ___ _ __ __ _ ___ _ __ ___ _ _ | |__) |___ ___ _ __ ___
| __| | '_ ` _ \ / _ \ '__/ _` |/ _ \ '_ \ / __| | | | | _ // _ \ / _ \| '_ ` _ \
| |____| | | | | | __/ | | (_| | __/ | | | (__| |_| | | | \ \ (_) | (_) | | | | | |
|______|_| |_| |_|\___|_| \__, |\___|_| |_|\___|\__, | |_| \_\___/ \___/|_| |_| |_|
__/ | __/ |
|___/ |___/
"""
print(logo)
class EmergencyRoom:
    """Global parameters and shared state for the ER simulation.

    NOTE(review): the input() calls below execute when the class body runs,
    i.e. at import time - importing this module is interactive by design.
    All attributes are class-level and shared by the whole simulation.
    """
    # Time in minutes
    # Simulation run time and warm-up (warm-up is time before audit results are collected)
    simulation_time = int(input('Enter how many days for the simulation: '))*1440
    # Warm-up is fixed at one fifth of the total run time.
    warm_up = int(simulation_time / 5)
    # Average time between patients arriving
    inter_arrival_time = int(input('Average patient arrival time in minutes: '))
    # Number of doctors in ER
    n_doctors = int(input('Number of doctors: '))
    # Time between audits
    audit_interval = 100
    # Average and standard deviation of time doctors spends with patients in ER
    appointment_mean = int(input('Average time in medical consultation (minutes): '))
    appointment_std = int(input('Standard deviation in time in medical consultation (minutes): '))
    # Lists to store audit results (appended to by Model.perform_audit)
    audit_time = []
    audit_patients_in_ER = []
    audit_patients_waiting = []
    audit_patients_waiting_p1 = []
    audit_patients_waiting_p2 = []
    audit_patients_waiting_p3 = []
    audit_reources_used = []
    # Dataframes to store results
    patient_queuing_results = pd.DataFrame(
        columns=['priority', 'q_time', 'consult_time'])
    results = pd.DataFrame()
    # Number of patients admitted so far (also the last patient id issued)
    patient_count = 0
    # Total patients waiting, and per-priority counts
    patients_waiting = 0
    # Index 0/1/2 hold the counts for priority 1/2/3 respectively.
    patients_waiting_by_priority = [0, 0, 0]
class Model:
    """Owns the SimPy environment and runs the whole ER simulation.

    Tunable parameters and shared counters live on EmergencyRoom; this class
    provides the SimPy processes (admissions, audits, appointments) and the
    end-of-run reporting (two CSV files plus matplotlib charts).
    """
    def __init__(self):
        # A single SimPy environment drives every process in the model.
        self.env = simpy.Environment()
    def build_audit_results(self):
        """Copy the audit lists collected during the run into the results frame."""
        EmergencyRoom.results['time'] = EmergencyRoom.audit_time
        EmergencyRoom.results['patients in ER'] = EmergencyRoom.audit_patients_in_ER
        EmergencyRoom.results['all patients waiting'] = \
            EmergencyRoom.audit_patients_waiting
        EmergencyRoom.results['high priority patients waiting'] = \
            EmergencyRoom.audit_patients_waiting_p1
        EmergencyRoom.results['medium priority patients waiting'] = \
            EmergencyRoom.audit_patients_waiting_p2
        EmergencyRoom.results['low priority patients waiting'] = \
            EmergencyRoom.audit_patients_waiting_p3
        EmergencyRoom.results['resources occupied'] = \
            EmergencyRoom.audit_reources_used
    def chart(self):
        """Plot per-patient queuing times, queue lengths and doctor usage."""
        fig = plt.figure(figsize=(12, 4.5), dpi=75)
        # Create charts side by side
        # Figure 1: patient perspective results
        ax1 = fig.add_subplot(131) # 1 row, 3 cols, chart position 1
        x = EmergencyRoom.patient_queuing_results.index
        # Chart loops through the 3 priorities, one scatter series per priority
        markers = ['o', 'x', '^']
        for priority in range(1, 4):
            x = (EmergencyRoom.patient_queuing_results
                 [EmergencyRoom.patient_queuing_results['priority'] ==
                 priority].index)
            y = (EmergencyRoom.patient_queuing_results
                 [EmergencyRoom.patient_queuing_results['priority'] ==
                 priority]['q_time'])
            ax1.scatter(x, y,
                        marker=markers[priority - 1],
                        label='Priority ' + str(priority))
        ax1.set_xlabel('Patient ID')
        ax1.set_ylabel('Waiting time in minutes')
        ax1.legend()
        ax1.grid(True, which='both', lw=1, ls='--', c='.75')
        # Figure 2: ER queuing results
        ax2 = fig.add_subplot(132) # 1 row, 3 cols, chart position 2
        x = EmergencyRoom.results['time']/1440 #convert to days
        y1 = EmergencyRoom.results['high priority patients waiting']
        y2 = EmergencyRoom.results['medium priority patients waiting']
        y3 = EmergencyRoom.results['low priority patients waiting']
        y4 = EmergencyRoom.results['all patients waiting']
        ax2.plot(x, y1, marker='o', label='High priority')
        ax2.plot(x, y2, marker='x', label='Medium priority')
        ax2.plot(x, y3, marker='^', label='Low priority')
        ax2.plot(x, y4, marker='s', label='All')
        ax2.set_xlabel('Day')
        ax2.set_ylabel('Patients waiting')
        ax2.legend()
        ax2.grid(True, which='both', lw=1, ls='--', c='.75')
        # Figure 3: ER doctors in attendance
        ax3 = fig.add_subplot(133) # 1 row, 3 cols, chart position 3
        x = EmergencyRoom.results['time']/1440
        y = EmergencyRoom.results['resources occupied']
        ax3.plot(x, y, label='Doctors in attendance')
        ax3.set_xlabel('Day')
        ax3.set_ylabel('Doctors in attendance')
        ax3.legend()
        ax3.grid(True, which='both', lw=1, ls='--', c='.75')
        # Create plot
        plt.tight_layout(pad=3)
        plt.show()
    def perform_audit(self):
        """SimPy process: periodically snapshot queue lengths and doctor usage."""
        # Delay the first audit until the warm-up period has elapsed
        yield self.env.timeout(EmergencyRoom.warm_up)
        # Then trigger repeated audits
        while True:
            # Record time
            EmergencyRoom.audit_time.append(self.env.now)
            # Record patients waiting
            EmergencyRoom.audit_patients_waiting.append(
                EmergencyRoom.patients_waiting)
            EmergencyRoom.audit_patients_waiting_p1.append(
                EmergencyRoom.patients_waiting_by_priority[0])
            EmergencyRoom.audit_patients_waiting_p2.append(
                EmergencyRoom.patients_waiting_by_priority[1])
            EmergencyRoom.audit_patients_waiting_p3.append(
                EmergencyRoom.patients_waiting_by_priority[2])
            # Record patients waiting by asking length of dict of all patients
            EmergencyRoom.audit_patients_in_ER.append(len(Patient.all_patients))
            # Record resources occupied
            EmergencyRoom.audit_reources_used.append(
                self.doc_resources.docs.count)
            # Trigger next audit after interval
            yield self.env.timeout(EmergencyRoom.audit_interval)
    def run(self):
        """Set up resources/processes, run to simulation_time, save and plot."""
        # Set up resources
        self.doc_resources = Resources(self.env, EmergencyRoom.n_doctors)
        # Initialise processes
        self.env.process(self.trigger_admissions())
        self.env.process(self.perform_audit())
        # Run
        self.env.run(until=EmergencyRoom.simulation_time)
        # End of simulation, build and save results
        EmergencyRoom.patient_queuing_results.to_csv('patient results.csv')
        self.build_audit_results()
        EmergencyRoom.results.to_csv('operational results.csv')
        # Plot results
        self.chart()
    def doctor_appointment(self, p):
        """SimPy process for one patient: queue, consult, record, then leave."""
        with self.doc_resources.docs.request(priority=p.priority) as req:
            EmergencyRoom.patients_waiting += 1
            EmergencyRoom.patients_waiting_by_priority[p.priority - 1] += 1
            # Wait for doctors to become available
            yield req
            # Doctor available -> Record time that appointment starts
            p.time_see_doc = self.env.now
            # Patient queuing time
            p.queuing_time = self.env.now - p.time_in
            # Reduce the number of patients waiting
            EmergencyRoom.patients_waiting_by_priority[p.priority - 1] -= 1
            EmergencyRoom.patients_waiting -= 1
            # List with patient priority and queuing time
            _results = [p.priority, p.queuing_time]
            # Hold for the appointment duration
            yield self.env.timeout(p.consulation_time)
            # At end of appointment add the time spent
            _results.append(self.env.now - p.time_see_doc)
            # Record results data only if warm-up is complete
            if self.env.now >= EmergencyRoom.warm_up:
                EmergencyRoom.patient_queuing_results.loc[p.id] = _results
            # Delete patient (removal from patient dictionary removes only
            # reference to patient and Python then automatically cleans up)
            del Patient.all_patients[p.id]
    def trigger_admissions(self):
        """SimPy process: admit new patients forever with exponential gaps."""
        # Generating new patients
        while True:
            # Initialise new patient
            p = Patient(self.env)
            # Add patient to dictionary of patients
            Patient.all_patients[p.id] = p
            # Pass patient to doctor_appointment method
            self.env.process(self.doctor_appointment(p))
            # Sample time for next admission
            next_admission = random.expovariate(
                1 / EmergencyRoom.inter_arrival_time)
            # Schedule next admission
            yield self.env.timeout(next_admission)
class Patient:
    """One ER patient, created with a random priority and consultation length."""

    # Every patient currently inside the simulation, keyed by patient id.
    all_patients = {}

    def __init__(self, env):
        """Admit a new patient at the current simulation time of *env*."""
        # A new arrival bumps the global counter; that count doubles as the id.
        EmergencyRoom.patient_count += 1
        self.id = EmergencyRoom.patient_count
        # Uniform priority: 1 (highest urgency) to 3 (lowest).
        self.priority = random.randint(1, 3)
        # Consultation length ~ Normal(mean, std), clamped so it is never negative.
        sampled_time = random.normalvariate(
            EmergencyRoom.appointment_mean, EmergencyRoom.appointment_std)
        self.consulation_time = max(0, sampled_time)
        # Nothing queued yet; stamp the arrival time from the simulation clock.
        self.queuing_time = 0
        self.time_in = env.now
        # Filled in later by the appointment process: consult start / exit time.
        self.time_see_doc = 0
        self.time_out = 0
class Resources:
    """Bundles the simulation's shared resources (currently just the doctors)."""
    def __init__(self, env, n_doctors):
        # PriorityResource serves the lowest priority number first, so
        # priority-1 patients jump the queue ahead of priorities 2 and 3.
        self.docs = simpy.PriorityResource(env, capacity=n_doctors)
# Run model
if __name__ == '__main__':
# Initialise model environment
model = Model()
# Run model
model.run() | [
"zeh.henrique92@gmail.com"
] | zeh.henrique92@gmail.com |
ef0c265180c890036b834b01e406048d0adff839 | 56b6a13a4652dcd4089b870161b5ba219cb82b6e | /Interface - Jeu_Picross_Mania/Menu_principal/anciennes_versions_menu/fonctions5x5_4.py | 5a2e914a5de9f8c946bc2324916e153d678e3e7e | [] | no_license | fasimonet/Picross-Mania | f7b001c68cbc013d60ce79c2129ffd86b8dc5517 | 0e447020ed4fc901c1e0daba77dd2fb8fbccf5d3 | refs/heads/master | 2020-09-02T08:41:33.262878 | 2019-11-03T13:11:27 | 2019-11-03T13:11:27 | 219,181,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,326 | py | # Créé par Sinan et un peu Fabien (:joke:) le 21/03/2016 en Python 3.2
# Classes du programme "grille"
import pygame
from pygame.locals import *
pygame.init()
from constantes5x5_3 import *
#===============================================================================
# AFFICHAGE DES GRILLES DE JEU
#===============================================================================
# Cette fonction est la fusion de toutes les fonctions ci-dessous, et permet ainsi d'afficher la grille totale
# Notons qu'il faut d'abord afficher les grilles tierces puis la grille principale pour avoir les épaisseurs correctes
def Affichage_Grille(SUPPORT,X_START,Y_START, NBR_CARRE_CENTRAL, NBR_CARRE_GRAND, NBR_CARRE_PETIT, TAILLE_CARRE):
    """Draw the complete puzzle: both hint grids, then the main grid.

    The hint grids are drawn first and the main grid last so the main grid's
    thicker lines are painted on top of the thinner hint-grid lines.
    """
    Affichage_Grille_Haut_Horizontal(SUPPORT,X_START,Y_START, NBR_CARRE_PETIT, NBR_CARRE_GRAND, TAILLE_CARRE)
    Affichage_Grille_Haut_Vertical(SUPPORT,X_START,Y_START, NBR_CARRE_GRAND, NBR_CARRE_PETIT, TAILLE_CARRE)
    Affichage_Grille_Gauche_Horizontal(SUPPORT,X_START,Y_START, NBR_CARRE_GRAND, NBR_CARRE_PETIT, TAILLE_CARRE)
    Affichage_Grille_Gauche_Vertical(SUPPORT,X_START,Y_START, NBR_CARRE_PETIT, NBR_CARRE_GRAND, TAILLE_CARRE)
    Affichage_Grille_Milieu_Horizontal(SUPPORT,X_START,Y_START, NBR_CARRE_CENTRAL, TAILLE_CARRE)
    Affichage_Grille_Milieu_Vertical(SUPPORT,X_START,Y_START, NBR_CARRE_CENTRAL, TAILLE_CARRE)
#-------------------------------------------------------------------------------
# AFFICHAGE DE LA GRILLE DU HAUT CONTENANT LES INDICATIONS
# NECESSAIRES POUR REMPLIR LA GRILLE CENTRALE VERTICALEMENT
#-------------------------------------------------------------------------------
#(I) Affichage des lignes verticales de la grille
#=================================================
def Affichage_Grille_Haut_Vertical(SUPPORT,X_START,Y_START, NBR_CARRE_GRAND, NBR_CARRE_PETIT, TAILLE_CARRE):
    """Draw the vertical lines of the top hint grid (black, 2 px thick).

    (X_START, Y_START) is the grid's bottom-left corner; the grid extends
    NBR_CARRE_PETIT cells upwards and NBR_CARRE_GRAND cells to the right.
    """
    # All vertical lines share the same top endpoint, computed once.
    y_haut = Y_START - TAILLE_CARRE * NBR_CARRE_PETIT
    # One line per column boundary, hence the "+ 1".
    for colonne in range(NBR_CARRE_GRAND + 1):
        x = X_START + colonne * TAILLE_CARRE
        pygame.draw.line(SUPPORT, NOIR, (x, Y_START), (x, y_haut), 2)
#(II) Affichage des lignes horizontales de la grille
#====================================================
def Affichage_Grille_Haut_Horizontal(SUPPORT,X_START,Y_START, NBR_CARRE_PETIT, NBR_CARRE_GRAND, TAILLE_CARRE ):
    """Draw the horizontal lines of the top hint grid (black, 2 px thick).

    Rows are laid out upwards from (X_START, Y_START), one line per row
    boundary, each spanning the grid's full width of NBR_CARRE_GRAND cells.
    """
    # All horizontal lines share the same right endpoint, computed once.
    x_droite = X_START + TAILLE_CARRE * NBR_CARRE_GRAND
    for ligne in range(NBR_CARRE_PETIT + 1):
        y = Y_START - ligne * TAILLE_CARRE
        pygame.draw.line(SUPPORT, NOIR, (X_START, y), (x_droite, y), 2)
#-------------------------------------------------------------------------------
# AFFICHAGE DE LA GRILLE DE GAUCHE CONTENANT LES INDICATIONS
# NECESSAIRES POUR REMPLIR LA GRILLE CENTRALE HORIZONTALEMENT
#-------------------------------------------------------------------------------
#(I) Affichage des lignes verticales de la grille
#=================================================
def Affichage_Grille_Gauche_Vertical(SUPPORT,X_START,Y_START, NBR_CARRE_PETIT, NBR_CARRE_GRAND, TAILLE_CARRE):
    """Draw the vertical lines of the left hint grid (black, 2 px thick).

    Columns extend leftwards from (X_START, Y_START); every line spans the
    grid's full height of NBR_CARRE_GRAND cells downwards.
    """
    # Shared bottom endpoint for every vertical line.
    y_bas = Y_START + TAILLE_CARRE * NBR_CARRE_GRAND
    for colonne in range(NBR_CARRE_PETIT + 1):
        x = X_START - colonne * TAILLE_CARRE
        pygame.draw.line(SUPPORT, NOIR, (x, Y_START), (x, y_bas), 2)
#(II) Affichage des lignes horizontales de la grille
#====================================================
def Affichage_Grille_Gauche_Horizontal(SUPPORT,X_START,Y_START, NBR_CARRE_GRAND, NBR_CARRE_PETIT, TAILLE_CARRE):
    """Draw the horizontal lines of the left hint grid (black, 2 px thick).

    Rows run downwards from (X_START, Y_START); each line spans the grid's
    full width of NBR_CARRE_PETIT cells to the left.
    """
    # Shared left endpoint for every horizontal line.
    x_gauche = X_START - TAILLE_CARRE * NBR_CARRE_PETIT
    for ligne in range(NBR_CARRE_GRAND + 1):
        y = Y_START + ligne * TAILLE_CARRE
        pygame.draw.line(SUPPORT, NOIR, (X_START, y), (x_gauche, y), 2)
#-------------------------------------------------------------------------------
# AFFICHAGE DE LA GRILLE CENTRALE A COMPLETER PAR LE JOUEUR
# EN FONCTION DES INDICATIONS DES DEUX AUTRES GRILLES
#-------------------------------------------------------------------------------
#(I) Affichage des lignes verticales de la grille
#=================================================
def Affichage_Grille_Milieu_Vertical(SUPPORT,X_START,Y_START, NBR_CARRE_CENTRAL, TAILLE_CARRE):
    """Draw the vertical lines of the central play grid (pine green, 4 px).

    The grid is square (NBR_CARRE_CENTRAL cells per side) with its top-left
    corner at (X_START, Y_START); the extra pixel closes the bottom corner.
    """
    y_bas = Y_START + TAILLE_CARRE * NBR_CARRE_CENTRAL + 1
    for colonne in range(NBR_CARRE_CENTRAL + 1):
        x = X_START + colonne * TAILLE_CARRE
        pygame.draw.line(SUPPORT, VERT_PIN, (x, Y_START), (x, y_bas), 4)
#(II) Affichage des lignes horizontales de la grille
#====================================================
def Affichage_Grille_Milieu_Horizontal(SUPPORT,X_START,Y_START, NBR_CARRE_CENTRAL, TAILLE_CARRE):
    """Draw the horizontal lines of the central play grid (pine green, 4 px).

    The grid is square (NBR_CARRE_CENTRAL cells per side) with its top-left
    corner at (X_START, Y_START); the extra pixel closes the right corner.
    """
    x_droite = X_START + TAILLE_CARRE * NBR_CARRE_CENTRAL + 1
    for ligne in range(NBR_CARRE_CENTRAL + 1):
        y = Y_START + ligne * TAILLE_CARRE
        pygame.draw.line(SUPPORT, VERT_PIN, (X_START, y), (x_droite, y), 4)
#===============================================================================
# AFFICHAGE DES FIGURES DE LA GRILLE CENTRALE
#===============================================================================
#-------------------------------------------------------------------------------
# NOIRCISSEMENT DE LA CASE / EFFACEMENT DE LA CASE
#-------------------------------------------------------------------------------
def Afficher_Carre(SUPPORT, COULEUR1, COULEUR2, X_ORIGINE, Y_ORIGINE, CASE_X, CASE_Y, TAILLE_CARRE, EPAISSEUR_CONTOUR):
    """Fill grid cell (CASE_X = row, CASE_Y = column) with COULEUR1 and draw
    an EPAISSEUR_CONTOUR-wide outline in COULEUR2, inset from the grid lines."""
    # Cell left edge: origin + column offset, inset 3 px past the grid line.
    X_START = X_ORIGINE + CASE_Y * TAILLE_CARRE + 3
    # Cell top edge: origin + row offset, same 3 px inset.
    Y_START = Y_ORIGINE + CASE_X * TAILLE_CARRE + 3
    # Width and height of the filled rectangle: 4 px smaller than the cell
    # so the grid lines stay visible around it.
    X_END = TAILLE_CARRE - 4
    Y_END = TAILLE_CARRE - 4
    # Filled interior first, then the outline on top.
    pygame.draw.rect(SUPPORT, COULEUR1, (X_START, Y_START, X_END, Y_END ))
    pygame.draw.rect(SUPPORT, COULEUR2, (X_START, Y_START, X_END, Y_END ), EPAISSEUR_CONTOUR)
#-------------------------------------------------------------------------------
# COCHAGE DE LA CASE
#-------------------------------------------------------------------------------
def Afficher_Croix(SUPPORT,COULEUR, X_ORIGINE, Y_ORIGINE, CASE_X, CASE_Y, TAILLE_CARRE):
    """Draw a 4 px-thick X inside grid cell (CASE_X = row, CASE_Y = column).

    The diagonals use hand-tuned pixel offsets so the cross stays clear of
    the cell borders.
    """
    # First diagonal: start near the top-left corner of the cell...
    X_START_1 = X_ORIGINE + CASE_Y * TAILLE_CARRE + 7
    Y_START_1 = Y_ORIGINE + CASE_X * TAILLE_CARRE
    # ...and end near the bottom-right corner.
    X_END_1 = X_START_1 + TAILLE_CARRE - 13
    Y_END_1 = Y_START_1 + TAILLE_CARRE - 5
    # Second diagonal: start near the top-right corner...
    X_START_2 = X_START_1 + TAILLE_CARRE - 10
    Y_START_2 = Y_START_1 + 5
    # ...and end near the bottom-left corner (same x as the first start).
    X_END_2 = X_START_1
    Y_END_2 = Y_START_2 + TAILLE_CARRE - 4
    # Draw both strokes of the cross.
    pygame.draw.line(SUPPORT, COULEUR,(X_START_1, Y_START_1),(X_END_1, Y_END_1), 4)
    pygame.draw.line(SUPPORT, COULEUR,(X_START_2, Y_START_2),(X_END_2, Y_END_2), 4)
#-------------------------------------------------------------------------------
# AFFICHAGE DES INDICATIONS
#-------------------------------------------------------------------------------
def Afficher_Indic_Gauche(SUPPORT, COULEUR, X_ORIGINE, Y_ORIGINE, NBR_CARRE_PETIT, NBR_CARRE_GRAND, TAILLE_POLICE, TAILLE_CARRE, MATRICE_GAUCHE, DIVISEUR):
    """Render the left hint grid's numbers, one value per cell.

    MATRICE_GAUCHE is indexed MATRICE_GAUCHE[Y, X] - presumably a numpy
    array (rows = grid rows, cols = hint columns); confirm against caller.
    TAILLE_CARRE//DIVISEUR is the text inset inside each cell.
    """
    font = pygame.font.Font(None, TAILLE_POLICE)
    # X walks the hint columns (left of the origin), Y walks the grid rows.
    for X in range(NBR_CARRE_PETIT):
        for Y in range(NBR_CARRE_GRAND):
            # Anchor shifted left of the origin by the full hint-grid width.
            X_START = ((X_ORIGINE - NBR_CARRE_PETIT * TAILLE_CARRE + 3) + X * TAILLE_CARRE + 3 + TAILLE_CARRE//DIVISEUR)
            Y_START = (Y_ORIGINE + Y * TAILLE_CARRE + 3) + TAILLE_CARRE//DIVISEUR
            # Render the value and blit it at the computed position.
            texte = font.render(str(MATRICE_GAUCHE[Y,X]), True, COULEUR)
            SUPPORT.blit(texte, (X_START,Y_START))
def Afficher_Indic_Haut(SUPPORT, COULEUR, X_ORIGINE, Y_ORIGINE, NBR_CARRE_PETIT, NBR_CARRE_GRAND, TAILLE_POLICE, TAILLE_CARRE, MATRICE_HAUT, DIVISEUR):
    """Render the top clue numbers of the grid onto SUPPORT.

    MATRICE_HAUT is indexed [row, column]; each cell's value is drawn at the
    matching square above the grid origin.
    """
    police = pygame.font.Font(None, TAILLE_POLICE)
    # Offset inside a square so the digit is not glued to the border.
    decalage = TAILLE_CARRE // DIVISEUR
    # Top edge of the clue area, NBR_CARRE_PETIT squares above the origin.
    bord_haut = Y_ORIGINE - NBR_CARRE_PETIT * TAILLE_CARRE + 3
    for colonne in range(NBR_CARRE_GRAND):
        for ligne in range(NBR_CARRE_PETIT):
            abscisse = X_ORIGINE + colonne * TAILLE_CARRE + 6 + decalage
            ordonnee = bord_haut + ligne * TAILLE_CARRE + 3 + decalage
            surface = police.render(str(MATRICE_HAUT[ligne, colonne]), True, COULEUR)
            SUPPORT.blit(surface, (abscisse, ordonnee))
| [
"fabien.simonet@etu.udamail.fr"
] | fabien.simonet@etu.udamail.fr |
dc37308651f34c0ea73ccde4cae542ae9a2859eb | 23d7bfd4dfea288e094e6103e6630e2fe7445fa1 | /Dash/Code/03_Graph_user_data.py | d764fab7ba6bda286e005eda8816ad63ec430f56 | [] | no_license | vectormars/Python-Practice | 3714c4b34c7de240152ef7131fbf2c8156d17980 | c1edc38c1bfeda3dbb2c1df16b87fa793c97fb5f | refs/heads/master | 2018-10-08T17:42:44.175903 | 2018-07-12T17:10:10 | 2018-07-12T17:10:10 | 116,074,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | import pandas_datareader.data as web
import datetime
import dash
import dash_core_components as dcc
import dash_html_components as html
# Dash application that downloads daily price data for one ticker via
# pandas-datareader (Morningstar backend) and renders the closing price
# as a line chart.
app = dash.Dash()
# Ticker symbol and date window to fetch.
stock = 'TSLA'
start = datetime.datetime(2015, 1, 1)
end = datetime.datetime(2018, 2, 8)
df = web.DataReader(stock, 'morningstar', start, end)
# DataReader returns a (Symbol, Date) MultiIndex; flatten it, re-index by
# Date only, then drop the now-redundant Symbol column.
df.reset_index(inplace=True)
df.set_index("Date", inplace=True)
df = df.drop("Symbol", axis=1)
# Page layout: a heading, a caption, and the price chart.
app.layout = html.Div(children=[
    html.H1(children='Whoa, a graph!'),
    html.Div(children='''
        Making a stock graph!.
    '''),
    dcc.Graph(
        id='example-graph',
        figure={
            'data': [
                {'x': df.index, 'y': df.Close, 'type': 'line', 'name': stock},
            ],
            'layout': {
                'title': stock
            }
        }
    )
])
if __name__ == '__main__':
    # debug=True enables the Dash dev tools and auto-reload.
    app.run_server(debug=True)
"noreply@github.com"
] | noreply@github.com |
4380bbd9ffaeebb0d795d325986d7ec51e35b7dc | 607856899400e859725d5656079d1778c1cf5860 | /Manager/models.py | 4cecec3a1829f0cfcf12ac16f24954cf95c447d0 | [] | no_license | guchenghao/Shopping-Online-System | d2020f24cf97b81f0e12c98616c75e0e23aac797 | 9e44fd95eb4ee56eb8f7e58dab2dff4653237896 | refs/heads/master | 2021-04-03T02:03:45.640999 | 2018-11-14T02:26:41 | 2018-11-14T02:26:41 | 124,623,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | # coding=utf8
from django.db import models
# Create your models here.
class Products(models.Model):
    """A product available in the shop, with pricing and stock information."""
    # Display name of the product.
    name = models.CharField(max_length=1000)
    # Free-form category label (e.g. food, electronics) — confirm against callers.
    productType = models.CharField(max_length=100)
    # Date the product was produced; optional.
    produceTime = models.DateField(null=True)
    # Product photo, stored under MEDIA_ROOT/goods_image/.
    picture = models.ImageField(upload_to="goods_image/")
    # Shelf life as an integer; presumably days — TODO confirm unit.
    expTime = models.IntegerField(null=True)
    # Unit price.
    price = models.FloatField()
    # Free-form product description.
    info = models.CharField(max_length=1000)
    # Date the product went (or goes) on sale.
    saleTime = models.DateField()
    # Units currently in stock.
    Inventory = models.IntegerField()
"1754926878@qq.com"
] | 1754926878@qq.com |
c03bbfd25eb5532e173f6b9f982d7620319b1f07 | 33b73dce48d5eaf2d8381cada87eac412c3f5ac8 | /create_movie.py | a98d821d19fb0b13b132464bf7e05c4db490dc8b | [] | no_license | rigvedepur/hiresimage-movie-creator | 90dbcf0c80b820688d7e5085e6b2c4ad30660784 | b0a6d56604bdf290a5bc01bf04cafc804c87f538 | refs/heads/master | 2023-07-23T11:02:34.085762 | 2021-12-27T06:57:46 | 2021-12-27T06:57:46 | 251,533,844 | 0 | 0 | null | 2023-07-06T21:57:34 | 2020-03-31T07:43:49 | Python | UTF-8 | Python | false | false | 2,075 | py | import cv2
import argparse
from skimage.io import imread
from progress.bar import IncrementalBar
# Build a panning movie from one very large still image: a screen-sized
# window slides across the image, each position becoming one video frame.
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=True, help="input filenname")
ap.add_argument("-r", "--resolution", required=True, help="output resolution", default='4K')
ap.add_argument("-s", "--step", required=False, help="step size", default=5)
ap.add_argument("-f", "--fps", required=True, help="fps", default=30)
ap.add_argument("-c", "--codec", required=False, help="codec", default='MP4V')
args = vars(ap.parse_args())
# Read input filename and shape
filename = args['input']
image = imread(filename)
(image_height, image_width) = image.shape[:2]
# Map the resolution label to a (width, height) in pixels; unknown labels
# fall back to 4K (same fallback the original if/elif chain used).
RESOLUTIONS = {
    '4K': (3840, 2160),
    '1080p': (1920, 1080),
    '720p': (1280, 720),
    '480p': (854, 480),
    '360p': (640, 360),
    '240p': (426, 240),
}
resolution = args['resolution']
size = RESOLUTIONS.get(resolution, (3840, 2160))
(screen_width, screen_height) = size
# Read output FPS
fps = int(args['fps'])
# Read step-size for panning in x-direction
step = int(args['step'])
# Map the codec to a container extension.  Defaulting to '.mp4' fixes a
# bug: previously file_ext was left undefined (NameError) for any codec
# other than DIVX or MP4V.
codec = args['codec']
file_ext = '.avi' if codec == 'DIVX' else '.mp4'
# Define output filename
output_filename = 'output_'+str(fps)+'-fps_'+resolution+'-resolution_'+str(step)+'-step'+file_ext
# Inspect ROI array by row (in Y) and by frame (in X)
out = cv2.VideoWriter(output_filename, cv2.VideoWriter_fourcc(*codec), fps, size)
total_frames = int((image_height//screen_height) * (image_width//step))
bar = IncrementalBar('Progress: ', max=total_frames)
# One row of frames per vertical band; within a band, slide right by
# `step` pixels per frame.
for j in [k*screen_height for k in range(image_height//screen_height)]:
    for i in [f*step for f in range(image_width//step)]:
        crop = image[j:j+screen_height, i:i+screen_width, :]
        # skimage loads RGB; OpenCV's writer expects BGR.
        crop = cv2.cvtColor(crop, cv2.COLOR_RGB2BGR)
        out.write(crop)
        bar.next()
out.release()
bar.finish()
print('Movie saved successfully')
| [
"16384911+rigvedepur@users.noreply.github.com"
] | 16384911+rigvedepur@users.noreply.github.com |
9d65d56ca05d0ede955d00276d9cdb11a8cc54f8 | 478388af7c362f63c4457e99711ab8cfd750d7f9 | /examples/swag_ksikka/configuration_bert.py | e9dd11adfebc2957e06ccced47b8ea84e5379e0b | [
"Apache-2.0"
] | permissive | karansikka1/transformers | 9084a81ef2c9b0c160cad02bffcb6a16381723c2 | 1bedc2face5376077e9fb27b6c20a8122d4d9f6e | refs/heads/master | 2022-04-21T01:55:11.486724 | 2020-04-19T23:32:21 | 2020-04-19T23:32:21 | 256,115,532 | 0 | 0 | Apache-2.0 | 2020-04-16T05:12:42 | 2020-04-16T05:12:41 | null | UTF-8 | Python | false | false | 8,655 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BERT model configuration """
import logging
from configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
# Checkpoint shortcut name -> URL of its pre-trained configuration JSON.
# Used by BertConfig.pretrained_config_archive_map below.
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json",
    "bert-large-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json",
    "bert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json",
    "bert-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json",
    "bert-base-multilingual-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json",
    "bert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json",
    "bert-base-chinese": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json",
    "bert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json",
    "bert-large-uncased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json",
    "bert-large-cased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json",
    "bert-large-uncased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json",
    "bert-large-cased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json",
    "bert-base-cased-finetuned-mrpc": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json",
    "bert-base-german-dbmdz-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-config.json",
    "bert-base-german-dbmdz-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-config.json",
    "bert-base-japanese": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-config.json",
    "bert-base-japanese-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-whole-word-masking-config.json",
    "bert-base-japanese-char": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-config.json",
    "bert-base-japanese-char-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-whole-word-masking-config.json",
    "bert-base-finnish-cased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-cased-v1/config.json",
    "bert-base-finnish-uncased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-uncased-v1/config.json",
    "bert-base-dutch-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/wietsedv/bert-base-dutch-cased/config.json",
}
class BertConfig(PretrainedConfig):
    r"""Configuration class for :class:`~transformers.BertModel`.

    Instantiating this configuration with no arguments yields the same
    settings as the ``bert-base-uncased`` architecture.  It inherits from
    :class:`~transformers.PretrainedConfig`, which handles serialization and
    generic model-output options; see that class for the inherited behavior.

    Args:
        vocab_size (:obj:`int`, optional, defaults to 30522):
            Size of the token vocabulary (range of valid ``inputs_ids``).
        hidden_size (:obj:`int`, optional, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (:obj:`int`, optional, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (:obj:`int`, optional, defaults to 12):
            Number of attention heads per encoder layer.
        intermediate_size (:obj:`int`, optional, defaults to 3072):
            Dimensionality of the feed-forward ("intermediate") layer.
        hidden_act (:obj:`str` or :obj:`function`, optional, defaults to "gelu"):
            Activation in encoder and pooler; one of "gelu", "relu",
            "swish", "gelu_new" when given as a string.
        hidden_dropout_prob (:obj:`float`, optional, defaults to 0.1):
            Dropout for fully connected layers in embeddings, encoder, pooler.
        attention_probs_dropout_prob (:obj:`float`, optional, defaults to 0.1):
            Dropout ratio applied to the attention probabilities.
        max_position_embeddings (:obj:`int`, optional, defaults to 512):
            Maximum sequence length the model might ever be used with.
        type_vocab_size (:obj:`int`, optional, defaults to 2):
            Vocabulary size of ``token_type_ids``.
        initializer_range (:obj:`float`, optional, defaults to 0.02):
            Stddev of the truncated-normal initializer for weight matrices.
        layer_norm_eps (:obj:`float`, optional, defaults to 1e-12):
            Epsilon used by the layer-normalization layers.

    Example::

        from transformers import BertModel, BertConfig

        configuration = BertConfig()       # bert-base-uncased style
        model = BertModel(configuration)
        configuration = model.config       # access the model configuration
    """

    # All known pre-trained checkpoint configurations for this architecture.
    pretrained_config_archive_map = BERT_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        **kwargs
    ):
        # PretrainedConfig consumes pad_token_id and any extra kwargs.
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # Embedding / encoder stack sizes.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        # Regularisation.
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        # Input limits, embedding types, and initialisation details.
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| [
"karan.sikka@sri.com"
] | karan.sikka@sri.com |
c61d403099fed6fbcb69b33fa047ee2d16e137e1 | 5eb52c07e5b1bd00af77306f927f382b684cd6ff | /indy_common/generates_request.py | b70eefee0dfbe79d777c07d732f89f1458602edb | [
"Apache-2.0"
] | permissive | hyperledger/indy-node | bce39486988f5114581cff4f6d14fc1b7684143c | e6bb87d4c605aff9914491d062248b6ec857334c | refs/heads/main | 2023-09-03T15:33:08.187153 | 2023-05-08T22:48:21 | 2023-05-08T22:48:21 | 77,021,566 | 691 | 783 | Apache-2.0 | 2023-05-09T15:42:43 | 2016-12-21T05:45:04 | Python | UTF-8 | Python | false | false | 328 | py | from abc import abstractmethod
class GeneratesRequest:
    """Mixin interface for objects that can produce a ledger Request."""
    # NOTE(review): the methods below are marked @abstractmethod but the class
    # does not inherit abc.ABC / use ABCMeta, so instantiation is not actually
    # blocked — confirm whether enforcement is intended.
    @abstractmethod
    def _op(self):
        # Abstract hook: presumably returns the raw operation payload used to
        # build the request — confirm against implementers.
        pass
    @abstractmethod
    def ledgerRequest(self):
        """
        Generates a Request object to be submitted to the ledger.
        :return: a Request to be submitted, or None if it shouldn't be written
        """
| [
"alexander.sherbakov@dsr-company.com"
] | alexander.sherbakov@dsr-company.com |
4a1363c06326737704a56ca87d0aee7cb9efc192 | 230352aa4775f328dd67d2a5db17a832dab4ad54 | /myblog/urls.py | d32cb7df012683355bd1897c8f910e4969bc7d01 | [] | no_license | light0011/Django_blog | 8e691cb4efc9549b8fb5f29166eabd2310089f88 | 54ec92ef7403ac0dc7d896f25d12950b12af1efb | refs/heads/master | 2021-06-16T20:18:06.627797 | 2017-05-24T02:28:53 | 2017-05-24T02:28:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | """myblog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# Root URL routing table for the project.
urlpatterns = [
    url(r'^admin/', admin.site.urls),           # Django admin site
    url(r'^blog/', include('blog.urls')),       # delegate to the blog app
    url(r'^blog2/', include('blog2.urls')),     # delegate to the blog2 app
]
| [
"li521YOUXINTWO"
] | li521YOUXINTWO |
dd8b87c4e3eb039651b484713ee069ae838bf750 | 28bea635167b3e0b99c3abf1236a5d6970d65d49 | /esgf2zarr/_version.py | 4d23300f6cbd39cbfe292238ba9c4635b656a65a | [
"Apache-2.0"
] | permissive | pangeo-data/esgf2xarray | 4531dbe0d1108d916cc3a00f807d9abe9e495aac | 6a5e4df0d329c2f23b403cbfbb65f0f1dfa98d52 | refs/heads/master | 2020-04-27T08:59:03.829876 | 2019-03-08T18:05:52 | 2019-03-08T18:05:52 | 174,194,614 | 4 | 1 | Apache-2.0 | 2019-03-08T18:06:26 | 2019-03-06T18:01:20 | Jupyter Notebook | UTF-8 | Python | false | false | 18,555 | py | # This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    # Return them under the short keys the rest of this module expects.
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""
    # Plain attribute bag: fields (VCS, style, tag_prefix, parentdir_prefix,
    # versionfile_source, verbose) are assigned dynamically by get_config().
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these strings are filled in when 'setup.py versioneer' creates
    # _version.py
    cfg = VersioneerConfig()
    for name, value in (
        ("VCS", "git"),
        ("style", "pep440"),
        ("tag_prefix", ""),
        ("parentdir_prefix", "xndframes-"),
        ("versionfile_source", "xndframes/_version.py"),
        ("verbose", False),
    ):
        setattr(cfg, name, value)
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""
    # Control-flow signal: each version-discovery strategy raises this so
    # get_versions() can fall through to the next strategy.
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        # Create the per-VCS table on first use, then register the handler.
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
    """Call the given command(s).

    Tries each executable name in `commands` in order and runs the first one
    that can be spawned with the argument list `args`.  Returns a
    (stdout, returncode) tuple; stdout is None when nothing could be run.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen(
                [c] + args,
                cwd=cwd,
                env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None),
            )
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # executable not found: try the next candidate name
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # for/else: the loop exhausted every candidate without a break.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        # Popen pipes yield bytes on Python 3; callers expect str.
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string; we also search up to two directory
    levels for an appropriately named parent directory.
    """
    tried = []
    current = root
    for _ in range(3):
        leaf = os.path.basename(current)
        if leaf.startswith(parentdir_prefix):
            # Strip the prefix: whatever remains is the version string.
            return {
                "version": leaf[len(parentdir_prefix) :],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(current)
        current = os.path.dirname(current)  # walk one level up
    if verbose:
        print(
            "Tried directories %s but none started with prefix %s"
            % (str(tried), parentdir_prefix)
        )
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Scans the file for the ``git_refnames``/``git_full``/``git_date``
    assignment lines and returns their quoted values under the keys
    "refnames", "full" and "date".  Missing/unreadable files yield a
    (possibly partial or empty) dict rather than an exception.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # "with" guarantees the handle is closed even if a read fails
        # mid-file (the original leaked the handle in that case).
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except EnvironmentError:
        # File absent or unreadable: return whatever was collected.
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI" (strict ISO-8601), but "%ci" has been around
        # since git-1.5.3 and discovering the git version is too difficult,
        # so edit the "ISO-8601-like" %ci output into a compliant form.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # git-archive never ran, so the keyword was never substituted.
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {r.strip() for r in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
    if not tags:
        # Either git < 1.8.3, or there really are no tags.  Heuristic:
        # assume all version tags contain a digit; this filters out branch
        # names like "release"/"stabilization" as well as "HEAD"/"master".
        tags = {r for r in refs if re.search(r"\d", r)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        r = ref[len(tag_prefix) :]
        if verbose:
            print("picking %s" % r)
        return {
            "version": r,
            "full-revisionid": keywords["full"].strip(),
            "dirty": False,
            "error": None,
            "date": date,
        }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {
        "version": "0+unknown",
        "full-revisionid": keywords["full"].strip(),
        "dirty": False,
        "error": "no suitable tags",
        "date": None,
    }
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys long/short/error/dirty/closest-tag/
    distance/date consumed by the render_* functions.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # shell=False on Windows needs the actual executable names.
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(
        GITS,
        [
            "describe",
            "--tags",
            "--dirty",
            "--always",
            "--long",
            "--match",
            "%s*" % tag_prefix,
        ],
        cwd=root,
    )
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[: git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = (
                "unable to parse git-describe output: \
'%s'"
                % describe_out
            )
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
                full_tag,
                tag_prefix,
            )
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix) :]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
        0
    ].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a ."""
    # A tag that already carries a local-version segment ("+") continues
    # with "."; otherwise "+" starts the local-version segment.
    return "." if "+" in pieces.get("closest-tag", "") else "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag at all
        out = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            out += ".dirty"
        return out
    out = tag
    if pieces["distance"] or pieces["dirty"]:
        # the tag itself may already contain a "+" local-version segment
        out += "." if "+" in tag else "+"
        out += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            out += ".dirty"
    return out
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return tag + ".post.dev%d" % pieces["distance"]
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        out = tag
        if pieces["distance"] or pieces["dirty"]:
            out += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                out += ".dev0"
            # the tag itself may already contain a "+" local-version segment
            out += ("." if "+" in tag else "+") + "g%s" % pieces["short"]
        return out
    # exception #1: no tag
    out = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        out += ".dev0"
    return out + "+g%s" % pieces["short"]
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Eexceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    # A clean, exactly-tagged checkout renders as just the tag.
    if tag and not (pieces["distance"] or pieces["dirty"]):
        return tag
    out = (tag if tag else "0") + ".post%d" % pieces["distance"]
    if pieces["dirty"]:
        out += ".dev0"
    return out
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        out = tag
        if pieces["distance"]:
            out += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1: fall back to the bare short hex id
        out = pieces["short"]
    if pieces["dirty"]:
        out += "-dirty"
    return out
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        out = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1: fall back to the bare short hex id
        out = pieces["short"]
    if pieces["dirty"]:
        out += "-dirty"
    return out
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # An earlier stage failed to parse git output; surface the error.
        return {
            "version": "unknown",
            "full-revisionid": pieces.get("long"),
            "dirty": None,
            "error": pieces["error"],
            "date": None,
        }
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {
        "version": rendered,
        "full-revisionid": pieces["long"],
        "dirty": pieces["dirty"],
        "error": None,
        "date": pieces.get("date"),
    }
def get_versions():
    """Get version information or return default if unable to do so.

    Tries, in order: expanded git-archive keywords, a live `git describe`
    on the checked-out tree, and finally the parent-directory name.  Each
    strategy signals failure by raising NotThisMethod.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split("/"):
            root = os.path.dirname(root)
    except NameError:
        # no __file__ at all (frozen interpreter): nothing more we can do
        return {
            "version": "0+unknown",
            "full-revisionid": None,
            "dirty": None,
            "error": "unable to find root of source tree",
            "date": None,
        }
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    # every strategy failed: report an explicitly unknown version
    return {
        "version": "0+unknown",
        "full-revisionid": None,
        "dirty": None,
        "error": "unable to compute version",
        "date": None,
    }
"axbanihirwe@ualr.edu"
] | axbanihirwe@ualr.edu |
4c3f9df27fc36bfcb1b7fbff4458f02b1a830a73 | 4e9150d3a59a0a97fd6b3df1dc383dcdd9c09603 | /views.py | 3b33b33f79f619eef2c37625d344ba3483f01ca9 | [] | no_license | Demon702/Bodhitree-RnD | 126271f7d3a4ba951169cb9c1d340a91bcd02b89 | 2eceed1eed4a2b6ea70cadde029600308c961638 | refs/heads/master | 2020-05-20T14:27:24.566777 | 2019-05-08T13:55:31 | 2019-05-08T13:55:31 | 185,621,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215,010 | py | """
Created on May 20, 2013
@author: aryaveer
Views for assignment module
"""
import copy
import csv
import datetime as DateTime
import json
import mimetypes
import os
import pickle
import re
import shutil
import tarfile
import tempfile
import zipfile
from ast import literal_eval as make_tuple
from copy import deepcopy
from datetime import datetime, timedelta
from wsgiref.util import FileWrapper
import pytz
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.core.files import File
from django.core.files.storage import default_storage
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.db import transaction
from django.db.models import Max
from django.forms import model_to_dict
from django.http import Http404, HttpResponse, HttpResponseForbidden, HttpResponseNotFound, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.utils import timezone
from django.utils.encoding import smart_str
from django.utils.safestring import mark_safe
# from eventlog.models import log
from formtools.wizard.views import SessionWizardView
from netaddr import iter_iprange
from rest_framework import status
from rest_framework.response import Response
from assignments.assignments_utils import create_output
from assignments.assignments_utils.check_dup_submission import check, create_upload
from assignments.forms import (AssignmentForm, AssignmentImportForm, AssignmentMetaForm, CheckerCodeForm,
CreateTestcaseArray, CreateTestcaseMatrix, CreateTestcaseNumber, ProgramFormCandE,
ProgramFormCNotE, ProgramFormE, SafeExecForm, bulk_add_check, check_allocation_doc)
from assignments.models import (Assignment, AssignmentErrors, AssignmentImport, AssignmentScript, Checker,
CheckerErrors, Program, ProgramErrors, SafeExec, TA_allocation, Testcase,
TestcaseErrors)
from assignments.serializers import AssignmentDetailsSerializer
from assignments.tasks import delete_redundant_files
from courseware.models import Course, CourseHistory, CourseInfo
# from courseware.views import course
from elearning_academy.celeryapp import app
from elearning_academy.decorators import is_moderator_check
from elearning_academy.permissions import get_mode
from evaluate.models import AssignmentResults, ProgramResults, TestcaseResult
from evaluate.tasks import evaluate_assignment
from evaluate.utils.checkOutput import CustomTestcase
from evaluate.utils.evaluator import Results
from exam.models import Pausetimer, Proctoring
from exam.views import validateuser
from report_grading_app.helper import is_valid_ip
from upload.forms import UploadForm
from upload.models import LatestSubmission, ServerSavedSubmission, Upload
from util.core import local2utc
from utils.archives import Archive, extract_or_copy, get_file_name_list, read_file
from utils.filetypes import (README_LINKS, get_compilation_command, get_compiler_name, get_execution_command,
get_extension, get_interpreter_name, is_valid, language_category)
def isCourseCreator(course, user):
    """Return True when ``user`` has an ``is_owner`` history row for ``course``.

    Any lookup failure (no CourseHistory row, anonymous user, ...) is
    treated as "not the creator" and yields False.
    """
    try:
        return CourseHistory.objects.get(course_id=course, user_id=user.id).is_owner
    except Exception:
        return False
def iscourse_owner(course, user):
    '''function returns true if user is owner of the course'''
    # NOTE(review): despite the name this returns ``is_creator`` (the
    # sibling helpers isCourseCreator/isCourseModerator check ``is_owner``)
    # and, unlike them, it raises CourseHistory.DoesNotExist instead of
    # returning False when the user has no history row — confirm callers
    # rely on this behavior before unifying.
    course_history = CourseHistory.objects.get(
        course_id=course, user_id=user.id)
    return course_history.is_creator
def isCourseModerator(course, user):
    """Return True when ``user`` is an owner or moderator of ``course``.

    Lookup failures (no history row, anonymous user, ...) are swallowed
    and reported as False.
    """
    try:
        record = CourseHistory.objects.get(course_id=course, user_id=user.id)
    except Exception:
        return False
    return record.is_owner or record.is_moderator
def isEnrolledInCourse(course, user):
    """Return True when ``user`` has any CourseHistory row for ``course``.

    Uses ``QuerySet.exists()`` instead of the original ``len(filter(...)) > 0``,
    which materialized every matching row just to count them.  The
    try/except is kept only because evaluating ``user.id`` can raise for
    unusual user objects; ``filter`` itself never raises for missing rows.
    """
    try:
        return CourseHistory.objects.filter(
            course_id=course, user_id=user.id).exists()
    except Exception:
        return False
def club_output_testfile(testcases, new_testcase, program_dir, io_filename_parts, each_testfile):
    """
    Clubs an input testfile with its matching output file.

    Scans the section folder for a file named ``<id>_out_<name>`` matching
    the identifier of the input file just processed.  If found, the output
    file is moved into its own folder, tarred, attached to the testcase as
    ``output_files`` and the temporaries are cleaned up.  Duplicate input
    files with the same identifier are deleted.

    Args:
        testcases: list of file names in the section folder
        new_testcase: Testcase instance being populated
        program_dir: absolute path of the section folder
        io_filename_parts: ``<id>_<inp|out>_<name>`` parts of the input file
        each_testfile: name of the input file being processed

    Returns:
        The same ``new_testcase`` with output fields filled (or dummied).
    """
    out_file_found = False
    for each_testfile_again in testcases:
        # Skip directories, the build files, and the file itself.
        if os.path.isfile(program_dir + '/' + each_testfile_again) and not \
                (each_testfile_again in ['makefile', 'Makefile', 'source.tar.gz'] and
                 each_testfile == each_testfile_again):
            if not out_file_found:
                io_filename_parts_again = each_testfile_again.split('_')
                # Same identifier + 'out' marker => this is the matching output.
                if io_filename_parts[0] == io_filename_parts_again[0] and io_filename_parts_again[1] == 'out':
                    out_file_found = True
                    base2 = os.path.basename(
                        program_dir + '/' + each_testfile_again)
                    io_dir2 = program_dir + '/' + os.path.splitext(base2)[0]
                    if not os.path.exists(io_dir2):
                        os.makedirs(io_dir2)
                    # Move the output file into its own folder so the tar
                    # contains a single directory.
                    shutil.move(program_dir + '/' + each_testfile_again,
                                io_dir2 + '/' + each_testfile_again)
                    io_dir2_tar = io_dir2 + '.tar.gz'
                    # creating a tarfile for testcase file
                    with tarfile.open(io_dir2_tar, "w:gz") as tar:
                        tar.add(io_dir2, arcname=os.path.basename(io_dir2))
                    # NOTE: ``file()`` is the Python 2 builtin (alias of open).
                    io_dir2_tarfile = file(io_dir2_tar)
                    new_testcase.output_files = File(io_dir2_tarfile)
                    # The tar content is now owned by Django storage; drop
                    # the on-disk temporaries.
                    shutil.rmtree(io_dir2)
                    os.remove(io_dir2_tar)
                    new_testcase.std_out_file_name = each_testfile_again
                    if new_testcase.name == '':
                        # Third filename part is the human-readable name.
                        new_testcase.name = (
                            io_dir2.split('/')[-1]).split('_')[2]
                # to remove extra input files with same identifier
                elif io_filename_parts[0] == io_filename_parts_again[0] and io_filename_parts_again[1] == 'inp':
                    os.remove(program_dir + '/' + each_testfile_again)
        else: # to put some dummy values
            # NOTE(review): this else runs for every directory/build-file
            # entry and resets the output fields even after a match was
            # found earlier in the loop — confirm this is intended.
            new_testcase.output_files = None
            new_testcase.std_out_file_name = ''
    return new_testcase
def club_input_testfile(testcases, new_testcase, program_dir, io_filename_parts, each_testfile):
    """
    Clubs an output testfile with its matching input file.

    Mirror image of ``club_output_testfile``: scans the section folder for
    a file named ``<id>_inp_<name>`` matching the identifier of the output
    file just processed, tars it, attaches it as ``input_files`` and
    removes duplicate output files with the same identifier.

    Args:
        testcases: list of file names in the section folder
        new_testcase: Testcase instance being populated
        program_dir: absolute path of the section folder
        io_filename_parts: ``<id>_<inp|out>_<name>`` parts of the output file
        each_testfile: name of the output file being processed

    Returns:
        The same ``new_testcase`` with input fields filled (or dummied).
    """
    inp_file_found = False
    for each_testfile_again in testcases:
        # Skip directories, the build files, and the file itself.
        if os.path.isfile(program_dir + '/' + each_testfile_again) and not \
                (each_testfile_again in ['makefile', 'Makefile', 'source.tar.gz'] and
                 each_testfile == each_testfile_again):
            if not inp_file_found:
                io_filename_parts_again = each_testfile_again.split('_')
                # Same identifier + 'inp' marker => this is the matching input.
                if io_filename_parts[0] == io_filename_parts_again[0] and io_filename_parts_again[1] == 'inp':
                    inp_file_found = True
                    base2 = os.path.basename(
                        program_dir + '/' + each_testfile_again)
                    io_dir2 = program_dir + '/' + os.path.splitext(base2)[0]
                    if not os.path.exists(io_dir2):
                        os.makedirs(io_dir2)
                    # Move the input file into its own folder so the tar
                    # contains a single directory.
                    shutil.move(program_dir + '/' + each_testfile_again,
                                io_dir2 + '/' + each_testfile_again)
                    io_dir2_tar = io_dir2 + '.tar.gz'
                    # creating a tarfile for testcase file
                    with tarfile.open(io_dir2_tar, "w:gz") as tar:
                        tar.add(io_dir2, arcname=os.path.basename(io_dir2))
                    # NOTE: ``file()`` is the Python 2 builtin (alias of open).
                    io_dir2_tarfile = file(io_dir2_tar)
                    new_testcase.input_files = File(io_dir2_tarfile)
                    # The tar content is now owned by Django storage; drop
                    # the on-disk temporaries.
                    shutil.rmtree(io_dir2)
                    os.remove(io_dir2_tar)
                    new_testcase.std_in_file_name = each_testfile_again
                    if new_testcase.name == '':
                        # Third filename part is the human-readable name.
                        new_testcase.name = (
                            io_dir2.split('/')[-1]).split('_')[2]
                elif io_filename_parts[0] == io_filename_parts_again[0] and io_filename_parts_again[1] == 'out':
                    # to remove extra output files with the same identifier
                    os.remove(program_dir + '/' + each_testfile_again)
        else: # to put some dummy values
            # NOTE(review): this else runs for every directory/build-file
            # entry and resets the input fields even after a match was
            # found earlier in the loop — confirm this is intended.
            new_testcase.input_files = None
            new_testcase.std_in_file_name = ''
    return new_testcase
def adding_testcases(testcases, program_dir, new_program):
    """
    Bulk-creates Testcase rows for a section from files on disk.

    Every regular file named ``<id>_<inp|out>_<name>`` becomes one
    Testcase: the file is tarred and stored via Django's File field, and
    the matching counterpart (output for an input, input for an output)
    is clubbed in via ``club_output_testfile``/``club_input_testfile``.

    Args:
        testcases: list of file names in the section folder
        program_dir: absolute path of the section folder
        new_program: Program the new testcases belong to
    """
    for each_testfile in testcases:
        # Only regular files; skip build artifacts.
        if os.path.isfile(program_dir + '/' + each_testfile) and \
                (each_testfile not in ['makefile', 'Makefile', 'source.tar.gz']):
            io_file = program_dir + '/' + each_testfile # indicates the input/output files
            io_filename_parts = each_testfile.split('_')
            # Only the ``<id>_<inp|out>_<name>`` naming scheme is accepted.
            if len(io_filename_parts) == 3:
                new_testcase = Testcase()
                new_testcase.program = new_program
                new_testcase.description = ''
                io_dir = program_dir + '/' + \
                    os.path.splitext(os.path.basename(io_file))[0]
                if not os.path.exists(io_dir):
                    os.makedirs(io_dir)
                # Move the file into its own folder so the tar has a
                # single-directory layout.
                shutil.move(io_file, io_dir + '/' + each_testfile)
                io_dir_tar = io_dir + '.tar.gz'
                # creating a tarfile for the testcase file
                with tarfile.open(io_dir_tar, "w:gz") as tar:
                    tar.add(io_dir, arcname=os.path.basename(io_dir))
                # NOTE: ``file()`` is the Python 2 builtin (alias of open).
                io_dir_tarfile = file(io_dir_tar)
                if io_filename_parts[1] == "inp":
                    new_testcase.name = (io_dir.split('/')[-1]).split('_')[2]
                    new_testcase.input_files = File(io_dir_tarfile)
                    shutil.rmtree(io_dir)
                    os.remove(io_dir_tar)
                    new_testcase.std_in_file_name = each_testfile
                    # for every input file, checking if there is a corresponding output file
                    new_testcase = club_output_testfile(testcases, new_testcase, program_dir, io_filename_parts,
                                                        each_testfile)
                elif io_filename_parts[1] == 'out':
                    new_testcase.name = (io_dir.split('/')[-1]).split('_')[2]
                    new_testcase.output_files = File(io_dir_tarfile)
                    shutil.rmtree(io_dir)
                    os.remove(io_dir_tar)
                    new_testcase.std_out_file_name = each_testfile
                    # for every output file, checking if there is a corresponding input file
                    new_testcase = club_input_testfile(testcases, new_testcase, program_dir, io_filename_parts,
                                                       each_testfile)
                if new_testcase.name == '': # to put some dummy name when no name is provided
                    new_testcase.name = 'testcase' + io_filename_parts[0]
                new_testcase.progam_line_args = ''
                new_testcase.marks = 0
                new_testcase.save()
def get_compiler_and_exceution_command(new_program, new_assignment):
    """Populate compiler/execution commands on ``new_program``.

    Commands are copied from an existing sibling Program of the same
    assignment when one exists; otherwise language defaults are used.
    Category 0 languages (compiled) get only a compiler command,
    category 1 (compiled + interpreted) get both, category 2
    (interpreted) only an execution command.  Commands are stored as
    pickled ``[command, '', '']`` triples, matching the model format.

    Args:
        new_program: Program instance being populated.
        new_assignment: Assignment the program belongs to.

    Returns:
        The same ``new_program`` with command fields set.
    """
    assignment = get_object_or_404(Assignment, pk=new_assignment.id)
    siblings = Program.objects.filter(assignment=assignment)
    category = language_category(assignment.program_language)

    if siblings:
        # Reuse whatever commands the first existing section was given.
        template = siblings[0]
        if category in (0, 1):
            compiler = pickle.loads(template.compiler_command)[0]
            new_program.compiler_command = pickle.dumps([compiler, '', ''])
        if category in (1, 2):
            executor = pickle.loads(template.execution_command)[0]
            new_program.execution_command = pickle.dumps([executor, '', ''])
    else:
        # No sibling sections yet: fall back to the language defaults.
        if category in (0, 1):
            new_program.compiler_command = pickle.dumps(
                [get_compiler_name(assignment.program_language), '', ''])
        if category in (1, 2):
            new_program.execution_command = pickle.dumps(
                [get_interpreter_name(assignment.program_language), '', ''])
    return new_program
def add_source_and_makefile(program_dir, new_program):
    """
    Attaches a section's source folder and makefile to ``new_program``.

    If ``<program_dir>/source`` exists it is tarred, stored on the
    program's ``program_files`` field, and the on-disk copies are
    removed.  A ``makefile`` (or ``Makefile``) in the section folder is
    stored on the ``makefile`` field the same way; the lowercase name
    wins when both exist.

    Args:
        program_dir: absolute path of the section folder
        new_program: Program instance being populated

    Returns:
        The same ``new_program`` with file fields set.
    """
    temp_src = program_dir + '/source'
    temp_makefile = program_dir + '/makefile'
    temp_makefile2 = program_dir + '/Makefile'
    if os.path.isdir(temp_src):
        src_tar = program_dir + '/source.tar.gz'
        with tarfile.open(src_tar, "w:gz") as tar:
            tar.add(temp_src, arcname=os.path.basename(temp_src))
        # NOTE: ``file()`` is the Python 2 builtin (alias of open).
        src_tarfile = file(src_tar)
        new_program.program_files = File(src_tarfile)
        # Tar content is now owned by Django storage; drop temporaries.
        shutil.rmtree(temp_src)
        os.remove(src_tar)
    if os.path.isfile(temp_makefile):
        make_file = file(temp_makefile)
        new_program.makefile = File(make_file)
        os.remove(temp_makefile)
    elif os.path.isfile(temp_makefile2):
        make_file = file(temp_makefile2)
        new_program.makefile = File(make_file)
        os.remove(temp_makefile2)
    return new_program
def section_bulk_add(new_assignment):
    """
    Bulk-creates Program sections (and their testcases) from an uploaded
    archive attached to ``new_assignment.bulk_add``.

    Expected archive layout::

        <assignment.name>/
            Evaluate/<section>/{source/, makefile, <id>_<inp|out>_<name> files}
            Practice/<section>/...

    Each section folder becomes one Program; extensionless testcase
    files get a ``.txt`` suffix before being handed to
    ``adding_testcases``.  The extracted tree is removed afterwards.

    Args:
        new_assignment: Assignment whose ``bulk_add`` archive to expand.
    """
    # in the variable temp_dir[num],
    # num indicates the level at which we are in the assignment folder uploaded
    # extracting the uploaded file
    temp_file2 = new_assignment.bulk_add
    temp_file = os.path.join(settings.MEDIA_ROOT, str(temp_file2))
    temp_dirarr = temp_file.split("/")
    level = 0
    temp_dir = ''
    # Keep only the first five path components — the directory that
    # contains the uploaded archive.
    for each_dir_element in temp_dirarr:
        if level <= 4:
            temp_dir = temp_dir + each_dir_element + '/'
        level += 1
    # temp_dir indicates the folder in which assignment resides
    temp_dir = temp_dir[:-1]
    extract_or_copy(src=temp_file, dest=temp_dir)
    # distributing the sections of the files
    # temp_dir now indicates the assignment folder
    temp_dir = temp_dir + '/' + new_assignment.name
    # Pass 0 handles Evaluate/, pass 1 handles Practice/.
    for index1 in xrange(0, 2):
        if index1 == 0:
            temp_dir2 = temp_dir + '/Evaluate/'
        else:
            temp_dir2 = temp_dir + '/Practice/'
        if os.path.isdir(temp_dir2):
            sections = os.listdir(temp_dir2)
            for each_section in sections:
                new_program = Program()
                new_program.assignment = new_assignment
                temp_dir3 = temp_dir2 + each_section
                new_program = add_source_and_makefile(temp_dir3, new_program)
                new_program = get_compiler_and_exceution_command(
                    new_program, new_assignment)
                new_program.name = each_section
                if temp_dir2.endswith('/Evaluate/'):
                    new_program.program_type = 'Evaluate'
                elif temp_dir2.endswith('/Practice/'):
                    new_program.program_type = 'Practice'
                new_program.description = ''
                new_program.is_sane = True
                new_program.language = new_assignment.program_language
                new_program.solution_ready = True
                new_program.save()
                if os.path.isdir(temp_dir3):
                    # Give extensionless testcase files a .txt suffix so
                    # the downstream tooling recognizes them.
                    for each_testfile in os.listdir(temp_dir3):
                        if each_testfile not in ['makefile', 'Makefile', 'source.tar.gz'] and \
                                os.path.splitext(each_testfile)[-1].lower() == "":
                            os.rename(temp_dir3 + "/" + each_testfile,
                                      temp_dir3 + "/" + each_testfile + ".txt")
                    # distributing the test cases of each section
                    testcases = os.listdir(temp_dir3)
                    adding_testcases(testcases, temp_dir3, new_program)
    # removing the extracted folder
    shutil.rmtree(temp_dir)
def testcase_bulk_add(new_program):
    """
    Bulk-adds testcases to an existing section from an uploaded archive
    attached to ``new_program.testcase_bulk_add``.

    The archive is expected to contain a ``<program.name>/`` folder with
    ``<id>_<inp|out>_<name>`` testcase files, which are handed to
    ``adding_testcases``.  The extracted folder is removed afterwards.

    Args:
        new_program: Program (section) receiving the testcases.
    """
    # extracting the uploaded file
    temp_file2 = new_program.testcase_bulk_add
    temp_file = os.path.join(settings.MEDIA_ROOT, str(temp_file2))
    temp_dirarr = temp_file.split("/")
    level = 0
    temp_dir = ''
    # Keep only the first five path components — the directory that
    # contains the uploaded archive.
    for each_dirname in temp_dirarr:
        if level <= 4:
            temp_dir = temp_dir + each_dirname + '/'
        level += 1
    temp_dir = temp_dir[:-1]
    extract_or_copy(src=temp_file, dest=temp_dir)
    # distributing the testcases of the section
    temp_dir2 = temp_dir + '/' + new_program.name
    testcases = os.listdir(temp_dir2)
    adding_testcases(testcases, temp_dir2, new_program)
    # deleting the extracted folder
    shutil.rmtree(temp_dir2)
@login_required
def index(request, courseID):
    """List the assignments of the course identified by ``courseID``.

    Staff (owner/creator/moderator) see every non-trashed assignment;
    enrolled students only see assignments that are published and not
    hidden.  Non-members get a 403.
    """
    course = get_object_or_404(Course, pk=courseID)
    visible = Assignment.objects.filter(
        course=course, trash=False).order_by('-deadline')
    # Membership gate: only users with a CourseHistory row may view.
    if CourseHistory.objects.filter(course_id=course, user_id=request.user.id).count() == 0:
        return HttpResponseForbidden("Forbidden 403")
    course_history = CourseHistory.objects.get(
        course_id=course, user_id=request.user.id)
    course_info = CourseInfo.objects.get(pk=course.course_info_id)
    is_creator = isCourseCreator(course, request.user)
    is_moderator = isCourseModerator(course, request.user)
    mode = get_mode(request)
    number_of_students = 0
    if is_moderator or is_creator:
        # Staff: everything, and no "leave course" button.
        assignments = visible
        leave_course = False
    else:
        # Students: only published, non-hidden assignments.
        assignments = [
            a for a in visible
            if not a.hide and (timezone.now() > a.publish_on if a.publish_on else False)
        ]
        leave_course = True
    context = {
        'assignments': assignments, 'mode': mode, 'is_moderator': is_moderator,
        'course_info': course_info, 'date_time': timezone.now(),
        'course': course, 'leave_course': bool(leave_course),
        'number_of_students': number_of_students, 'course_history': course_history,
    }
    return render_to_response(
        'assignments/index.html', context,
        context_instance=RequestContext(request))
@login_required
def deleteSubmission(request, uploadID):
    """Delete a submission: its DB record and its files on disk.

    Only the owner of the upload may delete it.  The upload's directory
    (the parent folder of the stored file under MEDIA_ROOT) is removed;
    a missing directory is now tolerated instead of raising an OSError
    after the DB record is already gone, which previously left the view
    returning a 500 on a half-deleted submission.
    """
    upload = get_object_or_404(Upload, pk=uploadID)
    if not request.user == upload.owner:
        return HttpResponseForbidden("Forbidden 403")
    assignmentID = upload.assignment.id
    # Resolve the submission directory before the record disappears.
    filepath = os.path.join(settings.MEDIA_ROOT, str(upload.filePath))
    filepath = filepath.rsplit('/', 1)[0]
    upload.delete()
    if os.path.isdir(filepath):
        shutil.rmtree(filepath)
    return HttpResponseRedirect(reverse('assignments_details', kwargs={'assignmentID': assignmentID}))
@login_required
def get_ta_allocation(request, assignmentID):
    """Map each TA's username to the usernames of students allocated to them.

    Returns an empty dict when the requesting user is not a moderator of
    the assignment's course.  Uses ``dict.setdefault`` instead of the
    original ``key in dict.keys()`` membership test, which scanned the
    key list on every row.
    """
    assignment = get_object_or_404(Assignment, pk=assignmentID)
    ta_list = {}
    if isCourseModerator(assignment.course, request.user):
        for allocation in TA_allocation.objects.filter(assignment=assignment):
            ta_list.setdefault(allocation.assistant.username, []).append(
                allocation.student.username)
    return ta_list
@login_required
def detailsAssignment(request, assignmentID):
    '''
    Display the assignment detail page and accept submissions.

    Handles, in order: enrollment/visibility/permission gates, deadline
    and remaining-time computation, IP-range restriction for exams,
    error-report collection, exam proctoring (per-student timers via
    Proctoring/Pausetimer), POST submission handling, and finally
    rendering the details template.
    '''
    number_of_submissions = 0
    allowed_exam_status = True
    submission_allowed = None  # New initialize
    is_due = None  # New initialize
    rem_time = None
    assignment = get_object_or_404(Assignment, pk=assignmentID)
    # Enrollment gate: non-members are bounced to the course list.
    if not isEnrolledInCourse(assignment.course, request.user):
        return HttpResponseRedirect("/courseware/courseslist/")
    course = assignment.course
    hide_val = assignment.hide
    is_moderator = isCourseModerator(course, request.user)
    trash_val = assignment.trash
    # Trashed assignments are invisible to everyone; hidden ones only to
    # non-moderators.
    if trash_val or (not is_moderator and hide_val):
        raise PermissionDenied
    is_creator = isCourseCreator(course, request.user)
    is_moderator = isCourseModerator(course, request.user)
    mode = get_mode(request)
    formData = AssignmentForm(initial=model_to_dict(
        assignment), courseID=assignment.course.id)
    # Unpublished/hidden assignments are staff-only.
    if (timezone.now() < assignment.publish_on if assignment.publish_on else True) or assignment.hide:
        if not is_moderator and not is_creator:
            raise PermissionDenied
    # changes by jitendra
    deadline = get_assignment_deadline(request, assignment)
    # NOTE(review): raw SQL built by string concatenation; assignmentID
    # comes from the URLconf (digits only presumably) — confirm the URL
    # pattern restricts it before relying on this.
    que = 'SELECT DISTINCT submission_id as id FROM upload_latestsubmission WHERE assignment_id =' + assignmentID + ';'
    number_of_submissions = LatestSubmission.objects.raw(que)
    number_of_submissions = len(list(number_of_submissions))
    # Remaining time: wall-clock for labs, per-student timer for exams.
    if assignment.type_of_lab == "Lab":
        rem_time = int(
            (assignment.deadline - datetime.now(pytz.timezone('UTC'))).total_seconds())
    elif assignment.type_of_lab == "Exam":
        (rem_time, _, allowed_exam_status) = get_object_from_proctoring(
            assignment.exam_group_id, request.user)
    # IP restriction: the client must fall inside one of the configured
    # addresses/ranges (comma-separated, dash-delimited ranges).
    ipaddress = assignment.ipaddress
    ip_correct = False
    try:
        ipadd = request.META["HTTP_X_FORWARDED_FOR"]
    except KeyError:
        ipadd = request.META.get('REMOTE_ADDR')
    if ipaddress:
        ip_allowed = []
        list1 = ipaddress.split(",")
        for ip_add in list1:
            match = re.search("-", ip_add)
            if match:
                list2 = ip_add.split("-")
                ip_list = list(iter_iprange(list2[0], list2[1]))
                for ip2 in ip_list:
                    ip_allowed.append(str(ip2))
            else:
                ip_allowed.append(ip_add)
        if ipadd in ip_allowed:
            ip_correct = True
    else:
        # No restriction configured: every client is acceptable.
        ip_correct = True
    has_joined = CourseHistory.objects.filter(
        course_id=course, user_id=request.user.id)
    if assignment.deadline is not None:
        submission_allowed = (
            timezone.now() <= assignment.deadline) and bool(has_joined)
        is_due = (timezone.now() >= assignment.deadline) and bool(has_joined)
    # Collect program/testcase error reports recorded for this assignment.
    perror_ctype = ContentType.objects.get_for_model(ProgramErrors)
    terror_ctype = ContentType.objects.get_for_model(TestcaseErrors)
    program_errors = []
    test_errors = []
    for error in AssignmentErrors.objects.filter(assignment=assignment, content_type=terror_ctype):
        test_errors.extend(TestcaseErrors.objects.filter(pk=error.object_id))
    for error in AssignmentErrors.objects.filter(assignment=assignment, content_type=perror_ctype):
        program_errors.extend(ProgramErrors.objects.filter(pk=error.object_id))
    course = assignment.course
    programs = Program.objects.filter(assignment=assignment)
    evaluate_program = [
        a_program for a_program in programs if a_program.program_type == "Evaluate"]
    practice_program = [
        a_program for a_program in programs if a_program.program_type == "Practice"]
    programs_with_errors = []
    for aprogram in programs:
        if not aprogram.is_sane:
            try:
                p_error = ProgramErrors.objects.get(program=aprogram)
                programs_with_errors.append(p_error)
            except ProgramErrors.DoesNotExist:
                p_error = None
    submittedFiles = [s.submission for s in LatestSubmission.objects.filter(
        owner=request.user, assignment=assignment)]
    if submittedFiles:
        best_submission = submittedFiles[0]
    else:
        best_submission = ""
    program_not_ready = False
    disable_grading = False
    # NOTE(review): relies on `and` binding tighter than `or` — i.e.
    # (errors) or (not allowed and has deadline). Confirm intended.
    if programs_with_errors or submission_allowed is False and assignment.deadline is not None:
        program_not_ready = True
    if submittedFiles and submittedFiles[0].is_stale:
        disable_grading = True
    # Sidebar assignment list (owners see everything).
    all_assignments = Assignment.objects.filter(
        course=course).filter(trash=False).order_by('-deadline')
    courseHistory = CourseHistory.objects.get(user=request.user, course=course)
    if courseHistory.is_owner:
        assignments = all_assignments
    else:
        assignments = [a for a in all_assignments if(not a.hide and
                       (timezone.now() > a.publish_on if a.publish_on else False))]
    total_sumissions = Upload.objects.filter(assignment=assignment).count()
    isSubmitted = Upload.objects.filter(assignment=assignment).count() > 0
    get_params = {'source': 'assignment', 'id': assignmentID}
    # Exam proctoring: decide whether the user may see/take the exam and
    # how much personal time is left on their timer.
    allowed_exam = False
    timer = timedelta(seconds=0)
    if is_moderator:
        allowed_exam = True
    elif assignment.type_of_lab != 'Exam' or assignment.deadline < timezone.now():
        allowed_exam = True
    elif not ip_correct:
        submission_allowed = False
    elif assignment.type_of_lab == 'Exam' and ip_correct and submission_allowed and not is_moderator:
        oldproc = Proctoring.objects.filter(
            owner=request.user,
            key=assignment.exam_group_id
        )
        if oldproc:
            # Returning student: validate and restore the saved timer.
            # print "Curr time :",datetime.utcnow()
            correct = validateuser(request, assignment)
            if correct:
                try:
                    obj = Proctoring.objects.get(
                        owner=request.user, key=assignment.exam_group_id)
                    timer = obj.time
                except Proctoring.DoesNotExist:
                    timer = timedelta(seconds=0)
                allowed_exam = True
            else:
                allowed_exam = False
        else:
            # First visit: allot time, shortened for late starters.
            elapsed_time = timezone.now() - assignment.publish_on
            # elapsed_time = datetime.utcnow() - assignment.publish_on
            # print elapsed_time,assignment.publish_on
            if elapsed_time > assignment.late_duration:
                late_time = assignment.timeduration - elapsed_time
                if late_time <= timedelta(seconds=0):
                    times = timedelta(seconds=0)
                else:
                    times = late_time
            else:
                times = assignment.timeduration
            # print times
            proc = Proctoring(
                owner=request.user,
                assignment=assignment,
                ipAddress=ipadd,
                time=times,
                pause=False,
                status=True,
                addtime=timedelta(seconds=0),
                key=assignment.exam_group_id,
                starttime=DateTime.datetime.now()
            )
            proc.save()
            if not Pausetimer.objects.filter(key=assignment.exam_group_id):
                pausetimer = Pausetimer(
                    assignment=assignment,
                    pause=False,
                    key=assignment.exam_group_id,
                    additionaltime=timedelta(seconds=0),
                )
                pausetimer.save()
            timer = times
            allowed_exam = True
    # NOTE(review): timer stays 0 for non-exam paths, so this also marks
    # labs as due / not-submittable here — confirm this is intended and
    # that the later re-computation below restores the lab state.
    if timer <= timedelta(seconds=1):
        is_due = True
        submission_allowed = False
    # All submission changes by Jitendra
    deadline = get_assignment_deadline(request, assignment)
    que = 'SELECT DISTINCT submission_id as id FROM upload_latestsubmission WHERE assignment_id =' + assignmentID + ';'
    number_of_submissions = LatestSubmission.objects.raw(que)
    number_of_submissions = len(list(number_of_submissions))
    if assignment.type_of_lab == "Lab":
        rem_time = int(
            (assignment.deadline - datetime.now(pytz.timezone('UTC'))).total_seconds())
    elif assignment.type_of_lab == "Exam":
        (rem_time, __, allowed_exam_status) = get_object_from_proctoring(
            assignment.exam_group_id, request.user)
    # Group-wide pause: freeze every student's proctoring record.
    pause_all = Pausetimer.objects.filter(key=assignment.exam_group_id)
    if pause_all and pause_all[0].pause:
        student_ = Proctoring.objects.filter(
            owner=request.user,
            key=assignment.exam_group_id
        )
        for obj in student_:
            obj.pause = True
            obj.save()
        allowed_exam_status = False
        allowed_exam = False
    # POST: accept an upload and make it the user's latest submission.
    if request.method == "POST" and submission_allowed and ip_correct and assignment.deadline is not None:
        form = UploadForm(request.POST, request.FILES,
                          assignment_model_obj=assignment)
        if form.is_valid():
            new_upload = Upload(
                owner=request.user,
                assignment=assignment,
                filePath=request.FILES['docfile']
            )
            new_upload.save()
            # NOTE(review): this queryset is not filtered by owner, so
            # submissions[0] is the newest upload by ANY user; it is
            # normally the one just saved, but a concurrent upload could
            # race — confirm.
            submissions = Upload.objects.filter(
                assignment=new_upload.assignment).order_by('-uploaded_on')
            submission_to_evaluate = LatestSubmission.objects.filter(assignment=new_upload.assignment).filter(
                owner=request.user)
            if submission_to_evaluate:
                submission_to_evaluate[0].submission = submissions[0]
                submission_to_evaluate[0].save()
            else:
                submission_to_evaluate = LatestSubmission()
                submission_to_evaluate.assignment = new_upload.assignment
                submission_to_evaluate.owner = request.user
                submission_to_evaluate.submission = new_upload
                submission_to_evaluate.save()
            return HttpResponseRedirect(reverse('assignments_details', kwargs={'assignmentID': assignmentID}))
    else:
        form = UploadForm()
    return render_to_response(
        'assignments/details.html',
        {'assignment': assignment, 'timer': timer, 'course': course, 'has_joined': has_joined,
         'is_moderator': is_moderator, 'programs': programs, 'form': form,
         'submission_allowed': submission_allowed, 'allowed_exam': allowed_exam, 'submittedFiles': submittedFiles,
         'programs_with_errors': programs_with_errors, 'disable_grading': disable_grading,
         'program_not_ready': program_not_ready, 'practice_program': practice_program,
         'assignments': assignments, 'program_errors': program_errors, 'test_errors': test_errors,
         'published': assignment.publish_on, 'is_due': is_due, 'rem_time': rem_time,
         'isSubmitted': isSubmitted, 'date_time': timezone.now(), 'get_params': get_params,
         'total_sumissions': total_sumissions, 'mode': mode, 'best_submission': best_submission,
         'assignmentID': assignmentID, 'now': timezone.now(), 'evaluate_program': evaluate_program,
         'formData': formData, 'number_of_submissions': number_of_submissions, 'user_id': request.user,
         'allowed_exam_status': allowed_exam_status, 'taList': get_ta_allocation(request, assignmentID),
         'deadline': deadline},
        context_instance=RequestContext(request),
    )
def get_assignment_deadline(request, _assignment):
    """Return the effective deadline of ``_assignment`` for this user.

    Non-exam assignments simply use their stored deadline, as does an
    exam whose global window has already closed.  For a live exam the
    deadline is personalised from the user's Proctoring record
    (remaining time on their timer), falling back to
    ``now + timeduration`` when the user has not started yet.
    """
    if _assignment.type_of_lab != 'Exam':
        return _assignment.deadline
    if _assignment.deadline < timezone.now():
        # Exam window already closed for everyone.
        return _assignment.deadline
    records = Proctoring.objects.filter(
        owner=request.user, assignment=_assignment)
    if not records:
        # Not started yet: a full allotment from now.
        return timezone.now() + _assignment.timeduration
    session = records[0]
    remaining = session.time - (timezone.now() - session.starttime)
    if remaining.days < 0:
        # Personal timer already expired: deadline was start + allotment.
        return session.starttime + session.time
    return timezone.now() + remaining
@login_required
def assignments_hide(request, assignment_id):
    """Hide an assignment from students (assignment-detail page action).

    Moderator-only.  The original fetched the assignment and checked the
    moderator flag twice, and the second ``filter(trash=False).get(...)``
    raised an unhandled DoesNotExist (HTTP 500) for trashed assignments;
    a single lookup with ``trash=False`` now yields a 404 instead.
    """
    assignment = get_object_or_404(Assignment, pk=assignment_id, trash=False)
    if not isCourseModerator(assignment.course, request.user):
        return HttpResponseForbidden("Forbidden 403")
    assignment.hide = True
    assignment.save()
    return HttpResponseRedirect(reverse('assignments_details', kwargs={'assignmentID': assignment_id}))
@login_required
def assignments_hide1(request, pk, assignment_id):
    """Hide an assignment from students (course exam page action).

    Moderator-only.  Collapses the original duplicated lookup and
    moderator check; trashed/unknown assignments now 404 instead of
    raising an unhandled DoesNotExist.
    """
    assignment = get_object_or_404(Assignment, pk=assignment_id, trash=False)
    if not isCourseModerator(assignment.course, request.user):
        return HttpResponseForbidden("Forbidden 403")
    assignment.hide = True
    assignment.save()
    return HttpResponseRedirect(reverse('course', kwargs={'pk': pk, 'ref': "exams"}))
@login_required
def assignments_unhide(request, assignment_id):
    """Unhide an assignment for students (assignment-detail page action).

    Moderator-only.  Collapses the original duplicated lookup and
    moderator check; trashed/unknown assignments now 404 instead of
    raising an unhandled DoesNotExist.
    """
    assignment = get_object_or_404(Assignment, pk=assignment_id, trash=False)
    if not isCourseModerator(assignment.course, request.user):
        return HttpResponseForbidden("Forbidden 403")
    assignment.hide = False
    assignment.save()
    return HttpResponseRedirect(reverse('assignments_details', kwargs={'assignmentID': assignment_id}))
@login_required
def assignments_unhide1(request, pk, assignment_id):
    """Unhide an assignment for students (course exam page action).

    Moderator-only.  Collapses the original duplicated lookup and
    moderator check; trashed/unknown assignments now 404 instead of
    raising an unhandled DoesNotExist.
    """
    assignment = get_object_or_404(Assignment, pk=assignment_id, trash=False)
    if not isCourseModerator(assignment.course, request.user):
        return HttpResponseForbidden("Forbidden 403")
    assignment.hide = False
    assignment.save()
    return HttpResponseRedirect(reverse('course', kwargs={'pk': pk, 'ref': "exams"}))
@login_required
def editAssignment(request, assignmentID, tabID):
    '''
    Edit an existing assignment. Only a course moderator may edit; everyone
    else receives 403.

    POST: validates an AssignmentForm pre-seeded from the model, applies
    only the changed fields (deleting superseded uploaded files, revoking
    and rescheduling the Celery submission-cleanup task, re-running TA
    allocation, rebuilding bulk-added sections, and marking existing
    submissions stale with an email notice when the student file spec
    changes), then redirects back to the edit page (or to the details page
    when "Publish" was pressed).
    GET: renders the edit form seeded from the current model state.

    :param assignmentID: pk of the Assignment being edited (404 if absent).
    :param tabID: which tab of the edit UI to show on initial render.
    '''
    assignment = get_object_or_404(Assignment, pk=assignmentID)
    is_moderator = isCourseModerator(assignment.course, request.user)
    if not is_moderator:
        return HttpResponseForbidden("Forbidden 403")
    course = assignment.course.id
    # For listing of assignments in the sidebar
    all_assignments = Assignment.objects.filter(
        course=course).filter(trash=False).order_by('-deadline')
    courseHistory = CourseHistory.objects.get(user=request.user, course=course)
    if courseHistory.is_owner:
        assignments = all_assignments
    else:
        # Students only see assignments that are unhidden AND already published.
        assignments = [a for a in all_assignments if(not a.hide and
                                                     (timezone.now() > a.publish_on if a.publish_on else False))]
    # NOTE(review): redundant — is_moderator was already checked above and
    # cannot have changed since; this branch is unreachable.
    if not is_moderator:
        return HttpResponseForbidden("Forbidden 403")
    if request.method == 'POST':
        request.POST['courseID'] = assignment.course.id
        form = AssignmentForm(request.POST, request.FILES,
                              initial=model_to_dict(assignment))
        form.assignment_model = assignment
        if form.is_valid():
            # check if duration field is changed
            # A duration implies an "On Demand" assignment, which has no
            # fixed deadlines.
            if 'duration' in form.changed_data:
                assignment.publish_type = "On Demand"
                assignment.deadline = None
                assignment.freezing_deadline = None
            # check if new file is uploaded
            # For each file field: drop the old stored file when replaced,
            # and drop the cleaned value when the field was merely cleared
            # so setattr below does not blank an untouched file.
            if 'document' in form.changed_data:
                if assignment.document:
                    assignment.document.delete(save=False)
                if not form.cleaned_data['document']:
                    form.cleaned_data.pop('document')
            if 'helper_code' in form.changed_data:
                if assignment.helper_code:
                    assignment.helper_code.delete(save=False)
                if not form.cleaned_data['helper_code']:
                    form.cleaned_data.pop('helper_code')
            if 'model_solution' in form.changed_data:
                if assignment.model_solution:
                    assignment.model_solution.delete(save=False)
                if not form.cleaned_data['model_solution']:
                    form.cleaned_data.pop('model_solution')
            if 'freezing_deadline' in form.changed_data:
                # Replace the pending Celery cleanup task with one scheduled
                # at the new freezing deadline.
                app.control.revoke(
                    assignment.deletesubmissions_task_id, terminate=True)
                delete_task = delete_redundant_files.apply_async((assignment.id,),
                                                                 eta=form.cleaned_data['freezing_deadline'])
                assignment.deletesubmissions_task_id = delete_task.id
            if 'ta_allocation_document' in form.changed_data:
                if assignment.ta_allocation_document:
                    assignment.ta_allocation_document.delete(save=False)
                if not form.cleaned_data['ta_allocation_document']:
                    form.cleaned_data.pop('ta_allocation_document')
            if 'type_of_allocation' in form.changed_data or 'previous_allocation_policy' in form.changed_data:
                if form.cleaned_data['type_of_allocation'] == 'Use Previous Allocation Policy':
                    assignment.previous_allocation_policy = form.cleaned_data[
                        'previous_allocation_policy']
                    assignment.save()
                    previous_assignment_id = assignment.previous_allocation_policy.id
                    perform_ta_allocation(request, assignment.id, form.cleaned_data, allocation_type='Previous',
                                          previous_assignment_id=previous_assignment_id)
                elif form.cleaned_data['type_of_allocation'] == 'Automated':
                    perform_ta_allocation(request, assignment.id, form.cleaned_data, allocation_type='Automated',
                                          previous_assignment_id=None)
                else:
                    # 'New Allocation Policy' is handled below, after save.
                    pass
            if 'bulk_add' in form.changed_data:
                if assignment.bulk_add:
                    # Clearing/replacing the bulk file wipes every derived
                    # Program (section) plus the on-disk media folder.
                    assignment.bulk_add.delete(save=False)
                    progid = Program.objects.filter(
                        assignment_id=assignment.id)
                    for row in progid:  # deleting all the sections and their testcases on ticking clear
                        row.delete()
                    creater = get_object_or_404(User, pk=assignment.creater_id)
                    creater_name = User.objects.filter(username=creater)
                    # NOTE(review): recovers the username by parsing the
                    # queryset repr — brittle; presumably equals
                    # creater.username. Same pattern for course title below.
                    creater_name = (
                        (str(creater_name).split(':')[-1])[:-2])[1:]
                    course = get_object_or_404(Course, pk=assignment.course_id)
                    course_name = Course.objects.filter(title=course)
                    course_name = ((str(course_name).split(':')[-1])[:-2])[1:]
                    folder_name = os.path.join(
                        settings.MEDIA_ROOT, creater_name, course_name, assignment.name)
                    if os.path.isdir(folder_name):
                        shutil.rmtree(folder_name)
                if not form.cleaned_data['bulk_add']:
                    form.cleaned_data.pop('bulk_add')
            # Apply every surviving cleaned field onto the model instance.
            for key in form.cleaned_data.keys():
                setattr(assignment, key, form.cleaned_data[key])
            # Any change to solution / student files / language requires the
            # evaluator to re-verify all programs of this assignment.
            for afield in ['model_solution', 'student_program_files', 'program_language']:
                if afield in form.changed_data:
                    assignment.verify_programs = True
                    assignment.program_model = Program
                    assignment.changed_list = form.changed_data
                    break
            assignment.save()
            if 'ta_allocation_document' in form.changed_data and 'ta_allocation_document' in form.cleaned_data:
                perform_ta_allocation(request, assignment.id, form.cleaned_data, allocation_type='New',
                                      previous_assignment_id=None)
            if 'bulk_add' in form.changed_data and assignment.bulk_add:
                section_bulk_add(assignment)
            if any(f in ['student_program_files'] for f in form.changed_data):
                # Spec changed: mark every submission stale and email the
                # affected students to resubmit.
                all_submissions = Upload.objects.select_related('owner').select_for_update().\
                    filter(assignment=assignment)
                all_submissions.update(is_stale=True)
                subject_line = "Please re-submit assignment '{0}' of the course '{1}'".format(assignment.name,
                                                                                             assignment.course.title)
                message = "Course '{0}' assignment '{1}' specification has been changed since you submit\
                your assignment last time. \
                You are required to submit your assignment again. \
                Your current submission will not be considered.".format(assignment.course.title, assignment.name)
                message_from = 'noreply@evalpro'
                with transaction.atomic():
                    message_to = [a.owner.email for a in all_submissions]
                try:
                    send_mail(subject_line, message, message_from,
                              message_to, fail_silently=False)
                    messages.add_message(request, messages.SUCCESS,
                                         "Students have been successfully informed about the changes.")
                except Exception as e:
                    print e.message, type(e)
                    messages.add_message(
                        request, messages.ERROR, "Students have not been informed about the changes.")
        # NOTE(review): reached even when the form is invalid — validation
        # errors are silently discarded by this redirect.
        tabid = int(request.POST.get('name_tabid', 0))
        if request.POST.get("Publish"):
            return HttpResponseRedirect(reverse('assignments_details', kwargs={'assignmentID': assignmentID}))
        return HttpResponseRedirect(reverse('assignments_edit', kwargs={'assignmentID': assignmentID,
                                                                        'tabID': tabid}))
    else:
        form = AssignmentForm(initial=model_to_dict(
            assignment), courseID=assignment.course.id)
    course = assignment.course
    return render_to_response(
        'assignments/edit.html',
        {'assignment': assignment, 'form': form, 'course': course, 'is_moderator': is_moderator,
         'assignments': assignments, 'tabID': tabID, 'taList': get_ta_allocation(request, assignmentID)},
        context_instance=RequestContext(request))
@login_required
def createAssignment(request, courseID, exams=0):
    '''
    Create a new assignment (or exam) in a course. Moderators only.

    Two POST modes, selected by ``exams``:
      * ``exams == 0`` — classic form flow: validate AssignmentForm, save,
        schedule the submission-cleanup Celery task, run TA allocation and
        bulk section add, then redirect (HTML response).
      * ``exams != 0`` — AJAX/exam flow: build the Assignment manually from
        individual POST/FILES keys, validate via serializer, and answer
        with a JSON ``{"success": ..., "error": ...}`` payload.
    GET renders the empty creation form.

    :param courseID: pk of the Course to create the assignment in.
    :param exams: 0 for the normal form flow, non-zero for the exam flow.
    '''
    course = get_object_or_404(Course, pk=courseID)
    is_moderator = isCourseModerator(course, request.user)
    all_assignments = Assignment.objects.filter(
        course=course).filter(trash=False).order_by('-deadline')
    courseHistory = CourseHistory.objects.get(user=request.user, course=course)
    if courseHistory.is_owner:
        assignments = all_assignments
    else:
        # Students only see unhidden, already-published assignments.
        assignments = [a for a in all_assignments if(not a.hide and
                                                     (timezone.now() > a.publish_on if a.publish_on else False))]
    if not is_moderator:
        return HttpResponseForbidden("Forbidden 403")
    if request.method == 'POST':
        # if "save-as-draft" in request.POST:
        if exams == 0:
            request.POST['courseID'] = courseID
            form = AssignmentForm(request.POST, request.FILES)
            form.this_course = course
            tabid = int(request.POST.get('name_tabid', 1))
            if form.is_valid():
                check_duration_field = form.cleaned_data['duration']
                if check_duration_field is not None:  # on-demand
                    form.cleaned_data['assignment_type'] = False
                new_assignment = Assignment(**form.cleaned_data)
                new_assignment.course = course
                new_assignment.creater = request.user
                # Next serial number within the course (1 when none exist).
                new_assignment.serial_number = (Assignment.objects.filter(course=course).filter(trash=False)
                                                .aggregate(Max('serial_number'))
                                                ['serial_number__max'] or 0) + 1
                if new_assignment.correctness is None:
                    new_assignment.correctness = False
                if new_assignment.order is None:
                    new_assignment.order = False
                if new_assignment.error is None:
                    new_assignment.error = 0.0
                new_assignment.save()
                if new_assignment.publish_type == "On Demand":
                    # On-demand assignments have no fixed freezing deadline,
                    # so no cleanup task is scheduled.
                    pass
                else:
                    delete_task = delete_redundant_files.apply_async((new_assignment.id,),
                                                                     eta=new_assignment.freezing_deadline)
                    new_assignment.deletesubmissions_task_id = delete_task.id
                    new_assignment.save()
                if new_assignment.bulk_add:
                    section_bulk_add(new_assignment)
                link = reverse('assignments_createprogram', kwargs={
                               'assignmentID': new_assignment.id})
                messages.success(request, 'Assignment Created! Now <a href="{0}">ADD</a> programs to assignment.'
                                 .format(link), extra_tags='safe')
                if new_assignment.type_of_allocation == 'New Allocation Policy':
                    perform_ta_allocation(request, new_assignment.id, form.cleaned_data, allocation_type='New',
                                          previous_assignment_id=None)
                elif new_assignment.type_of_allocation == 'Use Previous Allocation Policy':
                    previous_assignment_id = new_assignment.previous_allocation_policy.id
                    perform_ta_allocation(request, new_assignment.id, form.cleaned_data, allocation_type='Previous',
                                          previous_assignment_id=previous_assignment_id)
                else:
                    perform_ta_allocation(request, new_assignment.id, form.cleaned_data, allocation_type='Automated',
                                          previous_assignment_id=None)
                tabid = int(request.POST.get('name_tabid', 1))
                if request.POST.get("Publish"):
                    return HttpResponseRedirect(reverse('assignments_details', kwargs={'assignmentID':
                                                                                       new_assignment.id}))
                return HttpResponseRedirect(reverse('assignments_edit', kwargs={'assignmentID': new_assignment.id,
                                                                                'tabID': tabid}))
        else:
            # Exam/AJAX flow: everything below answers with JSON.
            try:
                _ = Assignment.objects.get(
                    name=request.POST['name'], course=course)
                data = {"success": False,
                        "error": 'This assignment already exists in this course.'}
            except Assignment.DoesNotExist:
                try:
                    new_assignment = Assignment(
                        course_id=courseID,
                        name=request.POST['name'],
                        type_of_lab=request.POST['type_of_lab'],
                        publish_type=request.POST['publish_type'],
                        description=request.POST['description'],
                        type_of_allocation=request.POST['type_of_allocation']
                    )
                    tabid = int(request.POST.get('name_tabid', 1))
                    new_assignment.creater = request.user
                    new_assignment.serial_number = (Assignment.objects.filter(course=course).filter(trash=False)
                                                    .aggregate(Max('serial_number'))
                                                    ['serial_number__max'] or 0) + 1
                    data = {'success': True}
                    if 'indentation' in request.POST and request.POST['indentation'] == "True":
                        new_assignment.indentation = request.POST['indentation']
                    if 'force_notify' in request.POST and request.POST['force_notify'] == "True":
                        new_assignment.force_notify = request.POST['force_notify']
                    if 'execution_time_allowed' in request.POST and request.POST['execution_time_allowed'] == "True":
                        new_assignment.execution_time_allowed = request.POST['execution_time_allowed']
                    # NOTE(review): duplicate of the force_notify block above.
                    if 'force_notify' in request.POST and request.POST['force_notify'] == "True":
                        new_assignment.force_notify = request.POST['force_notify']
                    serializer = AssignmentDetailsSerializer(data=request.POST)
                    if not serializer.is_valid():
                        return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
                    if 'publish_on' in request.POST:
                        new_assignment.publish_on = local2utc(
                            serializer.validated_data['publish_on'])
                    if 'deadline' in request.POST:
                        new_assignment.deadline = local2utc(
                            serializer.validated_data['deadline'])
                    if 'hard_deadline' in request.POST:
                        # NOTE(review): string comparison of raw POST dates —
                        # presumably relies on a sortable date format; verify.
                        if 'deadline' in request.POST and request.POST['deadline'] > request.POST['hard_deadline']:
                            data = {
                                "success": False, "error": 'Hard deadline should be later than deadline.'}
                        new_assignment.freezing_deadline = local2utc(
                            serializer.validated_data['hard_deadline'])
                        if new_assignment.publish_type == "Scheduled":
                            delete_task = delete_redundant_files.apply_async((new_assignment.id,),
                                                                             eta=new_assignment.freezing_deadline)
                            new_assignment.deletesubmissions_task_id = delete_task.id
                    if 'duration' in request.POST:
                        new_assignment.duration = request.POST['duration']
                    if 'freezing_duration' in request.POST:
                        new_assignment.freezing_duration = request.POST['freezing_duration']
                    if 'student_program_files' in request.POST:
                        new_assignment.student_program_files = request.POST['student_program_files']
                        err_msg = check_program_files(request)
                        if err_msg != '':
                            data = {"success": False, "error": err_msg}
                    if 'ipaddress' in request.POST:
                        if is_valid_ip(request.POST['ipaddress']):
                            new_assignment.ipaddress = request.POST['ipaddress']
                        else:
                            data = {"success": False,
                                    "error": 'Invalid ipaddress'}
                    if new_assignment.type_of_lab == 'Exam':
                        # Exams additionally require a time duration, a group
                        # key, and a late-start duration.
                        if 'timeduration' in request.POST:
                            new_assignment.timeduration = request.POST['timeduration']
                        else:
                            data = {"success": False,
                                    "error": 'Enter a timeduration for exam.'}
                        if 'exam_group_id' in request.POST:
                            new_assignment.exam_group_id = request.POST['exam_group_id']
                        else:
                            data = {"success": False,
                                    "error": 'Enter a group key for exam.'}
                        if 'late_duration' in request.POST:
                            new_assignment.late_duration = request.POST['late_duration']
                        else:
                            data = {
                                "success": False, "error": 'Enter a late start timeduration for exam.'}
                    if 'only_uploads' in request.POST and request.POST['only_uploads'] == "True":
                        new_assignment.only_uploads = request.POST['only_uploads']
                    if 'program_language' in request.POST:
                        new_assignment.program_language = request.POST['program_language']
                    if 'graphics_program' in request.POST and request.POST['graphics_program'] == "True":
                        new_assignment.graphics_program = request.POST['graphics_program']
                    if 'force_upload' in request.POST:
                        new_assignment.force_upload = request.POST['force_upload']
                    if 'correctness' in request.POST and request.POST['correctness'] == "True":
                        new_assignment.correctness = request.POST['correctness']
                    if 'order' in request.POST and request.POST['order'] == "True":
                        new_assignment.order = request.POST['order']
                    if 'error' in request.POST:
                        new_assignment.error = float(request.POST['error'])
                    if 'document' in request.FILES:
                        new_assignment.document = request.FILES['document']
                    if 'helper_code' in request.FILES:
                        new_assignment.helper_code = request.FILES['helper_code']
                    if 'model_solution' in request.FILES:
                        new_assignment.model_solution = request.FILES['model_solution']
                        with Archive(fileobj=request.FILES['model_solution']) as archive:
                            # NOTE(review): data is the success/error dict, not
                            # the POST — data['program_language'] raises
                            # KeyError here (swallowed by the outer except);
                            # request.POST['program_language'] was likely meant.
                            if not archive.is_archive() and not is_valid(filename=request.FILES['model_solution'].name,
                                                                         lang=data['program_language']):
                                data = {
                                    "success": False, "error": 'Enter valid model solution files.'}
                    if not data['success']:
                        return HttpResponse(json.dumps(data), content_type="application/json")
                    new_assignment.save()
                    if new_assignment.type_of_allocation == 'New Allocation Policy':
                        msg = check_allocation_doc(
                            request.FILES['ta_allocation_document'], courseID)
                        if msg != '':
                            data = {"success": False, "error": msg}
                        else:
                            new_assignment.ta_allocation_document = request.FILES[
                                'ta_allocation_document']
                            perform_ta_allocation(request, new_assignment.id,
                                                  {'ta_allocation_document': request.FILES['ta_allocation_document']},
                                                  allocation_type='New', previous_assignment_id=None)
                    elif new_assignment.type_of_allocation == 'Use Previous Allocation Policy':
                        try:
                            previous_assignment = Assignment.objects.get(
                                pk=request.POST['previous_allocation_policy'])
                            new_assignment.previous_allocation_policy = previous_assignment
                            previous_assignment_id = new_assignment.previous_allocation_policy.id
                            perform_ta_allocation(request, new_assignment.id, {}, allocation_type='Previous',
                                                  previous_assignment_id=previous_assignment_id)
                        except Exception, e:
                            # NOTE(review): e is an exception object; json.dumps
                            # below cannot serialize it — str(e) was likely meant.
                            data = {"success": False, "error": e}
                    else:
                        perform_ta_allocation(request, new_assignment.id, {},
                                              allocation_type='Automated',
                                              previous_assignment_id=None)
                    if 'bulk_add' in request.FILES:
                        new_assignment.bulk_add = request.FILES['bulk_add']
                        bulk_add_check(request.FILES)
                        section_bulk_add(new_assignment)
                    if not data['success']:
                        return HttpResponse(json.dumps(data), content_type="application/json")
                    new_assignment.save()
                    data = {"success": True}
                except Exception, e:
                    # NOTE(review): same json.dumps-of-exception issue as above.
                    data = {"success": False, "error": e}
            return HttpResponse(json.dumps(data), content_type="application/json")
    else:
        form = AssignmentForm(courseID=courseID)
        tabid = 1
    return render_to_response(
        'assignments/createAssignment.html',
        {'form': form, 'course': course, 'is_moderator': is_moderator, 'assignments': assignments,
         'tabID': tabid},
        context_instance=RequestContext(request))
@login_required
def perform_ta_allocation(request, assignment_id, data, allocation_type, previous_assignment_id):
    '''function to map students with TA

    Rebuilds the TA_allocation table for one assignment. Any existing
    allocation rows for the assignment are deleted first, then new rows
    are created according to ``allocation_type``:

      * ``'New'`` — read the uploaded CSV referenced by
        ``data['ta_allocation_document']``; each data row is expected to
        hold (student username, TA username) — the first row is treated
        as a header and skipped.
      * ``'Previous'`` — clone every allocation row of the assignment
        identified by ``previous_assignment_id``.
      * anything else (automated) — round-robin the course's students
        over its TAs (owners who are not creators).
    '''
    assignment = get_object_or_404(Assignment, pk=assignment_id)
    course = assignment.course
    # Wipe any stale allocation for this assignment before rebuilding.
    allocation_entries = TA_allocation.objects.filter(assignment=assignment)
    if allocation_entries:
        for allocation_object in allocation_entries:
            allocation_object.delete()
    if allocation_type == 'New':
        allocation_file = data['ta_allocation_document']
        if allocation_file:
            filepath = os.path.join(settings.MEDIA_ROOT, str(
                assignment.ta_allocation_document))
            with open(filepath, 'rb') as csvfile:
                reader = csv.reader(csvfile, delimiter=',')
                count = 0
                for row in reader:
                    row[0] = str(row[0]).strip()
                    row[1] = str(row[1]).strip()
                    if count == 0:
                        # First row is the CSV header — skip it.
                        pass
                    else:
                        student = User.objects.filter(username=str(row[0]))
                        teaching_assistant = User.objects.filter(
                            username=str(row[1]))
                        new_ta_allocation = TA_allocation()
                        new_ta_allocation.assignment = assignment
                        # NOTE(review): student[0]/teaching_assistant[0] raise
                        # IndexError if a CSV username does not exist — verify
                        # check_allocation_doc() guarantees this upstream.
                        new_ta_allocation.student = student[0]
                        new_ta_allocation.assistant = teaching_assistant[0]
                        new_ta_allocation.save()
                    count = count + 1
    elif allocation_type == 'Previous':
        previous_assignment = get_object_or_404(
            Assignment, pk=previous_assignment_id)
        allocation_objects = TA_allocation.objects.filter(
            assignment=previous_assignment)
        for allocation_object in allocation_objects:
            # Clone the row: clearing id makes save() insert a new record.
            new_allocation = deepcopy(allocation_object)
            new_allocation.id = None
            new_allocation.assignment = assignment
            new_allocation.save()
    else:
        # Automated: TAs are course owners that are not the creator;
        # students are members that are neither creator nor owner.
        ta_list = CourseHistory.objects.filter(course=course).filter(
            is_creator=False).filter(is_owner=True)
        student_list = CourseHistory.objects.filter(
            course=course).filter(is_creator=False).filter(is_owner=False)
        if not ta_list or not student_list:
            return
        mod_variable = len(ta_list)
        # NOTE(review): dead branch — the early return above guarantees
        # ta_list is non-empty, so mod_variable >= 1 here.
        if mod_variable == 0:
            mod_variable = 1
        count = 1
        for student in student_list:
            new_ta_allocation = TA_allocation()
            new_ta_allocation.assignment = assignment
            student_object = get_object_or_404(User, pk=student.user.id)
            new_ta_allocation.student = student_object
            # Round-robin assignment of TAs to students.
            ta_object = get_object_or_404(
                User, pk=ta_list[count % mod_variable].user.id)
            new_ta_allocation.assistant = ta_object
            new_ta_allocation.save()
            count = count+1
    return
def check_program_files(request):
    '''
    Validate the ``student_program_files`` POST field.

    Checks that every listed file name has an extension valid for the
    chosen ``program_language`` (unless ``force_upload`` is set), and
    that every listed file is present in the uploaded model solution
    (a single file or an archive) when one was provided.

    :param request: request carrying ``student_program_files``,
        ``program_language`` and ``force_upload`` in POST, and
        optionally ``model_solution`` in FILES.
    :return: '' when everything is consistent, otherwise an error message.
    '''
    err_msg = ''
    file_list = request.POST['student_program_files'].split()
    language = request.POST['program_language']
    for afile in file_list:
        if not is_valid(afile, language) and not request.POST['force_upload']:
            err_msg = "Only {1} files are accepted for {0} language.\
            ".format(language, " ".join(get_extension(language)))
            return err_msg
    students_file = set(request.POST['student_program_files'].split())
    solution_file = []
    if 'model_solution' in request.FILES:
        with Archive(fileobj=request.FILES['model_solution']) as archive:
            if not archive.is_archive():
                # Single-file solution: keep it only if it matches the language.
                if is_valid(filename=request.FILES['model_solution'].name, lang=request.POST['program_language']):
                    solution_file = [request.FILES['model_solution'].name]
                else:
                    err_msg = "Invalid file in model solution"
                    return err_msg
            else:
                # Archive: compare against the base names of its members.
                solution_file = [a.split("/")[-1]
                                 for a in archive.getfile_members()]
    missing_file = students_file - set(solution_file)
    if missing_file and solution_file:
        # BUG FIX: 'student_program_files' is a POST field, not an upload;
        # indexing request.FILES here raised MultiValueDictKeyError exactly
        # when the "file missing" message should have been produced.
        err_msg = "{0} was not found. Please upload \
        {1}".format(" ".join(missing_file), request.POST['student_program_files'])
    return err_msg
@login_required
def removeAssignment(request, assignmentID):
    '''
    Remove an assignment from the assignment page. Moderators only.

    First call moves the assignment to trash; calling it on an already
    trashed assignment permanently deletes the row and the on-disk media
    folder <MEDIA_ROOT>/<creator username>/<course title>/<assignment name>.

    Fix: the previous version recovered the creator username and course
    title by parsing queryset reprs (str(qs).split(':')...), which is
    brittle and issues redundant queries; the path components now come
    straight from the related objects' attributes.
    '''
    assignment = get_object_or_404(Assignment, pk=assignmentID)
    course = assignment.course
    if not isCourseModerator(course, request.user):
        return HttpResponseForbidden("Forbidden 403")
    if not assignment.trash:
        # Soft delete: move to trash and return to the assignment list.
        assignment.trash = True
        assignment.save()
        return HttpResponseRedirect(reverse('assignments_index', kwargs={'courseID': course.id}))
    # Hard delete: wipe the assignment's media folder, then the record.
    folder_name = os.path.join(
        settings.MEDIA_ROOT, assignment.creater.username, course.title, assignment.name)
    if os.path.isdir(folder_name):
        shutil.rmtree(folder_name)
    assignment.delete()
    return HttpResponseRedirect(reverse('assignments_trash', kwargs={'courseID': course.id}))
@login_required
@is_moderator_check
def removeAssignment1(request, assignmentID):
    '''
    Remove an assignment from the exam page. Authorization is enforced by
    the @is_moderator_check decorator.

    First call moves the assignment to trash; calling it on an already
    trashed assignment permanently deletes the row and the on-disk media
    folder <MEDIA_ROOT>/<creator username>/<course title>/<assignment name>.

    Fixes: the manual moderator check duplicated the decorator, and the
    creator username / course title were recovered by parsing queryset
    reprs (str(qs).split(':')...) — brittle and redundant; the path
    components now come straight from the related objects' attributes.
    '''
    assignment = get_object_or_404(Assignment, pk=assignmentID)
    course = assignment.course
    if not assignment.trash:
        # Soft delete: move to trash and return to the exam listing.
        assignment.trash = True
        assignment.save()
        return HttpResponseRedirect(reverse('course', kwargs={'pk': assignment.course_id, 'ref': "exams"}))
    # Hard delete: wipe the assignment's media folder, then the record.
    folder_name = os.path.join(
        settings.MEDIA_ROOT, assignment.creater.username, course.title, assignment.name)
    if os.path.isdir(folder_name):
        shutil.rmtree(folder_name)
    assignment.delete()
    return HttpResponseRedirect(reverse('assignments_trash', kwargs={'courseID': course.id}))
@login_required
@is_moderator_check
def restoreAssignment(request, assignmentID):
    '''Bring a trashed assignment back, then return to the trash listing.'''
    restored = get_object_or_404(Assignment, pk=assignmentID)
    owning_course = restored.course
    if restored.trash:
        restored.trash = False
        restored.save()
    return HttpResponseRedirect(
        reverse('assignments_trash', kwargs={'courseID': owning_course.id}))
@login_required
@is_moderator_check
def showAssignmentsTrash(request, courseID):
    '''Render the list of trashed assignments of a course for a moderator.'''
    the_course = get_object_or_404(Course, pk=courseID)
    trashed = the_course.get_assignments.all().filter(trash=True)
    context = {'course': the_course, 'trashAssignments': trashed}
    return render_to_response('assignments/showAssignmentsTrash.html',
                              context,
                              context_instance=RequestContext(request))
@login_required
def createProgram(request, assignmentID):
    '''
    Only creator of course can create new program in assignment.

    POST: validates the language-specific program form (the form class
    depends on whether the language needs compilation, execution, or
    both), saves the new Program (section) flagged for compilation and
    execution, optionally expands bulk-added testcases, marks every
    latest-submission result stale, and redirects to the assignment
    details page.
    GET: renders the creation form, pre-seeding the pickled compiler /
    execution command from an existing sibling Program when one exists,
    otherwise from the language's default tool names.
    '''
    assignment = get_object_or_404(Assignment, pk=assignmentID)
    course = assignment.course
    is_moderator = isCourseModerator(course, request.user)
    if not is_moderator:
        return HttpResponseForbidden("Forbidden 403")
    if request.method == 'POST':
        lang_category = language_category(assignment.program_language)
        if lang_category == 0:  # Compilation needed. Execution not needed. C and C++
            form = ProgramFormCNotE(request.POST, request.FILES)
        elif lang_category == 1:  # Compilation and execution needed.
            form = ProgramFormCandE(request.POST, request.FILES)
        elif lang_category == 2:  # Execution needed. Python and bash
            form = ProgramFormE(request.POST, request.FILES)
        form.assignment = assignment  # files submitted by student
        if form.is_valid():
            new_program = Program(**form.cleaned_data)
            new_program.assignment = assignment
            new_program.is_sane = True
            # Flag the new section so the evaluator (re)compiles and
            # (re)executes it on its next pass.
            new_program.compile_now = True
            new_program.execute_now = True
            new_program.language = assignment.program_language
            new_program.save()
            if new_program.testcase_bulk_add:
                testcase_bulk_add(new_program)
            link = reverse('assignments_createtestcase',
                           kwargs={'programID': new_program.id})
            messages.success(request,
                             'Section Created! Now <a href="{0}">ADD</a> testcase for this program.'.format(
                                 link),
                             extra_tags='safe')
            # A new section invalidates previously computed results for the
            # latest submission of every student.
            list_of_assignment_ids = [instance.submission.id for instance in LatestSubmission.objects
                                      .filter(assignment=assignment)]
            all_submissions = Upload.objects.filter(assignment=assignment, assignment__trash=False,
                                                    pk__in=list_of_assignment_ids).order_by("-uploaded_on")
            AssignmentResults.objects.filter(
                submission__in=all_submissions).update(is_stale=True)
            return HttpResponseRedirect(reverse('assignments_details', kwargs={'assignmentID': assignmentID}))
    else:
        objs = Program.objects.filter(assignment=assignment)
        initial = {}
        lang_category = language_category(assignment.program_language)
        if objs:
            # Seed the form with the tool name of an existing sibling
            # section; commands are stored pickled as [tool, flags, args].
            if lang_category == 0:
                comp_command = pickle.loads(objs[0].compiler_command)
                initial['compiler_command'] = pickle.dumps(
                    [comp_command[0], '', ''])
            elif lang_category == 1:
                comp_command = pickle.loads(objs[0].compiler_command)
                initial['compiler_command'] = pickle.dumps(
                    [comp_command[0], '', ''])
                exe_command = pickle.loads(objs[0].execution_command)
                initial['execution_command'] = pickle.dumps(
                    [exe_command[0], '', ''])
            elif lang_category == 2:
                exe_command = pickle.loads(objs[0].execution_command)
                initial['execution_command'] = pickle.dumps(
                    [exe_command[0], '', ''])
        else:
            # No sibling sections yet: fall back to the language defaults.
            if lang_category == 0:
                comp_command = get_compiler_name(assignment.program_language)
                initial['compiler_command'] = pickle.dumps(
                    [comp_command, '', ''])
            elif lang_category == 1:
                comp_command = get_compiler_name(assignment.program_language)
                initial['compiler_command'] = pickle.dumps(
                    [comp_command, '', ''])
                exe_command = get_interpreter_name(assignment.program_language)
                initial['execution_command'] = pickle.dumps(
                    [exe_command, '', ''])
            elif lang_category == 2:
                exe_command = get_interpreter_name(assignment.program_language)
                initial['execution_command'] = pickle.dumps(
                    [exe_command, '', ''])
        if lang_category == 0:  # Compilation needed. Execution not needed. C and C++
            form = ProgramFormCNotE(initial=initial)
        elif lang_category == 1:  # Compilation and execution needed.
            form = ProgramFormCandE(initial=initial)
        elif lang_category == 2:  # Execution needed. Python and bash
            form = ProgramFormE(initial=initial)
    mark_submissions_false(assignment.id)
    course = assignment.course
    all_assignments = Assignment.objects.filter(course=course).filter(trash=False).order_by('-deadline')
    is_moderator = isCourseModerator(course, request.user)
    if is_moderator:
        assignments = all_assignments
    else:
        # Students only see unhidden, already-published assignments.
        assignments = [a for a in all_assignments if(not a.hide and
                                                     (timezone.now() > a.publish_on if a.publish_on else False))]
    return render_to_response(
        'assignments/createProgram.html',
        {'form': form, 'assignment': assignment,
         'course': course, 'is_moderator': is_moderator, 'assignments': assignments},
        context_instance=RequestContext(request))
@login_required
def editProgram(request, programID):
    '''
    Edit a Program (section) of an assignment. Moderators only.

    POST: validates the language-specific form seeded from the model,
    applies only the changed fields — deleting superseded uploaded files,
    wiping the bulk-added testcase tree (rows plus their input/output
    folders on disk) when the bulk file is cleared or replaced, flagging
    the section for recompilation/re-execution, and marking latest
    submission results stale — then redirects to the program details page.
    GET: renders the edit form seeded from the current model state.

    :param programID: pk of the Program being edited (404 if absent).
    '''
    program = get_object_or_404(Program, pk=programID)
    is_moderator = isCourseModerator(program.assignment.course, request.user)
    if not is_moderator:
        return HttpResponseForbidden("Forbidden 403")
    if request.method == 'POST':
        # form is initialized by model then overwritten by request data and files.
        lang_category = language_category(program.assignment.program_language)
        if lang_category == 0:  # Compilation needed. Execution not needed. C and C++
            form = ProgramFormCNotE(
                request.POST, request.FILES, initial=model_to_dict(program))
        elif lang_category == 1:  # Compilation and execution needed.
            form = ProgramFormCandE(
                request.POST, request.FILES, initial=model_to_dict(program))
        elif lang_category == 2:  # Execution needed. Python and bash
            form = ProgramFormE(request.POST, request.FILES,
                                initial=model_to_dict(program))
        form.assignment = program.assignment
        form.program_model = program
        if form.is_valid():
            # check if new file is uploaded
            if 'program_files' in form.changed_data:  # program_files are changed."
                if program.program_files:  # delete older file if any.
                    program.program_files.delete(save=False)
                # if file is being cleared.
                if not form.cleaned_data['program_files']:
                    form.cleaned_data.pop('program_files')
            if 'makefile' in form.changed_data:
                if program.makefile:
                    program.makefile.delete(save=False)
                if not form.cleaned_data['makefile']:
                    form.cleaned_data.pop('makefile')
            if 'testcase_bulk_add' in form.changed_data:
                if program.testcase_bulk_add:
                    # Remove the stored bulk file and its now-empty folder.
                    folder_name = os.path.join(settings.MEDIA_ROOT, '/'.join(
                        (str(program.testcase_bulk_add)).split('/')[:-1]))
                    program.testcase_bulk_add.delete(save=False)
                    if not os.listdir(folder_name):
                        shutil.rmtree(folder_name)
                    testcaseid = Testcase.objects.filter(program_id=program.id)
                    for row in testcaseid:  # deleting all the testcases under that section on ticking clear
                        folder_name2 = ''
                        folder_name3 = ''
                        # Capture the input/output folders before deleting the
                        # row, then remove each folder if it ended up empty.
                        if row.input_files != '':
                            folder_name2 = os.path.join(settings.MEDIA_ROOT, '/'.join(
                                (str(row.input_files)).split('/')[:-1]))
                        if row.output_files != '':
                            folder_name3 = os.path.join(settings.MEDIA_ROOT, '/'.join(
                                (str(row.output_files)).split('/')[:-1]))
                        row.delete()
                        if folder_name2 != '' and os.listdir(folder_name2) == []:
                            shutil.rmtree(folder_name2)
                        if folder_name3 != '' and os.listdir(folder_name3) == []:
                            shutil.rmtree(folder_name3)
                if not form.cleaned_data['testcase_bulk_add']:
                    form.cleaned_data.pop('testcase_bulk_add')
            # Apply every surviving cleaned field onto the model instance.
            for key in form.cleaned_data.keys():
                setattr(program, key, form.cleaned_data[key])
            program.delete_error_message()
            program.is_sane = True
            # Any build-affecting change re-queues the section for
            # compilation and execution.
            for afield in ['program_files', 'compiler_command', 'makefile', 'execution_command']:
                if afield in form.changed_data:
                    program.compile_now = True
                    program.execute_now = True
                    break
            program.save()
            if 'testcase_bulk_add' in form.changed_data and program.testcase_bulk_add:
                testcase_bulk_add(program)
            # Mark all assignment results to stale if program_files/compiler_command/execution_command changed
            changed_fields = ['program_files']
            if program.compiler_command:
                changed_fields.append('compiler_command')
            if program.execution_command:
                changed_fields.append('execution_command')
            # NOTE(review): this set difference is non-empty when tracked
            # fields were NOT changed — given the comment above, the intended
            # condition was likely the intersection with form.changed_data.
            if set(changed_fields) - set(form.changed_data):
                list_of_assignment_ids = [instance.submission.id for instance in LatestSubmission.objects
                                          .filter(assignment=program.assignment)]
                all_submissions = Upload.objects.filter(assignment=program.assignment, assignment__trash=False,
                                                        pk__in=list_of_assignment_ids).order_by("-uploaded_on")
                AssignmentResults.objects.filter(
                    submission__in=all_submissions).update(is_stale=True)
            mark_submissions_false(program.assignment.id)
            return HttpResponseRedirect(reverse('assignments_detailsprogram', kwargs={'programID': programID}))
    else:
        lang_category = language_category(program.assignment.program_language)
        if lang_category == 0:  # Compilation needed. Execution not needed. C and C++
            form = ProgramFormCNotE(initial=model_to_dict(program))
        elif lang_category == 1:  # Compilation and execution needed.
            form = ProgramFormCandE(initial=model_to_dict(program))
        elif lang_category == 2:  # Execution needed. Python and bash
            form = ProgramFormE(initial=model_to_dict(program))
    mark_submissions_false(program.assignment.id)
    course = program.assignment.course
    all_assignments = Assignment.objects.filter(course=course).filter(trash=False).order_by('-deadline')
    all_programs = Program.objects.filter(assignment=program.assignment)
    is_moderator = isCourseModerator(course, request.user)
    if not is_moderator and program.assignment.hide:
        raise PermissionDenied
    if is_moderator:
        assignments = all_assignments
    else:
        # Students only see unhidden, already-published assignments.
        assignments = [a for a in all_assignments if(not a.hide and
                                                     (timezone.now() > a.publish_on if a.publish_on else False))]
    return render_to_response(
        'assignments/editProgram.html',
        {'form': form, 'program': program, 'course': course, 'assignments': assignments, 'programs': all_programs},
        context_instance=RequestContext(request)
    )
@login_required
def detailProgram(request, programID):
    """Display one program (section) of an assignment, per user profile.

    GET renders the section detail page (testcases, commands, errors).
    An AJAX POST carrying ``getstd`` + ``testID`` returns the stdin/stdout
    of one testcase as JSON; Evaluate-type testcases stay hidden from
    students until the assignment deadline has passed.
    """
    program = get_object_or_404(Program, pk=programID)
    testcases = Testcase.objects.filter(program=program)
    assignment = program.assignment
    # is_due stays None when the assignment has no deadline at all.
    is_due = None
    if assignment.deadline is not None:
        is_due = (timezone.now() >= assignment.deadline)
    course = assignment.course
    has_submitted = Upload.objects.filter(owner=request.user, assignment=assignment)
    all_assignments = Assignment.objects.filter(course=course).filter(trash=False).order_by('-deadline')
    all_programs = Program.objects.filter(assignment=assignment)
    is_moderator = isCourseModerator(course, request.user)
    mode = get_mode(request)
    # Students may not open sections of hidden assignments.
    if not is_moderator and assignment.hide:
        raise PermissionDenied
    if is_moderator:
        assignments = all_assignments
    else:
        # Students only see non-hidden assignments that are already published.
        assignments = [a for a in all_assignments if(not a.hide and
                       (timezone.now() > a.publish_on if a.publish_on else False))]
    compiler_command = get_compilation_command(program)
    execution_command = get_execution_command(program)
    program_errors = None
    if not program.is_sane:
        try:
            program_errors = ProgramErrors.objects.get(program=program)
        except ProgramErrors.DoesNotExist:
            program_errors = None
    # Collect testcase-level errors recorded against this assignment via the
    # generic AssignmentErrors -> TestcaseErrors content-type link.
    testcase_errors = []
    terror_ctype = ContentType.objects.get_for_model(TestcaseErrors)
    for error in AssignmentErrors.objects.filter(assignment=program.assignment, content_type=terror_ctype):
        testcase_errors.extend(
            TestcaseErrors.objects.filter(pk=error.object_id))
    get_params = {'source': 'section', 'id': programID}
    formData = ProgramFormCNotE(initial=model_to_dict(program))
    # NOTE(review): QuerySet.filter() never raises DoesNotExist, so this
    # except clause is dead code; the truthiness check below does the work.
    try:
        checker = Checker.objects.filter(program=program)
    except Checker.DoesNotExist:
        checker = None
    if checker:
        checker = checker[0]
    if request.method == "POST":
        try:
            # AJAX request for the stdin/stdout of a single testcase.
            if 'getstd' in request.POST:
                if request.POST.get('testID'):
                    testcaseid = int(request.POST.get('testID'))
                    testcase = Testcase.objects.filter(id=testcaseid)
                    if testcase:
                        testcase = testcase[0]
                        # Evaluation testcases are hidden from students
                        # until the deadline has passed.
                        if testcase.program.program_type == 'Evaluate' and not is_moderator and not is_due:
                            response = HttpResponse(json.dumps({'stdin': mark_safe("HIDDEN :P"),
                                                                'stdout': mark_safe('HIDDEN :P')}),
                                                    content_type="application/json")
                            return response
                        std_in = ''
                        std_out = ''
                        # Best-effort reads: a missing archive/member leaves
                        # the corresponding stream empty.
                        try:
                            std_in = read_file(readthis=testcase.std_in_file_name,
                                               name=testcase.input_files.file.name)
                            testcase.input_files.close()
                        except Exception:
                            pass
                        try:
                            std_out = read_file(readthis=testcase.std_out_file_name,
                                                name=testcase.output_files.file.name)
                            testcase.output_files.close()
                        except Exception as e:
                            print e
                        std_in = ''.join(std_in)
                        std_out = ''.join(std_out)
                        try:
                            response = HttpResponse(json.dumps({'stdin': mark_safe(std_in),
                                                                'stdout': mark_safe(std_out)}),
                                                    content_type="application/json")
                            return response
                        except Exception as e:
                            # Non-serializable content: fall back to a placeholder.
                            print e
                            response = HttpResponse(json.dumps({'stdin': mark_safe("Can't display"),
                                                                'stdout': mark_safe('Can\'t Display')}),
                                                    content_type="application/json")
                            return response
        finally:
            pass
    return render_to_response(
        'assignments/detailsProgram.html',
        {'programs': all_programs, 'program': program, 'testcases': testcases, 'assignment': assignment,
         'checker': checker, 'assignments': assignments, 'date_time': timezone.now(),
         'program_errors': program_errors, 'compiler_command': compiler_command,
         'execution_command': execution_command, 'course': course, 'is_moderator': is_moderator,
         'is_due': is_due, 'has_submitted': has_submitted, 'get_params': get_params, 'mode': mode,
         'testcase_errors': testcase_errors, 'programID': programID, 'formData': formData},
        context_instance=RequestContext(request)
    )
@login_required
def removeProgram(request, programID):
    """Delete a program (section) and return to its assignment's page.

    Only course moderators may delete; everyone else receives 403.
    """
    program = get_object_or_404(Program, pk=programID)
    parent_assignment = program.assignment
    if not isCourseModerator(parent_assignment.course, request.user):
        return HttpResponseForbidden("Forbidden 403")
    program.delete()
    return HttpResponseRedirect(
        reverse('assignments_details', kwargs={'assignmentID': parent_assignment.id}))
class CreateTestcaseWizard(SessionWizardView):
    """Two-step wizard that creates a Testcase for a program.

    Step 0 collects the testcase fields plus optional input/output archives;
    step 1 lets the moderator pick which member of each archive is the
    stdin/stdout file. On completion all latest submissions of the
    assignment are marked stale so they get re-evaluated.
    """
    file_storage = default_storage
    template_name = 'assignments/createTestcasewizard.html'
    # Class-level defaults; dispatch() fills these per request
    # (resolves "attribute defined outside __init__" lint warnings).
    solution_ready = False
    assignment_id = None
    program_id = None
    testcase_id = None

    def dispatch(self, request, *args, **kwargs):
        """Resolve the target program and gate access to course moderators."""
        program_id = kwargs['programID']
        program = get_object_or_404(Program, pk=program_id)
        # self.solution_ready is used in the form's clean method.
        self.assignment_id = program.assignment.id
        self.program_id = program_id
        if Testcase.objects.filter(program=program):
            self.solution_ready = program.solution_ready
        else:
            # No testcases yet: a solution exists if either program files
            # or an assignment-level model solution were uploaded.
            self.solution_ready = bool(
                program.program_files or program.assignment.model_solution)
        is_moderator = isCourseModerator(
            program.assignment.course, request.user)
        if is_moderator:
            return super(CreateTestcaseWizard, self).dispatch(request, *args, **kwargs)
        return HttpResponseForbidden("Forbidden 403")

    def get_form_kwargs(self, step=None):
        """Feed each step's form the context it needs.

        Step 1 receives the member-name lists of the archives uploaded in
        step 0 so the stdin/stdout select widgets can be populated.
        """
        if step == '0':
            return {'solution_ready': self.solution_ready, 'assignment_id': self.assignment_id,
                    'program_id': self.program_id}
        if step == '1':
            choice_dict = {}
            if self.storage.get_step_files('0'):
                if self.storage.get_step_files('0').get('0-input_files', ""):
                    f_in_obj = self.storage.get_step_files(
                        '0').get('0-input_files')
                    f_in_obj.open()
                    choice_dict['in_file_choices'] = [
                        (a, a) for a in get_file_name_list(fileobj=f_in_obj)]
                if self.storage.get_step_files('0').get('0-output_files', ""):
                    f_out_obj = self.storage.get_step_files(
                        '0').get('0-output_files')
                    f_out_obj.open()
                    choice_dict['out_file_choices'] = [
                        (b, b) for b in get_file_name_list(fileobj=f_out_obj)]
            choice_dict['assignment_id'] = self.assignment_id
            choice_dict['program_id'] = self.program_id
            return choice_dict
        else:
            return super(CreateTestcaseWizard, self).get_form_kwargs(step)

    def get_context_data(self, form, **kwargs):
        """Add the program and display-ready commands to the template context."""
        context = super(CreateTestcaseWizard, self).get_context_data(
            form=form, **kwargs)
        program = Program.objects.get(pk=self.kwargs['programID'])
        compiler_command = get_compilation_command(program)
        execution_command = get_execution_command(program)
        context.update({'program': program, 'compiler_command': compiler_command,
                        'execution_command': execution_command, 'assignment_id': program.assignment.id})
        return context

    def done(self, form_list, **kwargs):
        """Create the Testcase, clean up temp uploads, and mark results stale."""
        frmdict = form_list[0].cleaned_data
        frmdict.update(form_list[1].cleaned_data)
        program = Program.objects.get(pk=self.kwargs['programID'])
        frmdict.update({'program': program})
        Testcase.objects.create(**frmdict)
        # Remove temporary files left behind by the wizard's file storage.
        if self.storage.get_step_files('0'):
            for a in self.storage.get_step_files('0').values():
                try:
                    os.remove(os.path.join(settings.MEDIA_ROOT, a.name))
                except Exception:
                    pass
        # Invalidate cached results: every latest submission must be
        # re-evaluated against the new testcase.
        list_of_assignment_ids = [instance.submission.id for instance in LatestSubmission.objects
                                  .filter(assignment=program.assignment)]
        all_submissions = Upload.objects.filter(assignment=program.assignment, assignment__trash=False,
                                                pk__in=list_of_assignment_ids).order_by("-uploaded_on")
        AssignmentResults.objects.filter(
            submission__in=all_submissions).update(is_stale=True)
        mark_submissions_false(program.assignment.id)
        return HttpResponseRedirect(reverse('assignments_detailsprogram', kwargs={'programID':
                                                                                  self.kwargs['programID']}))
class EditTestcaseWizard(SessionWizardView):
    """Two-step wizard that edits an existing Testcase.

    Mirrors CreateTestcaseWizard, but step 1's stdin/stdout choices fall
    back to the files already stored on the testcase when no new archive
    was uploaded (honouring the 'clear' checkboxes from step 0).
    """
    file_storage = default_storage
    template_name = 'assignments/editTestcasewizard.html'
    # Per-request state, populated by dispatch().
    solution_ready = False
    assignment_id = None
    program_id = None
    testcase_id = None

    def dispatch(self, request, *args, **kwargs):
        """Resolve the testcase under edit and gate access to moderators."""
        testcase_id = kwargs['testcaseID']
        testcase = get_object_or_404(Testcase, pk=testcase_id)
        program = testcase.program
        self.assignment_id = program.assignment.id
        self.program_id = program.id
        self.testcase_id = testcase_id
        # self.solution_ready is used in the form's clean method.
        self.solution_ready = bool(
            program.program_files or program.assignment.model_solution)
        is_moderator = isCourseModerator(
            program.assignment.course, request.user)
        if is_moderator:
            return super(EditTestcaseWizard, self).dispatch(request, *args, **kwargs)
        return HttpResponseForbidden("Forbidden 403")

    def get_form_initial(self, step):
        """Pre-fill both steps from the current testcase row."""
        testcase = Testcase.objects.get(pk=self.kwargs['testcaseID'])
        return model_to_dict(testcase)

    def get_form_kwargs(self, step=None):
        """Build step kwargs.

        Step 1 choices come from the newly uploaded archives when present,
        otherwise from the testcase's stored files — unless the user ticked
        the corresponding 'clear' checkbox in step 0.
        """
        if step == '0':
            return {'solution_ready': self.solution_ready, 'assignment_id': self.assignment_id,
                    'program_id': self.program_id}
        if step == '1':
            choice_dict = {}
            testcase = Testcase.objects.get(pk=self.kwargs['testcaseID'])
            # if there is at least one file.
            if self.storage.get_step_files('0'):
                # if input_file is uploaded.
                if self.storage.get_step_files('0').get('0-input_files', ""):
                    f_in_obj = self.storage.get_step_files(
                        '0').get('0-input_files')
                    f_in_obj.open()
                    choice_dict['in_file_choices'] = [(a, a)
                                                      for a in get_file_name_list(fileobj=f_in_obj)]
                elif testcase.input_files:  # provide options from older file.
                    choice_dict['in_file_choices'] = [(b, b)
                                                      for b in get_file_name_list(fileobj=testcase.input_files.file)]
                # if output_file is uploaded.
                if self.storage.get_step_files('0').get('0-output_files', ""):
                    f_out_obj = self.storage.get_step_files(
                        '0').get('0-output_files')
                    f_out_obj.open()
                    choice_dict['out_file_choices'] = [(b, b)
                                                       for b in get_file_name_list(fileobj=f_out_obj)]
                elif testcase.output_files:  # provide options from older file.
                    choice_dict['out_file_choices'] = [(b, b)
                                                       for b in get_file_name_list(fileobj=testcase.output_files.file)]
            else:  # No file uploaded in step 0
                if '0-input_files-clear' not in self.storage.get_step_data('0') and testcase.input_files:
                    choice_dict['in_file_choices'] = [(b, b)
                                                      for b in get_file_name_list(fileobj=testcase.input_files.file)]
                else:
                    pass
                if '0-output_files-clear' not in self.storage.get_step_data('0') and testcase.output_files:
                    choice_dict['out_file_choices'] = [(b, b)
                                                       for b in get_file_name_list(fileobj=testcase.output_files.file)]
            choice_dict['assignment_id'] = self.assignment_id
            choice_dict['program_id'] = self.program_id
            return choice_dict
        else:
            return super(EditTestcaseWizard, self).get_form_kwargs(step)

    def get_context_data(self, form, **kwargs):
        """Expose the testcase and display-ready commands to the template."""
        context = super(EditTestcaseWizard, self).get_context_data(
            form=form, **kwargs)
        testcase = Testcase.objects.get(pk=self.kwargs['testcaseID'])
        program = testcase.program
        compiler_command = get_compilation_command(program)
        execution_command = get_execution_command(program)
        context.update({'testcase': testcase, 'compiler_command': compiler_command,
                        'execution_command': execution_command})
        return context

    def done(self, form_list, **kwargs):
        """Persist the edited testcase, invalidate results, clean temp files."""
        frmdict = form_list[0].cleaned_data
        # consolidated dict from both steps.
        frmdict.update(form_list[1].cleaned_data)
        testcase = Testcase.objects.get(pk=self.kwargs['testcaseID'])
        # either a new file is being uploaded or the old one is cleared
        if 'input_files' in form_list[0].changed_data:
            if testcase.input_files:  # there was an older file in test-case
                testcase.input_files.delete(save=False)  # delete older file.
            if not form_list[0].cleaned_data['input_files']:  # no new file so do nothing
                form_list[0].cleaned_data.pop('input_files')
        if 'output_files' in form_list[0].changed_data:
            if testcase.output_files:
                testcase.output_files.delete(save=False)
            if not form_list[0].cleaned_data['output_files']:
                form_list[0].cleaned_data.pop('output_files')
        for key in frmdict.keys():  # update database table row.
            setattr(testcase, key, frmdict[key])
        testcase.save()
        # NOTE(review): these set differences are truthy when a tracked
        # field was NOT changed, i.e. results are marked stale unless all
        # tracked fields changed — confirm this is intended (the same
        # pattern recurs elsewhere in this file).
        files_change = set(['input_files', 'output_files']
                           ) - set(form_list[0].changed_data)
        stdIO_change = set(
            ['std_in_file_name', 'std_out_file_name']) - set(form_list[1].changed_data)
        if files_change or stdIO_change:
            testcase = Testcase.objects.get(pk=self.kwargs['testcaseID'])
            list_of_assignment_ids = [instance.submission.id for instance in LatestSubmission.objects
                                      .filter(assignment=testcase.program.assignment)]
            all_submissions = Upload.objects.filter(assignment=testcase.program.assignment, assignment__trash=False,
                                                    pk__in=list_of_assignment_ids).order_by("-uploaded_on")
            AssignmentResults.objects.filter(
                submission__in=all_submissions).update(is_stale=True)
        # Remove temporary files left behind by the wizard's file storage.
        if self.storage.get_step_files('0'):
            for a in self.storage.get_step_files('0').values():
                try:
                    os.remove(os.path.join(settings.MEDIA_ROOT, a.name))
                except Exception:
                    pass
        mark_submissions_false(testcase.program.assignment.id)
        return HttpResponseRedirect(reverse('assignments_detailstestcase',
                                            kwargs={'testcaseID': self.kwargs['testcaseID']}))
def mark_submissions_false(assignment_id):
    """Flag every submission of an assignment for re-evaluation.

    Called after sections or test-cases are added/edited so that all
    uploads are evaluated (and practiced) again.
    """
    assignment = get_object_or_404(Assignment, pk=assignment_id)
    submissions = Upload.objects.filter(
        assignment=assignment,
        assignment__trash=False).order_by("-uploaded_on")
    for upload in submissions:
        upload.to_be_evaluated = True
        upload.to_be_practiced = True
        # Per-instance save keeps any model save() overrides/signals firing.
        upload.save()
@login_required
def detailTestcase(request, testcaseID):
    """Render the detail page for a single test case.

    Hidden assignments are only reachable by course moderators; students
    additionally only see published, non-hidden assignments in the sidebar.
    """
    testcase = get_object_or_404(Testcase, pk=testcaseID)
    assignment = testcase.program.assignment
    course = assignment.course
    course_assignments = Assignment.objects.filter(
        course=course, trash=False).order_by('-deadline')
    is_moderator = isCourseModerator(course, request.user)
    mode = get_mode(request)
    if assignment.hide and not is_moderator:
        raise PermissionDenied
    if is_moderator:
        assignments = course_assignments
    else:
        # Students: drop hidden assignments and those not yet published.
        assignments = [a for a in course_assignments
                       if not a.hide and (timezone.now() > a.publish_on if a.publish_on else False)]
    is_due = None
    if assignment.deadline is not None:
        is_due = timezone.now() >= assignment.deadline
    return render_to_response(
        'assignments/detailsTestcase.html',
        {'testcases': Testcase.objects.filter(program=testcase.program),
         'programs': Program.objects.filter(assignment=assignment),
         'testcase': testcase,
         'assignments': assignments,
         'date_time': timezone.now(),
         'course': course,
         'is_due': is_due,
         'is_moderator': is_moderator,
         'testcase_errors': TestcaseErrors.objects.filter(testcase=testcase),
         'mode': mode,
         'get_params': {'source': 'testcase', 'id': testcaseID}},
        context_instance=RequestContext(request)
    )
@login_required
def removeTestcase(request, testcaseID):
    """Delete a test case and return to its parent program's page.

    Only course moderators may delete; everyone else receives 403.
    """
    testcase = get_object_or_404(Testcase, pk=testcaseID)
    parent_program = testcase.program
    if not isCourseModerator(parent_program.assignment.course, request.user):
        return HttpResponseForbidden("Forbidden 403")
    testcase.delete()
    return HttpResponseRedirect(
        reverse('assignments_detailsprogram', kwargs={'programID': parent_program.id}))
@login_required
def config_safeexec_params(request, assignmentID):
    """Configure safe-exec resource limits for testcases.

    GET shows the form pre-filled with default limits and the testcases
    of the requested scope (one section, one testcase, or the whole
    assignment, selected by the ``source`` GET parameter).
    POST applies the submitted limits to every checked testcase,
    creating or updating a SafeExec row per testcase. Moderator-only.
    """
    assignment = get_object_or_404(Assignment, pk=assignmentID)
    course = assignment.course
    is_moderator = isCourseModerator(course, request.user)
    if not is_moderator:
        return HttpResponseForbidden("Forbidden 403")
    if request.method == 'POST':
        form = SafeExecForm(request.POST)
        source = request.POST.get('page_source', '')
        test_ids = request.POST.getlist('testcases_cbx')
        if form.is_valid():
            for test_id in test_ids:
                testcase_obj = get_object_or_404(Testcase, pk=test_id)
                obj = SafeExec.objects.filter(testcase=testcase_obj)
                if obj:
                    # Existing limits row: bulk-update it in place.
                    form.cleaned_data['testcase'] = testcase_obj
                    obj.update(**form.cleaned_data)
                else:
                    form.cleaned_data['testcase'] = testcase_obj
                    SafeExec.objects.create(**form.cleaned_data)
            return HttpResponseRedirect(reverse('assignments_details', kwargs={'assignmentID': assignmentID}))
    else:
        # Sensible defaults: seconds for times, KB for sizes.
        default_limits = {'cpu_time': 10, 'clock_time': 60,
                          'memory': 32768, 'stack_size': 8192,
                          'child_processes': 0, 'open_files': 512,
                          'file_size': 1024}
        form = SafeExecForm(initial=default_limits)
        source = request.GET.get('source', '')
    # Resolve which testcases to list, based on where the user came from.
    if source == "section":
        section_id = request.GET.get('id', '')
        program = get_object_or_404(Program, pk=section_id)
        test_cases = Testcase.objects.filter(program=program)
        title = program.name
    elif source == "testcase":
        testcase_id = request.GET.get('id', '')
        test_cases = get_object_or_404(Testcase, pk=testcase_id)
        title = test_cases.name
    else:
        # Whole assignment: one queryset per program, in name order.
        programs = Program.objects.filter(
            assignment=assignment).order_by('name')
        test_cases = []
        for a_program in programs:
            test_cases.append(Testcase.objects.filter(
                program=a_program).order_by('name'))
        title = assignment.name
    return render_to_response(
        'assignments/safeexec_params.html',
        {'form': form, 'testcases': test_cases, 'source': source,
         'title': title, 'assignment': assignment},
        context_instance=RequestContext(request)
    )
@login_required
def programList(request):
    """AJAX endpoint: render the HTML list of programs (sections) of an
    assignment, each with its nested test-case links.

    Expects GET param ``asgnid``; returns raw HTML fragments, or the
    string 'Error occurred' on non-AJAX / non-GET access.
    """
    data = ''
    if request.is_ajax() and request.method == 'GET':
        assignmentID = request.GET['asgnid']
        assignment = get_object_or_404(Assignment, pk=assignmentID)
        programs = Program.objects.filter(
            assignment=assignment).order_by('-program_type', 'id')
        if programs:
            for program in programs:
                link = reverse('assignments_detailsprogram',
                               kwargs={'programID': program.id})
                # Format each fragment on its own. The old code ran
                # `%` / str.format over the whole accumulated payload, so a
                # program or testcase name containing '%', '{' or '}' would
                # crash or corrupt every previously built fragment.
                data += ('<a class="list-group-item" href="%s" id="prog_id-%s">'
                         % (link, program.id))
                data += ('<span data-toggle="collapse" data-parent="#a%s" href="#p%s"'
                         ' class="sign programs">'
                         '<span class="glyphicon glyphicon-plus"></span></span> '
                         % (assignmentID, program.id))
                data += program.name + ' (' + program.program_type + ')'
                data += ('<input type="hidden" class="progid" value="%s" />'
                         % program.id)
                data += '<input type="hidden" class="loaded-testcases" value="1" /></a>'
                data += ('<div class="collapse list-group-submenu programs" id="p%s">'
                         % program.id)
                # No refetch needed: `program` is already the model instance.
                testcases = Testcase.objects.filter(
                    program=program).order_by('id')
                if testcases:
                    for testcase in testcases:
                        tlink = reverse('assignments_detailstestcase',
                                        kwargs={'testcaseID': testcase.id})
                        data += ('<a id="testcase_id-%s" class="list-group-item" href="%s">%s</a></li>'
                                 % (testcase.id, tlink, testcase.name))
                else:
                    data += '<a class="list-group-item">No testcases for this program</a>'
                data += '</div>'
        else:
            if not assignment.only_uploads:
                data = '<a class="list-group-item">No programs for this assignment</a>'
    else:
        data = 'Error occurred'
    return HttpResponse(data)
@login_required
def testcaseList(request):
    """AJAX endpoint: render HTML links for all test cases of a program.

    Expects GET param ``progid``; returns 'Error occurred' on non-AJAX /
    non-GET access.
    """
    data = ''
    if request.is_ajax() and request.method == 'GET':
        programID = request.GET['progid']
        program = get_object_or_404(Program, pk=programID)
        testcases = Testcase.objects.filter(program=program).order_by('id')
        if testcases:
            for testcase in testcases:
                link = reverse('assignments_detailstestcase',
                               kwargs={'testcaseID': testcase.id})
                # Format each link on its own: the old code ran str.format
                # over the whole accumulated string, so a testcase name
                # containing '{' or '}' crashed the endpoint.
                data += ('<a class="list-group-item" href="%s">%s</a></li>'
                         % (link, testcase.name))
        else:
            data = '<a class="list-group-item">No testcases for this program</a>'
    else:
        data = 'Error occurred'
    return HttpResponse(data)
@login_required
def createChecker(request, programID):
    """Create a checker (custom output-comparison code) for a program.

    Moderator-only. A program can have at most one checker: if one
    already exists the user is redirected to it with a notice.
    """
    program = get_object_or_404(Program, pk=programID)
    course = program.assignment.course
    is_moderator = isCourseModerator(course, request.user)
    if not is_moderator:
        return HttpResponseForbidden("Forbidden 403")
    # QuerySet.filter() never raises DoesNotExist, so the old try/except
    # around this lookup was dead code; the truthiness check suffices.
    checker = Checker.objects.filter(program=program)
    if checker:
        messages.success(request, 'Checker Code already exists for this section!!',
                         extra_tags='safe')
        return HttpResponseRedirect(reverse('assignments_detailschecker',
                                            kwargs={'checkerID': checker[0].id}))
    if request.method == 'POST':
        form = CheckerCodeForm(request.POST, request.FILES)
        if form.is_valid():
            newChecker = Checker(**form.cleaned_data)
            newChecker.program = program
            newChecker.save()
            return HttpResponseRedirect(reverse('assignments_detailschecker',
                                                kwargs={'checkerID': newChecker.id}))
    else:
        # Default to a python execution-command triple (interpreter, flags, args).
        initial = {}
        initial['execution_command'] = pickle.dumps(['python', '', ''])
        form = CheckerCodeForm(initial=initial)
    return render_to_response(
        'assignments/createChecker.html',
        {'form': form, 'program': program,
         'course': course, 'is_moderator': is_moderator},
        context_instance=RequestContext(request)
    )
@login_required
def editChecker(request, checkerID):
    """Edit a program's checker code. Moderator-only.

    POST saves the form, replaces the checker file when a new one was
    uploaded (or clears it), wipes stored checker error messages, and may
    mark cached assignment results stale so they are re-evaluated.
    """
    checker = get_object_or_404(Checker, pk=checkerID)
    is_moderator = isCourseModerator(
        checker.program.assignment.course, request.user)
    if not is_moderator:
        return HttpResponseForbidden("Forbidden 403")
    if request.method == 'POST':
        # form is initialized by model then overwritten by request data and files.
        form = CheckerCodeForm(request.POST, request.FILES,
                               initial=model_to_dict(checker))
        form.checker_model = checker
        if form.is_valid():
            # check if new file is uploaded
            if 'checker_files' in form.changed_data:  # checker_files changed.
                if checker.checker_files:  # delete older file if any.
                    checker.checker_files.delete(save=False)
                # if the file is being cleared (no replacement uploaded).
                if not form.cleaned_data['checker_files']:
                    form.cleaned_data.pop('checker_files')
            for key in form.cleaned_data.keys():
                setattr(checker, key, form.cleaned_data[key])
            checker.delete_error_message()
            checker.save()
            # Mark all assignment results stale if checker_files/execution_command changed.
            # NOTE(review): the condition below is truthy when a tracked
            # field was NOT in changed_data — the opposite of what the
            # comment above implies. The same pattern recurs elsewhere in
            # this file, so confirm the intent before "fixing" it.
            changed_fields = ['checker_files']
            if checker.execution_command:
                changed_fields.append('execution_command')
            if set(changed_fields) - set(form.changed_data):
                list_of_assignment_ids = [instance.submission.id for instance in LatestSubmission.objects
                                          .filter(assignment=checker.program.assignment)]
                all_submissions = Upload.objects.filter(assignment=checker.program.assignment, assignment__trash=False,
                                                        pk__in=list_of_assignment_ids).order_by("-uploaded_on")
                AssignmentResults.objects.filter(
                    submission__in=all_submissions).update(is_stale=True)
            return HttpResponseRedirect(reverse('assignments_detailschecker', kwargs={'checkerID': checkerID}))
    else:
        form = CheckerCodeForm(initial=model_to_dict(checker))
    program = checker.program
    return render_to_response(
        'assignments/editChecker.html',
        {'form': form, 'checker': checker, 'program': program},
        context_instance=RequestContext(request)
    )
@login_required
def detailChecker(request, checkerID):
    """Moderator-only detail page for a program's checker code."""
    checker = get_object_or_404(Checker, pk=checkerID)
    program = checker.program
    course = program.assignment.course
    is_moderator = isCourseModerator(course, request.user)
    if not is_moderator:
        return HttpResponseForbidden("Forbidden 403")
    try:
        checker_errors = CheckerErrors.objects.get(checker=checker)
    except CheckerErrors.DoesNotExist:
        checker_errors = None  # no errors recorded for this checker
    context = {
        'program': program,
        'checker': checker,
        'assignments': Assignment.objects.filter(
            course=course, trash=False).order_by('-deadline'),
        'assignment': program.assignment,
        'testcases': Testcase.objects.filter(program=program),
        'checker_errors': checker_errors,
        'execution_command': get_execution_command(program),
        'course': course,
        'is_moderator': is_moderator,
    }
    return render_to_response('assignments/detailsChecker.html', context,
                              context_instance=RequestContext(request))
@login_required
def removeChecker(request, checkerID):
    """Delete a checker and redirect to its program's detail page.

    Moderator-only.
    """
    checker = get_object_or_404(Checker, pk=checkerID)
    # Resolve the FK before deleting: the old code read checker.program
    # after checker.delete(), relying on the relation being lazily
    # resolvable from the deleted in-memory instance.
    program = checker.program
    is_moderator = isCourseModerator(
        program.assignment.course, request.user)
    if not is_moderator:
        return HttpResponseForbidden("Forbidden 403")
    checker.delete()
    return HttpResponseRedirect(reverse('assignments_detailsprogram',
                                        kwargs={'programID': program.id}))
@login_required
def readme(request, courseID, topic):
    """Render the moderator-only README/help page for *topic*.

    Unknown topics fall back to the course's assignment index.
    """
    course = get_object_or_404(Course, pk=courseID)
    if not isCourseModerator(course, request.user):
        return HttpResponseForbidden("Forbidden 403")
    if topic in README_LINKS:
        return render_to_response(
            README_LINKS[topic],
            {'course': course},
            context_instance=RequestContext(request)
        )
    return HttpResponseRedirect(reverse('assignments_index', kwargs={'courseID': courseID}))
@login_required
def createAssignmentImport(request, courseID):
    """Create an assignment by importing an uploaded archive.

    Moderator-only. On a valid POST the archive is saved, the assignment
    plus its programs/testcases are built from it, the commands are read
    from its Configuration.txt, and the extracted scratch directories are
    cleaned up before redirecting to the new assignment.
    """
    course = get_object_or_404(Course, pk=courseID)
    is_moderator = isCourseModerator(course, request.user)
    if not is_moderator:
        return HttpResponseForbidden("Forbidden 403")
    if request.method == 'POST':
        form = AssignmentImportForm(request.POST, request.FILES)
        form.this_course = course
        if form.is_valid():
            new_assignment_import = AssignmentImport(**form.cleaned_data)
            new_assignment_import.creater = request.user
            new_assignment_import.course = course
            # The archive's base name (without extension) is also the name
            # of the directory the extraction step leaves behind.
            file_name = request.FILES['assignment_archive'].name
            file_name = file_name.split('.')[0]
            new_assignment_import.save()
            new_assignment = createAssignmentFromArchive(
                request, new_assignment_import.assignment_archive, course)
            new_assignment = addCommand(
                request, new_assignment_import.assignment_archive, course, new_assignment)
            # Remove extraction leftovers from both candidate locations.
            if os.path.exists(file_name) and os.path.isdir(file_name):
                shutil.rmtree(file_name)
            if os.path.exists("elearning_academy/"+file_name) and os.path.isdir("elearning_academy/"+file_name):
                shutil.rmtree("elearning_academy/"+file_name)
            return HttpResponseRedirect(reverse('assignments_details', kwargs={'assignmentID': new_assignment.id}))
    else:
        form = AssignmentImportForm()
    return render_to_response(
        'assignments/createAssignmentImport.html',
        {'form': form, 'course': course, 'is_moderator': is_moderator},
        context_instance=RequestContext(request)
    )
@login_required
def download_demo_assignment_files(request):
    """Serve the sample bulk-assignment archive to an instructor.

    Streams ``assignment_sample.tar.gz`` from MEDIA_ROOT; the
    X-Accel-Redirect header lets the front-end server (nginx) take over
    the actual file transfer when configured.
    """
    file_name = "assignment_sample.tar.gz"
    # os.path.join works whether or not MEDIA_ROOT has a trailing slash;
    # the old '+' concatenation broke when the slash was missing.
    file_path = os.path.join(settings.MEDIA_ROOT, file_name)
    # guess_type returns a (type, encoding) tuple -- only the type string
    # is a valid Content-Type header value (the old code sent the tuple).
    mime = mimetypes.guess_type(file_path)[0] or 'application/octet-stream'
    # Open the archive in binary mode: it is not text.
    wrapper = FileWrapper(open(file_path, "rb"))
    response = HttpResponse(wrapper, content_type=mime)
    response['Content-Length'] = os.stat(file_path).st_size
    response['Content-Disposition'] = 'inline; filename=%s' % smart_str(
        os.path.basename(file_name))
    response['X-Accel-Redirect'] = smart_str(os.path.join('/media', file_name))
    return response
@login_required
def createAssignmentScript(request, courseID):
    """Create an assignment from an uploaded meta JSON + solution archive.

    Moderator-only. The meta file drives creation of the assignment and
    its programs/testcases (createAssignmentFromJson /
    createProgramTestcasesFromJson); the archive is extracted into a
    temporary directory that those helpers read from.
    """
    course = get_object_or_404(Course, pk=courseID)
    is_moderator = isCourseModerator(course, request.user)
    if not is_moderator:
        return HttpResponseForbidden("Forbidden 403")
    if request.method == 'POST':
        form = AssignmentMetaForm(request.POST, request.FILES)
        form.this_course = course
        if form.is_valid():
            new_assignment_script = AssignmentScript(**form.cleaned_data)
            new_assignment_script.creater = request.user
            new_assignment_script.course = course
            new_assignment_script.save()
            metajson = json.load(new_assignment_script.meta_file.file)
            # NOTE(review): os.chdir changes the process-wide working
            # directory and is never restored here -- risky under a
            # multi-threaded server; confirm nothing else relies on cwd.
            temp_dir = tempfile.mkdtemp(prefix="solution")
            os.chdir(temp_dir)
            extract_or_copy(
                src=new_assignment_script.assignment_archive.file.name, dest=os.getcwd())
            new_assignment_script.assignment_archive.close()
            # Descend into the single top-level directory of the archive.
            next_dir = os.listdir(os.getcwd())
            os.chdir(next_dir[0])
            new_assignment = createAssignmentFromJson(
                request, metajson, course)
            new_assignment.save()
            createProgramTestcasesFromJson(request, new_assignment, metajson)
            return HttpResponseRedirect(reverse('assignments_details', kwargs={'assignmentID': new_assignment.id}))
    else:
        form = AssignmentMetaForm()
    return render_to_response(
        'assignments/createAssignmentScript.html',
        {'form': form, 'course': course, 'is_moderator': is_moderator},
        context_instance=RequestContext(request)
    )
@login_required
def addCommand(request, importedTar, course, assignment):
    """Parse Configuration.txt from an imported archive and apply its
    compiler/execution commands, descriptions, files and testcase
    metadata to the already-created assignment.

    The file is a line-oriented ``Keyword: value`` format: each
    ``SectionType`` header starts a section block whose fields are
    consumed until all expected flags are set, followed by zero or more
    ``TestcaseName`` blocks. Returns the (saved) assignment.
    """
    # Derive the assignment folder name from the archive file name.
    temp = importedTar.name.split('/')
    tar_name = temp[-1].split('.')
    tar_name = tar_name[0]
    asgn_name = tar_name
    temp = os.path.join(settings.MEDIA_ROOT, str(importedTar))
    # Keep only the first five path components as the extraction target.
    temp_dirarr = temp.split("/")
    level = 0
    temp_dir = ''
    for each_dir_element in temp_dirarr:
        if level <= 4:
            temp_dir = temp_dir + each_dir_element + '/'
        level += 1
    temp_dir = temp_dir[:-1]
    extract_or_copy(src=temp, dest=temp_dir)
    # temp_dir now indicates the assignment folder
    temp_dir = temp_dir + '/' + asgn_name
    desc_dir = temp_dir + '/Configuration/'
    filename = desc_dir + 'Configuration.txt'
    lines = [line.rstrip('\n') for line in open(filename)]
    # NOTE(review): reassigning `lineno` inside a `for lineno in range(...)`
    # loop does not advance the outer iteration, so section blocks may be
    # visited more than once -- confirm this re-parsing is harmless.
    for lineno in range(len(lines)):
        if lines[lineno].split():
            if lines[lineno].split(':')[0].strip() == "SectionType":
                programType = lines[lineno].split(':')[1].strip()
                lineno = lineno + 1
                program = None
                compiler_command = None
                execution_command = None
                description = None
                # Flags track which of the section's fields were seen.
                section_name_flag = 0
                comp_command_flag = 0
                exec_command_flag = 0
                description_flag = 0
                # Consume lines until name, description and at least one
                # of compiler/execution command have been read.
                while(lineno < len(lines) and
                      (not section_name_flag or (not comp_command_flag and
                       not exec_command_flag) or not description_flag)):
                    if lines[lineno].split():
                        keyword = lines[lineno].split(':', 1)[0].strip()
                        value = lines[lineno].split(':', 1)[1].strip()
                        if keyword == "SectionName":
                            program = value
                            section_name_flag = 1
                        if keyword == "CompilerCommand":
                            compiler_command = value
                            comp_command_flag = 1
                        if keyword == "ExecutionCommand":
                            execution_command = value
                            exec_command_flag = 1
                        if keyword == "Description":
                            description = value.strip("\"")
                            description_flag = 1
                    lineno = lineno + 1
                obj = Program.objects.filter(
                    assignment=assignment, program_type=programType, name=program)
                editprogram = get_object_or_404(Program, pk=obj[0].id)
                editprogram.description = description
                # Attach makefile.txt if present in the section folder.
                temp_dir1 = temp_dir + '/' + programType + '/' + program
                if os.path.exists(temp_dir1):
                    for f in os.listdir(temp_dir1):
                        if str(f) == "makefile.txt":
                            fd = open(str(temp_dir1) + '/' + str(f))
                            editprogram.makefile.save(str(f), File(fd))
                # Attach every file from the section's program_files folder.
                temp_dir1 = temp_dir + '/' + programType + '/' + program + '/program_files'
                if os.path.exists(temp_dir1):
                    for f in os.listdir(temp_dir1):
                        fd = open(str(temp_dir1) + '/' + str(f))
                        editprogram.program_files.save(str(f), File(fd))
                # Commands are stored pickled as [command, flags, args]
                # triples, split on '|' from the config value.
                if comp_command_flag:
                    if compiler_command != '' and len(compiler_command.split('|')) > 1:
                        editprogram.compiler_command = pickle.dumps([compiler_command.split('|')[0].strip(" "),
                                                                     compiler_command.split(
                                                                         '|')[1].strip(" "),
                                                                     compiler_command.split('|')[2].strip(" ")])
                if exec_command_flag:
                    if execution_command != '' and len(execution_command.split('|')) > 1:
                        editprogram.execution_command = pickle.dumps([execution_command.split('|')[0].strip(" "),
                                                                      execution_command.split(
                                                                          '|')[1].strip(" "),
                                                                      execution_command.split('|')[2].strip(" ")])
                editprogram.save()
                # Consume this section's TestcaseName blocks until the next
                # SectionType header (or end of file).
                while True:
                    if lineno >= len(lines):
                        break
                    if lines[lineno].split():
                        if lines[lineno].split(':')[0].strip() == "SectionType":
                            lineno = lineno - 1
                            break
                        if lines[lineno].split(':')[0].strip() == "TestcaseName":
                            testcase_name = lines[lineno].split(':')[1].strip()
                            lineno = lineno + 1
                            command_line_args = None
                            marks = None
                            description = None
                            keep_partial_marks = False
                            args_flag = 0
                            marks_flag = 0
                            description_flag = 0
                            keep_partial_marks_flag = 0
                            # Read the testcase block until all four
                            # expected fields have been seen.
                            while(lineno < len(lines) and
                                  (not args_flag or not marks_flag or not
                                   description_flag or not keep_partial_marks_flag)):
                                if lines[lineno].split():
                                    keyword = lines[lineno].split(':', 1)[
                                        0].strip()
                                    try:
                                        value = lines[lineno].split(':', 1)[
                                            1].strip()
                                    except Exception:
                                        # Keyword line without a value part.
                                        value = ''
                                    if keyword == "CommandLineArguments":
                                        command_line_args = value
                                        args_flag = 1
                                    if keyword == "Marks":
                                        marks = value
                                        marks_flag = 1
                                    if keyword == "Description":
                                        description = value.strip("\"")
                                        description_flag = 1
                                    if keyword == "TestcaseLevelPartialMarkingScheme?":
                                        keep_partial_marks = (value == "True")
                                        keep_partial_marks_flag = 1
                                lineno = lineno + 1
                            # NOTE(review): `obj` is a QuerySet here, not a
                            # Program instance -- confirm `program=obj`
                            # resolves as intended in this Django version.
                            obj1 = Testcase.objects.filter(
                                program=obj, name=str(testcase_name))
                            if obj1:
                                editTestcase = get_object_or_404(
                                    Testcase, pk=obj1[0].id)
                                editTestcase.description = description
                                editTestcase.marks = marks
                                editTestcase.keep_partial_marks = keep_partial_marks
                                editTestcase.command_line_args = command_line_args
                                editTestcase.save()
                    lineno = lineno + 1
    assignment.save()
    return assignment
@login_required
def createAssignmentFromArchive(request, importedTar, course):
    '''
    Create an Assignment for *course* from an uploaded tar archive.

    The archive must contain a top-level folder named after the archive,
    holding Configuration/Configuration.txt ("Keyword: value" lines) plus
    optional Documents/, Helper-code/, Solution-code/, Evaluate/ and
    Practice/ folders.  Returns the saved Assignment.
    '''
    # Assignment name = archive file name without its extension.
    temp = importedTar.name.split('/')
    tar_name = temp[-1].split('.')
    tar_name = tar_name[0]
    asgn_name = tar_name
    temp = os.path.join(settings.MEDIA_ROOT, str(importedTar))
    # Keep only the first five path components as the extraction target.
    # NOTE(review): assumes a fixed MEDIA_ROOT depth — confirm against the
    # upload path layout.
    temp_dirarr = temp.split("/")
    level = 0
    temp_dir = ''
    for each_dir_element in temp_dirarr:
        if level <= 4:
            temp_dir = temp_dir + each_dir_element + '/'
        level += 1
    temp_dir = temp_dir[:-1]
    extract_or_copy(src=temp, dest=temp_dir)
    # temp_dir now indicates the assignment folder
    temp_dir = temp_dir + '/' + asgn_name
    desc_dir = temp_dir + '/Configuration/'
    filename = desc_dir + 'Configuration.txt'
    lines = [line.rstrip('\n') for line in open(filename)]
    type_of_assgn = None
    description = None
    publish_date = None
    soft_deadline = None
    freezing_deadline = None
    assignment_duration = None
    extra_duration = None
    language = None
    student_files = None
    assignment_duration = None
    extra_duration = None
    # Parse "Keyword: value" pairs; boolean flags accept "True"/"true".
    # NOTE(review): flags such as execution_time_allowed / force_notify are
    # left unbound if their keyword is missing from the file — confirm the
    # configuration always supplies every keyword used below.
    for line in lines:
        if line.strip():
            if len(line.split(':', 1)) >= 2:
                keyword = line.split(':', 1)[0].strip()
                value = line.split(':', 1)[1].strip()
                if keyword == "AssignmentType":
                    type_of_assgn = value
                elif keyword == "PublishType":
                    publish_type = value
                elif keyword == "Description":
                    description = value.replace(r"\r\n", "\r\n").strip("\"")
                elif keyword == "AssignmentDuration":
                    # Durations are "HH:MM:SS".
                    values = value.split(":")
                    assignment_duration = timedelta(hours=int(values[0]), minutes=int(values[1]),
                                                    seconds=int(values[2]))
                elif keyword == "ExtraFreezingTime":
                    values = value.split(":")
                    extra_duration = timedelta(hours=int(values[0]), minutes=int(
                        values[1]), seconds=int(values[2]))
                elif keyword == "PublishOn":
                    # Dates in the file are local time; store UTC.
                    deadline_str = local2utc(value)
                    publish_date = deadline_str
                elif keyword == "Deadline":
                    deadline_str = local2utc(value)
                    soft_deadline = deadline_str
                elif keyword == "FreezingDeadline":
                    deadline_str = local2utc(value)
                    freezing_deadline = deadline_str
                elif keyword == "CalculateExecutionTime?":
                    execution_time_allowed = (
                        (value == "True") or (value == "true"))
                elif keyword == "SendGradingEmailsToAll?":
                    force_notify = ((value == "True") or (value == "true"))
                elif keyword == "AdvanceCorrectnessChecker?":
                    correctness = ((value == "True") or (value == "true"))
                elif keyword == "OrderIsImportant?":
                    order = ((value == "True") or (value == "true"))
                elif keyword == "EnterValueOfErrorRate":
                    error = value
                elif keyword == "SeeHowWellStudentsIndentTheirCode?":
                    indentation = ((value == "True") or (value == "true"))
                elif keyword == "GraphicsProgram?":
                    graphics = ((value == "True") or (value == "true"))
                elif keyword == "ProgrammingLanguage":
                    language = value
                elif keyword == "FilesToBeSubmitted":
                    student_files = value
            # Stop parsing once every mandatory field has been seen
            # (either explicit dates or a duration pair is acceptable).
            if(type_of_assgn and description and
               ((publish_date and soft_deadline and
                 freezing_deadline) or (assignment_duration and extra_duration)) and language and student_files):
                break
    new_assignment = Assignment(name=asgn_name, publish_type=publish_type,
                                type_of_lab=type_of_assgn, program_language=language,
                                duration=assignment_duration, freezing_duration=extra_duration,
                                deadline=soft_deadline, freezing_deadline=freezing_deadline,
                                publish_on=publish_date, student_program_files=student_files,
                                description=description.replace("\\n", "\n"), bulk_add=importedTar,
                                execution_time_allowed=execution_time_allowed, hide=True,
                                force_notify=force_notify, correctness=correctness,
                                error=error, indentation=indentation, graphics_program=graphics,
                                order=order)
    new_assignment.course = course
    new_assignment.creater = request.user
    # Next free serial number within the course (ignoring trashed ones).
    new_assignment.serial_number = (Assignment.objects.filter(course=course).filter(trash=False).
                                    aggregate(Max('serial_number'))
                                    ['serial_number__max'] or 0) + 1
    # Attach any bundled documents / helper code / model solution files.
    documents = temp_dir + '/Documents/'
    if os.path.exists(documents):
        for f in os.listdir(documents):
            fd = open(str(documents) + '/' + str(f))
            new_assignment.document.save(str(f), File(fd))
    helper_code = temp_dir + '/Helper-code/'
    if os.path.exists(helper_code):
        for f in os.listdir(helper_code):
            fd = open(str(helper_code) + '/' + str(f))
            new_assignment.helper_code.save(str(f), File(fd))
    solution_code = temp_dir + '/Solution-code/'
    if os.path.exists(solution_code):
        for f in os.listdir(solution_code):
            fd = open(str(solution_code) + '/' + str(f))
            new_assignment.model_solution.save(str(f), File(fd))
    new_assignment.save()
    # Sections/testcases, if present, are imported by the bulk loader.
    Evaluate = temp_dir + '/Evaluate/'
    Practice = temp_dir + '/Practice/'
    if os.path.exists(Evaluate) or os.path.exists(Practice):
        section_bulk_add(new_assignment)
    return new_assignment
@login_required
def createAssignmentFromJson(request, jsonobject, course):
    '''
    Create an Assignment for *course* from a JSON description.

    Expected keys: assignment_name, soft_deadline, freezing_deadline,
    publish_date (all "YYYY MM DD HH:MM"), language, student_files,
    asgn_description, and optional documents / helper_code / solution_code
    file paths.  Returns the saved Assignment.
    '''
    def _parse_stamp(stamp):
        # "YYYY MM DD HH:MM" -> datetime.  The same format was previously
        # parsed four times inline; factored out for clarity.
        parts = stamp.split()
        clock = parts[3].split(":")
        return DateTime.datetime(int(parts[0]), int(parts[1]),
                                 int(parts[2]), int(clock[0]), int(clock[1]))
    asgn_name = jsonobject["assignment_name"]
    soft_deadline = _parse_stamp(jsonobject["soft_deadline"])
    freezing_deadline = _parse_stamp(jsonobject["freezing_deadline"])
    publish_date = _parse_stamp(jsonobject["publish_date"])
    language = jsonobject["language"]
    student_files = jsonobject["student_files"]
    # Optional file attachments (paths on disk).
    documents = jsonobject.get("documents")
    helper_code = jsonobject.get("helper_code")
    solution_code = jsonobject.get("solution_code")
    description = jsonobject["asgn_description"]
    new_assignment = Assignment(name=asgn_name, program_language=language, deadline=soft_deadline,
                                freezing_deadline=freezing_deadline, publish_on=publish_date,
                                student_program_files=student_files, description=description)
    new_assignment.course = course
    new_assignment.creater = request.user
    # Next free serial number within the course (ignoring trashed ones).
    new_assignment.serial_number = (Assignment.objects.filter(course=course).filter(trash=False).
                                    aggregate(Max('serial_number'))
                                    ['serial_number__max'] or 0) + 1
    if documents:
        with open(documents) as f:
            new_assignment.document.save(documents, File(f))
    if helper_code:
        with open(helper_code) as f:
            new_assignment.helper_code.save(helper_code, File(f))
    if solution_code:
        with open(solution_code) as f:
            new_assignment.model_solution.save(solution_code, File(f))
    new_assignment.save()
    return new_assignment
@login_required
def createProgramTestcasesFromJson(request, assignment, jsonobject):
    '''
    Create Program (section) rows and their Testcase rows for *assignment*
    from a JSON description.

    *jsonobject* may contain a "sections" list; each section may contain a
    "testcases" list.  Commands are pickled before storage; file paths in
    the JSON are opened from disk and re-saved through Django's storage.
    '''
    if "sections" in jsonobject:
        for progjson in jsonobject["sections"]:
            section_name = progjson["section_name"]
            section_type = progjson["section_type"]
            # Compilation/execution commands are stored pickled (matches
            # how the rest of the app reads them back).
            if "compilation_command" in progjson:
                compilation_command = pickle.dumps(
                    progjson["compilation_command"])
            else:
                compilation_command = None
            if "execution_command" in progjson:
                execution_command = pickle.dumps(progjson["execution_command"])
            else:
                execution_command = None
            if "section_files" in progjson:
                program_files = progjson["section_files"]
            else:
                program_files = None
            if "sec_description" in progjson:
                description = progjson["sec_description"]
            else:
                description = None
            new_program = Program(name=section_name, program_type=section_type)
            if program_files:
                with open(program_files) as f:
                    new_program.program_files.save(program_files, File(f))
            if compilation_command:
                new_program.compiler_command = compilation_command
            if execution_command:
                new_program.execution_command = execution_command
            if description:
                new_program.description = description
            new_program.assignment = assignment
            # Freshly imported sections are assumed sane and queued for
            # immediate compile/execute.
            new_program.is_sane = True
            new_program.compile_now = True
            new_program.execute_now = True
            new_program.language = assignment.program_language
            new_program.save()
            if "testcases" in progjson:
                for testjson in progjson["testcases"]:
                    test_name = testjson["testcase_name"]
                    # stdin/stdout file names only make sense when the
                    # corresponding archive is present.
                    if "input_files" in testjson:
                        input_files = testjson["input_files"]
                        std_input = testjson["std_in_file_name"]
                    else:
                        input_files = None
                        std_input = None
                    if "output_files" in testjson:
                        output_files = testjson["output_files"]
                        std_output = testjson["std_out_file_name"]
                    else:
                        output_files = None
                        std_output = None
                    if "command_line_args" in testjson:
                        command_line_args = testjson["command_line_args"]
                    else:
                        command_line_args = None
                    if "marks" in testjson:
                        marks = testjson["marks"]
                    else:
                        marks = None
                    if "test_description" in testjson:
                        tdescription = testjson["test_description"]
                    else:
                        tdescription = None
                    new_testcase = Testcase(
                        name=test_name, program=new_program)
                    # The literal string "None" mirrors how absent stdin/
                    # stdout names are represented elsewhere in the app.
                    if std_input:
                        new_testcase.std_in_file_name = std_input
                    else:
                        new_testcase.std_in_file_name = "None"
                    if std_output:
                        new_testcase.std_out_file_name = std_output
                    else:
                        new_testcase.std_out_file_name = "None"
                    if command_line_args:
                        new_testcase.command_line_args = command_line_args
                    if marks:
                        new_testcase.marks = marks
                    if tdescription:
                        # NOTE(review): this assigns an attribute named
                        # "tdescription" — verify the model field isn't
                        # actually called "description".
                        new_testcase.tdescription = tdescription
                    if input_files:
                        with open(input_files) as f:
                            new_testcase.input_files.save(input_files, File(f))
                    if output_files:
                        with open(output_files) as f:
                            new_testcase.output_files.save(
                                output_files, File(f))
                    new_testcase.save()
def file_download(file_recv):
    '''
    Serve *file_recv* (a Django file field) inline via nginx
    X-Accel-Redirect.

    Returns an HttpResponse streaming the file; the final Content-Type is
    forced to text/plain.
    '''
    # Fix: removed the dead extension check
    # ("if ext in ['tar', ...]: pass") — it had no effect.
    try:
        mime = mimetypes.guess_type(file_recv.path)
    except StandardError:
        mime = make_tuple('application/octet-stream')
    wrapper = FileWrapper(open(file_recv.path, "r"))
    response = HttpResponse(wrapper, content_type=mime)
    response['Content-Length'] = os.stat(file_recv.path).st_size
    # NOTE: deliberately overrides the guessed mime type above.
    response['Content-Type'] = 'text/plain'
    response['Content-Disposition'] = 'inline; filename=%s' % smart_str(
        os.path.basename(file_recv.name))
    response['X-Accel-Redirect'] = smart_str(
        os.path.join('/media', file_recv.name))
    return response
def file_download_pdf(pdf_file):
    """Serve *pdf_file* inline as application/pdf via nginx X-Accel-Redirect."""
    try:
        mime = mimetypes.guess_type(pdf_file.path)
    except StandardError:
        mime = make_tuple('application/octet-stream')
    handle = open(pdf_file.path, "r")
    response = HttpResponse(FileWrapper(handle), content_type=mime)
    basename = os.path.basename(pdf_file.name)
    response['Content-Length'] = os.stat(pdf_file.path).st_size
    # The final type is always PDF, whatever mimetypes guessed.
    response['Content-Type'] = 'application/pdf'
    response['Content-Disposition'] = 'inline; filename=%s' % smart_str(basename)
    response['X-Accel-Redirect'] = smart_str(
        os.path.join('/media', pdf_file.name))
    return response
def file_download_nonpdf(nonpdf_file):
    """Serve a non-PDF file (.txt, .c, .cpp, .java, .py, ...) inline as
    text/plain via nginx X-Accel-Redirect."""
    try:
        mime = mimetypes.guess_type(nonpdf_file.path)
    except StandardError:
        mime = make_tuple('application/octet-stream')
    handle = open(nonpdf_file.path, "r")
    response = HttpResponse(FileWrapper(handle), content_type=mime)
    basename = os.path.basename(nonpdf_file.name)
    response['Content-Length'] = os.stat(nonpdf_file.path).st_size
    # Everything non-PDF is rendered as plain text in the browser.
    response['Content-Type'] = 'text/plain'
    response['Content-Disposition'] = 'inline; filename=%s' % smart_str(basename)
    response['X-Accel-Redirect'] = smart_str(
        os.path.join('/media', nonpdf_file.name))
    return response
@login_required
def solutionDownload(request, assignmentID):
    '''
    Download the model-solution file of an assignment.

    Moderators may download at any time; other users only after the
    deadline has passed.
    '''
    assignment = get_object_or_404(Assignment, pk=assignmentID)
    course = assignment.course
    is_moderator = isCourseModerator(course, request.user)
    # Fix: is_due was previously only assigned when the assignment had a
    # deadline, so a deadline-less assignment raised UnboundLocalError for
    # non-moderators.  Treat "no deadline" as "not yet due".
    is_due = (assignment.deadline is not None and
              timezone.now() >= assignment.deadline)
    if not is_moderator and not is_due:
        return HttpResponseForbidden("Forbidden 403")
    if not assignment.model_solution:
        return HttpResponseNotFound("File not found")
    return file_download(assignment.model_solution)
@login_required
def documentDownload(request, assignmentID):
    '''
    Serve an assignment's additional document to enrolled users.
    Unpublished assignments are hidden from non-moderators.
    '''
    assignment = get_object_or_404(Assignment, pk=assignmentID)
    course = assignment.course
    if not isEnrolledInCourse(course, request.user):
        return HttpResponseForbidden("Forbidden 403")
    is_moderator = isCourseModerator(course, request.user)
    if not is_moderator and timezone.now() < assignment.publish_on:
        return HttpResponseNotFound("Assignment not published")
    if not assignment.document:
        return HttpResponseNotFound("File not found")
    # PDFs get a pdf content type; anything else is served as plain text.
    extension = assignment.document.name.split('.')[-1]
    if extension == 'pdf':
        return file_download_pdf(assignment.document)
    return file_download_nonpdf(assignment.document)
@login_required
def helperCodeDownload(request, assignmentID):
    '''
    Serve an assignment's helper-code file to enrolled users.
    Unpublished assignments are hidden from non-moderators.
    '''
    assignment = get_object_or_404(Assignment, pk=assignmentID)
    course = assignment.course
    if not isEnrolledInCourse(course, request.user):
        return HttpResponseForbidden("Forbidden 403")
    is_moderator = isCourseModerator(course, request.user)
    if not is_moderator and timezone.now() < assignment.publish_on:
        return HttpResponseNotFound("Assignment not published")
    if not assignment.helper_code:
        return HttpResponseNotFound("File not found")
    return file_download_nonpdf(assignment.helper_code)
@login_required
def programFileDownload(request, programID):
    '''
    Serve a section's program file to enrolled users.

    Students may fetch it only for Practice sections or after the deadline;
    moderators always may.
    '''
    program = get_object_or_404(Program, pk=programID)
    assignment = program.assignment
    deadline = assignment.deadline
    is_due = None if deadline is None else timezone.now() >= deadline
    course = assignment.course
    if not isEnrolledInCourse(course, request.user):
        return HttpResponseForbidden("Forbidden 403")
    if not program.program_files:
        return HttpResponseNotFound("File not found")
    may_download = (isCourseModerator(course, request.user)
                    or program.program_type == "Practice" or is_due)
    if may_download:
        return file_download(program.program_files)
    return HttpResponseForbidden("Forbidden 403")
@login_required
def makefileDownload(request, programID):
    '''
    Serve a section's makefile to enrolled users.

    Students may fetch it only for Practice sections or after the deadline;
    moderators always may.
    '''
    program = get_object_or_404(Program, pk=programID)
    assignment = program.assignment
    deadline = assignment.deadline
    is_due = None if deadline is None else timezone.now() >= deadline
    course = assignment.course
    if not isEnrolledInCourse(course, request.user):
        return HttpResponseForbidden("Forbidden 403")
    if not program.makefile:
        return HttpResponseNotFound("File not found")
    may_download = (isCourseModerator(course, request.user)
                    or program.program_type == "Practice" or is_due)
    if may_download:
        return file_download_nonpdf(program.makefile)
    return HttpResponseForbidden("Forbidden 403")
def extract_file(filename, path):
    '''
    Extract the gzip-compressed tar archive *filename* into directory *path*.
    '''
    archive = tarfile.open(filename, 'r:gz')
    try:
        archive.extractall(path)
    finally:
        # Always release the archive handle, even if extraction fails.
        archive.close()
@login_required
def testcaseInputDownload(request, testcaseID):
    '''
    Serve the input file of a single testcase.

    Students may fetch it only for Practice sections or after the deadline;
    moderators always may.
    '''
    testcase = get_object_or_404(Testcase, pk=testcaseID)
    program = testcase.program
    assignment = program.assignment
    deadline = assignment.deadline
    is_due = None if deadline is None else timezone.now() >= deadline
    course = assignment.course
    if not isEnrolledInCourse(course, request.user):
        return HttpResponseForbidden("Forbidden 403")
    if not testcase.input_files:
        return HttpResponseNotFound("File not found")
    may_download = (isCourseModerator(course, request.user)
                    or program.program_type == "Practice" or is_due)
    if may_download:
        return file_download(testcase.input_files)
    return HttpResponseForbidden("Forbidden 403")
@login_required
def testcaseOutputDownload(request, testcaseID):
    '''
    Serve the expected-output file of a single testcase.

    Students may fetch it only for Practice sections or after the deadline;
    moderators always may.
    '''
    testcase = get_object_or_404(Testcase, pk=testcaseID)
    program = testcase.program
    assignment = program.assignment
    deadline = assignment.deadline
    is_due = None if deadline is None else timezone.now() >= deadline
    course = assignment.course
    if not isEnrolledInCourse(course, request.user):
        return HttpResponseForbidden("Forbidden 403")
    if not testcase.output_files:
        return HttpResponseNotFound("File not found")
    may_download = (isCourseModerator(course, request.user)
                    or program.program_type == "Practice" or is_due)
    if may_download:
        return file_download(testcase.output_files)
    return HttpResponseForbidden("Forbidden 403")
@login_required
def chekerDownload(request, checkerID):
    '''
    Serve a checker file to enrolled users.

    Students may fetch it only for Practice sections or after the deadline;
    moderators always may.  (Name kept as-is — it is referenced by URLconf.)
    '''
    checker = get_object_or_404(Checker, pk=checkerID)
    program = checker.program
    assignment = program.assignment
    deadline = assignment.deadline
    is_due = None if deadline is None else timezone.now() >= deadline
    course = assignment.course
    if not isEnrolledInCourse(course, request.user):
        return HttpResponseForbidden("Forbidden 403")
    if not checker.checker_files:
        return HttpResponseNotFound("File not found")
    may_download = (isCourseModerator(course, request.user)
                    or program.program_type == "Practice" or is_due)
    if may_download:
        return file_download(checker.checker_files)
    return HttpResponseForbidden("Forbidden 403")
@login_required
def autotestcase(request, programID):
    """
    Auto-generate testcases for a section.

    GET renders one of three forms (number / array / matrix input, chosen
    from the URL path); POST collects the generation specification and
    hands it to create_output.CreateTestcase, then redirects back to the
    section detail page.
    """
    program = get_object_or_404(Program, pk=programID)
    testcases = Testcase.objects.filter(program=program)
    assignment = program.assignment
    is_due = None
    if assignment.deadline is not None:
        is_due = (timezone.now() >= assignment.deadline)
    course = assignment.course
    has_submitted = Upload.objects.filter(
        owner=request.user, assignment=assignment)
    all_assignments = Assignment.objects.filter(
        course=course).filter(trash=False).order_by('-deadline')
    is_moderator = isCourseModerator(course, request.user)
    # Non-moderators only see published, non-hidden assignments.
    if is_moderator:
        assignments = all_assignments
    else:
        assignments = [a for a in all_assignments if(not a.hide and
                                                     (timezone.now() > a.publish_on if a.publish_on else False))]
    compiler_command = get_compilation_command(program)
    execution_command = get_execution_command(program)
    if request.method == 'POST':
        # Build the generation specification from the submitted form.
        specification = dict()
        specification['programid'] = programID
        specification['user'] = request.user
        specification['course'] = course
        specification['assignment'] = assignment.name
        specification['min_integer'] = int(request.POST['min_integer'])
        specification['max_integer'] = int(request.POST['max_integer'])
        specification['marks'] = int(request.POST['marks'])
        specification['no_of_testcase'] = int(request.POST['no_of_testcase'])
        # The input type (1=numbers, 2=array, 3=matrix) is inferred from
        # which URL variant was posted to.
        if 'number' in str(request.get_full_path()):
            specification['input_type'] = 1
            specification['count_of_numbers'] = int(
                request.POST['count_of_numbers'])
        elif 'array' in str(request.get_full_path()):
            specification['input_type'] = 2
            specification['array_size'] = int(request.POST['array_size'])
            specification['array'] = int(request.POST['array'])
        else:
            specification['input_type'] = 3
            specification['row_size'] = int(request.POST['row_size'])
            specification['column_size'] = int(request.POST['column_size'])
            specification['matrix'] = int(request.POST['matrix'])
        obj = create_output.CreateTestcase(specification)
        obj.testcasecreation()
        return HttpResponseRedirect(reverse('assignments_detailsprogram', kwargs={'programID': programID}))
    else:
        # GET: pick the form matching the URL variant.
        if 'number' in str(request.get_full_path()):
            form = CreateTestcaseNumber()
        elif 'array' in str(request.get_full_path()):
            form = CreateTestcaseArray()
        else:
            form = CreateTestcaseMatrix()
    program_errors = None
    if not program.is_sane:
        try:
            program_errors = ProgramErrors.objects.get(program=program)
        except ProgramErrors.DoesNotExist:
            program_errors = None
    # Collect testcase-level errors recorded against this assignment.
    testcase_errors = []
    terror_ctype = ContentType.objects.get_for_model(TestcaseErrors)
    for error in AssignmentErrors.objects.filter(assignment=program.assignment, content_type=terror_ctype):
        testcase_errors.extend(
            TestcaseErrors.objects.filter(pk=error.object_id))
    get_params = {'source': 'section', 'id': programID}
    try:
        checker = Checker.objects.filter(program=program)
    except Checker.DoesNotExist:
        checker = None
    if checker:
        checker = checker[0]
    return render_to_response(
        'assignments/createautomaticTestcase.html',
        {'form': form, 'program': program, 'testcases': testcases, 'assignment': assignment, 'checker': checker,
         'assignments': assignments, 'date_time': timezone.now(),
         'program_errors': program_errors, 'compiler_command': compiler_command, 'execution_command': execution_command,
         'course': course, 'is_moderator': is_moderator,
         'is_due': is_due, 'has_submitted': has_submitted, 'get_params': get_params,
         'testcase_errors': testcase_errors},
        context_instance=RequestContext(request)
    )
@login_required
def modelsolution_changestatus(request, assignment_id):
    '''Toggle the published/unpublished state of an assignment's model
    solution, then return to the assignment details page.'''
    assignment = get_object_or_404(Assignment, pk=assignment_id)
    assignment.model_solution_published = not assignment.model_solution_published
    assignment.save()
    return HttpResponseRedirect(
        reverse('assignments_details', kwargs={'assignmentID': assignment_id}))
@login_required
@is_moderator_check
def course_scoreboard(request, courseID):
    '''
    Render the course scoreboard: one row per student with at least one
    submission, one column per assignment (latest submission's final +
    manual marks) plus a total.
    '''
    course = get_object_or_404(Course, pk=courseID)
    all_students = CourseHistory.objects.filter(
        course=course, active='A', is_owner=False).select_related('user')
    all_assignment = Assignment.objects.filter(
        course=course).filter(trash=False).order_by('-publish_on')
    assignment_list = [assignment.name for assignment in all_assignment]
    ismoderator = isCourseModerator(course, request.user)
    student_list = []
    for students in all_students:
        submission_list = [students.user.username]
        total_submission_available = False
        total_marks = 0
        for assignment in all_assignment:
            student_assignment_marks = 0
            # Fix: previously unbound if the query raised before the
            # assignment, causing UnboundLocalError below.
            student_assignment_submission = None
            try:
                list_of_assignment_ids = [instance.submission.id for instance in LatestSubmission.objects
                                          .filter(assignment=assignment, owner=students.user)]
                student_assignment_submission = Upload.objects.filter(assignment=assignment, assignment__trash=False,
                                                                      pk__in=list_of_assignment_ids).\
                    order_by("-uploaded_on")
            except Upload.DoesNotExist:
                student_assignment_marks = 0
            if student_assignment_submission:
                total_submission_available = True
                # Final marks may be None/0; manual marks always added.
                marks = student_assignment_submission[0].final_marks
                if marks:
                    student_assignment_marks = marks + \
                        student_assignment_submission[0].manualmark
                else:
                    student_assignment_marks = 0 + \
                        student_assignment_submission[0].manualmark
            submission_list.append(str(student_assignment_marks))
            total_marks += student_assignment_marks
        submission_list.append(str(total_marks))
        # Only students with at least one submission appear on the board.
        if total_submission_available:
            student_list.append(submission_list)
    return render_to_response(
        'assignments/Scoreboard.html',
        {'is_moderator': ismoderator, 'student_list': student_list, 'assignment_list': assignment_list,
         'course': course, 'assignments': all_assignment},
        context_instance=RequestContext(request)
    )
@login_required
@is_moderator_check
def get_scoreboard_in_csv(request, courseID):
    '''
    Download the whole course scoreboard as a CSV file: one row per
    student with at least one submission, one column per assignment plus a
    total.
    '''
    course = get_object_or_404(Course, pk=courseID)
    all_students = CourseHistory.objects.filter(
        course=course, active='A', is_owner=False).select_related('user')
    all_assignment = Assignment.objects.filter(
        course=course).filter(trash=False).order_by('publish_on')
    line = ""
    header = ""
    header += "Username ,"
    for assignment in all_assignment:
        header += str(assignment.name) + ", "
    header += "Total Marks \n"
    for students in all_students:
        submission_list = []
        total_submission_available = False
        total_marks = 0
        for assignment in all_assignment:
            student_assignment_marks = 0
            # Fix: previously unbound if the query raised before the
            # assignment, causing UnboundLocalError below.
            student_assignment_submission = None
            try:
                list_of_assignment_ids = [instance.submission.id for instance in LatestSubmission.objects
                                          .filter(assignment=assignment, owner=students.user)]
                student_assignment_submission = Upload.objects.filter(assignment=assignment, assignment__trash=False,
                                                                      pk__in=list_of_assignment_ids).\
                    order_by("-uploaded_on")
            except Upload.DoesNotExist:
                student_assignment_marks = 0
            if student_assignment_submission:
                total_submission_available = True
                marks = student_assignment_submission[0].final_marks
                if marks:
                    student_assignment_marks = marks + \
                        student_assignment_submission[0].manualmark
                else:
                    student_assignment_marks = 0 + \
                        student_assignment_submission[0].manualmark
            # Fix: always record a mark (0 for missing submissions) so the
            # CSV columns stay aligned with the assignment header; the
            # original only appended inside the if, shifting later columns.
            submission_list.append(student_assignment_marks)
            total_marks += student_assignment_marks
        if total_submission_available:
            line += str(students.user.username) + ", "
            for mark_list in submission_list:
                line += str(mark_list) + ", "
            line += str(total_marks) + "\n"
    line = header + line
    response = HttpResponse(content_type="text/plain")
    response['Content-Disposition'] = 'attachment; filename=Scorecard of %s.csv' % (
        course.title)
    response.write(line)
    return response
@login_required
def paste_assignment(request, assignment_id, course_id):
    '''
    Deep-copy *assignment_id* (with its programs and testcases) into
    *course_id*.

    Refuses duplicates in the Repository; appends " (Copy)" to the name
    when the target course already has an assignment of the same name.
    Testcase input/output files are re-saved under paths rewritten for the
    new assignment/owner/course.
    '''
    course = get_object_or_404(Course, pk=course_id)
    assignment = get_object_or_404(Assignment, pk=assignment_id)
    duplicate_assignment = Assignment.objects.filter(course=course).filter(
        name=assignment.name)
    if assignment.name.find("Copy") != -1:
        return HttpResponse(
            "Assignment with same name already exists in Repository")
    if duplicate_assignment and course.title == "Repository":
        return HttpResponse(
            "Assignment with same name already exists in Repository")
    new_assignment = copy.deepcopy(assignment)
    new_assignment.id = None  # force an INSERT on save
    new_assignment.course = course
    new_assignment.trash = False
    new_assignment.backup = False
    if duplicate_assignment:
        new_assignment.name = new_assignment.name + " (Copy)"
        duplicate_assignment_copy = Assignment.objects.filter(course=course).filter(
            name=new_assignment.name)
        if duplicate_assignment_copy:
            new_assignment.name = new_assignment.name + " (Copy)"
    if assignment.course.title == "Repository":
        new_assignment.backup = True
    new_assignment.save()
    program_list = Program.objects.filter(assignment=assignment)
    for program in program_list:
        temp_program = copy.deepcopy(program)
        temp_program.id = None
        temp_program.assignment = new_assignment
        temp_program.save()
        testcase_list = Testcase.objects.filter(program=program)
        for testcase in testcase_list:
            temp_testcase = copy.deepcopy(testcase)
            temp_testcase.id = None
            temp_testcase.program = temp_program
            # Fix: str.replace returns a new string — the original
            # discarded the result, so the saved paths were never
            # actually rewritten for the new assignment/owner/course.
            testcase_filepath = str(testcase.input_files.file.name)
            testcase_filepath = testcase_filepath.replace(
                assignment.name, new_assignment.name)
            testcase_filepath = testcase_filepath.replace(
                assignment.creater.username, request.user.username)
            testcase_filepath = testcase_filepath.replace(
                assignment.course.title, course.title)
            file_handler = open(testcase.input_files.file.name)
            old_file = File(file_handler)
            temp_testcase.input_files.save(
                testcase_filepath, old_file, save=True)
            testcase_filepath = str(testcase.output_files.file.name)
            testcase_filepath = testcase_filepath.replace(
                assignment.name, new_assignment.name)
            testcase_filepath = testcase_filepath.replace(
                assignment.creater.username, request.user.username)
            testcase_filepath = testcase_filepath.replace(
                assignment.course.title, course.title)
            file_handler = open(testcase.output_files.file.name)
            old_file = File(file_handler)
            temp_testcase.output_files.save(
                testcase_filepath, old_file, save=True)
            temp_testcase.save()
    return HttpResponseRedirect(
        reverse('assignments_index', kwargs={'courseID': course.id}))
@login_required
def view_assignmentdetails(request, assignment_id):
    '''
    Render a read-only preview of an assignment (used while copying it
    between courses).

    Reuses the regular details template with moderator-like flags and
    empty/placeholder values for submission-related context.
    '''
    assignment = get_object_or_404(Assignment, pk=assignment_id)
    programs = Program.objects.filter(assignment=assignment)
    # Collected but not passed to the template; kept for parity with the
    # original implementation.
    testcaselist = []
    for program in programs:
        testcases = Testcase.objects.filter(program=program)
        testcaselist.append(testcases)
    course = assignment.course
    all_assignments = Assignment.objects.filter(
        course=course).filter(trash=False).order_by('-deadline')
    get_params = {'source': 'assignment', 'id': assignment_id}
    mode = get_mode(request)
    # Split sections by type for the template.
    evaluate_program = [
        a_program for a_program in programs if a_program.program_type == "Evaluate"]
    practice_program = [
        a_program for a_program in programs if a_program.program_type == "Practice"]
    formData = AssignmentForm(initial=model_to_dict(
        assignment), courseID=course.id)
    return render_to_response(
        'assignments/details.html',
        {'assignment': assignment, 'timer': None, 'course': course, 'has_joined': True,
         'is_moderator': True, 'programs': programs, 'form': [],
         'submission_allowed': False, 'allowed_exam': True, 'submittedFiles': [],
         'programs_with_errors': [], 'disable_grading': False,
         'program_not_ready': False, 'practice_program': practice_program,
         'assignments': all_assignments, 'program_errors': [], 'test_errors': [],
         'published': assignment.publish_on, 'is_due': False, 'rem_time': 0,
         'isSubmitted': False, 'date_time': timezone.now(), 'get_params': get_params,
         'total_sumissions': 0, 'mode': mode, 'best_submission': "",
         'assignmentID': assignment_id, 'now': timezone.now(), 'evaluate_program': evaluate_program,
         'formData': formData, 'number_of_submissions': 0, 'user_id': request.user,
         'allowed_exam_status': False, 'taList': [],
         'deadline': timezone.now()},
        context_instance=RequestContext(request),
    )
@login_required
@is_moderator_check
def show_repository(request, course_id):
    '''
    Display the assignments stored in the global "Repository" course,
    creating that course lazily on first use.
    '''
    if not Course.objects.filter(title='Repository').exists():
        course = Course()
        course.title = 'Repository'
        course.category_id = 1
        course.save()
    repository_course = get_object_or_404(Course, title='Repository')
    course_ids = [history_object.course_id for history_object in CourseHistory.objects.filter(
        user=request.user)]
    # Fix: `courses` was previously unbound when the user had no course
    # history, raising UnboundLocalError at render time.
    if course_ids:
        courses = Course.objects.filter(pk__in=course_ids)
    else:
        courses = Course.objects.none()
    # Fix: filter on the single Course object rather than a queryset —
    # an exact lookup with a queryset is not a valid filter value.
    assignments = Assignment.objects.filter(course=repository_course)
    course = get_object_or_404(Course, pk=course_id)
    return render_to_response(
        'assignments/showRepository.html',
        {'assignments': assignments, 'courses': courses, 'course': course,
         'Repository': repository_course},
        context_instance=RequestContext(request))
@login_required
def select_assignment(request, course_id):
    '''Show the Repository assignments available for copying into the
    course identified by *course_id*.'''
    course = get_object_or_404(Course, pk=course_id)
    repository_course = Course.objects.filter(title="Repository")
    # Fix: use an __in lookup — filtering an exact `course=` lookup with a
    # queryset is not a valid filter value.
    assignments = Assignment.objects.filter(course__in=repository_course)
    return render_to_response(
        'assignments/selectAssignments.html',
        {'assignments': assignments, 'course': course},
        context_instance=RequestContext(request))
@login_required
def save_assignment(request, assignment_id):
    '''Copy an assignment into the global Repository course (at most one
    backup per assignment), then return to its details page.'''
    assignment = get_object_or_404(Assignment, pk=assignment_id)
    if not isCourseModerator(assignment.course, request.user):
        return HttpResponseForbidden("Forbidden 403")
    if assignment.backup:
        return HttpResponseForbidden(
            "Already created backup for this Assignment")
    # Mark as backed up before copying so repeat clicks are rejected.
    assignment.backup = True
    assignment.save()
    repository = get_object_or_404(Course, title='Repository')
    paste_assignment(request, assignment_id, repository.id)
    return HttpResponseRedirect(
        reverse('assignments_details', kwargs={'assignmentID': assignment_id}))
@login_required
def allcompile(request, new_upload, programs):
    '''
    Compile *new_upload* against every program/section and collect the
    compiler output.

    *programs* is a sequence of triples whose first element is the Program
    instance.  Returns a JSON response mapping '0' to the concatenated
    per-section compiler stderr (or a "no errors" note).  The temporary
    upload is deleted afterwards.
    '''
    # Fix: the original leaked both the mkstemp file descriptor and the
    # temp file itself; close the fd and remove the file when done.
    fd, temp_fname = tempfile.mkstemp(prefix='user_input')
    os.close(fd)
    compileresults = ''
    input_file = open(temp_fname)
    try:
        for program in programs:
            compileall = CustomTestcase(input_file, program[0], new_upload, True)
            compileall.setup()
            compileall.compile()
            result = ''
            for inp in compileall.compileResult.get_stderr():
                result += inp.decode('utf-8')
                result += "\n"
            if result == '':
                result = 'In section ' + program[0].name + ' there are no errors'
            else:
                result = 'In section ' + program[0].name + '\n' + result
            compileresults += result + '\n'
        deleteSubmission(request, new_upload.id)
        new_upload.delete()
    finally:
        input_file.close()
        try:
            os.remove(temp_fname)
        except OSError:
            pass
    return HttpResponse(json.dumps({'0': mark_safe(compileresults)}), content_type="application/json")
@login_required
def custom_input(request, new_upload):
    '''
    Run a submission against user-supplied stdin from the in-browser editor.

    Writes the POSTed ``inputText`` to a temporary file, runs the selected
    program (``programID`` in POST) against it via ``CustomTestcase`` and
    returns the collected stdout/stderr — or the compiler errors — as JSON
    together with the ``test`` identifier the client sent.  The throwaway
    submission ``new_upload`` is deleted once the run completes.
    '''
    pre_dir = os.getcwd()
    program = Program.objects.filter(id=request.POST.get('programID'))[0]
    test = request.POST.get('test')
    fd, temp_fname = tempfile.mkstemp(prefix='user_input')
    # Write through the fd mkstemp handed back so the descriptor is closed
    # instead of leaked.
    input_file = os.fdopen(fd, 'wb+')
    input_file.write(request.POST.get('inputText'))
    input_file.close()
    input_file = open(temp_fname)
    custom_input_recv = CustomTestcase(
        input_file, program, new_upload, True).getResult()
    input_file.close()
    deleteSubmission(request, new_upload.id)
    new_upload.delete()
    result = ''
    if custom_input_recv.compilable:
        if custom_input_recv.compileResult.returnCode == 0:
            # Compiled cleanly: report the program's own stdout and stderr.
            for inp in custom_input_recv.actualOutput.get_stdout():
                result += inp
                result += "\n"
            for inp in custom_input_recv.actualOutput.get_stderr():
                result += inp
                result += "\n"
        else:
            # Compilation failed: report the compiler's stderr instead.
            for inp in custom_input_recv.compileResult.get_stderr():
                result += inp
                result += "\n"
    else:
        # Non-compiled (interpreted) program: only the execution result exists.
        if custom_input_recv.actualOutput.returnCode == 0:
            for inp in custom_input_recv.actualOutput.get_stdout():
                result += inp
                result += "\n"
            for inp in custom_input_recv.actualOutput.get_stderr():
                result += inp
                result += "\n"
        else:
            for inp in custom_input_recv.actualOutput.get_stderr():
                result += inp
                result += "\n"
    os.chdir(pre_dir)
    custom_input_recv.cleanUp()
    # mkstemp creates a plain file, so delete it with os.remove; the old
    # shutil.rmtree(..., ignore_errors=True) silently failed on non-directories
    # and left temp files behind.
    try:
        os.remove(temp_fname)
    except OSError:
        pass
    return HttpResponse(json.dumps({'0': mark_safe(result), '1': test}), content_type="application/json")
@login_required
def practiceresults(request):
    '''AJAX endpoint: build the HTML results view for a practice run.

    Expects ``submissionID`` in POST.  Once the submission's practice run
    has finished (``to_be_practiced`` is False), renders one section per
    program: a missing-files report, a per-testcase pass/fail table (with
    input download, stdin toggle and compare links) or the compiler error,
    and returns the markup as JSON under the key ``html``.  While the run
    is still pending, returns ``{'html': 'done'}``; when results are
    missing, ``{'html': 'Results not created yet'}``.
    '''
    submission = get_object_or_404(Upload, pk=request.POST.get('submissionID'))
    assignment = submission.assignment
    # Results only exist after the pending practice run has completed.
    if not submission.to_be_practiced:
        try:
            results = Results(submission, program_type="Practice")
            html = '<h4>Practice-Tests results for Assignment ' + \
                str(assignment.serial_number) + ' '
            html += assignment.name + '</h4><div class="span12">'
            for prgrm in results.program_results:
                html += '<h5 style="color:green;">' + prgrm.program_result.program.name + '</h5>'
                # Case 1: expected source files were missing from the upload.
                if prgrm.program_result.missing_file_names:
                    html += '<div class="accordion" id="accordion' + \
                        str(prgrm.program_result.program.id) + 'pro">'
                    html += '<div class="accordion-group"><div class="accordion-heading">'
                    html += '<a class="accordion-toggle" data-toggle="collapse"'
                    html += 'data-parent="#accordion' + \
                        str(prgrm.program_result.program.id)
                    html += 'pro" href="#collapse'
                    html += str(prgrm.program_result.program.id) + \
                        'innerpro">Files are not found.</a></div>'
                    html += '<div id="collapse' + \
                        str(prgrm.program_result.program.id)
                    html += 'innerpro" class="accordion-body collapse"> '
                    html += '<div style="padding-left:40px" class="accordion-inner"> '
                    html += '<div class="row"><h5>Bad configuration '
                    html += 'for this section. We couldn\'t find following files -</h5>'
                    html += prgrm.program_result.missing_file_names
                    html += '<br/><h5>Your tar contains following files -</h5>'
                    html += results.assignment_result.submitted_files
                    html += '</div></div></div></div></div>'
                # Case 2: compiled cleanly — emit one table row per testcase.
                elif prgrm.program_result.compiler_return_code == 0:
                    html += '<div class="accordion" id="accordion' + \
                        str(prgrm.program_result.program.id) + 'pro">'
                    html += '<table id="" class="table table-striped datatable"><thead><tr><th>'
                    html += 'Test Case ID</th><th>Status</th>'
                    html += '<th>Marks Awarded</th><th>Input file</th><th>Display I/O</th><th>Compare Output file</th>'
                    if assignment.execution_time_allowed:
                        html += '<th>Execution Time</th>'
                    html += '</tr></thead><tbody>'
                    for tst in prgrm.testResults:
                        html += '<tr><td>' + tst.test_case.name + '</td><td>'
                        if tst.return_code == 0 and tst.test_passed:
                            html += 'PASS'
                        else:
                            html += 'FAIL'
                        html += '</td><td>' + \
                            str(tst.marks) + '</td><td><a id="' + \
                            str(tst.test_case.id)
                        html += '-linkin" href="/assignments/testcase/input/download/'
                        html += str(tst.test_case.id) + \
                            '" target="_blank">Input file</a><textarea readonly="True"'
                        html += ' title="Click toggle to get download link for inputfiles" style="display: none; '
                        html += 'background: black; color: white;" class="form-control" id=\''
                        html += str(tst.test_case.id) + \
                            '-in\' rows="2" cols="11"></textarea></td><td>'
                        html += '<label class="switch"><input type="checkbox" '
                        html += ' checked><span title="Switch between download link and stdin" '
                        html += 'class="slider round" onclick="getstd(\''
                        html += str(tst.test_case.id) + '\', \'' + \
                            str(tst.test_case.program.id)
                        html += '\')"></span></label></td><td>'
                        html += '<a href="/evaluate/compare/' + str(tst.id)
                        html += '" target="formpopup" onclick="window.open(\'\', '
                        html += '\'formpopup2\',\'width=auto,height=400,resizable,scrollbars\').focus();'
                        html += 'this.target=\'formpopup2\';"> Compare </a></td>'
                        if assignment.execution_time_allowed:
                            html += '<td>' + str(tst.executiontime) + '<td>'
                        html += '</tr>'
                    html += '</tr></tbody></table></div>'
                # Case 3: compilation failed — show the command and its errors.
                else:
                    # NOTE(review): the accordion id below embeds the literal
                    # string 'prgrm.program_result.program.id' rather than the
                    # value — looks unintentional; confirm against the template.
                    html += '<div class="accordion" id="accordion' + \
                        'prgrm.program_result.program.id' + 'pro">'
                    html += '<div class="accordion-group"><div class="accordion-heading"><a class="accordion-toggle"'
                    html += ' data-toggle="collapse"'
                    html += 'data-parent="#accordion' + \
                        str(prgrm.program_result.program.id) + \
                        'pro"href="#collapse'
                    html += str(prgrm.program_result.program.id) + \
                        'innerpro">Compilation failed.</a></div>'
                    html += '<div id="collapse' + \
                        str(prgrm.program_result.program.id)
                    html += 'innerpro" class="accordion-body collapse"> <div style="padding-left:40px"'
                    html += 'class="accordion-inner"><div class="row">Compile command:' + \
                        prgrm.compiler_command
                    html += '<br/>Return Code: ' + \
                        str(prgrm.program_result.compiler_return_code)
                    html += '<br/><h6 style="color:red;">Error Message</h6>' + \
                        prgrm.program_result.compiler_errors
                    html += '</div></div></div></div></div>'
            html += '</div>'
            return HttpResponse(json.dumps({'html': html}), content_type="application/json")
        except Results.DoesNotExist:
            return HttpResponse(json.dumps({'html': 'Results not created yet'}), content_type="application/json")
    return HttpResponse(json.dumps({'html': 'done'}), content_type="application/json")
@login_required
def evaluateresults(request):
    '''AJAX endpoint: build the HTML results view for an evaluate run.

    Expects ``submissionID`` in POST.  Structure mirrors
    ``practiceresults`` with one difference: input-file and compare links
    are shown only to moderators or after the deadline has passed;
    students see "Hidden" until then.  Returns markup as JSON under
    ``html``; ``'done'`` while the run is pending, ``'Results not created
    yet'`` when results are missing.
    '''
    submission = get_object_or_404(Upload, pk=request.POST.get('submissionID'))
    assignment = submission.assignment
    is_moderator = isCourseModerator(assignment.course, request.user)
    is_due = (assignment.deadline <= timezone.now())
    # Results only exist after the pending evaluate run has completed.
    if not submission.to_be_evaluated:
        try:
            results = Results(submission, program_type="Evaluate")
            html = '<h4>Evaluate-test results for Assignment ' + \
                str(assignment.serial_number) + ' '
            html += assignment.name + '</h4><div class="span12">'
            for prgrm in results.program_results:
                html += '<h5 style="color:green;">' + prgrm.program_result.program.name + '</h5>'
                # Case 1: expected source files were missing from the upload.
                if prgrm.program_result.missing_file_names:
                    html += '<div class="accordion" id="accordion' + \
                        str(prgrm.program_result.program.id) + 'pro">'
                    html += '<div class="accordion-group"><div class="accordion-heading">'
                    html += '<a class="accordion-toggle" data-toggle="collapse"'
                    html += 'data-parent="#accordion' + \
                        str(prgrm.program_result.program.id) + \
                        'pro" href="#collapse'
                    html += str(prgrm.program_result.program.id) + \
                        'innerpro">Files are not found.</a></div>'
                    html += '<div id="collapse' + \
                        str(prgrm.program_result.program.id)
                    html += 'innerpro" class="accordion-body collapse"> '
                    html += '<div style="padding-left:40px" class="accordion-inner">'
                    html += '<div class="row"><h5>Bad configuration '
                    html += 'for this section. We couldn\'t find following files -</h5>'
                    html += prgrm.program_result.missing_file_names
                    html += '<br/><h5>Your tar contains following files -</h5>'
                    html += results.assignment_result.submitted_files
                    html += '</div></div></div></div></div>'
                # Case 2: compiled cleanly — one table row per testcase.
                elif prgrm.program_result.compiler_return_code == 0:
                    html += '<div class="accordion" id="accordion' + \
                        str(prgrm.program_result.program.id)+'pro">'
                    html += '<table id="" class="table table-striped datatable">'
                    html += '<thead><tr><th>Test Case ID</th><th>Status</th>'
                    html += '<th>Marks Awarded</th><th>Input file</th><th>Compare Output file</th>'
                    if assignment.execution_time_allowed:
                        html += '<th>Execution Time</th>'
                    html += '</tr></thead><tbody>'
                    for tst in prgrm.testResults:
                        html += '<tr><td>' + tst.test_case.name + '</td><td>'
                        if tst.return_code == 0 and tst.test_passed:
                            html += 'PASS'
                        else:
                            html += 'FAIL'
                        html += '</td><td>' + str(tst.marks) + '</td><td>'
                        # Inputs are hidden from students until the deadline.
                        if is_due or is_moderator:
                            html += '<a id="' + str(tst.test_case.id)
                            html += '-linkin" href="/assignments/testcase/input/download/'
                            html += str(tst.test_case.id) + \
                                '" target="_blank">Input file</a>'
                        else:
                            html += 'Hidden'
                        html += '</td>'
                        html += '<td>'
                        if is_due or is_moderator:
                            html += '<a href="/evaluate/compare/' + \
                                str(tst.id) + '" target="formpopup" '
                            html += 'onclick="window.open(\'\', '
                            html += '\'formpopup2\',\'width=auto,height=400,resizable,scrollbars\').focus();'
                            html += 'this.target=\'formpopup2\';"> Compare </a>'
                        else:
                            html += 'Hidden'
                        html += '</td>'
                        if assignment.execution_time_allowed:
                            html += '<td>' + str(tst.executiontime) + '<td>'
                        html += '</tr>'
                    html += '</tr></tbody></table></div>'
                # Case 3: compilation failed — show the command and its errors.
                else:
                    # NOTE(review): the accordion id below embeds the literal
                    # string 'prgrm.program_result.program.id' rather than the
                    # value — looks unintentional; confirm against the template.
                    html += '<div class="accordion" id="accordion' + \
                        'prgrm.program_result.program.id' + 'pro">'
                    html += '<div class="accordion-group"><div class="accordion-heading">'
                    html += '<a class="accordion-toggle" data-toggle="collapse"'
                    html += 'data-parent="#accordion' + \
                        str(prgrm.program_result.program.id) + \
                        'pro"href="#collapse'
                    html += str(prgrm.program_result.program.id) + \
                        'innerpro">Compilation failed.</a></div>'
                    html += '<div id="collapse' + \
                        str(prgrm.program_result.program.id)
                    html += 'innerpro" class="accordion-body collapse"> <div style="padding-left:40px"'
                    html += 'class="accordion-inner"><div class="row">Compile command:' + \
                        prgrm.compiler_command
                    html += '<br/>Return Code: ' + \
                        str(prgrm.program_result.compiler_return_code)
                    html += '<br/><h6 style="color:red;">Error Message</h6>' + \
                        prgrm.program_result.compiler_errors
                    html += '</div></div></div></div></div>'
            html += '</div>'
            return HttpResponse(json.dumps({'html': html}), content_type="application/json")
        except Results.DoesNotExist:
            return HttpResponse(json.dumps({'html': 'Results not created yet'}), content_type="application/json")
    return HttpResponse(json.dumps({'html': 'done'}), content_type="application/json")
def practicerun(request, submission):
'''puts task of practice run of assignment in queue'''
try:
assignment = submission.assignment
# Checking if the user sending the request is either the owner of the submission or the assignment
# creator (the only people authorized to evaluate).
is_moderator = isCourseModerator(assignment.course, request.user)
if not (request.user == submission.owner or is_moderator):
raise PermissionDenied
if submission.to_be_practiced:
evaluate_assignment.delay(submission.id, "Practice")
i = app.control.inspect()
data = i.stats()
if data is not None:
node_name = list(data)[0]
queue_position = len(i.reserved().get(node_name))
else:
queue_position = "Unknown"
else:
# is_student = True if (request.user == submission.owner) else False
# is_due = (assignment.deadline <= timezone.now())
try:
__ = Results(submission, program_type="Practice")
return HttpResponse(json.dumps({'results': 1, 'submissionID': submission.id}),
content_type="application/json")
except Results.DoesNotExist:
raise Http404("Results not created yet")
except Exception as e:
print e
return HttpResponse(json.dumps({'submissionID': submission.id, 'position': queue_position,
'html': '<h4>Practice-Tests Results for Assignment' +
str(assignment.serial_number) + ' ' + assignment.name +
'</h4><div><p style="font-size: 16px;">' +
'Evaluation Status:<span id="current-status" style="color: orange;">' +
' </span></p></div><table class="table table-bordered"><thead><tr><th>' +
'Phase</th><th>Status</th></tr></thead><tbody><tr><td> <b> Compilation ' +
'</b> </td><td><i name="compile" class="fa fa-circle-o-notch fa-spin" ' +
'style="font-size:24px"></i></td></tr><tr ><td> <b> Execution </b> </td>' +
'<td><i name="execute" class="fa fa-circle-o-notch fa-spin" ' +
'style="font-size:24px"></i></td></tr></tbody></table>'}),
content_type="application/json")
def evaluaterun(request, submission):
'''puts task of evaluation of assignment in queue'''
try:
assignment = submission.assignment
# Checking if the user sending the request is either the owner of the submission or the assignment
# creator (the only people authorized to evaluate).
is_moderator = isCourseModerator(assignment.course, request.user)
if not (request.user == submission.owner or is_moderator):
raise PermissionDenied
if submission.to_be_evaluated:
evaluate_assignment.delay(submission.id, "Evaluate")
i = app.control.inspect()
data = i.stats()
if data is not None:
node_name = list(data)[0]
queue_position = len(i.reserved().get(node_name))
else:
queue_position = "Unknown"
else:
try:
__ = Results(submission, program_type="Evaluate")
return HttpResponse(json.dumps({'results': 1, 'submissionID': submission.id,
'position': 'queue_position'}),
content_type="application/json")
except Results.DoesNotExist:
raise Http404("Results not created yet")
except Exception as e:
print e
return HttpResponse(json.dumps({'submissionID': submission.id, 'position': queue_position,
'html': '<h4>Evaluate-Tests Results for Assignment' +
str(assignment.serial_number) + ' ' + assignment.name +
'</h4><div><p style="font-size: 16px;">' +
'Evaluation Status:<span id="current-status" style="color: orange;">' +
' </span></p></div><table class="table table-bordered"><thead><tr><th>' +
'Phase</th><th>Status</th></tr></thead><tbody><tr><td> <b> Compilation ' +
'</b> </td><td><i name="compile" class="fa fa-circle-o-notch fa-spin" ' +
'style="font-size:24px"></i></td></tr><tr ><td> <b> Execution </b> </td>' +
'<td><i name="execute" class="fa fa-circle-o-notch fa-spin" ' +
'style="font-size:24px"></i></td></tr></tbody></table>'}),
content_type="application/json")
@login_required
def online_editor(request, assignment_id):
    '''
    In-browser code editor for an assignment: renders the editor, accepts
    code submissions, autosaves, compiles, runs custom input and triggers
    practice/evaluate runs.

    Dispatch (all on the same URL):
      * POST with ``practiceresults``/``evaluateresults`` — delegate to the
        corresponding results endpoint.
      * POST without ``postID`` — the submitted editor contents: autosave,
        compile-all, custom stdin run, or save as the latest submission and
        start a practice/evaluate run.
      * POST with ``postID`` — AJAX fetch of an older submission's file
        contents (returns JSON).
      * GET — render the editor, pre-filled from the newest prior upload.

    Forbidden when the user is not enrolled, fails ``validateuser``, the
    exam session disallows it, the deadline passed, the language is
    "Others", the assignment is hidden, or no student files are defined.
    '''
    # pause_status = True
    allowed_exam_status = True
    if not request.user.is_authenticated:
        return HttpResponseForbidden("Forbidden 403")
    assignment = get_object_or_404(Assignment, pk=assignment_id)
    if not isEnrolledInCourse(assignment.course, request.user):
        return HttpResponseRedirect("/courseware/courseslist/")
    # Remaining time: plain deadline arithmetic for labs, else taken from
    # the proctoring record of the exam session.
    if assignment.type_of_lab == "Lab":
        rem_time = int(
            (assignment.deadline - datetime.now(pytz.timezone('UTC'))).total_seconds())
    else:
        rem_time, __, allowed_exam_status = get_object_from_proctoring(
            assignment.exam_group_id, request.user)
    if not allowed_exam_status:
        return HttpResponseForbidden("Forbidden 403")
    if not validateuser(request, assignment):
        return HttpResponseForbidden("Forbidden 403")
    # Build [program, compile_cmd, exec_cmd] triples for the template.
    programs = Program.objects.filter(
        assignment=assignment, program_type="Practice")
    new_prog = []
    for x in programs:
        new_prog.append([x, get_compilation_command(x),
                         get_execution_command(x)])
    programs = new_prog
    course = assignment.course
    is_due = None
    if assignment.deadline is not None:
        is_due = (timezone.now() >= assignment.deadline)
    if (is_due or assignment.program_language == "Others" or
            assignment.hide is True or not assignment.student_program_files):
        return HttpResponseForbidden("Forbidden 403")
    # Results-fetch shortcuts posted from the editor page.
    if request.method == 'POST':
        if 'practiceresults' in request.POST:
            return practiceresults(request)
        if 'evaluateresults' in request.POST:
            return evaluateresults(request)
    # Editor contents posted back (not an older-submission fetch).
    if request.method == 'POST' and 'postID' not in request.POST:
        files_to_be_submitted = assignment.student_program_files.split(' ')
        data = request.POST
        student_code = []
        for x in files_to_be_submitted:
            student_code.append(request.POST[x])
        # All editors empty: nothing to submit.
        if [""]*len(student_code) == student_code:
            if 'inputText' in request.POST or 'compileall' in request.POST:
                reerror = mark_safe(
                    "Error occured during input!!<br>Please check submission.")
                return HttpResponse(json.dumps({'0': reerror, '1': request.POST.get('test')}),
                                    content_type="application/json")
            return HttpResponseRedirect(request.META["HTTP_REFERER"])
        # Autosave: keep exactly one server-saved draft per user/assignment.
        if 'autosave' in request.POST:
            try:
                in_memory_file = create_upload(request, assignment, student_code,
                                               files_to_be_submitted, 'autosavedsub.zip')
                if ServerSavedSubmission.objects.filter(assignment=assignment, owner=request.user).exists():
                    server_saved_submission = ServerSavedSubmission.objects.filter(assignment=assignment,
                                                                                  owner=request.user)
                else:
                    server_saved_submission = False
                if server_saved_submission:
                    # Replace the old draft's files on disk with the new zip.
                    filepath = os.path.join(settings.MEDIA_ROOT, str(
                        server_saved_submission[0].submission.filePath))
                    filepath = filepath.rsplit('/', 1)[0]
                    shutil.rmtree(filepath)
                    server_saved_submission[0].submission.filePath = in_memory_file
                    server_saved_submission[0].submission.uploaded_on = timezone.now(
                    )
                    server_saved_submission[0].submission.save()
                    server_saved_submission[0].save()
                else:
                    new_upload = Upload(
                        owner=request.user,
                        assignment=assignment,
                        filePath=in_memory_file
                    )
                    new_upload.save()
                    new_server_sub = ServerSavedSubmission(
                        assignment=new_upload.assignment,
                        owner=request.user,
                        submission=new_upload
                    )
                    new_server_sub.save()
                return HttpResponse(json.dumps({'FailedAutosave': "0"}), content_type="application/json")
            except Exception:
                return HttpResponse(json.dumps({'FailedAutosave': "1"}), content_type="application/json")
        # Regular submit / compile / custom-input path.
        in_memory_file = create_upload(
            request, assignment, student_code, files_to_be_submitted, 'submission.zip')
        new_upload = Upload(
            owner=request.user,
            assignment=assignment,
            filePath=in_memory_file
        )
        if 'inputText' in request.POST or 'compileall' in request.POST:
            # Throwaway upload: allcompile/custom_input delete it afterwards.
            new_upload.save()
            if 'compileall' in request.POST:
                try:
                    programs = Program.objects.filter(
                        assignment=assignment, program_type="Practice")
                    new_prog = []
                    for x in programs:
                        new_prog.append(
                            [x, get_compilation_command(x), get_execution_command(x)])
                    programs = new_prog
                    return allcompile(request, new_upload, programs)
                except Exception as e:
                    print e
            return custom_input(request, new_upload)
        else:
            # Record/refresh this user's LatestSubmission for the assignment.
            if LatestSubmission.objects.filter(assignment=assignment, owner=request.user).exists():
                latest_submission = LatestSubmission.objects.filter(
                    assignment=assignment, owner=request.user)
            else:
                latest_submission = False
            if latest_submission:
                # Only persist the new upload when the code actually changed.
                if check(assignment, request.POST, latest_submission[0].submission):
                    new_upload.save()
                    if latest_submission:
                        latest_submission[0].submission = new_upload
                        latest_submission[0].save()
            else:
                new_upload.save()
                submission_to_evaluate = LatestSubmission(
                    assignment=new_upload.assignment,
                    owner=request.user,
                    submission=new_upload
                )
                submission_to_evaluate.save()
            latest_submission = LatestSubmission.objects.filter(
                assignment=assignment, owner=request.user)
            if 'run' in data:
                return practicerun(request, latest_submission[0].submission)
            if 'evaluate' in data:
                return evaluaterun(request, latest_submission[0].submission)
            if 'graphics' in data:
                return HttpResponseRedirect("/evaluate/testcases/"+str(latest_submission[0].submission.id))
            return HttpResponseRedirect(request.META["HTTP_REFERER"])
    else:
        # GET (or POST with postID): populate the editor from a prior upload.
        sub_id = 0
        files_and_content = []  # pairs of [file_name, file_contents]
        valid = True
        submission_allowed = None
        is_due = None
        all_uploads = Upload.objects.filter(assignment_id=assignment_id, owner=request.user,
                                            assignment__trash=False).order_by("-uploaded_on")
        for a_submission in all_uploads:
            a_submission.marks_v = a_submission.manualmark + a_submission.final_marks
        has_joined = CourseHistory.objects.filter(
            course_id=course, user_id=request.user.id)
        if assignment.deadline is not None:
            submission_allowed = (
                timezone.now() <= assignment.deadline) and bool(has_joined)
            is_due = (timezone.now() >= assignment.deadline) and bool(
                has_joined)
        for x in assignment.student_program_files.split(' '):
            if x != '':
                files_and_content.append([x, ""])
        # Hide the autosaved draft from the visible uploads list.
        if ServerSavedSubmission.objects.filter(assignment=assignment, owner=request.user).exists():
            server_upload = ServerSavedSubmission.objects.filter(
                assignment=assignment, owner=request.user)
            older_upload = Upload.objects.filter(
                id=server_upload[0].submission.id)
            y11 = []
            for x in all_uploads:
                if x.id != older_upload[0].id:
                    y11.append(x)
            all_uploads = y11
        older_upload = Upload.objects.filter(
            assignment=assignment, owner=request.user).order_by("-uploaded_on")
        if older_upload:
            student_file = os.path.join(
                settings.MEDIA_ROOT, str(older_upload[0].filePath))
            # AJAX request for a specific older submission's contents.
            if 'postID' in request.POST:
                sub_id = int(request.POST['postID'])
                older_upload = Upload.objects.filter(
                    id=sub_id, assignment=assignment, owner=request.user)
                student_file = os.path.join(
                    settings.MEDIA_ROOT, str(older_upload[0].filePath))
            # Trim the path down to its containing directory.
            loc = student_file
            i = 0
            while True:
                if loc[-1] == '/':
                    break
                else:
                    loc = loc[:-1]
            archive = []
            content = []
            files_and_content = []
            files_to_be_submitted = []
            f = ""
            # Extract the expected student files from the stored archive
            # (tar or zip), or read a bare file straight from disk.
            if ('.tar' in student_file or '.zip' in student_file) and os.path.exists(student_file):
                if '.tar' in student_file:
                    f = tarfile.open(student_file, 'r')
                    archive = f.getmembers()
                    for single_file in archive:
                        if single_file.name.split('/')[-1] in assignment.student_program_files.split():
                            student_code = f.extractfile(single_file).read()
                            content.append(student_code)
                            file_name = single_file.name.split('/')[-1]
                            files_and_content.append([file_name, student_code])
                            files_to_be_submitted.append(file_name)
                    f.close()
                else:
                    f = zipfile.ZipFile(student_file, 'r')
                    for file_in_zip in f.namelist():
                        if file_in_zip.split('/')[-1] in assignment.student_program_files.split():
                            student_code = f.open(file_in_zip).read()
                            content.append(student_code)
                            file_name = file_in_zip.split('/')[-1]
                            files_and_content.append([file_name, student_code])
                            files_to_be_submitted.append(file_name)
            else:
                archive = [student_file.split('/')[-1]]
                for file_in_archive in archive:
                    if os.path.exists(loc+file_in_archive):
                        student_code = open(loc+file_in_archive, 'r').read()
                        content.append(student_code)
                        file_name = file_in_archive.split('/')[-1]
                        files_and_content.append([file_name, student_code])
                        files_to_be_submitted.append(file_name)
            # Drop unexpected files and add empty tabs for missing ones.
            # NOTE(review): popping while iterating with a manually bumped
            # index can skip entries when consecutive files are unexpected —
            # verify intent.
            i = 0
            for x in files_and_content:
                if x[0] not in assignment.student_program_files.split(' '):
                    files_and_content.pop(i)
                i += 1
            for y in assignment.student_program_files.split(' '):
                if y not in files_to_be_submitted:
                    files_and_content.append([y, ""])
            if 'postID' in request.POST:
                return HttpResponse(json.dumps(files_and_content), content_type="application/json")
            programs = Program.objects.filter(
                assignment=assignment, program_type="Practice")
            new_prog = []
            for x in programs:
                new_prog.append(
                    [x, get_compilation_command(x), get_execution_command(x)])
            programs = new_prog
            return render_to_response(
                'assignments/editor.html',
                {'valid': valid, 'form': files_and_content, 'programs': programs, 'all_uploads': all_uploads,
                 'submission_allowed': submission_allowed, 'is_due': is_due, 'rem_time': rem_time,
                 'files_to_be_submitted': files_to_be_submitted, 'course': course, 'assignment': assignment,
                 'allowed_exam_status': allowed_exam_status},
                context_instance=RequestContext(request))
        else:
            # No prior upload: render empty editor tabs.
            programs = Program.objects.filter(
                assignment=assignment, program_type="Practice")
            new_prog = []
            for x in programs:
                new_prog.append(
                    [x, get_compilation_command(x), get_execution_command(x)])
            programs = new_prog
            return render_to_response(
                'assignments/editor.html',
                {'valid': valid, 'form': files_and_content, 'programs': programs, 'all_uploads': all_uploads,
                 'submission_allowed': submission_allowed, 'is_due': is_due, 'rem_time': rem_time,
                 'files_to_be_submitted': assignment.student_program_files.split(' '), 'course': course,
                 'assignment': assignment, 'allowed_exam_status': allowed_exam_status},
                context_instance=RequestContext(request))
    # NOTE(review): unreachable — both branches above always return.
    return HttpResponseRedirect(request.META["HTTP_REFERER"])
@login_required
@is_moderator_check
def assignment_stats(request, assignmentID):
    '''
    Generate per-testcase and marks-distribution statistics for an
    assignment so they can be plotted.

    Builds, over each student's latest submission: a pass/fail count per
    phase ('Compilation' plus every testcase of every program, ordered by
    name) and a histogram of total marks (auto + manual) sorted by mark,
    with a final "No Submission" bucket for enrolled students who never
    submitted.
    '''
    assignment = get_object_or_404(Assignment, pk=assignmentID)
    # Only each user's latest submission counts towards the stats.
    list_of_assignment_ids = [instance.submission.id for instance in LatestSubmission.objects
                              .filter(assignment=assignment)]
    numsubmissions = Upload.objects.filter(assignment=assignment, assignment__trash=False,
                                           pk__in=list_of_assignment_ids).order_by("-uploaded_on").count()
    all_programs = Program.objects.filter(
        assignment=assignment).order_by('name')
    course = assignment.course
    all_assignments = Assignment.objects.filter(
        course=course).filter(trash=False).order_by('-deadline')
    testcase_list = []
    failure_count = []
    pass_count = []
    testcase_list.append('Compilation')
    all_submissions = Upload.objects.filter(assignment=assignment, assignment__trash=False,
                                            pk__in=list_of_assignment_ids).order_by("-uploaded_on")
    all_assign_results = AssignmentResults.objects.filter(
        submission__in=all_submissions)
    # Compilation failures, counted on the first program (guard against an
    # assignment with no programs, which previously raised IndexError).
    if all_programs:
        numcomperrors = ProgramResults.objects.filter(program=all_programs[0], compiler_return_code=1,
                                                      assignment_result__in=all_assign_results).count()
    else:
        numcomperrors = 0
    all_prog_results = ProgramResults.objects.filter(
        assignment_result__in=all_assign_results)
    failure_count.append(numcomperrors)
    pass_count.append(numsubmissions-numcomperrors)
    # One pass/fail pair per testcase, in program/testcase name order.
    for program in all_programs:
        all_testcases = Testcase.objects.filter(
            program=program).order_by('name')
        for testcase in all_testcases:
            testcase_list.append(str(testcase.name))
            failedtestcases = TestcaseResult.objects.filter(test_case=testcase, test_passed=False,
                                                            program_result__in=all_prog_results).count()
            failure_count.append(failedtestcases)
            pass_count.append(numsubmissions-(numcomperrors + failedtestcases))
    assignment_results = AssignmentResults.objects.filter(
        submission__in=all_submissions)
    # Histogram of total marks (auto-graded + manual) per submission.
    marks_list = []
    count_list = []
    for submission in all_submissions:
        marks = [s.get_marks()
                 for s in assignment_results if s.submission == submission]
        if marks:
            total = marks[0] + submission.manualmark
        else:
            total = submission.manualmark
        total = str(total)
        if total in marks_list:
            count_list[marks_list.index(total)] += 1
        else:
            marks_list.append(total)
            count_list.append(1)
    # Sort both parallel lists by numeric mark (replaces the previous
    # hand-rolled bubble sort; sorted() is stable, so ties keep their order).
    if marks_list:
        ordered = sorted(zip(marks_list, count_list),
                         key=lambda pair: float(pair[0]))
        marks_list = [mark for mark, _ in ordered]
        count_list = [count for _, count in ordered]
    marks_list.append("No Submission")
    count_list.append(CourseHistory.objects.filter(course=course, active='A',
                                                   is_owner=False).select_related('user').count() - numsubmissions)
    return render_to_response(
        'assignments/testcasestats.html',
        {'course': course, 'failurecount': failure_count, 'passcount': pass_count, 'assignment': assignment,
         'numSubmission': numsubmissions, 'testcases_list': mark_safe(testcase_list), 'assignments': all_assignments,
         'marks_list': mark_safe(marks_list), 'count_list': count_list, 'is_moderator': True},
        context_instance=RequestContext(request)
    )
@login_required
def config_testcase(request, testcaseID):
    '''
    Configure Safe Execute resource limits for a single testcase.

    GET renders the form, pre-filled from the testcase's existing
    ``SafeExec`` row or from default limits.  POST validates and then
    updates (or creates) the ``SafeExec`` row for the testcase.  Only
    course moderators may access this view.
    '''
    testcase_obj = get_object_or_404(Testcase, pk=testcaseID)
    assignment_id = testcase_obj.program.assignment.id
    assignment = get_object_or_404(Assignment, pk=assignment_id)
    title = testcase_obj.name
    source = 'testcase'
    is_moderator = isCourseModerator(
        testcase_obj.program.assignment.course, request.user)
    if not is_moderator:
        return HttpResponseForbidden("Forbidden 403")
    if request.method == 'POST':
        form = SafeExecForm(request.POST, request.FILES,
                            initial=model_to_dict(testcase_obj))
        if form.is_valid():
            params = dict(form.cleaned_data)
            params['testcase'] = testcase_obj
            existing = SafeExec.objects.filter(testcase=testcase_obj)
            if existing.exists():
                existing.update(**params)
            else:
                SafeExec.objects.create(**params)
            return HttpResponseRedirect(reverse('assignments_detailstestcase', kwargs={'testcaseID': testcaseID}))
        # Invalid form: re-render the bound form so validation errors are
        # displayed.  (Previously the user's input was discarded and the
        # view crashed with SafeExec.DoesNotExist when no row existed yet.)
        return render_to_response('assignments/safeexec_params.html',
                                  {'form': form, 'testcases': testcase_obj, 'source': source,
                                   'title': title, 'assignment': assignment},
                                  context_instance=RequestContext(request))
    else:
        # Sensible defaults until a SafeExec row exists for this testcase.
        default_limits = {'cpu_time': 10, 'clock_time': 60,
                          'memory': 32768, 'stack_size': 8192,
                          'child_processes': 0, 'open_files': 512,
                          'file_size': 1024}
        form = SafeExecForm(initial=default_limits)
        if SafeExec.objects.filter(testcase_id=testcaseID).exists():
            form = SafeExecForm(initial=model_to_dict(
                SafeExec.objects.get(testcase_id=testcaseID)))
        return render_to_response('assignments/safeexec_params.html',
                                  {'form': form, 'testcases': testcase_obj, 'source': source, 'title': title,
                                   'assignment': assignment, 'is_moderator': is_moderator},
                                  context_instance=RequestContext(request))
@login_required
def edit_tc_marks(request):
    '''
    AJAX endpoint: update the marks awarded by a single testcase.

    Expects ``pk`` (testcase id) and ``value`` (new marks) in POST.  Only
    a course moderator outside student mode may change marks; any other
    caller — or a non-POST request — gets a 404.  Non-numeric or negative
    values are ignored (the endpoint still answers "true").  A successful
    change invalidates existing submissions via ``mark_submissions_false``.
    '''
    if request.method != "POST":
        raise Http404
    testcase = Testcase.objects.get(id=request.POST["pk"])
    raw_value = request.POST["value"]
    is_moderator = isCourseModerator(
        testcase.program.assignment.course, request.user)
    mode = get_mode(request)
    try:
        new_marks = float(raw_value)
    except ValueError:
        return HttpResponse("true")
    if new_marks < 0:
        return HttpResponse("true")
    if not (is_moderator and mode != 'S'):
        raise Http404
    testcase.marks = new_marks
    testcase.save()
    mark_submissions_false(testcase.program.assignment.id)
    return HttpResponse("true")
@login_required
def reevaluate(request):
    '''
    AJAX endpoint: re-run the evaluate-mode tests for a submission.

    Expects the submission id as ``pk`` in POST; delegates the actual
    queueing/permission checks to ``evaluaterun``.  Answers
    ``{'success': True}`` on POST and ``{'success': False}`` otherwise.
    '''
    if request.method == "POST":
        # get_object_or_404 instead of filter(...)[0]: a missing id now
        # yields a 404 rather than an IndexError 500.
        submission = get_object_or_404(Upload, pk=request.POST["pk"])
        evaluaterun(request, submission)
        return HttpResponse(json.dumps({'success': True}), content_type='application/json')
    return HttpResponse(json.dumps({'success': False}), content_type='application/json')
@login_required
def publish_on_demand(request, assignment_id):
    """Publish an assignment immediately ("on demand") and show its detail page.

    Sets ``publish_on`` to now, defaults a missing deadline to ~27 years
    out, aligns ``freezing_deadline`` with the deadline, schedules the
    Celery cleanup task ``delete_redundant_files`` for the freezing
    deadline, and unhides the assignment.  Only course creators/moderators
    may do this.  The remainder mirrors the assignment detail view:
    optional file-upload handling plus assembly of the template context
    (errors, programs, submissions, stats).
    """
    submission_allowed = None  # New initialize
    is_due = None  # New initialize
    assignment = get_object_or_404(Assignment, pk=assignment_id)
    course = assignment.course
    is_creator = isCourseCreator(course, request.user)
    is_moderator = isCourseModerator(course, request.user)
    mode = get_mode(request)
    assignment.publish_on = timezone.now()
    # No deadline set: push it effectively infinitely far into the future.
    if not assignment.deadline:
        assignment.deadline = timezone.now() + timedelta(days=9999)
    assignment.freezing_deadline = assignment.deadline
    # Updating the database table
    this_asgnmnt = Assignment.objects.get(id=assignment_id)
    this_asgnmnt.publish_on = assignment.publish_on
    this_asgnmnt.deadline = assignment.deadline
    this_asgnmnt.freezing_deadline = assignment.freezing_deadline
    # Schedule submission cleanup to fire at the freezing deadline.
    delete_task = delete_redundant_files.apply_async(
        (assignment.id,), eta=assignment.freezing_deadline)
    this_asgnmnt.deletesubmissions_task_id = delete_task.id
    this_asgnmnt.hide = False
    this_asgnmnt.save()
    formData = AssignmentForm(initial=model_to_dict(
        assignment), courseID=assignment.course.id)
    # NOTE(review): the permission check runs after the publish side
    # effects above have already been saved — confirm this ordering is
    # intentional.
    if not is_creator and not is_moderator:
        raise PermissionDenied
    has_joined = CourseHistory.objects.filter(
        course_id=course, user_id=request.user.id)
    submission_allowed = (
        timezone.now() <= assignment.deadline) and bool(has_joined)
    is_due = (timezone.now() >= assignment.deadline)  # and bool(has_joined)
    # Optional file submission posted together with the publish request.
    if request.method == "POST" and submission_allowed:
        form = UploadForm(request.POST, request.FILES,
                          assignment_model_obj=assignment)
        if form.is_valid():
            older_upload = Upload.objects.filter(
                owner=request.user,
                assignment=assignment
            )
            if older_upload:
                older_upload[0].delete()
            new_upload = Upload(
                owner=request.user,
                assignment=assignment,
                filePath=request.FILES['docfile']
            )
            new_upload.save()
            # NOTE(review): passing the 'formData' form object as a URL
            # kwarg to reverse() looks wrong and will likely raise
            # NoReverseMatch — verify against the URLconf.
            return HttpResponseRedirect(reverse('assignments_details', kwargs={'assignmentID': assignment_id,
                                                                               'formData': formData}))
    else:
        form = UploadForm()
    # Collect configuration errors recorded for this assignment's programs
    # and testcases (generic-FK rows in AssignmentErrors).
    perror_ctype = ContentType.objects.get_for_model(ProgramErrors)
    terror_ctype = ContentType.objects.get_for_model(TestcaseErrors)
    program_errors = []
    test_errors = []
    for error in AssignmentErrors.objects.filter(assignment=assignment, content_type=terror_ctype):
        test_errors.extend(TestcaseErrors.objects.filter(pk=error.object_id))
    for error in AssignmentErrors.objects.filter(assignment=assignment, content_type=perror_ctype):
        program_errors.extend(ProgramErrors.objects.filter(pk=error.object_id))
    course = assignment.course
    programs = Program.objects.filter(assignment=assignment)
    practice_program = [
        a_program for a_program in programs if a_program.program_type == "Practice"]
    programs_with_errors = []
    for aprogram in programs:
        if not aprogram.is_sane:
            try:
                p_error = ProgramErrors.objects.get(program=aprogram)
                programs_with_errors.append(p_error)
            except ProgramErrors.DoesNotExist:
                p_error = None
    submitted_files = Upload.objects.filter(
        owner=request.user, assignment=assignment)
    program_not_ready = False
    disable_grading = False
    if programs_with_errors or assignment.deadline is not None and submission_allowed is False:
        program_not_ready = True
    if submitted_files and submitted_files[0].is_stale:
        disable_grading = True
    all_assignments = Assignment.objects.filter(
        course=course).order_by('-deadline')
    course_history = CourseHistory.objects.get(
        user=request.user, course=course)
    # Owners see every assignment; students only published, non-hidden ones.
    if course_history.is_owner:
        assignments = all_assignments
    else:
        assignments = [a for a in all_assignments if(not a.hide and (timezone.now() > a.publish_on if a.publish_on else
                                                                     False))]
    total_sumissions = Upload.objects.filter(assignment=assignment).count()
    is_submitted = Upload.objects.filter(assignment=assignment).count() > 0
    get_params = {'source': 'assignment', 'id': assignment_id}
    formData = AssignmentForm(initial=model_to_dict(
        assignment), courseID=assignment.course.id)
    allowed_exam = True
    if assignment.deadline is not None:
        submission_allowed = (
            timezone.now() <= assignment.deadline) and bool(has_joined)
        is_due = (timezone.now() >= assignment.deadline) and bool(has_joined)
    # for remaining time
    rem_time = int(
        (assignment.deadline - datetime.now(pytz.timezone('UTC'))).total_seconds())
    return render_to_response(
        'assignments/details.html',
        {'assignment': assignment, 'course': course, 'has_joined': has_joined, 'is_moderator': is_moderator,
         'programs': programs, 'form': form, 'submission_allowed': submission_allowed,
         'submitted_files': submitted_files, 'programs_with_errors': programs_with_errors,
         'disable_grading': disable_grading, 'rem_time': rem_time,
         'program_not_ready': program_not_ready, 'practice_program': practice_program,
         'assignments': assignments, 'program_errors': program_errors, 'test_errors': test_errors,
         'published': assignment.publish_on, 'now': timezone.now(), 'is_due': is_due,
         'is_submitted': is_submitted, 'date_time': timezone.now(), 'get_params': get_params,
         'total_sumissions': total_sumissions, 'formData': formData, 'mode': mode, 'allowed_exam': allowed_exam},
        context_instance=RequestContext(request)
    )
@login_required
def testcaseInputChange(request, testcaseID):
    '''
    Overwrite a testcase's input file with the POSTed ``new_input`` text,
    then redirect back to the testcase detail page.

    Allowed for course moderators, for practice programs, or once the
    assignment deadline has passed; otherwise returns 403.
    NOTE(review): the original docstring said "downloading single testcase
    output file", which does not match the code — this view writes input.
    '''
    # TODO(review): leftover debug prints (note the "herem" typo) — consider removing
    print("Reached herem\n")
    testcase = get_object_or_404(Testcase, pk=testcaseID)
    print("Reached here\n")
    program = testcase.program
    assignment = program.assignment
    is_due = None
    if assignment.deadline is not None:
        # True once the deadline has passed (only computed when a deadline exists)
        is_due = (timezone.now() >= assignment.deadline)
    course = assignment.course
    if not isEnrolledInCourse(course, request.user):
        return HttpResponseForbidden("Forbidden 403")
    # NOTE(review): this guards on output_files but the code below writes
    # input_files — looks like a copy-paste inconsistency; confirm intent.
    if not testcase.output_files:
        return HttpResponseNotFound("File not found")
    is_moderator = isCourseModerator(course, request.user)
    if(is_moderator or program.program_type == "Practice" or is_due):
        # Replace the stored input file's contents with the submitted text
        input_file = open(testcase.input_files.name,'w')
        input_file.write(request.POST['new_input'])
        input_file.close()
        return HttpResponseRedirect(reverse('assignments_detailstestcase',
                                            kwargs={'testcaseID': testcaseID}))
    return HttpResponseForbidden("Forbidden 403")
def get_object_from_proctoring(exam_id, user_id):
    '''
    Return (remaining_seconds, pause_status, not_expired) for the Proctoring
    row matching this exam_id/user_id in the exam module.

    When no row exists, defaults to a fresh one-hour window: (3600, "True", 1).
    '''
    time_left = Proctoring.objects.filter(key=exam_id, owner=user_id)
    if not list(time_left):
        # No proctoring record yet: full hour remaining, paused, valid
        return 3600, "True", 1
    # remaining = allotted time minus elapsed time since the exam started
    time = time_left[0].time - \
        (datetime.now(pytz.timezone('UTC')) - time_left[0].starttime)
    # timedelta normalizes so seconds is in [0, 86400); add whole days back
    # (days is negative when the allotted time has been exceeded)
    rem = time.seconds + time.days * 86400
    return rem, time_left[0].pause, time.days >= 0
| [
"debanjanmondal702@gmail.com"
] | debanjanmondal702@gmail.com |
b903aeff9f0ce8c0a750b446991365b030caf171 | a5bccbcbe8861bb5542edfa99812d2ba107c36f5 | /main.py | bd18e5cb550fe14652b455412691306244f82bed | [] | no_license | ubot1231/koreisai2019_trimmingObjects | 1293db3cda174a2b20dcf65d63054ef7ab1072aa | ed39577719e6f79b3df159a6d5259f8d022c1c3f | refs/heads/main | 2023-08-25T14:29:09.628423 | 2021-10-08T21:22:42 | 2021-10-08T21:22:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | import numpy as np
import sys
import toka
import webcam
from grabcut import App

# Path where the captured photo is written (and later processed in place)
filename_Output = "/Users/uboT/Documents/Python/OpenCV/4trim/Code/Output/Output.png"
# Capture a photo from the webcam and save it to the output path
webcam.webcam(filename_Output)
# Generate the mask image (interactive GrabCut segmentation)
grabcut=App()
grabcut.run(filename_Output)
# Make the photo's background transparent and write it back to Output
toka.toka(filename_Output)
| [
"noreply@github.com"
] | noreply@github.com |
454994b05910daee4f982348fb1beb8bab821645 | cf5bfac1e203ae0da1802cf539f32250b57e7224 | /4.exceptions/exception101.py | dafb2cf845bdff6ba846e02634c5ec0ab88084d4 | [] | no_license | jnepal/OReilly-Python-Beyond-the-Basics-OOP | fba2229ffd31b87e2ceab48c6c3f7f445ab47493 | 05050a7ecd0db5c9f18cc6e5ae49a07ddf6054cf | refs/heads/master | 2021-05-01T07:16:26.957247 | 2018-02-11T18:16:36 | 2018-02-11T18:16:36 | 121,152,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | '''
Handling Exceptions
'''
import sys

mydict = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
# Ask the user which dictionary key to look up
key = input('please input a key: ')

try:
    print("The value for {0} is {1}".format(key, mydict[key]))
except KeyError as err:
    # A missing key raises KeyError: report it instead of crashing
    print('the key ' + key + ' does not exists')
    print(err)
    # print(sys.exc_info()[0])

'''
Raising Exceptions
'''
def divideByZero(num):
    """Demo helper: dividing by zero always raises ZeroDivisionError."""
    denominator = 0
    return num / denominator
try:
    divideByZero(5)
except ZeroDivisionError as error:
    # Re-raise with a friendlier message; the original exception is kept
    # as implicit context in the traceback
    raise ZeroDivisionError("ZeroDivisionError: You cannot divide a number by zero")
| [
"johndoe@example.com"
] | johndoe@example.com |
4bb99b23bed553342ef2a0ac976d90997b86be87 | 3db8243d09eea8a5c745b32acd0c4375ab4ad2d9 | /shap/plots/force.py | 927678c366716174c9a05bfa27ad01520b6b0b51 | [
"MIT"
] | permissive | augustinas1/shap | 46fc9d7aa3b7ed3efa03943d10c3c7d4ce0038bb | fd2e24d52d26eddb1db42f6b8f22d59611e04f7f | refs/heads/master | 2020-03-25T17:40:01.300183 | 2018-09-14T15:37:07 | 2018-09-14T15:37:07 | 142,184,628 | 0 | 0 | MIT | 2018-07-31T11:30:25 | 2018-07-24T16:24:17 | Jupyter Notebook | UTF-8 | Python | false | false | 12,857 | py | """ Visualize the SHAP values with additive force style layouts.
"""
from __future__ import unicode_literals

import base64
import collections
import collections.abc
import io
import json
import os
import random
import re
import string
import warnings

import numpy as np
import scipy.cluster
import scipy.spatial.distance
from IPython.core.display import display, HTML

from ..common import convert_to_link, Instance, Model, Data, DenseData, Link
def force_plot(base_value, shap_values, features=None, feature_names=None, out_names=None, link="identity",
               plot_cmap="RdBu"):
    """ Visualize the given SHAP values with an additive force layout.

    base_value: scalar expected value of the model output (a length-1 array
        is unwrapped automatically).
    shap_values: (n_samples, n_features) array of SHAP values, or a 1-D array
        for a single sample.
    features / feature_names: the displayed feature values and their names;
        DataFrames/Series/lists are normalized below.
    Returns an HTML visualizer object (single-sample or stacked multi-sample).
    """

    # auto unwrap the base_value
    if type(base_value) == np.ndarray and len(base_value) == 1:
        base_value = base_value[0]

    # A list/array base value means the caller passed multi-output values in
    # the pre-v0.20 argument order — fail with a migration hint.
    if (type(base_value) == np.ndarray or type(base_value) == list):
        if type(shap_values) != list or len(shap_values) != len(base_value):
            raise Exception("In v0.20 force_plot now requires the base value as the first parameter! " \
                            "Try shap.force_plot(explainer.expected_value, shap_values) or " \
                            "for multi-output models try " \
                            "shap.force_plot(explainer.expected_value[0], shap_values[0]).")

    assert not type(shap_values) == list, "The shap_values arg looks looks multi output, try shap_values[i]."

    link = convert_to_link(link)

    # Already-built explanation objects are just rendered directly
    if type(shap_values) != np.ndarray:
        return visualize(shap_values)

    # convert from a DataFrame or other types
    # (string-compare on the type name avoids a hard pandas dependency)
    if str(type(features)) == "<class 'pandas.core.frame.DataFrame'>":
        if feature_names is None:
            feature_names = list(features.columns)
        features = features.values
    elif str(type(features)) == "<class 'pandas.core.series.Series'>":
        if feature_names is None:
            feature_names = list(features.index)
        features = features.values
    elif isinstance(features, list):
        if feature_names is None:
            feature_names = features
        features = None
    elif features is not None and len(features.shape) == 1 and feature_names is None:
        feature_names = features
        features = None

    # normalize a single sample to shape (1, n_features)
    if len(shap_values.shape) == 1:
        shap_values = np.reshape(shap_values, (1, len(shap_values)))

    if out_names is None:
        out_names = ["output value"]

    if shap_values.shape[0] == 1:
        # Single-sample force plot
        # NOTE(review): `labels` is not imported in this file — presumably
        # provided by a shared module elsewhere in the package; confirm.
        if feature_names is None:
            feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]
        if features is None:
            features = ["" for _ in range(len(feature_names))]
        if type(features) == np.ndarray:
            features = features.flatten()

        # check that the shape of the shap_values and features match
        if len(features) != shap_values.shape[1]:
            msg = "Length of features is not equal to the length of shap_values!"
            if len(features) == shap_values.shape[1] - 1:
                msg += " You might be using an old format shap_values array with the base value " \
                       "as the last column. In this case just pass the array without the last column."
            raise Exception(msg)

        instance = Instance(np.zeros((1, len(feature_names))), features)
        e = AdditiveExplanation(
            base_value,
            np.sum(shap_values[0, :]) + base_value,
            shap_values[0, :],
            None,
            instance,
            link,
            Model(None, out_names),
            DenseData(np.zeros((1, len(feature_names))), list(feature_names))
        )
        return visualize(e, plot_cmap)

    else:
        # Multi-sample (stacked) force plot: one explanation per row
        if shap_values.shape[0] > 3000:
            warnings.warn("shap.force_plot is slow many thousands of rows, try subsampling your data.")

        exps = []
        for i in range(shap_values.shape[0]):
            if feature_names is None:
                feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]
            if features is None:
                display_features = ["" for i in range(len(feature_names))]
            else:
                display_features = features[i, :]

            instance = Instance(np.ones((1, len(feature_names))), display_features)
            e = AdditiveExplanation(
                base_value,
                np.sum(shap_values[i, :]) + base_value,
                shap_values[i, :],
                None,
                instance,
                link,
                Model(None, out_names),
                DenseData(np.ones((1, len(feature_names))), list(feature_names))
            )
            exps.append(e)
        return visualize(exps, plot_cmap=plot_cmap)
class Explanation:
    """Marker base class for all explanation objects rendered by this module."""
    def __init__(self):
        pass
class AdditiveExplanation(Explanation):
    """Explanation whose per-feature effects sum (through ``link``) to the output.

    As constructed by force_plot, ``out_value`` equals
    ``base_value + sum(effects)``.
    """
    def __init__(self, base_value, out_value, effects, effects_var, instance, link, model, data):
        self.base_value = base_value    # expected value of the model output
        self.out_value = out_value      # model output for this sample
        self.effects = effects          # per-feature SHAP values
        self.effects_var = effects_var  # optional variance of the effects (may be None)
        assert isinstance(instance, Instance)
        self.instance = instance
        assert isinstance(link, Link)
        self.link = link
        assert isinstance(model, Model)
        self.model = model
        assert isinstance(data, Data)
        self.data = data
err_msg = """
<div style='color: #900; text-align: center;'>
<b>Visualization omitted, Javascript library not loaded!</b><br>
Have you run `initjs()` in this notebook? If this notebook was from another
user you must also trust this notebook (File -> Trust notebook). If you are viewing
this notebook on github the Javascript has been stripped for security.
</div>"""
def initjs():
    """Inject the bundled SHAP Javascript (and logo) into the current notebook.

    Must be run once per notebook before any force plots are rendered;
    otherwise the visualizers fall back to ``err_msg``.
    """
    bundle_path = os.path.join(os.path.split(__file__)[0], "resources", "bundle.js")
    bundle_data = io.open(bundle_path, encoding="utf-8").read()
    logo_path = os.path.join(os.path.split(__file__)[0], "resources", "logoSmallGray.png")
    # Embed the logo inline as base64 so no external resource is needed
    logo_data = base64.b64encode(open(logo_path, "rb").read()).decode('utf-8')
    display(HTML(
        "<div align='center'><img src='data:image/png;base64,{logo_data}' /></div>".format(logo_data=logo_data) +
        "<script>{bundle_data}</script>".format(bundle_data=bundle_data)
    ))
def id_generator(size=20, chars=string.ascii_uppercase + string.digits):
    """Return a random DOM element id: 'i' followed by ``size`` chars from ``chars``."""
    suffix = ''.join(random.choice(chars) for _ in range(size))
    return "i" + suffix
def ensure_not_numpy(x):
    """Convert bytes / numpy scalar values to plain Python types for JSON output.

    bytes -> str (UTF-8 decode), str -> str, numpy scalar -> float,
    anything else is returned unchanged.
    """
    if isinstance(x, bytes):
        return x.decode()
    elif isinstance(x, str):
        # np.str was just an alias of the builtin str; the alias was removed
        # in numpy 1.24, so test against str directly.
        return str(x)
    elif isinstance(x, np.generic):
        # np.asscalar was removed in numpy 1.23; .item() is the supported
        # equivalent for extracting a Python scalar.
        return float(x.item())
    else:
        return x
def verify_valid_cmap(cmap):
    """Validate a force-plot color map argument and return it unchanged.

    Accepts either a named map (string) or a list of at least two
    '#rrggbb' hex colors; raises AssertionError otherwise.
    """
    # Bug fix: `re` was referenced here without ever being imported at module
    # level, so the list branch raised NameError. Import it locally so this
    # function is self-contained.
    import re

    assert (isinstance(cmap, str) or isinstance(cmap, list) or str(type(cmap)).endswith("unicode'>")
            ), "Plot color map must be string or list! not: " + str(type(cmap))
    if isinstance(cmap, list):
        assert (len(cmap) > 1), "Color map must be at least two colors."
        _rgbstring = re.compile(r'#[a-fA-F0-9]{6}$')
        for color in cmap:
            assert (bool(_rgbstring.match(color))), "Invalid color found in CMAP."
    return cmap
def visualize(e, plot_cmap="RdBu"):
    """Render an explanation object (or a sequence of them) as HTML.

    Dispatches on the runtime type of ``e``:
      * AdditiveExplanation              -> single-sample force plot
      * other Explanation                -> simple list visualizer
      * sequence of AdditiveExplanation  -> stacked multi-sample force plot
    """
    plot_cmap = verify_valid_cmap(plot_cmap)
    if isinstance(e, AdditiveExplanation):
        return AdditiveForceVisualizer(e, plot_cmap=plot_cmap).html()
    elif isinstance(e, Explanation):
        return SimpleListVisualizer(e).html()
    # collections.Sequence was removed in Python 3.10; collections.abc.Sequence
    # is the supported name (available since Python 3.3).
    elif isinstance(e, collections.abc.Sequence) and len(e) > 0 and isinstance(e[0], AdditiveExplanation):
        return AdditiveForceArrayVisualizer(e, plot_cmap=plot_cmap).html()
    else:
        assert False, "visualize() can only display Explanation objects (or arrays of them)!"
# Best-effort: when running inside IPython, register HTML formatters so that
# Explanation objects (and lists of them) render automatically in notebooks.
try:
    # register the visualize function with IPython
    ip = get_ipython()
    svg_formatter = ip.display_formatter.formatters['text/html']
    svg_formatter.for_type(Explanation, lambda x: visualize(x).data)
    old_list_formatter = svg_formatter.for_type(list)

    def try_list_display(e):
        # Only hijack rendering for lists of AdditiveExplanation objects;
        # otherwise defer to the previously registered list formatter.
        # (collections.Sequence was removed in Python 3.10; use collections.abc.)
        if isinstance(e, collections.abc.Sequence) and len(e) > 0 and isinstance(e[0], AdditiveExplanation):
            return visualize(e).data
        else:
            return str(e) if old_list_formatter is None else old_list_formatter(e)
    svg_formatter.for_type(list, try_list_display)
except:
    # Deliberate best-effort: outside IPython, get_ipython() is undefined and
    # formatter registration is simply skipped.
    pass
class SimpleListVisualizer:
    """Fallback visualizer that renders an Explanation as a flat feature list."""

    def __init__(self, e):
        assert isinstance(e, Explanation), "SimpleListVisualizer can only visualize Explanation objects!"

        # build the json data: only features with a non-zero effect are shown
        features = {}
        for i in filter(lambda j: e.effects[j] != 0, range(len(e.data.group_names))):
            features[i] = {
                "effect": e.effects[i],
                "value": e.instance.group_display_values[i]
            }
        self.data = {
            "outNames": e.model.out_names,
            "base_value": e.base_value,
            "link": str(e.link),
            "featureNames": e.data.group_names,
            "features": features,
            # NOTE(review): `e.plot_cmap.plot_cmap` looks wrong — Explanation
            # objects in this file have no plot_cmap attribute, so this line
            # would raise AttributeError. Confirm against callers before fixing.
            "plot_cmap": e.plot_cmap.plot_cmap
        }

    def html(self):
        """Return the HTML/JS snippet that mounts the React visualizer."""
        return HTML("""
<div id='{id}'>{err_msg}</div>
 <script>
 if (window.SHAP) SHAP.ReactDom.render(
    SHAP.React.createElement(SHAP.SimpleListVisualizer, {data}),
    document.getElementById('{id}')
  );
</script>""".format(err_msg=err_msg, data=json.dumps(self.data), id=id_generator()))
class AdditiveForceVisualizer:
    """Builds the JSON payload for the single-sample additive force plot."""

    def __init__(self, e, plot_cmap="RdBu"):
        assert isinstance(e, AdditiveExplanation), \
            "AdditiveForceVisualizer can only visualize AdditiveExplanation objects!"

        # build the json data: only features with a non-zero effect are shown
        features = {}
        for i in filter(lambda j: e.effects[j] != 0, range(len(e.data.group_names))):
            features[i] = {
                "effect": ensure_not_numpy(e.effects[i]),
                "value": ensure_not_numpy(e.instance.group_display_values[i])
            }
        self.data = {
            "outNames": e.model.out_names,
            "baseValue": ensure_not_numpy(e.base_value),
            "outValue": ensure_not_numpy(e.out_value),
            "link": str(e.link),
            "featureNames": e.data.group_names,
            "features": features,
            "plot_cmap": plot_cmap
        }

    def html(self, label_margin=20):
        """Return the HTML/JS snippet; label_margin controls label spacing (px)."""
        self.data["labelMargin"] = label_margin
        return HTML("""
<div id='{id}'>{err_msg}</div>
 <script>
 if (window.SHAP) SHAP.ReactDom.render(
    SHAP.React.createElement(SHAP.AdditiveForceVisualizer, {data}),
    document.getElementById('{id}')
  );
</script>""".format(err_msg=err_msg, data=json.dumps(self.data), id=id_generator()))
class AdditiveForceArrayVisualizer:
    """Builds the JSON payload for the stacked, many-sample force plot.

    Samples are ordered by a hierarchical clustering of their SHAP value
    vectors so that similar explanations sit next to each other.
    """

    def __init__(self, arr, plot_cmap="RdBu"):
        assert isinstance(arr[0], AdditiveExplanation), \
            "AdditiveForceArrayVisualizer can only visualize arrays of AdditiveExplanation objects!"

        # order the samples by their position in a hierarchical clustering
        if all([e.model.f == arr[1].model.f for e in arr]):
            D = scipy.spatial.distance.pdist(np.vstack([e.effects for e in arr]), 'sqeuclidean')
            clustOrder = scipy.cluster.hierarchy.leaves_list(scipy.cluster.hierarchy.complete(D))
        else:
            assert False, "Tried to visualize an array of explanations from different models!"

        # make sure that we put the higher predictions first...just for consistency
        if sum(arr[clustOrder[0]].effects) < sum(arr[clustOrder[-1]].effects):
            # BUG FIX: np.flipud returns the reversed array rather than
            # reversing in place; the original call discarded the result,
            # so the intended reversal never happened.
            clustOrder = np.flipud(clustOrder)

        # build the json data
        clustOrder = np.argsort(clustOrder)  # inverse permutation
        self.data = {
            "outNames": arr[0].model.out_names,
            "baseValue": ensure_not_numpy(arr[0].base_value),
            "link": arr[0].link.__str__(),
            "featureNames": arr[0].data.group_names,
            "explanations": [],
            "plot_cmap": plot_cmap
        }
        for (ind, e) in enumerate(arr):
            self.data["explanations"].append({
                "outValue": ensure_not_numpy(e.out_value),
                "simIndex": ensure_not_numpy(clustOrder[ind]) + 1,
                "features": {}
            })
            # keep features that have a non-zero effect or a non-zero value
            for i in filter(lambda j: e.effects[j] != 0 or e.instance.x[0, j] != 0, range(len(e.data.group_names))):
                self.data["explanations"][-1]["features"][i] = {
                    "effect": ensure_not_numpy(e.effects[i]),
                    "value": ensure_not_numpy(e.instance.group_display_values[i])
                }

    def html(self):
        """Return the HTML/JS snippet that mounts the React visualizer."""
        return HTML("""
<div id='{id}'>{err_msg}</div>
 <script>
 if (window.SHAP) SHAP.ReactDom.render(
    SHAP.React.createElement(SHAP.AdditiveForceArrayVisualizer, {data}),
    document.getElementById('{id}')
  );
</script>""".format(err_msg=err_msg, data=json.dumps(self.data), id=id_generator()))
| [
"slund1@cs.washington.edu"
] | slund1@cs.washington.edu |
58be87c385080aa2d8610c062e6534b8eb59cef9 | 06adea92d1e66d653d0884e8469b7352f5de4f04 | /matplotlibMine/change/ACF_PACFPlot.py | 525672a6ccb4aba2cba6966636059490d812989e | [] | no_license | Gedanke/FigureDemo | a3cf1f0998fb0dc7acce9b90ff55453372759575 | e37164521d9c4e8c5a05592749f1779bed2b0903 | refs/heads/master | 2023-04-19T08:47:35.417971 | 2021-05-05T05:09:48 | 2021-05-05T05:09:48 | 361,770,137 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | # -*- coding:utf-8 -*-
import pandas
import matplotlib.pyplot as plt
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
# Import Data
df = pandas.read_csv('../dataset/AirPassengers.csv')

# Draw Plot: ACF (50 lags) on the left, PACF (20 lags) on the right
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6), dpi=80)
plot_acf(df.value.tolist(), ax=ax1, lags=50)
plot_pacf(df.value.tolist(), ax=ax2, lags=20)

# Decorate
# lighten the borders
ax1.spines["top"].set_alpha(.3)
ax2.spines["top"].set_alpha(.3)
ax1.spines["bottom"].set_alpha(.3)
ax2.spines["bottom"].set_alpha(.3)
ax1.spines["right"].set_alpha(.3)
ax2.spines["right"].set_alpha(.3)
ax1.spines["left"].set_alpha(.3)
ax2.spines["left"].set_alpha(.3)

# font size of tick labels
ax1.tick_params(axis='both', labelsize=12)
ax2.tick_params(axis='both', labelsize=12)
# Save before show: show() clears the current figure in some backends
plt.savefig("../photos/change/ACF_PACFPlot.png")
plt.show()
| [
"13767927306@163.com"
] | 13767927306@163.com |
405fb8a3a54541c8e098035467d568b7df4ff9ec | afcf26bbd84161f2775b879a68b2c163878984d9 | /trainboarding/trainboarding.py | fc9946fc53b50fad6607bff4c417b78d00ae3e6e | [] | no_license | jakobkhansen/KattisSolutions | 505a0657fa02a5156c853fc0a6566dd51591d36d | 2869d6c9027515fd41eac6fcaee281aa474810c4 | refs/heads/master | 2023-07-07T19:17:37.614836 | 2023-06-28T18:12:50 | 2023-06-28T18:12:50 | 191,001,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py | import sys
def trainboarding(lines):
    """Solve the Kattis "train boarding" problem.

    lines[0] holds "n l p" (number of cars, car length, passenger count);
    each following line is one passenger's position along the platform.
    Prints two lines: the maximum walking distance to a door, then the
    maximum number of passengers boarding through a single door.
    """
    n, l, p = [int(x) for x in lines[0].split(" ")]
    trains = [0] * n

    max_distance = 0
    for num_str in lines[1:]:
        num = int(num_str)
        distance, index = get_train_index_and_distance(num, l, n)
        # idiomatic max() instead of the conditional-expression form
        max_distance = max(max_distance, distance)
        trains[index] += 1

    print(max_distance)
    print(max(trains))
def get_train_index_and_distance(pos, length, num_cars):
    """Return (walking distance, car index) for a passenger standing at pos.

    Passengers beyond the end of the train walk back to the last car;
    each door sits at the middle of its car.
    """
    if pos >= length * num_cars:
        car = num_cars - 1
    else:
        car = int(pos / length)
    door = car * length + length / 2
    walk = int(abs(pos - door))
    return walk, car
def main():
    # Read the whole problem input from stdin, one entry per line
    lines = [line.strip() for line in sys.stdin]
    (trainboarding(lines))

main()
| [
"jakob.hansen@hotmail.no"
] | jakob.hansen@hotmail.no |
879022569a2c91e78dc5739ac73b451f831e7d0b | 34b7d70002ac87723fea3243bcf22abb42fa0577 | /src/Calculator.py | b916959cfe620664b6e1f0d83de6d5a82785df20 | [] | no_license | vishwa742/Calculator | ca46e42accc6696021202def3ff4d2f5256707a8 | 71b3caa6b3d47ebff038d544074160641b9e2291 | refs/heads/master | 2020-08-23T17:50:10.715967 | 2019-12-01T03:15:08 | 2019-12-01T03:15:08 | 216,675,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | import math
def addition(a, b):
    """Return the sum of a and b."""
    total = a + b
    return total
def subtraction(a, b):
    """Return b minus a (note the reversed operand order)."""
    difference = b - a
    return difference
def multiplication(a, b):
    """Return the product of a and b."""
    product = a * b
    return product
def division(a, b):
    """Return a divided by b, coercing both operands to float."""
    numerator = float(a)
    denominator = float(b)
    return numerator / denominator
def squared(a):
    """Return the square of a."""
    result = a * a
    return result
def squareroot(a):
    """Return the non-negative square root of a (ValueError if a < 0)."""
    root = math.sqrt(a)
    return root
class Calc:
rest=0
def __init__(self):
pass
    def add(self, a, b):
        """Add a and b; store the result on self.result and return it."""
        self.result = addition(a,b)
        return self.result

    def subtract(self, a, b):
        """Return b - a (delegates to subtraction, which reverses operands)."""
        self.result = subtraction(a,b)
        return self.result

    def multiply(self, a, b):
        """Multiply a and b; store and return the result."""
        self.result = multiplication(a,b)
        return self.result

    def divide(self, a, b):
        """Divide a by b as floats; store and return the result."""
        self.result = division(a,b)
        return self.result

    def square(self, a):
        """Square a; store and return the result."""
        self.result = squared(a)
        return self.result
def root(self,a):
self.result = squareroot(a)
return self.result | [
"ezhilvishwanath@gmail.com"
] | ezhilvishwanath@gmail.com |
f1e6e6ba07ad0e06aca375c4c69b91cf60f4619a | 50e5b1a5821ec5f04c33dfcbaf5213e082194fc4 | /project_houses/wsgi.py | 7ba5d4693b11736d719b41b9d76ed3553636dcfe | [] | no_license | FAenX/houses | 242858a64b4268453a31cca0906c09ffe035937c | e21606d368c6f978f11fef814694d4ce41b98494 | refs/heads/master | 2020-04-18T14:25:19.334787 | 2019-05-23T20:03:32 | 2019-05-23T20:03:32 | 167,588,261 | 0 | 0 | null | 2019-05-23T20:03:33 | 2019-01-25T17:43:28 | Python | UTF-8 | Python | false | false | 405 | py | """
WSGI config for project_houses project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before the application is built
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project_houses.settings')

application = get_wsgi_application()
| [
"kipronofb@gmail.com"
] | kipronofb@gmail.com |
28654faa92303808521a1ad97c6e0b89de5f832a | 3aa33ef283df0e36cabd5cb520e7dd62a769bc25 | /Calendar/cal/serializers/Event.py | c7a939947d7c10e0aa6ce1f17ba34e9151de3d59 | [] | no_license | TonyFrancis/DjangoHello | 24df720eb1fe1ec68dee05ba30a4971bc851fc6a | 38ab7bea40a144276c922a770f77bc189c44981f | refs/heads/master | 2021-01-19T08:58:39.442443 | 2017-04-09T12:13:15 | 2017-04-09T12:13:15 | 87,701,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | from rest_framework import serializers
from ..models.events import Event
class EventSerializer(serializers.ModelSerializer):
    """Serializes Event model instances for the calendar REST API."""

    class Meta:
        model = Event
        # DRF >= 3.3 raises an AssertionError when neither `fields` nor
        # `exclude` is declared; '__all__' preserves the old implicit
        # behaviour of serializing every model field.
        fields = '__all__'
| [
"gtony@suyati.com"
] | gtony@suyati.com |
b1193fd55d5fbb0ab7819fccdce73c9a491f1b0d | 6dfcf6bb912661ce857b9474d62a270c1b2bc96b | /Audiobook/myab.py | c366a6064139319144035fc8179f6ee49a684354 | [] | no_license | utkarsharma01/My-Projects | 98a4ffd2a2a517249f1f25b98bd86df43c788c8a | 0a84de971d101b169ba44f087b1ae25a2dfbadc8 | refs/heads/master | 2023-01-03T17:23:59.144497 | 2020-10-28T18:59:26 | 2020-10-28T18:59:26 | 286,280,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | import pyttsx3
import PyPDF2
# Open the source PDF in binary mode
# TODO(review): the handle is never closed — consider a `with` block
book = open('oops.pdf', 'rb')
pdfReader = PyPDF2.PdfFileReader(book)
pages = pdfReader.numPages
print(pages)

speaker = pyttsx3.init()
# NOTE(review): starts at page 7, presumably to skip front matter — confirm
for num in range(7, pages):
    page = pdfReader.getPage(num)
    text = page.extractText()
    # Queue the page text and speak it before moving to the next page
    speaker.say(text)
    speaker.runAndWait()
| [
"noreply@github.com"
] | noreply@github.com |
c7a4356d7fdf1cd0601244d175b68f0a61ee4a2a | 4c6113392ea456e1eb964172b43f0c9846ca712a | /tests/test_volatility.py | 8ff6fe6b0143161483ac60f94b8552bc2c8018be | [
"MIT"
] | permissive | g8a9/pyti | abd344d4d5eb30f36e6c860eb82567d7cacbd780 | 1697ea000730a2238df70505ba77e165619fdf8c | refs/heads/master | 2020-03-27T11:04:22.417031 | 2019-09-09T08:50:51 | 2019-09-09T08:50:51 | 146,463,237 | 0 | 1 | MIT | 2018-08-28T14:53:08 | 2018-08-28T14:53:07 | null | UTF-8 | Python | false | false | 10,070 | py | from __future__ import absolute_import
import unittest
import numpy as np
from tests.sample_data import SampleData
from pyti import volatility
class TestVolatility(unittest.TestCase):
def setUp(self):
"""Create data to use for testing."""
self.data = SampleData().get_sample_close_data()
self.volatility_period_6_expected = [np.nan, np.nan, np.nan, np.nan,
np.nan, 0.1524903845374864, 0.28384513123292787, 0.27472499826423863,
0.38252018527403447, 0.38119139344971686, 0.3932640765681284,
0.345017172104509, 0.35502207797108942, 0.26270939140810423,
0.24341424180238344, 0.12003756515189819, 0.093471666193184894,
0.069100389420744604, 0.070675428704493393, 0.062386517106180067,
0.076730224716846165, 0.099142360710378297, 0.10592610770119171,
0.095343491114294895, 0.094432880117036253, 0.11449523380936444,
0.19874308222305631, 0.26016821375802046, 0.2507081012898657,
0.24600361380487154, 0.24486737357919627, 0.20644095495335393,
0.083562464522411659, 0.089427901528106007, 0.087018108708016018,
0.059113141553367478, 0.04533882542423192, 0.043745342815681064,
0.060849166597298179, 0.070157646564281986, 0.076687212024214385,
0.076789868891622204, 0.079975193196433952, 0.062270973308414995,
0.065217619381487457, 0.080236726179575529, 0.11968338992681561,
0.11104995450689067, 0.14933752225515703, 0.15539159036348982,
0.18969228060158044, 0.18590923547841665, 0.10597103882205337,
0.10565132353205849, 0.097757896252116783, 0.10432107220772911,
0.15464388622372643, 0.24770610313421526, 0.1937347685557344,
0.18639971736694658, 0.17219385405371773, 0.18521003184180665,
0.19111515274069815, 0.67712758698244713, 0.75084329516417858,
0.2899490374301914, 0.23434490783501213, 0.23349254824431451,
0.19491130883035751, 0.17291688690443052, 0.18952455627896306,
0.14943455591620675, 0.12093538881060768, 0.11352129790844248,
0.13675111326211081, 0.19911771276113485, 0.19719310858321595,
0.20301877064572385, 0.17585792951513424, 0.15166114398944808,
0.12154473460299797, 0.1127687218024727, 0.13396457711138229,
0.11961401876780703, 0.12471283828508464, 0.11990156860184273,
0.15070446430502768, 0.37046083687443693, 0.48587580247276602,
0.48262814317551972, 0.4766783934789619, 0.44934857972966907,
0.32796411485291727, 0.24385698905210901, 0.22975650992357466,
0.29279256778033158, 0.2895923424432123, 0.34144133236091717,
0.37761426331474501, 0.37476224778013606, 0.36994155773453391,
0.78667112121907068, 0.86300585080251269, 0.23534333044989458,
0.20968259166195685, 0.22613400310199541, 0.26667264020071202,
0.19666727318947325, 0.074324483776256126, 0.055897268298958649,
0.050047074730884822, 0.053240810369060795, 0.076958905307395881,
0.25066238890997816, 0.3985022148002676, 0.45339018813190163,
0.40090074005473725, 0.11853669350027883, 0.10192315366136466,
0.084981565206439555, 0.094696345543641286, 0.10816591739333566,
0.14787686072786857, 0.094089878168633442, 0.092418384168373155,
0.087753488657869638, 0.12011498586095044]
self.volatility_period_8_expected = [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, 0.14242985319405954, 0.24169985423921733,
0.3113338136575341, 0.35823660012175351, 0.2897109786723715,
0.33920738862680405, 0.30084397674280794, 0.27874472606006989,
0.14104732116634003, 0.10350850671692319, 0.06808649301377627,
0.06174397939422651, 0.055043294296986407, 0.055977225305731342,
0.063756934514425712, 0.084965776367954382, 0.096566525441470791,
0.11148807968007421, 0.11115393420884391, 0.10616253483420113,
0.12732666627619157, 0.20137858090346494, 0.22437096030734238,
0.26314520377982997, 0.23975292286883237, 0.094119224441386942,
0.092781237738100916, 0.096445271412968908, 0.068309958550182667,
0.053436187360247279, 0.050255241224061296, 0.050347489184081405,
0.051256468547238379, 0.069732912097680774, 0.077163466932232569,
0.080016909130893973, 0.069083593021742828, 0.065739601194198222,
0.058817456815561914, 0.060853857257781578, 0.068147115460754693,
0.10291856257863505, 0.13082035431264472, 0.17108073831653245,
0.17704710115987887, 0.12132604897965137, 0.094112286486332075,
0.085525186449793872, 0.10638905070274754, 0.11330484467160756,
0.12192041336088531, 0.15087971223128982, 0.21614349344681355,
0.19857901026629468, 0.19399819303164684, 0.1818708611384795,
0.20511592974926141, 0.22512870638934221, 0.3249909324804976,
0.25715416486495046, 0.25562259799227699, 0.19332500477233347,
0.16618756076676156, 0.18501467898617538, 0.16520561630664882,
0.13640762590737562, 0.1282284121401932, 0.13201283568134109,
0.11105953157811391, 0.11589605525642854, 0.18343547199822768,
0.19311704180590059, 0.17658236946475381, 0.13926554193674917,
0.12236363220142392, 0.1235239400745423, 0.12530921417976978,
0.12816011884378287, 0.12376469343773101, 0.1363460994814035,
0.13827606997226946, 0.17106893662357836, 0.41897704683504988,
0.43046502750119209, 0.38435154822328638, 0.3510007201166348,
0.27101422613079296, 0.20413836250583231, 0.21157867968786048,
0.22742953561116996, 0.24739832604356007, 0.25462527840422455,
0.30406177112394239, 0.3814716445475102, 0.42768111395987835,
0.42847432237222566, 0.27567929241868661, 0.2289390835731577,
0.21867688679964709, 0.22972338923114549, 0.18365959087967343,
0.076786556913883058, 0.059003697401793037, 0.052832920168568283,
0.049505139847874559, 0.051157688941951211, 0.057120316051869298,
0.083940965662256742, 0.24914260070072689, 0.32979011062763736,
0.1323096052074898, 0.10480876704059268, 0.085936680527470583,
0.086629096266763336, 0.083217014518560464, 0.081182983860638047,
0.073828217218582196, 0.086704492613238301, 0.081142442111067303,
0.090650588908834859]
self.volatility_period_10_expected = [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan, 0.15417809035843458,
0.2408116896603047, 0.25045642049343686, 0.30298855648830314,
0.28271934810077803, 0.30019751989944815, 0.15531803130191857,
0.11220844736986649, 0.074895516632368819, 0.064588551678489051,
0.052438052372962583, 0.049009961768482831, 0.05333860076049448,
0.061982060819676429, 0.071085846449511506, 0.094584376755873154,
0.10922283535084741, 0.12007414225686562, 0.11657088324098044,
0.11597960977183221, 0.13634800090518195, 0.21566211367290425,
0.21050418453382061, 0.1025548335663263, 0.1041811347612574,
0.10414591275448988, 0.075824744175844699, 0.059981588478072154,
0.056211687126943105, 0.05682230691715013, 0.056793800883131212,
0.056888695537798205, 0.0547620613726214, 0.068348344590359697,
0.069404424523249103, 0.071850312728412358, 0.065538294186633275,
0.05876109463482912, 0.051571185068965152, 0.058338048271236238,
0.075289596304524004, 0.11326061831246771, 0.14456281738597818,
0.13184989418600479, 0.10571466044585399, 0.092178120503772679,
0.090290148369258125, 0.09258525448595116, 0.11796784502651182,
0.11401260555428137, 0.11748713942752384, 0.15238500510042516,
0.21846560190322034, 0.20518196327986202, 0.20472687260035038,
0.20236309821966347, 0.2124558034437691, 0.20628022609509283,
0.27463453666990251, 0.20551996997796912, 0.17147408459105828,
0.17970534330383031, 0.16991339139073275, 0.15066791286212405,
0.14064333770797666, 0.13913782012536724, 0.11980998348323495,
0.1102096991747443, 0.11004628609875898, 0.11990645094663482,
0.16908191602784542, 0.15575422107109085, 0.13762855713533648,
0.13846608743399774, 0.13277770682867118, 0.12888861589990716,
0.13074380575879921, 0.13964472589084975, 0.13814264807746032,
0.14421353523639924, 0.14995556537715846, 0.19213160105122412,
0.37883088187714375, 0.32162673649585843, 0.2506596765354181,
0.21433145049850072, 0.2210267024430434, 0.19378146428300974,
0.1856025458775277, 0.20103227506988883, 0.22364031524778469,
0.25160504803461164, 0.33144950644656002, 0.42572082344622619,
0.28448686654260275, 0.24665815278320147, 0.23988027396914213,
0.2335068846511005, 0.17518123479515843, 0.079487247958078391,
0.060986278450694285, 0.052450777256972343, 0.049087834377186737,
0.050147935844908974, 0.049494022236588019, 0.052777461547207034,
0.060753791909360075, 0.088537303234590733, 0.12458655576002062,
0.10764438999368131, 0.089739789240085133, 0.084219952095353462,
0.078835298860090011, 0.072477863673140144, 0.062254121762306984,
0.062903192247182049, 0.064946985330127008, 0.080325571807449661]
    def test_volatility_period_6(self):
        """volatility() over the sample data must match the period-6 fixture."""
        period = 6
        v = volatility.volatility(self.data, period)
        np.testing.assert_array_equal(v, self.volatility_period_6_expected)
    def test_volatility_period_8(self):
        """volatility() over the sample data must match the period-8 fixture."""
        period = 8
        v = volatility.volatility(self.data, period)
        np.testing.assert_array_equal(v, self.volatility_period_8_expected)
    def test_volatility_period_10(self):
        """volatility() over the sample data must match the period-10 fixture."""
        period = 10
        v = volatility.volatility(self.data, period)
        np.testing.assert_array_equal(v, self.volatility_period_10_expected)
    def test_volatility_invalid_period(self):
        """A period longer than the data must raise with the documented message."""
        period = 128
        with self.assertRaises(Exception) as cm:
            volatility.volatility(self.data, period)
        expected = "Error: data_len < period"
        self.assertEqual(str(cm.exception), expected)
| [
"kyle@collectiveidea.com"
] | kyle@collectiveidea.com |
235f8543683b0f8e93ab3658fce247f2507db2ac | 2a3743ced45bd79826dcdc55f304da049f627f1b | /venv/lib/python3.7/site-packages/deribit_api.py | 29835d45975a42ff171d83291465e8b8813c9460 | [
"MIT"
] | permissive | Dimasik007/Deribit_funding_rate_indicator | 12cc8cd7c0be564d6e34d9eae91940c62492ae2a | 3251602ae5249069489834f9afb57b11ff37750e | refs/heads/master | 2023-05-26T10:14:20.395939 | 2019-08-03T11:35:51 | 2019-08-03T11:35:51 | 198,705,946 | 5 | 3 | MIT | 2023-05-22T22:29:24 | 2019-07-24T20:32:19 | Python | UTF-8 | Python | false | false | 5,534 | py | # -*- coding: utf-8 -*-
import time, hashlib, requests, base64, sys
from collections import OrderedDict
class RestClient(object):
    def __init__(self, key=None, secret=None, url=None):
        """Create a Deribit REST client.

        key/secret are only required for private endpoints; url defaults
        to the production API host.
        """
        self.key = key
        self.secret = secret
        # One shared session so TCP connections are reused across calls
        self.session = requests.Session()

        if url:
            self.url = url
        else:
            self.url = "https://www.deribit.com"
    def request(self, action, data):
        """Perform one API call and return the useful part of the payload.

        Paths under /api/v1/private/ are POSTed with the legacy
        x-deribit-sig signature header; everything else is a GET.
        Raises Exception on non-200 responses or success == False payloads.
        """
        response = None

        if action.startswith("/api/v1/private/"):
            if self.key is None or self.secret is None:
                raise Exception("Key or secret empty")

            signature = self.generate_signature(action, data)
            response = self.session.post(self.url + action, data=data, headers={'x-deribit-sig': signature}, verify=True)
        else:
            response = self.session.get(self.url + action, params=data, verify=True)

        if response.status_code != 200:
            raise Exception("Wrong response code: {0}".format(response.status_code))

        # NOTE: this local shadows any module-level `json` name
        json = response.json()

        if json["success"] == False:
            raise Exception("Failed: " + json["message"])

        # Prefer the result payload, fall back to the message, else "Ok"
        if "result" in json:
            return json["result"]
        elif "message" in json:
            return json["message"]
        else:
            return "Ok"
def generate_signature(self, action, data):
tstamp = int(time.time()* 1000)
signature_data = {
'_': tstamp,
'_ackey': self.key,
'_acsec': self.secret,
'_action': action
}
signature_data.update(data)
sorted_signature_data = OrderedDict(sorted(signature_data.items(), key=lambda t: t[0]))
def converter(data):
key = data[0]
value = data[1]
if isinstance(value, list):
return '='.join([str(key), ''.join(value)])
else:
return '='.join([str(key), str(value)])
items = map(converter, sorted_signature_data.items())
signature_string = '&'.join(items)
sha256 = hashlib.sha256()
sha256.update(signature_string.encode("utf-8"))
sig = self.key + "." + str(tstamp) + "."
sig += base64.b64encode(sha256.digest()).decode("utf-8")
return sig
def getorderbook(self, instrument):
return self.request("/api/v1/public/getorderbook", {'instrument': instrument})
def getinstruments(self):
return self.request("/api/v1/public/getinstruments", {})
def getcurrencies(self):
return self.request("/api/v1/public/getcurrencies", {})
def getlasttrades(self, instrument, count=None, since=None):
options = {
'instrument': instrument
}
if since:
options['since'] = since
if count:
options['count'] = count
return self.request("/api/v1/public/getlasttrades", options)
def getsummary(self, instrument):
return self.request("/api/v1/public/getsummary", {"instrument": instrument})
def index(self):
return self.request("/api/v1/public/index", {})
def stats(self):
return self.request("/api/v1/public/stats", {})
def account(self):
return self.request("/api/v1/private/account", {})
def buy(self, instrument, quantity, price, postOnly=None, label=None):
options = {
"instrument": instrument,
"quantity": quantity,
"price": price
}
if label:
options["label"] = label
if postOnly:
options["postOnly"] = postOnly
return self.request("/api/v1/private/buy", options)
def sell(self, instrument, quantity, price, postOnly=None, label=None):
options = {
"instrument": instrument,
"quantity": quantity,
"price": price
}
if label:
options["label"] = label
if postOnly:
options["postOnly"] = postOnly
return self.request("/api/v1/private/sell", options)
def cancel(self, orderId):
options = {
"orderId": orderId
}
return self.request("/api/v1/private/cancel", options)
def cancelall(self, typeDef="all"):
return self.request("/api/v1/private/cancelall", {"type": typeDef})
def edit(self, orderId, quantity, price):
options = {
"orderId": orderId,
"quantity": quantity,
"price": price
}
return self.request("/api/v1/private/edit", options)
def getopenorders(self, instrument=None, orderId=None):
options = {}
if instrument:
options["instrument"] = instrument
if orderId:
options["orderId"] = orderId
return self.request("/api/v1/private/getopenorders", options)
def positions(self):
return self.request("/api/v1/private/positions", {})
def orderhistory(self, count=None):
options = {}
if count:
options["count"] = count
return self.request("/api/v1/private/orderhistory", options)
def tradehistory(self, countNum=None, instrument="all", startTradeId=None):
options = {
"instrument": instrument
}
if countNum:
options["count"] = countNum
if startTradeId:
options["startTradeId"] = startTradeId
return self.request("/api/v1/private/tradehistory", options)
| [
"dmitriy00vn@gmail.com"
] | dmitriy00vn@gmail.com |
2884afe943fa91d4367c1089231a104805f2e2a3 | dd27d37ebe509b98ef28c2a25f948e94f9d92668 | /cityAlert/venv/bin/unit2-2.7 | 3b67c365c607c9472a30b302c6cc84e15b117b0a | [] | no_license | soru13/cityvoice | 021ed89605ec52531163bc14ed220bd91e3f50d8 | 658177b893330a6cd71f45bb95ffa32e40387132 | refs/heads/master | 2020-05-20T13:46:21.885466 | 2015-03-21T06:17:34 | 2015-03-21T06:17:34 | 32,622,256 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 344 | 7 | #!/home/soru/adsumDeploy/cityAlert/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'unittest2==0.5.1','console_scripts','unit2-2.7'
__requires__ = 'unittest2==0.5.1'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('unittest2==0.5.1', 'console_scripts', 'unit2-2.7')()
)
| [
"jesus.ed13@gmail.com"
] | jesus.ed13@gmail.com |
688e0b91b2a22f349957eb947eebb21ea60f9c26 | 8ed6a3f494795c5c45c28c78dbf597f017a5b496 | /course/apis/views/nested_view.py | 1b1f1736d84af2eafaf68f05e7b4d1bd91c5bdad | [] | no_license | bopopescu/E-Learning-Backend-Django | 917b1521fd07c4800d0dba347a406779069d1618 | cc732482821c491537e1b4d0b778d372640e1d7b | refs/heads/master | 2022-11-19T07:00:13.350826 | 2020-05-08T22:50:59 | 2020-05-08T22:50:59 | 280,950,530 | 0 | 0 | null | 2020-07-19T21:06:03 | 2020-07-19T21:06:02 | null | UTF-8 | Python | false | false | 997 | py | from rest_framework import generics
from ELearning.restconf.permission import IsOwnerOrReadOnlyLec
from rest_framework import permissions
from course.models import Topic
from ..serializers.nested_serializer import TopicNestedSerializers
class NestedList(generics.ListCreateAPIView):
permission_classes = [permissions.DjangoModelPermissionsOrAnonReadOnly, IsOwnerOrReadOnlyLec]
serializer_class = TopicNestedSerializers
queryset = Topic.objects.all()
def perform_create(self, serializer):
serializer.save()
def get_queryset(self):
queryset = Topic.objects.all()
course = self.request.query_params.get('course')
if course:
queryset = queryset.filter(course=course)
return queryset
class NestedUpdate(generics.RetrieveUpdateDestroyAPIView):
permission_classes = [permissions.DjangoModelPermissionsOrAnonReadOnly, IsOwnerOrReadOnlyLec]
queryset = Topic.objects.all()
serializer_class = TopicNestedSerializers
| [
"soumya.pullakhandam1@gmail.com"
] | soumya.pullakhandam1@gmail.com |
47a973711a8b923b936e2065c5d59905c74acf35 | 810412fc189697eaad5731cd66cc291f1d82c3b5 | /cap2/extensions/experimental/strains/merge_snp_graph.py | 17ef682831a202a1ef92a7392795e9a09cedac61 | [
"MIT"
] | permissive | MetaSUB/CAP2 | c511655ed15a7e886d5216a358fc6e5904b25f24 | 5ccdc0af310dd4ee382a81c7330e04927d9ef5fe | refs/heads/master | 2022-12-03T15:50:59.694245 | 2021-12-29T17:03:50 | 2021-12-29T17:03:50 | 213,112,026 | 12 | 7 | MIT | 2022-11-22T09:28:20 | 2019-10-06T05:09:18 | Python | UTF-8 | Python | false | false | 1,890 | py |
from .tasks import StrainCapGroupTask
from ....pipeline.config import PipelineConfig
from .strainotyping import (
VERSION,
merge_filter_graphs_from_filepaths,
write_graph_to_filepath,
graph_node_table,
)
from .make_snp_graph import MakeSNPGraph
class MergeSNPGraph(StrainCapGroupTask):
MIN_WEIGHT = 2
module_description = """
This module
Motivation:
Negatives:
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.config = PipelineConfig(self.config_filename)
@property
def snp_graphs(self):
return self.module_req_list(MakeSNPGraph)
def requires(self):
return self.snp_graphs
@classmethod
def version(cls):
return 'v0.1.0'
def tool_version(self):
return VERSION
@classmethod
def dependencies(cls):
return [MakeSNPGraph]
@classmethod
def _module_name(cls):
return 'experimental::merge_snp_graph'
def output(self):
out = {
f'merged_snp_graph__{self.genome_name}': self.get_target(f'merged_snp_graph__{self.genome_name}', 'gml.gz'),
f'merged_snp_nodes__{self.genome_name}': self.get_target(f'merged_snp_nodes__{self.genome_name}', 'csv.gz'),
}
return out
@property
def graph_path(self):
return self.output()[f'merged_snp_graph__{self.genome_name}'].path
@property
def node_path(self):
return self.output()[f'merged_snp_nodes__{self.genome_name}'].path
def _run(self):
graph_paths = [snp_graph.graph_path for snp_graph in self.snp_graphs]
merged_graph = merge_filter_graphs_from_filepaths(graph_paths, min_weight=self.MIN_WEIGHT)
write_graph_to_filepath(merged_graph, self.graph_path)
tbl = graph_node_table(merged_graph)
tbl.to_csv(self.node_path, compression='gzip')
| [
"dcdanko@gmail.com"
] | dcdanko@gmail.com |
5c84745a3cac02dc06784f3fcbd2f816a8720f6d | ce823ea8016eeb7d584fd097ec10efee8db0eba8 | /fundamentals/learning.py | 2e3cd3e76e44b14f021f4d0dcdd3db03f5797d62 | [] | no_license | mwendo/Python | df21ba9e5341b9765568d33f5f96ec8776c018cc | 23258d0ed1b8cb6ed548561d7110e0b25172bbe0 | refs/heads/master | 2023-04-12T19:18:31.466050 | 2021-04-24T21:18:22 | 2021-04-24T21:18:22 | 354,918,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | def multiply(num_list, num):
for x in num_list:
x *= num
return num_list
a = [2,4,10,16]
b = multiply(a,5)
print(b)
# output:
# >>>[2,4,10,16]
| [
"wendorfmicah0@gmail.com"
] | wendorfmicah0@gmail.com |
7dbe88fdeeb27765e1152b35ae4712ee2ca1cd69 | c2a526e0eb65b1465ce1fffecb3d395a406c0969 | /db2_exec.py | 4e5b9503484f1bc1ca8c6c28afd51c073d66c425 | [] | no_license | mmitache88/CryptoML | 8e9ce20820c29826163d5662f324c71c155a4d0b | 3c03303d674dcdd03f4bf25c094ee73aba9e9c00 | refs/heads/master | 2020-03-15T08:59:05.504938 | 2018-11-07T23:30:01 | 2018-11-07T23:30:01 | 132,064,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,288 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 12 17:12:48 2018
@author: mmita
"""
#%%
import sqlite3
import time
import datetime
import pandas as pd
import numpy as np
import os.path
#%%
LatestDf = pd.read_csv('D:\\Google Drive\\1 Desktop\\Projects\\Cryptos\\Datasets\\price related\\master_hourly_2018-04-27.csv') # Link to latest dataframe
LatestDf = LatestDf.dropna(axis = 0, how='all')
#%
cwd = os.getcwd()
#cwd = "C:\\Users\\mmita\\Desktop\\temp"
db_path = os.path.join (cwd, "HistoricPriceHourly.db")
conn= sqlite3.connect(db_path)
c= conn.cursor()
#%
def read_from_db():
c.execute('SELECT Close, High, Low, Open, VolumeFrom, VolumeTo, Timestamp, Symbol, ComparisonSymbol FROM CryptoDataHourly')
MasterCompareDf = pd.DataFrame(c.fetchall())
MasterCompareDf.columns = ['Close', 'High', 'Low', 'Open', 'VolumeFrom', 'VolumeTo', 'Timestamp', 'Symbol', 'ComparisonSymbol']
read_from_db.max_timestamp = max(MasterCompareDf['Timestamp'])
print(read_from_db.max_timestamp)
#%
read_from_db() #Check for latest dataframe
UpdateDf = LatestDf[LatestDf['timestamp']>read_from_db.max_timestamp]
UpdateDf = UpdateDf.reset_index(drop=True)
#%%
def create_table():
c.execute('CREATE TABLE IF NOT EXISTS CryptoDataHourly(Close NUMERIC, High NUMERIC, Low NUMERIC, Open NUMERIC,VolumeFrom NUMERIC,VolumeTo NUMERIC, Timestamp TEXT, Symbol TEXT, ComparisonSymbol TEXT)')
#%%
def dynamic_data_entry():
c = conn.cursor()
#Close = [(i,) for i in list(MasterDf['close'])]
Close = UpdateDf['close']
High = UpdateDf['high']
Low = UpdateDf['low']
Open = UpdateDf['open']
#Time = UpdateDf['time']
VolumeFrom = UpdateDf['volumefrom']
VolumeTo = UpdateDf['volumeto']
Timestamp = UpdateDf['timestamp']
Symbol = UpdateDf['Symbol']
ComparisonSymbol = UpdateDf['Comparison_symbol']
for i in range(0, len(Symbol)):
c.execute("INSERT INTO CryptoDataHourly(Close, High, Low, Open, VolumeFrom, VolumeTo, Timestamp, Symbol, ComparisonSymbol) VALUES (?,?,?,?,?,?,?,?,?)", (Close[i],High[i],Low[i],Open[i],VolumeFrom[i],VolumeTo[i],Timestamp[i],Symbol[i],ComparisonSymbol[i]))
conn.commit()
c.close()
conn.close()
#%%
create_table()
dynamic_data_entry()
| [
"mmitache@gmail.com"
] | mmitache@gmail.com |
e1c4554b533878bdc79592b9e4cbeac027a29f4b | c89d722f10b71c42ee281ef6024e51de81b32a65 | /frappe-bench/env/bin/gunicorn_paster | e6c065f9da3efac8f72616ea3c50e8bc09df633b | [] | no_license | maheshlangote1/FrappePortal | ff7efe739997400ecc7d30c3d8d8ce6de71cafac | a71009c2e70158edc23f4e3eca8aff6751787e4b | refs/heads/master | 2021-09-11T01:04:54.457492 | 2018-01-13T13:31:30 | 2018-01-13T13:31:30 | 117,345,053 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 277 | #!/home/digitalprizm/workspace/FrappeProject/frappe-bench/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.pasterapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"maheshlangote1@gmail.com"
] | maheshlangote1@gmail.com | |
9fa74e2c50538fd6a1720505384f49ab1e14c7af | 76a7fa828fd2c5f04839516dad2debde8ed6b8a6 | /Manika_tut1.py | 084d0f3a140c3162c3086f978ea1b5ac61817a72 | [] | no_license | manika-m/cp2 | e0f5f94906a631b9d1a4ede359fc3cab3c02707b | 64aed6d9bc4f0608097026a224f381561deb5356 | refs/heads/master | 2020-12-30T11:28:06.168317 | 2017-05-31T07:38:02 | 2017-05-31T07:38:02 | 91,562,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,261 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
''' Manika Moodliar 214582074'''
import numpy as np
from scipy.integrate import simps
from matplotlib import pyplot as plt
#Problem 3,4,5,6 tut 1
pii = np.pi
#Question 3
def vector(n):
x=np.linspace(0,pii/2,n)
return x
def simpleMethod(n): #integrate cos
dx = (pii/2)/(n-1)
y = np.cos(vector(n))
summ =y.sum()*dx
return summ
print('Question 3')
m=[10,30,100,300,1000,200000]
total_a = 0
for n in m:
s =simpleMethod(n)
err=abs(s-1) #abs value of integral-1
a = np.log(err)/(np.log(n))
print 'Simple integral with n=',n, ' points is ',s
print 'Error for ',n,' points is ',err
print 'a = ',a
if n<5 :
total_a+=a #only compute error for first 5 n's in the array not 200000
avg = total_a/5
print 'Average a is ',avg
print'if num points is n then error scales as ~ 1/n'
#Question 4:
print('\n Question 4')
x=np.arange(11)
print(x)
xodd=x[1::2]
print 'Only odd numbers from array: ',xodd
xeven=x[2:-2:2]
print 'Only even numbers from array skipping first and last elements: ',xeven
#Question 5
print '\n\nQuestion 5 /Simpson"s rule'
m=[11,31,101,301,1001]
total_a = 0
for n in m:
x1=np.linspace(0,pii/2,n)
y1 = np.cos(x1)
simp = simps(y1,x1)
err=abs(simp-1)
alpha=np.log(err)/(np.log(n))
print(alpha)
print 'Simpsons rule integral with n=',n, ' points is ',simp
print 'Error for ',n-1,' points is ',err
print 'a = ',a
total_a+=a
avg = total_a/5
print '\n Average a for simpson rule is ',avg
print 'needed ~200000 points to get the same error accuracy'
#Question 6:
print'\nQuestion 6\n'
m=[11,31,101,301,1001,3001,10001,30001,100001]
m=np.array(m)
simpson_err=np.zeros(m.size)
simple_err=np.zeros(m.size)
for ii in range(m.size):
n=m[ii]
x1=np.linspace(0,pii/2,n)
y1 = np.cos(x1)
simp = simps(y1,x1)
err=abs(simp-1)
simpson_err[ii]=err
simple_err[ii]=np.abs(simpleMethod(n)-1)
plt.plot(m,simple_err)
plt.plot(m,simpson_err)
ax=plt.gca()
ax.set_yscale('log')
ax.set_xscale('log')
plt.xlabel('Number of points')
plt.ylabel('Error')
plt.title('Error against number of points for \n the simple method integration (blue) and for Simpsons rule')
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
4098de298ac192a6b879c46fd5099abfe331fc11 | ec36fea08674d71b8361af69dd361e0c2effb035 | /SeedCast/Masters/models.py | be39015d08a5d8687523715b5dc737fc3c5ddaf3 | [] | no_license | Sai-Kumar-PRACS/IRRI-Test | 96fe926ff5381f268c6d21eb4a3054b93c3aefdb | e37cf575c361a456eb0cac20f5cf1337a69091b2 | refs/heads/master | 2021-09-08T22:49:26.447312 | 2018-03-12T16:41:30 | 2018-03-12T16:41:30 | 106,270,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,848 | py | from datetime import datetime
from django.db import models
from django.core.validators import RegexValidator, MinLengthValidator
from multiselectfield import MultiSelectField
from django.utils.translation import ugettext_lazy as _
from django import forms
# from jchart import Chart
# Create your models here.
class States(models.Model):
state_name = models.CharField(max_length=100, primary_key=True)
class Meta:
verbose_name = _('State')
verbose_name_plural = _('States')
def __str__(self):
return self.state_name
class Districts(models.Model):
state_name = models.ForeignKey(States)
dist_name = models.CharField(max_length=100, primary_key=True)
class Meta:
verbose_name = _('District')
verbose_name_plural = _('Districts')
def __str__(self):
return self.dist_name
class Blocks(models.Model):
state_name = models.ForeignKey(States)
dist_name = models.ForeignKey(Districts)
block_name = models.CharField(max_length=100, primary_key=True)
class Meta:
verbose_name = ('Block')
verbose_name_plural = ('Blocks')
def __str__(self):
return self.block_name
class Panchayats(models.Model):
state_name = models.ForeignKey(States)
dist_name = models.ForeignKey(Districts)
block_name = models.ForeignKey(Blocks)
panchayat_name = models.CharField(max_length=100, primary_key=True)
class Meta:
verbose_name = ('Panchayat')
verbose_name_plural = ('Panchayats')
def __str__(self):
return self.panchayat_name
class Villages(models.Model):
state_name = models.ForeignKey(States)
dist_name = models.ForeignKey(Districts)
block_name = models.ForeignKey(Blocks)
panchayat_name = models.ForeignKey(Panchayats)
village_name = models.CharField(max_length=100, primary_key=True)
class Meta:
verbose_name = _('Village')
verbose_name_plural = _('Villages')
def __str__(self):
return self.village_name
class SPO(models.Model):
spo = models.CharField(max_length=255, primary_key=True)
class Meta:
verbose_name = _('SPO')
def __str__(self):
return self.spo
class Dealer_Registration(models.Model):
shop_name = models.CharField(max_length=255, blank=True)
license_num = models.CharField(max_length=255, blank=True)
company_types_list = (
('private', 'PRIVATE'),
('pacs', 'PACS'),
)
company_type = models.CharField(max_length=7, choices=company_types_list)
dealer_name = models.CharField(max_length=255)
contact_num = models.CharField(max_length=10, validators=[RegexValidator(r'^\d{1,10}$'), MinLengthValidator(10)], null=True)
address = models.TextField(max_length=255, blank=True)
state_name = models.ForeignKey(States)
dist_name = models.ForeignKey(Districts)
block_name = models.ForeignKey(Blocks)
dealer_spo = models.ForeignKey(SPO)
date = models.DateTimeField(default= datetime.now())
dealer_pincode = models.CharField(max_length=6, validators=[RegexValidator(r'^\d{1,10}$'), MinLengthValidator(6)], blank=True)
#Meta CLASS
class Meta:
verbose_name = _('Dealer')
verbose_name_plural = _('Dealers')
def __str__(self):
return self.shop_name
class AAO_Registration(models.Model):
aao_name = models.CharField(max_length=255)
contact_number = models.CharField(max_length=10, validators=[RegexValidator(r'^\d{1,10}$'), MinLengthValidator(10)])
state_name = models.ForeignKey(States)
dist_name = models.ForeignKey(Districts)
block_name = models.ForeignKey(Blocks)
#Meta Class
class Meta:
verbose_name = _('AAO')
verbose_name_plural = _('AAOs')
def __str__(self):
return self.aao_name + '-' + str(self.block_name)
class VAW_Registration(models.Model):
VAW_name = models.CharField(max_length=255)
VAW_contact_number = models.CharField(max_length=10, validators=[RegexValidator(r'^\d{1,10}$'), MinLengthValidator(10)])
state_name = models.ForeignKey(States)
dist_name = models.ForeignKey(Districts)
block_name = models.ForeignKey(Blocks)
panchayat_name = models.ForeignKey(Panchayats)
#Meta Class
class Meta:
verbose_name = _('VAW')
verbose_name_plural = _('VAWs')
def __str__(self):
return self.VAW_name
class STRVCategory(models.Model):
def image_tag(self):
return u'<img src="%s" />' % '/home/ubuntu/irri-pracs/SeedCastfromGit/IRRI-Test/SeedCast/pics'
image_tag.short_description = 'Image'
image_tag.allow_tags = True
id = models.AutoField(primary_key=True)
category_name = models.CharField(max_length=255)
category_short_code = models.CharField(max_length=255)
category_description = models.TextField(max_length=650)
image = models.ImageField(upload_to="static/imgs/uploaded/category")
#Meta Class
class Meta:
verbose_name = _('STRV Category')
verbose_name_plural = _('STRV Category')
def __str__(self):
return self.category_short_code
class STRVVariety(models.Model):
id = models.AutoField(primary_key=True)
category_name = models.ForeignKey(STRVCategory)
variety_name = models.CharField(max_length=255)
variety_code = models.CharField(max_length=255)
description = models.TextField(max_length=650)
duration_in_days = models.CharField(max_length=20)
suitable_land_type = models.CharField(max_length=200)
plant_height = models.CharField(max_length=20)
grain_type = models.CharField(max_length=255)
yield_in_tonne = models.CharField(max_length=20)
yield_advantage = models.CharField(max_length=50)
#Meta Class
class Meta:
verbose_name = _('STRV Variety')
verbose_name_plural = _('STRV Varieties')
def __str__(self):
return self.variety_name
# class VAWDemand(models.Model):
# vaw = models.ForeignKey(VAW_Registration, db_column='VAW_Registration_id')
# village_name = models.CharField(max_length=255)
# variety_name = models.ForeignKey(STRVVariety)
# varietyName = models.CharField(max_length=100)
# quantity = models.PositiveIntegerField()
# date_collected = models.DateField(default=datetime.now())
# check = models.BooleanField()
#
# class Meta:
# verbose_name = 'VAW-Demand'
# verbose_name_plural = 'VAW-Demands'
#
# def __str__(self):
# return str(self.village_name)
#
# class DealerDemand(models.Model):
# dealer = models.ForeignKey(Dealer_Registration, db_column='Dealer_Registration_id')
# variety_name = models.ForeignKey(STRVVariety)
# quantity = models.PositiveIntegerField()
# date_collected = models.DateField()
# chk = models.BooleanField(default=True)
#
# class Meta:
# verbose_name = 'Dealer Demand'
# verbose_name_plural = 'Dealer Demands'
#
# def __str__(self):
# return str(self.quantity)
class Mobnum(models.Model):
mobnum = models.CharField(max_length=10, validators=[RegexValidator(r'^\d{1,10}$'), MinLengthValidator(10)])
class Meta:
verbose_name = _('Mobile Number')
verbose_name_plural = _('Mobile numbers')
def __str__(self):
return str(self.mobnum)
class Vawmobnum(models.Model):
vaw_num = models.CharField(max_length=10, validators=[RegexValidator(r'^\d{1,10}$'), MinLengthValidator(10)])
class Meta:
verbose_name = _('VawMobile')
verbose_name_plural = _('VawMobile Numbers')
def __str__(self):
return str(self.vaw_num)
class Varietynew(models.Model):
category = models.IntegerField()
def __str__(self):
return str(self.category)
class Stock(models.Model):
dealer_shop = models.ForeignKey(Dealer_Registration, db_column='Dealer_Registration_id')
variety_name = models.ForeignKey(STRVVariety)
available = models.PositiveIntegerField()
date_wn_available = models.DateField()
check = models.BooleanField()
class Meta:
verbose_name = 'Stock'
verbose_name_plural = 'Stocks'
def __str__(self):
return str(self.variety_name)
class Pilotplots(models.Model):
farmer_name = models.CharField(max_length=255)
contact_num = models.CharField(max_length=10, validators=[RegexValidator(r'^\d{1,10}$'), MinLengthValidator(10)])
dist_name = models.ForeignKey(Districts)
block_name = models.ForeignKey(Blocks)
panchayat_name = models.ForeignKey(Panchayats)
village = models.CharField(max_length=255)
variety = models.ForeignKey(STRVVariety)
class Meta:
verbose_name = 'Pilot Plots'
def __str__(self):
return str(self.dist_name)
class Plotsnew(models.Model):
dist_name = models.ForeignKey(Districts)
block_name = models.ForeignKey(Blocks)
panchayat_name = models.ForeignKey(Panchayats)
class Meta:
verbose_name = "Plots Post"
class Feedback(models.Model):
name = models.CharField(max_length=255)
mobile_number = models.CharField(max_length=10, validators=[RegexValidator(r'^\d{1,10}$'), MinLengthValidator(10)])
email = models.EmailField(blank=True)
suggestion = models.TextField(max_length=1650)
class Meta:
verbose_name = 'Feedback'
def __str__(self):
return self.name
class ViewDealerlist(models.Model):
district = models.ForeignKey(Districts)
def __str__(self):
return str(self.district)
class STRAvailability(models.Model):
variety = models.ForeignKey(STRVVariety)
#shop = models.ForeignKey(Dealer_Registration)
def __str__(self):
return str(self.variety)
# #Graphs...
# class Graph1(VAWDemand, models):
# class Meta:
# proxy = True
# verbose_name = 'Variety wise Demand'
# verbose_name_plural = 'Variety wise Demands'
#
#
| [
"sai@pracs.in"
] | sai@pracs.in |
00e9f5fe14e266706112b3eda5db3a81edd109a1 | 4fdd98d5e82385393d4eb2f6526cddb15563c477 | /src/morphforge/core/quantities/__init__.py | 8ed61d5caaf565c48693fc05504751f56db48a69 | [
"BSD-2-Clause"
] | permissive | bmerrison/morphforge | f8541d4471ce13519986c42d4ebb3714a238e390 | 6d06845493bf01aae94a706bfde5d4eb9c733659 | refs/heads/master | 2021-01-18T07:49:47.645031 | 2012-09-26T20:54:13 | 2012-09-26T20:54:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,047 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
#from quantities import *
from morphforge.core.quantities.fromcore import factorise_units_from_list
from morphforge.core.quantities.fromcore import unit
import common_neuroscience_defs
from morphforge.core.quantities.wrappers import NpPqWrappers
from morphforge.core.quantities.common_neuroscience_defs import mS, uS, nS, pS
from morphforge.core.quantities.common_neuroscience_defs import mF, uF, nF, pF
from morphforge.core.quantities.common_neuroscience_defs import um2, cm2
from morphforge.core.quantities.common_neuroscience_defs import mm2, m2
from morphforge.core.quantities.common_neuroscience_defs import Molar, nMolar
from morphforge.core.quantities.common_neuroscience_defs import uMolar
from morphforge.core.quantities.common_neuroscience_defs import ohmcm
from morphforge.core.quantities.common_neuroscience_defs import MOhm
from morphforge.core.quantities.common_neuroscience_defs import mV
from morphforge.core.quantities.common_neuroscience_defs import pA_um2
from quantities import ms, Quantity, millivolt, milliamp, picoamp
from quantities import milli, siemens, millisecond, volt, J, second
U = unit
__all__ = [
'factorise_units_from_list',
'unit',
'NpPqWrappers',
'common_neuroscience_defs',
'mS', 'uS', 'nS', 'pS',
'mF', 'uF', 'nF', 'pF',
'um2', 'cm2', 'mm2', 'm2',
'Molar', 'uMolar', 'nMolar',
'ohmcm', 'MOhm',
'mV','pA_um2',
'ms',
'Quantity',
'millivolt','milliamp','picoamp',
'milli', 'siemens',
'millisecond',
'volt','J','second'
]
| [
"mikehulluk@googlemail.com"
] | mikehulluk@googlemail.com |
95b4ba670fad9aa6e2ada7300f4aa62646de42ef | 897d82d4953ed7b609746a0f252f3f3440b650cb | /day07/exercise_personal/08_exercise.py | 36811015b50dfbbced51178172c00201a0a3c549 | [] | no_license | haiou90/aid_python_core | dd704e528a326028290a2c18f215b1fd399981bc | bd4c7a20950cf7e22e8e05bbc42cb3b3fdbe82a1 | refs/heads/master | 2022-11-26T19:13:36.721238 | 2020-08-07T15:05:17 | 2020-08-07T15:05:17 | 285,857,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | list_poker = []
for r in range(1,7):
for c in range(1,7):
for v in range(1,7):
list_poker.append((r,c,v))
print(list_poker) | [
"caoho@outlook.com"
] | caoho@outlook.com |
8a752594fbaede8a55376c2bb862d7962842e631 | fa67314df981eb8c72790819ca29f45c37c52c69 | /Assignment-1_CS16BTECH11036/Question4/Dtree.py | a37b07fb18afb5767de65c0ba9122cf331dccafb | [] | no_license | omsitapara23/AML | 5ce142751354cee72a8007ba952c55ae8a90d193 | 7d320ef6ce342590dfbce9e70d9d9fff7561939b | refs/heads/master | 2020-04-20T01:45:41.095561 | 2019-03-12T17:13:57 | 2019-03-12T17:13:57 | 168,553,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,800 | py | import numpy as np
import csv
import json
from sklearn.neighbors import KNeighborsClassifier
from sklearn import preprocessing
from collections import Counter
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn import tree
def makeTrain(data_train):
totAttr = set([])
count=0
labels = []
for item in data_train:
labels.append(item.get("cuisine"))
ingredits = item.get("ingredients")
for i in ingredits:
totAttr.add(i)
count += 1
featureVec = []
for i in totAttr:
featureVec.append(i)
data = np.zeros((count, len(totAttr)))
count =0
for item in data_train:
ingredits = item.get("ingredients")
for i in ingredits:
if i in featureVec:
ind = featureVec.index(i)
data[count,ind] = 1
count +=1
return data, len(totAttr), labels, featureVec
def makeTest(data_test, totAttr, featureVec):
no = 0
for item in data_test:
no += 1
ids = []
data = np.zeros((no, totAttr))
count = 0
for item in data_test:
ids.append(item.get("id"))
ingredits = item.get("ingredients")
for i in ingredits:
if i in featureVec:
ind = featureVec.index(i)
data[count,ind] = 1
count += 1
return data, ids
def preprocessing_data(data_train, data_test):
return preprocessing.scale(data_train), preprocessing.scale(data_test)
def learn(data_train, labels):
model = tree.DecisionTreeClassifier()
model.fit(data_train, labels)
return model
def test(data_test, model):
output = model.predict(data_test)
return output
def write_csv(output, ids):
text_file = open("Output.csv", "w")
text_file.write("id,cuisine\n")
counter = 0
for instance in output:
text_file.write("%d,%s\n" % (ids[counter] , instance))
counter += 1
text_file.close()
if __name__ == "__main__":
#opening the files
with open('train.json') as f:
data_train = json.load(f)
with open('test.json') as f1:
data_test = json.load(f1)
data_train, totAttr, labels, featureVec = makeTrain(data_train)
print "Train loaded"
data_test, ids = makeTest(data_test, totAttr, featureVec)
print "Test loaded"
print "Preprocessing..."
data_train, data_test = preprocessing_data(data_train, data_test)
print "Preprocessing complete"
print "Learning..."
model = learn(data_train, labels)
print "Model learned"
print "Predicting..."
output = test(data_test, model)
print "Predection complete writing to file..."
write_csv(output, ids)
print "Writing success"
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
42cf07fd048084be0a2a5628d4d24208c37b1b2e | 176878355049c726e8a3a09e645fac99809745fc | /game/camera.py | 274969e176d7a354a8a376835ca3212b8eddb24b | [
"MIT"
] | permissive | senhordaluz/jc1-python | d0e2d62d84010ffad19c3025c86b40e6de540594 | 7ed14199147dcc74c970740b670a2da22d91e550 | refs/heads/master | 2021-01-19T13:15:17.366353 | 2017-08-22T02:57:52 | 2017-08-22T02:57:52 | 100,836,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,811 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 21 22:33:58 2017
@author: Pedro da Luz
"""
import pygame
class SpriteSheet(object):
"""
Classe para lidar com sprites animados
reatangulo = (x, y, x + offset, y + ofset)
"""
def __init__(self, filename):
self.sheet = pygame.image.load(filename).convert()
@property
def rect(self):
"""Retorna rect da posição a ser cordada do sprite"""
return self._rect
@rect.setter
def rect(self, value):
"""Grava rect da posição a ser cordata do sprite"""
self._rect = value
@property
def image(self):
"""Retorna a imagem cortada a partir do rect salvo na classe"""
return self.image_at(self.rect, -1)
# Load a specific image from a specific rectangle
def image_at(self, retangulo, colorkey = None):
"""Loads image from x,y,x+offset,y+offset"""
rect = pygame.Rect(retangulo)
image = pygame.Surface(rect.size).convert()
image.blit(self.sheet, (0, 0), rect)
if colorkey is not None:
if colorkey is -1:
colorkey = image.get_at((0,0))
image.set_colorkey(colorkey, pygame.RLEACCEL)
return image
# Load a whole bunch of images and return them as a list
def images_at(self, rects, colorkey = None):
"""Loads multiple images, supply a list of coordinates"""
return [self.image_at(rect, colorkey) for rect in rects]
# Load a whole strip of images
def load_strip(self, rect, image_count, colorkey = None):
"Loads a strip of images and returns them as a list"
tups = [(rect[0]+rect[2]*x, rect[1], rect[2], rect[3])
for x in range(image_count)]
return self.images_at(tups, colorkey) | [
"pedro.luz@hotmail.com.br"
] | pedro.luz@hotmail.com.br |
828e53f2e62d6cc45ed309a2d29a4778afa6d5a6 | 057bdbd048d8b99064eb06af45d9e40beff6fe80 | /examples/app.py | 5726ced00da4c2d832a28e7d5bce9fbca39c9927 | [
"MIT"
] | permissive | miguelgrinberg/APIFairy | 5a058f9763c381b765a4139366e35e579b4a1723 | ed2c9b99e8ed8b7cd61a1b95f7f295bd2a902590 | refs/heads/main | 2023-07-24T14:22:21.282560 | 2023-07-15T23:01:50 | 2023-07-15T23:01:50 | 299,060,489 | 303 | 28 | MIT | 2023-01-05T15:49:05 | 2020-09-27T15:24:33 | Python | UTF-8 | Python | false | false | 2,202 | py | """Welcome to the APIFairy Simple Example project!
## Overview
This is a short and simple example that demonstrates many of the features of
APIFairy.
"""
from typing import Annotated
from uuid import uuid4
from flask import Flask, abort
from flask_marshmallow import Marshmallow
from apifairy import APIFairy, body, response, other_responses
app = Flask(__name__)
app.config['APIFAIRY_TITLE'] = 'APIFairy Simple Example'
app.config['APIFAIRY_VERSION'] = '1.0'
ma = Marshmallow(app)
apifairy = APIFairy(app)
users = []
class UserSchema(ma.Schema):
class Meta:
description = 'This schema represents a user'
id = ma.String(dump_only=True, description="The user's id")
username = ma.String(required=True, description="The user's username")
first_name = ma.String(description="The user's first name")
last_name = ma.String(description="The user's last name")
age = ma.Integer(description="The user's age")
password = ma.String(load_only=True, description="The user's password")
@app.get('/users')
@response(UserSchema(many=True), description="The users")
def get_users():
"""Return all the users."""
return users
@app.post('/users')
@body(UserSchema)
@response(UserSchema, description="The new user")
@other_responses({400: 'Duplicate username or validation error'})
def new_user(user):
"""Create a new user."""
if any([u['username'] == user['username'] for u in users]):
abort(400)
new_id = uuid4().hex
user['id'] = new_id
users.append(user)
return user
@app.get('/users/<id>')
@response(UserSchema, description="The requested user")
@other_responses({404: 'User not found'})
def get_user(id: Annotated[str, 'The id of the user']):
"""Return a user."""
user = [u for u in users if u['id'] == id]
if not user:
abort(404)
return user[0]
@app.errorhandler(400)
def bad_request(e):
return {'code': 400, 'error': 'bad request'}
@app.errorhandler(404)
def not_found(e):
return {'code': 404, 'error': 'not found'}
@apifairy.error_handler
def validation_error(status_code, messages):
return {'code': status_code, 'error': 'validation error',
'messages': messages['json']}
| [
"miguel.grinberg@gmail.com"
] | miguel.grinberg@gmail.com |
5a2841e67c9fc377ea825cd9e34b1e98c26b6e0e | 60a267a7136b3cec2727824122bc6cda28c331e5 | /src/compas/files/csv.py | 17e17921601bb0662b4c6f1ddc31e317ed607396 | [
"MIT"
] | permissive | BlenderCN-Org/compAS | 4a257637d181188c0b68210f1126fa826be226d5 | 9796066a2dc26f39fe6ad0a0d44a1ef8a84a608a | refs/heads/master | 2020-05-30T02:11:18.495302 | 2017-03-21T13:42:17 | 2017-03-21T13:42:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,328 | py | from __future__ import print_function
try:
import urllib.request as urllib2
except ImportError:
import urllib2
__author__ = ['Tom Van Mele <vanmelet@ethz.ch>', ]
__copyright__ = 'Copyright 2014, Block Research Group - ETH Zurich'
__license__ = 'MIT License'
__email__ = 'vanmelet@ethz.ch'
class CSVReader(object):
"""Read the contents of a *csv* file.
Parameters:
filepath (str): Path to the file.
delimiter (str): Optional. Cell delimiter. Default is ``','``.
remote (bool): Optional. Is the file in a remote location? Default is ``False``.
"""
def __init__(self, filepath, delimiter=',', remote=False):
self.filepath = filepath
self.delimiter = delimiter
self.remote = remote
self._content = None
self._headers = []
self._rows = []
self.open()
self.pre()
self.read()
self.post()
def open(self):
if self.remote:
resp = urllib2.urlopen(self.filepath)
self._content = resp.readlines()
else:
with open(self.filepath) as fh:
self._content = fh.readlines()
def pre(self):
pass
def read(self):
self._headers = self._content[0].strip().split(self.delimiter)
for line in iter(self._content[1:]):
line = line.strip()
row = line.split(self.delimiter)
self._rows.append(row)
def post(self):
pass
def headers(self):
return self._headers
def rows(self, include_headers=False):
"""Retrieve the row data.
Parameters:
include_headers (bool): Optional. If ``True``, return per row a
dictionary with the headers as keys and the corresponding columns
as values. Default is ``False``.
Returns:
list of list: If ``include_headers=False``. The row data.
list of dict: If ``include_headers=True``. The row data as a list of dicts.
"""
if include_headers:
return [dict((self._headers[i], row[i]) for i in range(len(row))) for row in self._rows]
return self._rows
def columns(self, include_headers=False):
"""Retrieve the column data.
Parameters:
include_headers (bool): Optional. Default is ``False``.
Returns:
list of list: If ``include_headers=False``. The column data.
list of dict: If ``include_headers=True``. The column data as a dictionary.
"""
columns = zip(*self._rows)
if include_headers:
return dict((self._headers[i], columns[i]) for i in range(len(columns)))
return columns
class CSVWriter(object):
"""Write the contents of a *csv* file.
Parameters:
filepath (str): Path to the file.
rows (list of list, list of dict): The row data.
headers (list): Optional. Column headers. Default is ``None``.
delimiter (str): Optional. Cell delimiter. Default is ``','``.
"""
def __init__(self, filepath, rows, headers=None, delimiter=','):
self.filepath = filepath
self.rows = rows
self.headers = headers
self.delimiter = delimiter
self.pre()
self.write()
def pre(self):
if self.headers:
h = len(self.headers)
assert all([len(row) <= h for row in self.rows]), 'Some rows contain more data than there are headers.'
def write(self):
with open(self.filepath, 'wb+') as fp:
if self.headers:
fp.write('{0}\n'.format(self.delimiter.join(self.headers)))
for row in self.rows:
if isinstance(row, dict):
pass
else:
fp.write('{0}\n'.format(self.delimiter.join(row)))
# ==============================================================================
# Debugging
# ==============================================================================
if __name__ == '__main__':
csv = CSVReader('make_blocks.csv', ',')
print(csv.headers())
print(csv.rows())
print(csv.columns(True))
| [
"rippmanm@localhost"
] | rippmanm@localhost |
8fe14d308d8109ed2ecf4cde166009167e97a67c | feaed316ea3653015e79d39d3ca4feb6d8c5d1f0 | /web_practice_python/3rd_week/queue_class.py | c593562de8f2c1dd0ee353379b77514296cad695 | [] | no_license | seunghee63/study_web_python | ee0afd86cee90b06be10f76397bda74f5f84dcaf | b2eb91265090916895d316f865d7ac9514236175 | refs/heads/master | 2020-05-04T00:20:21.235950 | 2019-05-02T13:04:49 | 2019-05-02T13:04:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,665 | py | class queue_class:
def __init__(self):
self.__queue_list = list()
self.__isVariable = False
@property
def isVariable(self):
return self.__isVariable
@isVariable.setter
def isVariable(self, new_isVar):
self.__isVariable = new_isVar
def info(self):
print(self.__queue_list)
print(self.__isVariable)
def pop(self):
cnt = len(self.__queue_list)
if cnt == 0:
print("* 더이상 삭제가 불가능합니다.")
return
self.__queue_list.pop(0)
def push(self, value):
if self.__isVariable != False and len(self.__queue_list) == self.__isVariable:
print("* 더이상 삽입이 불가능합니다.")
return
self.__queue_list.append(value)
def front(self):
if len(self.__queue_list) == 0 :
return
return self.__queue_list[0]
def back(self):
cnt = len(self.__queue_list)
if cnt == 0 :
return
return self.__queue_list[cnt-1]
def size(self):
return len(self.__queue_list)
def empty(self):
if len(self.__queue_list) <= 0 :
return True
else :
return False
if __name__ == "__main__":
queue = queue_class()
queue.push(1)
queue.isVariable = 2
queue.push(2)
queue.push(3)
queue.empty()
print(queue.front())
print(queue.back())
queue.push(4)
queue.push(4)
queue.push(4)
queue.back()
queue.pop()
queue.pop()
queue.pop()
queue.empty()
queue.size()
queue.front() | [
"0603yang@naver.com"
] | 0603yang@naver.com |
49f8181467ef64c97ff48bc65bd315a49e3c3d18 | 4f8e99d998b3248febcdd087df5109213133dd0c | /bluerobotics/pymavlink/sensors_reading.py | 482c306c7ae97a72f3a749708f52a4923e58a7a8 | [] | no_license | xuzhizun/pixhawk_tools | 7a5d938c24da5edd5dfd8ebe78c9d628decc2661 | b4166ed0d1731ae3b069d5b5f5da4814a37b4577 | refs/heads/main | 2023-04-15T01:43:07.046061 | 2021-04-27T10:24:33 | 2021-04-27T10:24:33 | 361,980,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,268 | py |
'''
set stream rate on an APM
'''
from __future__ import print_function
from builtins import range
import sys
from pymavlink import mavutil
def wait_heartbeat(m):
'''wait for a heartbeat so we know the target system IDs'''
print("Waiting for APM heartbeat")
m.wait_heartbeat()
print("Heartbeat from APM (system %u component %u)" % (m.target_system, m.target_system))
def show_messages(m):
'''show incoming mavlink messages'''
while True:
msg = m.recv_match(blocking=True)
if not msg:
return
if msg.get_type() == "BAD_DATA":
if mavutil.all_printable(msg.data):
sys.stdout.write(msg.data)
sys.stdout.flush()
if msg.get_type() == "RAW_IMU":
msg_d = msg.to_dict()
print(msg)
# create a mavlink serial instance
master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
# wait for the heartbeat msg to find the system ID
wait_heartbeat(master)
#print("Sending all stream request for rate %u" % args.rate)
for i in range(0, 3):
master.mav.request_data_stream_send(master.target_system, master.target_component,
mavutil.mavlink.MAV_DATA_STREAM_ALL, 9600, 1)
show_messages(master)
| [
"zhizun_222@163.com"
] | zhizun_222@163.com |
88048ffd2436f9c6fe46b6324788e53e8153f674 | f1e3cdbc478711828c3940b93903c2a7d7c7331e | /conanfile.py | b60bd3b6117997a2191fde766e320c5f7339fcaa | [
"MIT"
] | permissive | Jean1995/cubic_interpolation | 85027e46ca20a69e9a3c40ea3cfb8a03e4879de1 | dffa2af3110d77bffb525d428efea54ea7ee6982 | refs/heads/main | 2023-03-23T02:17:08.224667 | 2021-03-19T16:41:20 | 2021-03-19T16:41:20 | 338,343,132 | 0 | 0 | null | 2021-02-12T14:34:14 | 2021-02-12T14:34:14 | null | UTF-8 | Python | false | false | 3,618 | py | from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.32.0"
class CubicInterpolationConan(ConanFile):
name = "cubicinterpolation"
homepage = "https://github.com/MaxSac/cubic_interpolation"
license = "MIT"
url = "https://github.com/conan-io/conan-center-index"
description = "Leightweight interpolation library based on boost and eigen."
topics = ("interpolation", "splines", "cubic", "bicubic", "boost", "eigen3")
settings = "os", "compiler", "build_type", "arch"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": False,
"fPIC": True
}
exports_sources = "*"
generators = "cmake"
_cmake = None
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
def requirements(self):
self.requires("boost/1.75.0")
self.requires("eigen/3.3.9")
@property
def _minimum_compilers_version(self):
return {
"Visual Studio": "16",
"gcc": "5",
"clang": "5",
"apple-clang": "5.1",
}
@property
def _required_boost_components(self):
return ["filesystem", "math", "serialization"]
def validate(self):
miss_boost_required_comp = any(getattr(self.options["boost"], "without_{}".format(boost_comp), True) for boost_comp in self._required_boost_components)
if self.options["boost"].header_only or miss_boost_required_comp:
raise ConanInvalidConfiguration("{0} requires non header-only boost with these components: {1}".format(self.name, ", ".join(self._required_boost_components)))
if self.settings.compiler.cppstd:
tools.check_min_cppstd(self, "14")
minimum_version = self._minimum_compilers_version.get(
str(self.settings.compiler), False
)
if not minimum_version:
self.output.warn(
"CubicInterpolation requires C++14. Your compiler is unknown. Assuming it supports C++14."
)
elif tools.Version(self.settings.compiler.version) < minimum_version:
raise ConanInvalidConfiguration(
"CubicInterpolation requires C++14, which your compiler does not support."
)
if self.settings.compiler == "Visual Studio" and self.options.shared:
raise ConanInvalidConfiguration("cubicinterpolation shared is not supported with Visual Studio")
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["BUILD_EXAMPLE"] = False
self._cmake.definitions["BUILD_DOCUMENTATION"] = False
self._cmake.configure()
return self._cmake
def build(self):
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("LICENSE", dst="licenses" )
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
def package_info(self):
self.cpp_info.names["cmake_find_package"] = "CubicInterpolation"
self.cpp_info.names["cmake_find_package_multi"] = "CubicInterpolation"
self.cpp_info.libs = ["CubicInterpolation"]
self.cpp_info.requires = ["boost::headers", "boost::filesystem", "boost::math", "boost::serialization", "eigen::eigen"]
| [
"maximilian.sackel@udo.edu"
] | maximilian.sackel@udo.edu |
39694e63e136c7de35f1644beaf3721a4977f8c9 | e6f62e277c63ad417664c292989ac7b18b5d3027 | /sanguo/core/plunder.py | 6a6fd5fdf9222be63677815da601b879f2ae45c8 | [] | no_license | yueyoum/sanguo-server | 1fe8df69ca923e0166fd6f75e11d08a2b4bbde37 | 08df991a3bffea4b4f56f20ffea23bc373465332 | refs/heads/master | 2021-01-15T15:33:00.427811 | 2016-06-21T09:28:03 | 2016-06-21T09:28:03 | 13,704,516 | 1 | 3 | null | 2014-10-22T07:45:31 | 2013-10-19T16:38:19 | Python | UTF-8 | Python | false | false | 16,744 | py | # -*- coding: utf-8 -*-
__author__ = 'Wang Chao'
__date__ = '1/22/14'
import time
import random
import base64
import dill
from mongoscheme import DoesNotExist
from core.server import server
from core.character import Char
from core.battle import PlunderBattle
from core.mongoscheme import MongoPlunder, MongoAffairs, MongoPlunderBoard
from core.exception import SanguoException
from core.task import Task
from core.prison import Prison
from core.resource import Resource
from core.attachment import make_standard_drop_from_template, get_drop
from core.achievement import Achievement
from core.formation import Formation
from core.signals import plunder_finished_signal
from core.msgpipe import publish_to_char
from core.msgfactory import create_character_infomation_message
from core.times_log import TimesLogPlunder
from core.activity import ActivityEntry
from utils.api import apicall, api_server_list
from utils import pack_msg
from preset.settings import (
PRISONER_POOL,
PLUNDER_GOT_GOLD_PARAM_BASE_ADJUST,
PLUNDER_GET_PRISONER_PROB,
PLUNDER_GET_DROPS_TIMES,
PLUNDER_DROP_DECREASE_FACTOR,
PLUNDER_DROP_MIN_FACTOR,
)
from preset import errormsg
from preset.data import VIP_FUNCTION, BATTLES
from protomsg import GetPlunderLeaderboardResponse
from protomsg import Battle as MsgBattle
from protomsg import PlunderNotify
from protomsg import Plunder as MsgPlunder
class PlunderCurrentTimeOut(Exception):
pass
class PlunderRival(object):
@classmethod
def search(cls, city_id, exclude_char_id=None, return_dumps=False):
docs = MongoAffairs._get_collection().find(
{'hang_city_id': city_id},
{'_id': 1}
)
affair_ids = [doc['_id'] for doc in docs]
rival_id = 0
while affair_ids:
rival_id = random.choice(affair_ids)
if rival_id != exclude_char_id:
break
affair_ids.remove(rival_id)
rival_id = 0
obj = cls(rival_id, city_id)
if not return_dumps:
return obj
return base64.b64encode(dill.dumps(obj))
@classmethod
def search_all_servers(cls, city_id, exclude_char_id=None):
# 跨服掠夺
# 流程
# 1. 向HUB取到所有的servers
# 2. random choice一个 server,并且调用其API,获得目标玩家数据
# 3. 开打
# 4. 调用HUB 打完的API
# 5. HUB收到请求后,根据target_char_id所在的server,并调用其对于API
data = {
'is_test': 1 if server.test else 0
}
servers = api_server_list(data=data)
s = random.choice(servers['data'])
url = "https://{0}:{1}/api/plunder/search/".format(s['host'], s['port_https'])
data = {
'city_id': city_id,
'exclude_char_id': exclude_char_id,
}
res = apicall(data=data, cmd=url)
target = res['data']
obj = dill.loads(base64.b64decode(target))
obj.server_url = "https://{0}:{1}".format(s['host'], s['port_https'])
return obj
def __init__(self, char_id, city_id):
from core.affairs import Affairs
from core.battle.hero import BattleHero
self.city_id = city_id
if char_id:
char = Char(char_id)
self.char_id = char_id
self.name = char.mc.name
self.level = char.mc.level
self.power = char.power
self.leader = char.leader_oid
f = Formation(char_id)
self.formation = f.in_formation_hero_ids()
self.hero_original_ids = f.in_formation_hero_original_ids()
self.gold = Affairs(self.char_id).get_drop()['gold']
self.msg_char_information = create_character_infomation_message(self.char_id).SerializeToString()
battle_heros = []
for hid in self.formation:
if hid == 0:
battle_heros.append(None)
else:
battle_heros.append(BattleHero(hid))
self.battle_heros = base64.b64encode(dill.dumps(battle_heros))
else:
self.char_id = 0
self.name = ""
self.level = 0
self.power = 0
self.leader = 0
self.formation = []
self.hero_original_ids = []
self.gold = 0
self.msg_char_information = ""
self.battle_heros = base64.b64encode(dill.dumps([None] * 9))
def get_plunder_gold(self, level):
level_diff = self.level - level
if level_diff > 8:
level_diff = 8
if level_diff < -8:
level_diff = -8
result = level_diff * 0.025 + PLUNDER_GOT_GOLD_PARAM_BASE_ADJUST
return int(result * self.gold)
def make_plunder_msg(self, level):
msg = MsgPlunder()
msg.char.MergeFromString(self.msg_char_information)
msg.gold = self.get_plunder_gold(level)
return msg
def __bool__(self):
return self.char_id != 0
__nonzero__ = __bool__
class Plunder(object):
def __init__(self, char_id):
self.char_id = char_id
self.load_mongo_record()
def load_mongo_record(self):
try:
self.mongo_plunder = MongoPlunder.objects.get(id=self.char_id)
self.set_default_value()
except DoesNotExist:
self.mongo_plunder = MongoPlunder(id=self.char_id)
self.mongo_plunder.current_times = self.max_plunder_times()
self.mongo_plunder.save()
def set_default_value(self):
# 后面新增加的fileds需要初始化数值的。 比如 current_times
data = {
'current_times': self.max_plunder_times(),
'current_times_lock': False,
'char_id': 0,
'char_name': "",
'char_gold': 0,
'char_power': 0,
'char_leader': 0,
'char_formation': [],
'char_hero_original_ids': [],
'char_city_id': 0
}
changed = False
record = self.mongo_plunder._get_collection().find_one({'_id': self.char_id})
for k, v in data.iteritems():
if k not in record:
setattr(self.mongo_plunder, k, v)
changed = True
if changed:
self.mongo_plunder.save()
def get_plunder_target(self, city_id):
"""
@:rtype: PlunderRival
"""
target = PlunderRival.search_all_servers(city_id, exclude_char_id=self.char_id)
self.mongo_plunder.char_id = target.char_id
self.mongo_plunder.char_name = target.name
self.mongo_plunder.char_gold = target.get_plunder_gold(Char(self.char_id).mc.level)
self.mongo_plunder.char_power = target.power
self.mongo_plunder.char_leader = target.leader
self.mongo_plunder.char_formation = target.formation
self.mongo_plunder.char_hero_original_ids = target.hero_original_ids
self.mongo_plunder.char_city_id = target.city_id
self.mongo_plunder.battle_heros = target.battle_heros
self.mongo_plunder.server_url = target.server_url
self.mongo_plunder.save()
if target:
gold_needs = BATTLES[city_id].refresh_cost_gold
resource = Resource(self.char_id, "Plunder Refresh")
resource.check_and_remove(gold=-gold_needs)
return target
def max_plunder_times(self):
char = Char(self.char_id)
times = VIP_FUNCTION[char.mc.vip].plunder
ae = ActivityEntry(self.char_id, 40007)
if not ae or not ae.is_ok():
return times
if times > 10:
return times
return 10
def clean_plunder_target(self):
self.mongo_plunder.char_id = 0
self.mongo_plunder.char_name = ""
self.mongo_plunder.char_gold = 0
self.mongo_plunder.char_power = 0
self.mongo_plunder.char_leader = 0
self.mongo_plunder.char_formation = []
self.mongo_plunder.char_hero_original_ids = []
self.mongo_plunder.char_city_id = 0
self.mongo_plunder.battle_heros = ""
self.mongo_plunder.server_url = ""
self.mongo_plunder.save()
def change_current_plunder_times(self, change_value, allow_overflow=True):
max_times = self.max_plunder_times()
if change_value > 0 and not allow_overflow and self.mongo_plunder.current_times > max_times:
return
# for i in range(10):
# self.load_mongo_record()
# if not self.mongo_plunder.current_times_lock:
# self.mongo_plunder.current_times_lock = True
# self.mongo_plunder.save()
# break
# else:
# time.sleep(0.2)
# else:
# raise PlunderCurrentTimeOut()
#
# try:
# self.mongo_plunder.current_times += change_value
# if self.mongo_plunder.current_times < 0:
# self.mongo_plunder.current_times = 0
#
# if not allow_overflow and change_value > 0:
# if self.mongo_plunder.current_times > max_times:
# self.mongo_plunder.current_times = max_times
# finally:
# self.mongo_plunder.current_times_lock = False
# self.mongo_plunder.save()
# self.send_notify()
MongoPlunder._get_collection().update(
{'_id': self.char_id},
{'$inc': {'current_times': change_value}}
)
self.load_mongo_record()
if self.mongo_plunder.current_times < 0:
MongoPlunder._get_collection().update(
{'_id': self.char_id},
{'$set': {'current_times': 0}}
)
if not allow_overflow:
if self.mongo_plunder.current_times > max_times:
MongoPlunder._get_collection().update(
{'_id': self.char_id},
{'$set': {'current_times': max_times}}
)
self.send_notify()
def plunder(self):
if not self.mongo_plunder.char_id:
raise SanguoException(
errormsg.PLUNDER_NO_RIVAL,
self.char_id,
"Plunder Battle",
"no rival target"
)
if self.mongo_plunder.current_times <= 0:
raise SanguoException(
errormsg.PLUNDER_NO_TIMES,
self.char_id,
"Plunder Battle",
"no times"
)
self.change_current_plunder_times(change_value=-1)
rival_battle_heros = dill.loads(base64.b64decode(self.mongo_plunder.battle_heros))
msg = MsgBattle()
pvp = PlunderBattle(
self.char_id,
self.mongo_plunder.char_id,
msg,
self.mongo_plunder.char_name,
rival_battle_heros,
)
pvp.start()
t = Task(self.char_id)
t.trig(3)
to_char_id = self.mongo_plunder.char_id
target_server_url = self.mongo_plunder.server_url
if msg.self_win:
standard_drop = self._get_plunder_reward(
self.mongo_plunder.char_city_id,
self.mongo_plunder.char_gold,
self.mongo_plunder.char_hero_original_ids
)
self.clean_plunder_target()
achievement = Achievement(self.char_id)
achievement.trig(12, 1)
PlunderLeaderboardWeekly.incr(self.char_id)
TimesLogPlunder(self.char_id).inc()
else:
standard_drop = make_standard_drop_from_template()
self.mongo_plunder.plunder_times += 1
self.mongo_plunder.save()
self.send_notify()
plunder_finished_signal.send(
sender=None,
from_char_id=self.char_id,
from_char_name=Char(self.char_id).mc.name,
to_char_id=to_char_id,
from_win=msg.self_win,
standard_drop=standard_drop,
target_server_url=target_server_url,
)
return (msg, standard_drop)
def _get_plunder_reward(self, city_id, gold, hero_original_ids):
def _get_prisoner():
prison = 0
heros = [hid for hid in hero_original_ids if hid]
while heros:
hid = random.choice(heros)
heros.remove(hid)
if hid in PRISONER_POOL:
prison = hid
break
ac = ActivityEntry(self.char_id, 30005)
"""@type: core.activity.Activity30005"""
if not ac:
_prob = PLUNDER_GET_PRISONER_PROB
else:
_prob = ac.get_prisoner_prob()
ae = ActivityEntry(self.char_id, 50005)
if ae and ae.is_valid():
_vip = ae.get_current_value(self.char_id)
if _vip == 6:
_prob = 50
elif _vip >= 7:
_prob = 100
if random.randint(1, 100) <= _prob:
return prison
return 0
char = Char(self.char_id).mc
vip_plus = VIP_FUNCTION[char.vip].plunder_addition
standard_drop = make_standard_drop_from_template()
standard_drop['gold'] = int(gold * (1 + vip_plus / 100.0))
# 战俘
got_hero_id = _get_prisoner()
if got_hero_id:
p = Prison(self.char_id)
p.prisoner_add(got_hero_id, gold/2)
achievement = Achievement(self.char_id)
achievement.trig(13, 1)
# 掉落
city = BATTLES[city_id]
if city.normal_drop:
drop_ids = [int(i) for i in city.normal_drop.split(',')]
drop_prob = max(
PLUNDER_GET_DROPS_TIMES - (self.mongo_plunder.plunder_times - 1) * PLUNDER_DROP_DECREASE_FACTOR,
PLUNDER_GET_DROPS_TIMES * PLUNDER_DROP_MIN_FACTOR
)
drop = get_drop(drop_ids, multi=int(drop_prob))
drop.pop('gold')
standard_drop.update(drop)
resource = Resource(self.char_id, "Plunder Reward")
resource.add(**standard_drop)
self.send_notify()
if got_hero_id:
standard_drop['heros'] = [(got_hero_id, 1)]
return standard_drop
def send_notify(self):
self.load_mongo_record()
msg = PlunderNotify()
msg.current_times = self.mongo_plunder.current_times
msg.max_times = self.max_plunder_times()
msg.success_times_weekly = PlunderLeaderboardWeekly.get_char_times(self.char_id)
publish_to_char(self.char_id, pack_msg(msg))
@staticmethod
def cron_job():
MongoPlunder._get_collection().update({}, {'$set': {'plunder_times': 0}}, multi=True)
class PlunderLeaderboardWeekly(object):
@staticmethod
def incr(char_id, times=1):
try:
board = MongoPlunderBoard.objects.get(id=char_id)
except DoesNotExist:
board = MongoPlunderBoard(id=char_id)
board.times = 0
board.times += times
board.save()
@staticmethod
def get_leaderboard(length=10):
boards = MongoPlunderBoard.objects.order_by('-times').limit(length)
return [(b.id, b.times) for b in boards]
@staticmethod
def get_char_times(char_id):
try:
board = MongoPlunderBoard.objects.get(id=char_id)
except DoesNotExist:
board = MongoPlunderBoard(id=char_id)
board.times = 0
board.save()
return board.times
@staticmethod
def clean():
MongoPlunderBoard.drop_collection()
@staticmethod
def make_get_response():
msg = GetPlunderLeaderboardResponse()
msg.ret = 0
for cid, times in PlunderLeaderboardWeekly.get_leaderboard():
leader = msg.leaders.add()
leader.char.MergeFrom(create_character_infomation_message(cid))
leader.times = times
return msg
@staticmethod
def load_from_redis():
# 仅运行一次,用作将redis中的数据导入mongodb
# 因为已经清除redis_persistence的配置,所以这里写死,先前的配置是 127.0.0.1:6380
import redis
from core.server import server
REDISKEY = '_plunder_leaderboard_weekly:{0}'.format(server.id)
r = redis.Redis(host='127.0.0.1', port=6380)
data = r.zrange(REDISKEY, 0, -1, withscores=True)
for char_id, times in data:
char_id = int(char_id)
times = int(times)
PlunderLeaderboardWeekly.incr(char_id, times)
| [
"yueyoum@gmail.com"
] | yueyoum@gmail.com |
8786f7cea9480d2dd0083629d6f050e773cfd72b | ace48bbf0af822a2f78d53c1c83dc9460ba0b1aa | /Machine_Learning_Models/2LogisticRegression_sklearn.py | 4fb103c49428ece76049edd1ccc06726928103a1 | [] | no_license | 15940260868/HDK_On_Electrode_Arrays | 5cb139f7643441adcbc7e1078f3086cc6422a8d5 | c39124994e6529f8e28358db0bf5c91fc6827cc9 | refs/heads/master | 2020-09-11T07:13:30.734629 | 2019-11-15T18:54:08 | 2019-11-15T18:54:08 | 221,984,558 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,215 | py | # -*- coding: utf-8 -*-
from scipy import io as spio
import numpy as np
from sklearn import svm
from sklearn.linear_model import LogisticRegression
def logisticRegression_oneVsAll():
# data = loadmat_data("data_digits.mat")
# X = data['X'] # 获取X数据,每一行对应一个数字20x20px
# y = data['y'] # 这里读取mat文件y的shape=(5000, 1)
# y = np.ravel(y) # 调用sklearn需要转化成一维的(5000,)
dataX = loadtxtAndcsv_data("../documents/input.txt", "\t", np.float64) # 读取数据
dataY = loadtxtAndcsv_data("../documents/output.txt", ",", np.float64)
X = dataX[:, :] # 获取X数据,每一行对应一个数字20x20px
y = dataY[:, :] # 这里读取mat文件y的shape=(5000, 1)
#y = np.ravel(y) # 调用sklearn需要转化成一维的(5000,)
model = LogisticRegression()
model.fit(X, y) # 拟合
predict = model.predict(X) # 预测
print(u"预测准确度为{}".format(np.mean(np.float64(predict == y) * 100)))
def loadtxtAndcsv_data(fileName, split, dataType):
return np.loadtxt(fileName, delimiter=split, dtype=dataType)
# 加载mat文件
def loadmat_data(fileName):
return spio.loadmat(fileName)
if __name__ == "__main__":
logisticRegression_oneVsAll() | [
"noreply@github.com"
] | noreply@github.com |
5e0cbc10b2fcdd26f9f263d4bd7390fed7058dbd | bfc57b805a7e38c6dede3a74dddd221b8731bb11 | /apps/secrets/models.py | 4e9c2a47ad27a56f88f8de2df5d08d558a763bed | [] | no_license | dancinturtle/multiappsDemo | 213aa024e78ba1b6b0df686023c4e8eb07237daa | fd301b4c00d0be72de674efc2104c7fad8ab2c73 | refs/heads/master | 2021-01-21T10:46:39.657685 | 2017-02-28T22:58:10 | 2017-02-28T22:58:10 | 83,487,889 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,581 | py | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class UserManager(models.Manager):
def register(self, postedName):
if len(postedName) < 1:
return(False, "Username cannot be empty")
try:
self.get(username=postedName)
return(False, "Username already exists")
except:
newuser = self.create(username=postedName)
return(True, newuser)
def login(self, postedName):
try:
founduser = self.get(username=postedName)
return(True, founduser)
except:
return(False, "Username was not found in our database")
class SecretManager(models.Manager):
def validate(self, postedSecret, userid):
# all posted secrets should have more than 3 characters
if len(postedSecret)<4:
return(False, "Secrets must be at least four characters long")
try:
currentuser = User.objects.get(id=userid)
self.create(secret=postedSecret, author=currentuser)
return(True, "Your secret is safe with us")
except:
return(False, "We could not create this secret")
def newlike(self, secretid, userid):
try:
secret = self.get(id=secretid)
except:
return(False, "This secret is not found in our database")
user = User.objects.get(id=userid)
if secret.author == user:
return(False, "Shame on you, you shouldn't like your own secrets")
secret.likers.add(user)
return(True, "You liked this secret!")
def deleteLike(self, secretid, userid):
try:
secret = self.get(id=secretid)
except:
return(False, "This secret is not found in our database")
user = User.objects.get(id=userid)
if secret.author != user:
return(False, "Shame on you, you shouldn't delete secrets that aren't your own")
secret.delete()
return(True, "Secret deleted")
class User(models.Model):
username = models.CharField(max_length=45)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = UserManager()
class Secret(models.Model):
secret = models.CharField(max_length=400)
author = models.ForeignKey(User)
likers = models.ManyToManyField(User, related_name="likedsecrets")
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = SecretManager()
| [
"amy.giver@gmail.com"
] | amy.giver@gmail.com |
dbc1e900db999d09373cf8e18de8e6d965592723 | b7a50b6336adbb36218e9aec446eba5268081ec8 | /signout.py | 967e4804d4bdba7bcfc83cc0746ed5603c6d2fa0 | [] | no_license | shengshuyang/my-udacity-web-homework | 61055ba57429ee77caaeae46b723b1a4f9f0cbe7 | 1b805044e431718cd76814097b56d00bd6e6e1a5 | refs/heads/master | 2020-04-06T07:07:17.326882 | 2016-09-01T21:10:33 | 2016-09-01T21:10:33 | 65,122,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | import webapp2
import handler as hd
from validation_util import *
class SignOutHandler(hd.Handler):
    """Clears the login cookie and sends the visitor back to the home page."""

    def get(self):
        # WebOb's delete_cookie expires the cookie; per WebOb this is the
        # same as the original set_cookie(key='user', value=None) form.
        self.response.delete_cookie('user')
        self.redirect("/")
app = webapp2.WSGIApplication([
('/signout', SignOutHandler)
], debug=True) | [
"shengshuyangdlut@gmail.com"
] | shengshuyangdlut@gmail.com |
91292598fcf145537d4abcdd43250c13e2a22d47 | 2e3133ea649337fbf1f99727dddd927496f4c9d8 | /QGraphViz/Engines/__init__.py | ed152db452864694071d8bfb26ead3b4a364b1a4 | [
"MIT"
] | permissive | ParisNeo/QGraphViz | 7f58d5c44f54174e8372431a705215e446b3f7a3 | dbc51be0a4055645c6295716df4ed8a978d3e7a4 | refs/heads/master | 2022-11-03T19:16:15.764651 | 2022-11-01T10:26:41 | 2022-11-01T10:26:41 | 224,505,604 | 28 | 12 | MIT | 2022-11-01T10:13:50 | 2019-11-27T19:46:44 | Python | UTF-8 | Python | false | false | 94 | py | from QGraphViz.Engines.LayoutEngine import LayoutEngine
from QGraphViz.Engines.Dot import Dot
| [
"aloui.seifeddine@gmail.com"
] | aloui.seifeddine@gmail.com |
40ea2bd23263e70ce428df735e47a5a1bc64e604 | 280e088d5d4216a2f2e2dee8ca99f984353d31d7 | /tuple8.py | 6175043e10b2cd8b5581198ad139d549d9b268f1 | [] | no_license | karthick1808/CDB20DW061 | f4c5648048b6e9ec644bfaa4bb115156fb90b6cb | 47649f3065d70de253eb6d06eecc9b65fc7a9b94 | refs/heads/main | 2023-03-17T12:55:40.875667 | 2021-03-12T05:33:55 | 2021-03-12T05:33:55 | 328,544,917 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | fruits=("apple","banana","cherry")
# Method 1: iterate the elements directly.
for x in fruits:
    print(x)
# Method 2: iterate by index.
# NOTE(review): this prints only the index i, not fruits[i]; given the
# parallel with method 1, fruits[i] was probably intended -- confirm.
for i in range(len(fruits)):
    print(i)
# Joining tuples with + produces a new tuple.
tuple1=(1,2,3,4,5,6,7,8,9)
tuple2=("a","b","c","d")
tuple3=tuple1+tuple2
print(tuple3)
# Multiplying a tuple repeats its contents.
tuple4=tuple1*3
print(tuple4) | [
"noreply@github.com"
] | noreply@github.com |
0fe93c0f349e5a259077b13ba91fe7fbd81f1ea3 | 73bde0a4401f3fca1dd434ca682de3ce9c82e545 | /checkPerson.py | 84c724ff46693ff9b402d2e19cbd7340c669151a | [] | no_license | kaanozbudak/pythonCreditCardCheck | ccd0b434c04e4b5ee4477ccad31b6e9ac7ade87e | 8ea7d4a9eec3a8c640deb8cb291c1e577d04f266 | refs/heads/master | 2020-04-14T18:23:58.367743 | 2019-01-03T20:15:33 | 2019-01-03T20:15:33 | 164,017,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | import json
# Module-level placeholders; overwritten below when rule1.txt / data.txt
# are loaded (Python 2 script).
ageStatement = ''
salaryStatement = ''
stateStatement = ''
nameLengthMax = 0  # NOTE(review): never referenced elsewhere in this file
age = 0
salary = 0
name = ''
state = ''
def checkPerson(ageStatement, salaryStatement, stateStatement):
    # Evaluate the three rule expressions in order and print the first
    # failure (or "yes you can" when all pass); returns None.  Guard
    # clauses replace the original triple-nested if/else; the prints use
    # the parenthesized single-argument form, which behaves identically
    # under Python 2 and 3.
    # SECURITY(review): eval() runs arbitrary code from rule1.txt --
    # only feed it trusted rule files.
    if not eval(ageStatement):
        print('age problem')
        return
    if not eval(salaryStatement):
        print('salary problem')
        return
    if not eval(stateStatement):
        print('city problem')
        return
    print('yes you can')
# Load the rule expressions; after the loop the module globals hold the
# *last* rule of the file, which is what the checks below use.
with open('rule1.txt') as json_file:
    data = json.load(json_file)
    for f in data['rules']:
        ageStatement = f['ageStatement']
        salaryStatement = f['salaryStatement']
        stateStatement = f['stateStatement']
        print (ageStatement + ' ' + salaryStatement + ' ' + stateStatement + ' ')
# Check each person against the last-loaded rule set.  The eval'd rule
# strings presumably reference the age/salary/state globals set here --
# confirm against rule1.txt.
with open('data.txt') as json_file:
    data = json.load(json_file)
    for p in data['people']:
        salary = p['salary']
        age = p['age']
        name = p['name']
        state = p['state']
        print (name + ' ' + str(age) + ' ' + state + ' ' + str(salary))
        checkPerson(ageStatement, salaryStatement, stateStatement)
| [
"noreply@github.com"
] | noreply@github.com |
a0567a9df8877498d5d248d62931f7264b44c8ba | dc884618408a2df2c72a8b44e5006eca468c2fd2 | /L17/171.py | d6492a0c0218ca4771dddbfb647057e16c73352e | [] | no_license | datnerdatner/RTR105 | fdc280cadddcfb7e681fd9b1b3705a5a303beef9 | d36363d2a5fa310075ebe5c6d15d9a480ffb8912 | refs/heads/master | 2020-03-28T11:00:30.888171 | 2019-07-23T10:00:24 | 2019-07-23T10:00:24 | 148,168,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | # Fails:170.py
# Author: Renats Jakubovskis
# Student ID: 181REB261
# Date: 09.12.2018
# Template for finding a function root with the bisection (dichotomy) method
# -*- coding: utf -8 -*-
from math import sin , fabs
from time import sleep
def f(x):
    # The function whose root is sought.
    return sin(x)
# Define the bounds of the argument x:
a = 1.1
b = 3.2
# Compute the function values at those points:
funa = f(a)
funb = f(b)
# Check whether the given interval contains a root (sign change):
if ( funa * funb > 0.0 ):
    print "Dotajaa intervaalaa [%s, %s] saknju nav"%(a,b)
    sleep(1); exit() # Report on screen, wait 1 sec and stop
# Define the precision with which the root will be searched:
deltax = 0.0001
# Narrow the root-search interval (funa's sign stays valid because a only
# moves to midpoints whose value has the same sign):
k=0
while ( fabs(b-a) > deltax ):
    k+=1
    x = (a+b)/2; funx = f(x)
    if ( funa*funx < 0. ):
        b = x
    else:
        a = x
print 'x = %f; f(x) = %f; k = %d' % (x,f(x),k)
| [
"datner@inbox.lv"
] | datner@inbox.lv |
d6471ac2a7a5c6eca1f4c8f56df3a79378bc7895 | 4365cdadad0026cabdf008bb46cacbaa397122f3 | /SaeHyeon/Algorithms/Brute_Force/BOJ14620.py | 8bdcd0d5ad0feb88bda1460f95d5d196ba5daeaa | [] | no_license | thalals/Algorithm_Study | 31e124df727fb0af9bf9d4905f3eade35a722813 | 3d067442e5e0d6ca6896a0b0c8e58a0dc41e717e | refs/heads/main | 2023-07-06T05:52:04.679220 | 2021-08-19T06:11:56 | 2021-08-19T06:11:56 | 331,019,217 | 1 | 1 | null | 2021-01-19T15:10:19 | 2021-01-19T15:10:18 | null | UTF-8 | Python | false | false | 1,145 | py | import sys
# Fast stdin reader; NOTE: this shadows the built-in input().
input=sys.stdin.readline
# Cross-shaped offsets: the cell itself plus its four neighbours.
dx=[0,0,-1,0,1]
dy=[0,1,0,-1,0]
n=int(input())  # board size (n x n)
gold=[]  # per-cell costs, read row by row below
visit=[[0]*n for _ in range(n)]  # 1 = cell already reserved by a placement
result=9999  # best (minimum) total cost found so far
for _ in range(n):
    gold.append(list(map(int,input().split())))
# print(gold)
# print(visit)
cnt=0  # NOTE(review): unused; solve() takes cnt as a parameter instead
def check(x,y):
    """True iff the whole cross centred at (x, y) lies on the board and
    overlaps no visited cell."""
    global n
    for off in range(5):
        row, col = x + dx[off], y + dy[off]
        if 0 <= row < n and 0 <= col < n and not visit[row][col]:
            continue
        return False
    return True
def gold_calculate(x,y):
    """Cost of a cross placed at (x, y): the centre cell plus its four
    neighbours, each counted exactly once.

    Bug fixed: the original seeded the total with gold[x][y] and then
    added all five (dx, dy) offsets -- but (dx[0], dy[0]) == (0, 0), so
    the centre cell was counted twice.
    """
    global n
    a = 0
    for i in range(5):
        a += gold[x + dx[i]][y + dy[i]]
    return a
def solve(value,cnt):
    """Place the remaining flowers by backtracking; track the cheapest
    total for three non-overlapping crosses in the global `result`.

    Bugs fixed: the inner mark/unmark loops reused `i`, clobbering the
    outer row index, and computed coordinates from the wrong variables
    (nx=j+dx[i], ny=i+dy[i]), so the cells around the chosen position were
    never reserved correctly; a leftover debug print(visit) is removed.
    """
    global result
    if cnt==3:
        result=min(result,value)
        return
    for row in range(n):
        for col in range(n):
            if check(row, col):
                # Reserve the whole cross, recurse, then release it.
                for k in range(5):
                    visit[row + dx[k]][col + dy[k]] = 1
                solve(value + gold_calculate(row, col), cnt + 1)
                for k in range(5):
                    visit[row + dx[k]][col + dy[k]] = 0
# Search every placement of the three crosses and report the minimum cost.
solve(0,0)
print(result)
| [
"tpgus19@gmail.com"
] | tpgus19@gmail.com |
91c04102d7309c5dc96caf9dbaefa29ae8dc3d40 | ecb113be53f2fe1768e85a1004d571c74d87ae8d | /tests/fmlaas/model/model.py | 0621cd46327918a951006e15f1e784933fe91ece | [] | no_license | Internet-SmokeAlarm/core | 39351e4d5bddf19bd59faf51bbc225c0e0521905 | 87b66a10042ec41916c490bb20cb4117f3caf1ba | refs/heads/master | 2023-02-17T18:40:12.822530 | 2020-07-05T20:28:38 | 2020-07-05T20:28:38 | 216,093,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,953 | py | import unittest
from dependencies.python.fmlaas.s3_storage import JobAggregateModelPointer
from dependencies.python.fmlaas.model import Model
class ModelTestCase(unittest.TestCase):
    """Serialization, validation and equality tests for `Model`.

    Expected JSON layout: {"entity_id", "name", "size"}, where `name` is
    the string form of a JobAggregateModelPointer
    (e.g. "4456/5567/1234/aggregate_model").
    """
    def test_to_json_pass(self):
        # to_json() must echo id/size and serialize the pointer name.
        model = Model("1234", str(JobAggregateModelPointer("4456", "5567", "1234")), "123552")
        json_data = model.to_json()
        self.assertEqual(model.get_entity_id(), json_data["entity_id"])
        self.assertEqual("4456/5567/1234/aggregate_model", json_data["name"])
        self.assertEqual(model.get_size(), json_data["size"])
    def test_from_json_pass(self):
        # from_json() must rebuild the pointer object from the name string.
        json_data = {
            'entity_id': '1234',
            'name': '4456/5567/1234/aggregate_model',
            'size': "123552"}
        model = Model.from_json(json_data)
        self.assertEqual(model.get_entity_id(), "1234")
        self.assertEqual(model.get_name(), JobAggregateModelPointer("4456", "5567", "1234"))
        self.assertEqual(model.get_size(), "123552")
    def test_is_valid_json_pass(self):
        # All three keys are required; any missing key invalidates the JSON.
        self.assertTrue(Model.is_valid_json(
            {'entity_id': '1234', 'name': '4456/5567/1234', 'size': "123552"}))
        self.assertFalse(Model.is_valid_json(
            {'name': '4456/5567/1234', 'size': "123552"}))
        self.assertFalse(Model.is_valid_json(
            {'entity_id': '1234', 'size': "123552"}))
        self.assertFalse(Model.is_valid_json(
            {'entity_id': '1234', 'name': '4456/5567/1234'}))
    def test_eq_pass(self):
        # model_1 vs model_2 differ only in entity_id and compare unequal,
        # so the id takes part in equality; identical fields (2 vs 3) are
        # equal and a different size (2 vs 4) is not.
        model_1 = Model("123123", "23123/123123/1231231", "12312313")
        model_2 = Model("564543", "23123/123123/1231231", "12312313")
        model_3 = Model("564543", "23123/123123/1231231", "12312313")
        model_4 = Model("564543", "23123/123123/1231231", "123512313")
        self.assertTrue(model_1 == model_1)
        self.assertFalse(model_1 == model_2)
        self.assertTrue(model_2 == model_3)
        self.assertFalse(model_2 == model_4)
| [
"valetolpegin@gmail.com"
] | valetolpegin@gmail.com |
7f63b70e3dcfea3037118ea458a4d8773a7c3489 | 948832084b0ad09574e9407b8de555cda77d1bd2 | /standard_pcnn.py | 2553d97f44bd038745ec4fdc984e73b8045f0db0 | [] | no_license | wglassly/CID_ATTCNN | bea56ef3e539ac4cc2d42de9cc846c82c813f275 | e0162498fc134a30e2d7c178618e743baca2dbc4 | refs/heads/master | 2021-01-01T18:13:13.205536 | 2017-07-25T07:13:45 | 2017-07-25T07:13:45 | 98,276,458 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 17,781 | py | import os,sys
import numpy as np
# Theano reads these flags at import time, so they must be set before the
# Keras imports below.
os.environ['THEANO_FLAGS'] = "floatX=float32,device=gpu1"#,lib.cnmem=1"
np.random.seed(1337) # for reproducibility
from keras.models import Sequential, Graph, model_from_json, Model
from keras.layers.core import Dense, Dropout, Activation, Flatten, Merge
from keras.layers import Input, merge, Embedding
from keras.layers.convolutional import Convolution1D, MaxPooling1D,Convolution2D, MaxPooling2D
from keras.optimizers import SGD, RMSprop
from keras.callbacks import ModelCheckpoint, Callback
from keras.utils import np_utils
from gensim.models import Word2Vec
import re
from gensim.parsing import strip_multiple_whitespaces
from w2v import train_word2vec
from collections import Counter
import itertools
from layers import MaxPiecewisePooling1D
from log import log_error
# Basic hyperparameters shared by the model-building/training code below.
w2c_len = 30  # word-embedding dimensionality (passed to train_word2vec / Embedding)
dropout_prob = (0.25,0.5)  # dropout after embeddings / after the dense layer
num_filters = 100  # convolution filters per filter size
filter_sizes = (3, 4)  # 1-D convolution window lengths
hidden_dims = 100  # dense-layer width over the pooled conv features
hidden_dims_for_manual = 120  # width for the (currently commented-out) manual-feature branch
nb_epoch = 10
batch_size = 32
val_size = 0.1  # fraction of training data held out for validation
pos_len = 5  # position-embedding dimensionality
sparse_fea = 1  # NOTE(review): not referenced anywhere in this file
def clean_str(string):
"""
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
# Tokens that mark the start/end of the two entities in a sentence.
chemical_label_sentence, disease_label_sentence = "ent1", "ent2"
chemical_label_sentence_end , disease_label_sentence_end = "ent1end", "ent2end"
def build_pos_flag(sentence, sequence_length):
    """Return (positions_to_entity1, positions_to_entity2).

    For each entity: tokens before its start marker get negative offsets,
    tokens from the start marker through the end marker get 0, tokens
    after get positive offsets; slots beyond len(sentence) keep the
    padding value `sequence_length`.  Raises ValueError (via list.index)
    if a marker is missing.

    Changed: `xrange` (Python-2-only) is replaced with `range`, which
    iterates identically here and keeps the helper importable on Python 3.
    """
    def pos_flag(label_start, label_end):
        start = sentence.index(label_start)
        end = sentence.index(label_end)
        pos_list = [sequence_length] * sequence_length
        for i in range(0, start):
            pos_list[i] = i - start
        for i in range(start, end):
            pos_list[i] = 0
        for i in range(end, len(sentence)):
            pos_list[i] = i - end
        return pos_list
    pos_1 = pos_flag(chemical_label_sentence, chemical_label_sentence_end)
    pos_2 = pos_flag(disease_label_sentence, disease_label_sentence_end)
    return pos_1, pos_2
def build_vocab(sentences):
    """
    Builds a vocabulary mapping from word to index based on the sentences.
    Returns vocabulary mapping and inverse vocabulary mapping.
    """
    frequencies = Counter(itertools.chain.from_iterable(sentences))
    # Index words from most to least frequent.
    vocabulary_inv = [word for word, _ in frequencies.most_common()]
    vocabulary = {word: index for index, word in enumerate(vocabulary_inv)}
    return [vocabulary, vocabulary_inv]
def build_input_data(sentences, labels, vocabulary, pos1_sentences, pos2_sentences):
    """
    Maps sentencs and labels to vectors based on a vocabulary.
    """
    encoded = [[vocabulary[token] for token in sentence] for sentence in sentences]
    return [
        np.array(encoded),
        np.array(labels),
        np.array(pos1_sentences),
        np.array(pos2_sentences),
    ]
class input_data:
    """Holds the head/between/tail token-id matrices plus their two
    relative-position matrices and labels for one data split."""
    def __init__(self,head,btwn,tail,y,vocabulary):
        # Each of head/btwn/tail arrives as [sentences, pos1, pos2].
        self.vocabulary = vocabulary
        self.head = self.reads(head) #head contains [head_text, pos1_index, pos2_index]
        self.btwn = self.reads(btwn)
        self.tail = self.reads(tail)
        self.y = self.read_y(y)
    def get_x(self):
        # The three token-id matrices, unconcatenated.
        return [self.head[0],self.btwn[0],self.tail[0]]
    def get_index(self):
        # Cumulative column offsets of the three segments in the
        # concatenated matrix (used as piecewise-pooling split points).
        head_index = self.head[0].shape[1]
        between_index = head_index + self.btwn[0].shape[1]
        tail_index = between_index + self.tail[0].shape[1]
        return [head_index,between_index,tail_index]
    def get_x_concatenate(self):
        # head|between|tail token ids side by side (axis=1).
        #print self.head[0]
        return np.concatenate((self.head[0],self.btwn[0],self.tail[0]),axis=1)
    def get_pos_concatenate(self):
        # Same concatenation for the two position matrices.
        return [np.concatenate((self.head[1],self.btwn[1],self.tail[1]),axis=1), np.concatenate((self.head[2],self.btwn[2],self.tail[2]),axis=1)]
    def reads(self,head):
        # Convert one [sentences, pos1, pos2] part to numpy form.
        x = self.read_x(head)
        pos1,pos2 =self.read_pos(head)
        return [x,pos1,pos2]
    def read_x(self, head):
        # Map tokens to vocabulary ids.
        return np.array([[self.vocabulary[word] for word in sentence] for sentence in head[0]])
    def read_pos(self,head):
        return np.array(head[1]),np.array(head[2])
    def read_y(self,y):
        return np.array(y)
def pad_sentences(sentences, padding_word="<PAD/>",sequence_length = 0):
    """
    Pads all sentences to the same length. The length is defined by the longest sentence.
    Returns padded sentences.
    """
    if sequence_length == 0:
        sequence_length = max(len(x) for x in sentences)
    # A negative pad count (sentence longer than sequence_length) appends
    # nothing, mirroring Python's list-multiplication semantics.
    return [
        sentence + [padding_word] * (sequence_length - len(sentence))
        for sentence in sentences
    ]
#return train_x, train_y, test_x, test_y, sentence_length
def read_data(trainfile,testfile,w2c_file):
    """Load train/test files into input_data bundles sharing one vocabulary.

    The test split is padded to the train-derived max_lengths so both
    splits have identical shapes.  NOTE(review): `w2c_file` is accepted
    but never used in this body -- confirm whether it can be dropped.
    """
    #file to padded_sentences
    train_all_text, train_head, train_btwn, train_tail, train_y, max_lengths = data2numpy(trainfile)
    test_all_text, test_head, test_btwn, test_tail, test_y, max_lengths = data2numpy(testfile,max_lengths=max_lengths,mode='test')
    #map to vocabulary (built over train + test so ids cover both splits)
    vocabulary, vocabulary_inv = build_vocab(train_all_text + test_all_text )
    train_datas , test_datas = input_data(train_head, train_btwn, train_tail,train_y, vocabulary),\
                                input_data(test_head, test_btwn, test_tail, test_y, vocabulary)
    return train_datas, test_datas, max_lengths, vocabulary, vocabulary_inv
#with split one sentence to three part, head, between and tail.
def data2numpy(filename, max_lengths=None, mode='train'):
    """Parse a "label<TAB>sentence" file into padded token data.

    Returns (x_text, head, between, tail, y, max_lengths); labels are 1
    when the label field is "1" and 0 otherwise.

    Fixes: the mutable-default-argument anti-pattern (max_lengths=[]) is
    replaced with a None sentinel, the input file is closed
    deterministically via `with`, and the unused `index`/`x` locals are
    removed.
    """
    if max_lengths is None:
        max_lengths = []
    with open(filename) as handle:
        dataset = handle.read().strip().split('\n')
    y, datas = [], []
    for data in dataset:
        label, sentence = data.split('\t')
        y.append(1 if label.strip() == "1" else 0)
        datas.append(sentence)
    # Tokenize: clean then split on single spaces.
    x_text = [clean_str(sentence).split(" ") for sentence in datas]
    # Each part is [sentences, chemical positions, disease positions].
    head, between, tail, max_lengths = split_x_add_padding(x_text, max_lengths, mode)
    x_text = pad_sentences(x_text)
    return x_text, head, between, tail, y, max_lengths
def split_x_add_padding(x_text,max_lengths, mode):
    """Split each sentence into head / between / tail segments around the
    two entity marker pairs, carry the relative-position lists along, and
    pad each segment.

    In 'train' mode max_lengths is recomputed from the data; otherwise the
    caller-supplied lengths are reused (so test matches train shapes).
    Raises ValueError if any marker token is missing from a sentence.
    """
    #position added
    a1, a2 = [], []
    head, between,tail = [],[],[]
    for sentence in x_text:
        a1_pos, a2_pos = build_pos_flag(sentence, len(sentence))
        a1.append(a1_pos)
        a2.append(a2_pos)
        c_start, c_end, d_start, d_end = sentence.index(chemical_label_sentence),sentence.index(chemical_label_sentence_end),\
                                         sentence.index(disease_label_sentence), sentence.index(disease_label_sentence_end)
        if c_start > d_start: #if not chemical first, switch them so c_* is the earlier entity
            c_start, d_start = d_start, c_start
            c_end, d_end = d_end, c_end
        # Segments overlap on the entity spans: head ends at the first
        # entity's end marker, between covers first-start..second-end,
        # tail starts at the second entity's start marker.
        head.append([sentence[:c_end+1], a1_pos[:c_end+1], a2_pos[:c_end+1]])
        between.append([sentence[c_start:d_end+1], a1_pos[c_start:d_end+1], a2_pos[c_start:d_end+1]])
        tail.append([sentence[d_start:], a1_pos[d_start:], a2_pos[d_start:]])
    # Transpose to ([sentences...], [pos1...], [pos2...]) per segment.
    head ,between, tail = zip(*head),zip(*between),zip(*tail)
    if mode == 'train':
        head_max = max(len(sen) for sen in head[0])
        between_max = max(len(sen) for sen in between[0])
        tail_max = max(len(sen) for sen in tail[0])
        max_lengths = [head_max, between_max, tail_max]
    head = add_padding(sentences=head[0],a1=head[1],a2=head[2],sequence_length=max_lengths[0])
    between = add_padding(between[0],between[1],between[2], sequence_length = max_lengths[1])
    tail = add_padding(tail[0],tail[1],tail[2],sequence_length=max_lengths[2])
    return head, between, tail, max_lengths
def add_padding(sentences, a1, a2, padding_word="<PAD/>", sequence_length = 0):
    """Pad the token lists and both parallel position lists out to
    sequence_length (default: length of the longest sentence); position
    slots are padded with the value `sequence_length` itself, matching
    build_pos_flag's padding convention."""
    if sequence_length == 0:
        sequence_length = max(len(x) for x in sentences)
    padded_sentences, pos1_sentences, pos2_sentences = [], [], []
    for idx, sentence in enumerate(sentences):
        pad = sequence_length - len(sentence)
        padded_sentences.append(sentence + [padding_word] * pad)
        pos1_sentences.append(a1[idx] + [sequence_length] * pad)
        pos2_sentences.append(a2[idx] + [sequence_length] * pad)
    return padded_sentences, pos1_sentences, pos2_sentences
def get_embedding_weights(train_x,test_x,vocabulary_inv,min_count=1, context = 10):
    # Train word2vec over the stacked train+test id matrices and return
    # whatever weight structure train_word2vec produces (w2c_len-dim vectors).
    x = np.concatenate((train_x,test_x),axis=0)
    return train_word2vec(x, vocabulary_inv, w2c_len, min_count, context)
def model_load(max_lengths,index, embedding_weights, vocabulary):#, manual_length):
    """Build and compile the piecewise-CNN (Keras 1.x functional API).

    Three int32 inputs (token ids + two relative-position sequences) are
    embedded, concatenated, convolved with one branch per filter size,
    piecewise-max-pooled at the segment boundaries in `index`, and fed
    through a dense+dropout head to a single sigmoid output.  The
    triple-quoted section is a disabled hand-crafted-feature branch.
    """
    #################CNN0#######################
    sentence_input = Input(shape=(max_lengths,),dtype='int32',name='sentence_input')
    myembed = Embedding(len(vocabulary), w2c_len, input_length=max_lengths,
                        weights=embedding_weights)(sentence_input)
    # Position embeddings: offsets can range over [-max_lengths, max_lengths].
    pos_input1 = Input(shape=(max_lengths,),dtype='int32',name='pos_input1')
    p1embed = Embedding(max_lengths*2+1, pos_len,input_length=max_lengths)(pos_input1)
    pos_input2 = Input(shape=(max_lengths,),dtype='int32',name='pos_input2')
    p2embed = Embedding(max_lengths*2+1, pos_len,input_length=max_lengths)(pos_input2)
    m = merge([myembed,p1embed,p2embed],mode='concat',concat_axis=-1 )
    drop1 = Dropout(dropout_prob[0])(m)
    # One convolution branch per window length in filter_sizes.
    cnn2 = [Convolution1D(nb_filter=num_filters,
                         filter_length= fsz,
                         border_mode='valid',
                         activation='relu',
                         subsample_length=1)(drop1) for fsz in filter_sizes]
    pool2 = [MaxPiecewisePooling1D(pool_length=2, split_index=index)(item) for item in cnn2]
    flatten2 = [Flatten()(pool_node) for pool_node in pool2]
    merge_cnn2 = merge(flatten2,mode='concat')
    x2 = Dense(hidden_dims,activation='relu')(merge_cnn2)
    x3 = Dropout(dropout_prob[1])(x2)
    '''
    manual_input = Input(shape=(manual_length,),dtype='float32',name='manual_input')
    x4 = Dense(hidden_dims_for_manual*2,activation='relu')(manual_input)
    x4 = Dense(hidden_dims_for_manual,activation='relu')(x4)
    #x4 = Dense(hidden_dims_for_manual,activation='relu')(x4)
    m3 = merge([x3,x4],mode='concat')
    '''
    main_loss = Dense(1,activation='sigmoid',name='main_output')(x3)
    model = Model(input= [sentence_input, pos_input1, pos_input2], output=main_loss)
    model.compile(optimizer='adadelta',loss='binary_crossentropy',metrics=['accuracy'])
    return model#, out_layer
def model_save(model, model_file):
    """Persist architecture to <model_file>.json and weights to <model_file>.h5.

    Fix: the JSON file handle is now closed deterministically; the
    original `open(...).write(...)` relied on garbage collection to flush
    and close the file.
    """
    json_string = model.to_json()
    with open(model_file + '.json', 'w') as json_file:
        json_file.write(json_string)
    model.save_weights(model_file + '.h5', overwrite=True)
def fscore(y_test, y_predict):
    """Precision/recall/F1 of `y_predict` against 0/1 labels at a 0.5
    threshold; returns (p, r, f).

    Bug fixed: the original guard `type(i) == np.array([])` compared a
    type object against an empty array instance, which is always falsy,
    so array-wrapped labels were never unwrapped; isinstance() is the
    correct check.  The 0.001 fallbacks for empty denominators are kept.
    """
    right, wrong, miss = 0.0, 0.0, 0.0
    for label, score in zip(y_test, y_predict):
        # Unwrap single-element arrays produced by model.predict.
        if isinstance(label, np.ndarray):
            label = label[0]
        if label == 1 and score >= 0.5:
            right += 1
        elif label == 1 and score < 0.5:
            miss += 1
        elif label == 0 and score >= 0.5:
            wrong += 1
    p = right / (right + wrong) if right + wrong != 0 else 0.001
    r = right / (right + miss) if right + miss != 0 else 0.001
    f = 2 * p * r / (p + r) if p + r != 0 else 0.0
    return p, r, f
###model check for each epoch
class CheckBench(Callback):
    """Keras callback: after every batch, score the validation slice and,
    when the F1 beats 0.45, also score the held-out test set and dump the
    predictions (Python 2 print statements throughout)."""
    def __init__(self,test_data,test_y):
        self.test_data = test_data
        self.test_y = test_y
        self.max_fscore = 0.0  # best validation F1 seen so far
        self.max_info = {}     # p/r/f/batch of that best point
        self.counter = 0       # finished-epoch counter
    def on_batch_end(self,batch, logs={}):
        # NOTE(review): logs={} is a mutable default -- harmless here since
        # it is never mutated, but confirm against the Keras callback API.
        #for search faster
        #if batch < 200:
        #    return 1
        #result = self.model.predict(self.test_data,batch_size = batch_size)
        #print self.model.validation_data
        # validation_data layout assumed: [x1, x2, x3, ..., y, ...] so
        # [:3] are the three inputs and [-3] the labels -- confirm.
        result = self.model.predict(self.model.validation_data[:3], batch_size=batch_size)
        #p,r,f = fscore(self.test_y,result)
        p,r,f = fscore(self.model.validation_data[-3],result)
        if f > self.max_fscore:
            self.max_fscore = f
            self.max_info['p'] = p
            self.max_info['r'] = r
            self.max_info['fscore'] = f
            self.max_info['batch'] = batch
        if f > 0.45:
            #model_save(self.model, "best_model_save")
            print "*************In test data**************"
            result_test = self.model.predict(self.test_data,batch_size=batch_size)
            print "Best PRF:",fscore(self.test_y,result_test)
            np.savetxt("best_pcnn_without_feature.txt",result_test)
            print "***************************************"
        print "PRF on val-data:", p,r,f,batch
    def log_out(self,predict,golden,log_name):
        # Relies on the module-level global `testfile` set in __main__.
        log_error(testfile,predict,golden,log_name)
    def on_epoch_end(self,epoch,logs={}):
        print "==================epoch end========================"
        #result = self.model.predict(self.test_data,batch_size=batch_size)
        #print fscore(self.test_y,result)
        self.counter += 1
def split_x(train_x, val_size):
    """Split dataset to train and dev.

    input: ALL train dataset or train label
    output: (train, dev)

    If `train_x` is a list of parallel arrays, every element is split at
    the same point (derived from the first element's length); anything
    else (tuple, numpy array) is sliced directly.

    Fixes: the list check uses isinstance() instead of
    `type(train_x) == type([])`, and the stray module-level description
    string is now this docstring.
    """
    if isinstance(train_x, list):
        val_point = int((1 - val_size) * len(train_x[0]))
        return [data[:val_point] for data in train_x], [data[val_point:] for data in train_x]
    val_point = int((1 - val_size) * len(train_x))
    return train_x[:val_point], train_x[val_point:]
def model_run(model,train_x,train_y,test_x,test_y,\
        result_output,
        model_output,
        batch_size=batch_size,
        nb_epoch= nb_epoch,
        validation_split = val_size):
    """Hold out a validation slice, fit with the CheckBench callback, and
    return the fitted model.

    NOTE(review): `result_output` and `model_output` are currently unused
    because the predict/save lines at the bottom are commented out.
    """
    #val_point = int((1-val_size)*len(train_x))
    '''
    run model with stable mode, without
    '''
    # Split inputs (list of arrays) and labels at the same point.
    t_x, v_x = split_x(train_x,validation_split)
    t_y, v_y = split_x(train_y ,validation_split)
    save_epoch_result = CheckBench(test_data=test_x,test_y = test_y) #save each epoch result
    model.fit(t_x,t_y,batch_size=batch_size,nb_epoch=nb_epoch,
        #validation_split=val_size,
        validation_data = (v_x,v_y),
        verbose=2,
        callbacks=[save_epoch_result]) # without split validation_data use test as val
    #result_y = model.predict(test_x,batch_size =batch_size)
    #print result_y
    #np.savetxt(result_output,result_y)
    #model_save(model,model_output)
    return model
def feature_manual(feature_file, length = -1):
    """Load several SVM-format feature files and concatenate their feature
    matrices column-wise; when `length` > 1, zero-pad columns out to that
    width so train/test matrices can share a shape.  (Python 2 module:
    note the print statement below.)"""
    from text_svm import svm_format_load as sfl
    feature_list = []
    for ff in feature_file:
        feature_list.append( sfl(ff,x_format='array')[0] ) #add fea to label_list
    #print feature_list
    tmp = feature_list[0]
    for fl in feature_list[1:]:
        tmp = np.append(tmp,fl,axis =1)
    print tmp.shape
    if length > 1:
        ap = np.zeros((tmp.shape[0],length - tmp.shape[1]))
        tmp = np.append(tmp,ap, axis = 1)
    return tmp
def processing(trainfile, testfile, train_feature_file, test_feature_file):
    """End-to-end pipeline: load data, pretrain embeddings, build the
    piecewise CNN and train it.

    NOTE(review): the manual-feature branch is commented out, so the two
    *_feature_file arguments are currently unused; `test_index` is also
    unused -- the train-derived split points are applied to both splits.
    """
    #training && test
    train_datas, test_datas,\
    max_lengths, vocabulary, vocabulary_inv = read_data(trainfile= trainfile,
        testfile = testfile, w2c_file= './data/newbin')
    embedding_weights = get_embedding_weights(train_datas.get_x_concatenate(), test_datas.get_x_concatenate(),vocabulary_inv)
    # Model inputs: [token ids, pos-to-entity1, pos-to-entity2].
    train_x = [train_datas.get_x_concatenate()] + train_datas.get_pos_concatenate()
    test_x = [test_datas.get_x_concatenate()] + test_datas.get_pos_concatenate()
    train_index = train_datas.get_index()
    test_index = test_datas.get_index()
    train_y,test_y = train_datas.y , test_datas.y
    '''
    #add features
    manual_train = feature_manual(train_feature_file)
    manual_test = feature_manual(test_feature_file, length = manual_train.shape[1])
    print manual_train.shape, manual_test.shape
    manual_length = manual_train.shape[1]
    train_x.append(manual_train)
    test_x.append(manual_test)
    '''
    model = model_load(max_lengths=sum(max_lengths),index = train_index, embedding_weights=embedding_weights, vocabulary=vocabulary) #, manual_length = manual_length)
    model_run(model,train_x,train_y,test_x, test_y,"./result_report/result_cnn.txt", "./data/cnn_model")
if __name__ == '__main__':
    # CLI: python standard_pcnn.py <trainfile> <testfile>
    import warnings
    warnings.filterwarnings("ignore")
    trainfile, testfile = sys.argv[1], sys.argv[2]
    # Hand-crafted feature files (currently unused -- the manual branch in
    # processing() is commented out).
    train_feature_file = ['./data/train_medi_fea.svm', './data/train_ctd_fea.svm', \
                          './data/train_mesh_fea.svm', './data/train_sider_fea.svm',
                          './data/train_mention_fea.svm']
    test_feature_file = ['./data/test_medi_fea.svm', './data/test_ctd_fea.svm', \
                         './data/test_mesh_fea.svm', './data/test_sider_fea.svm',
                         './data/test_mention_fea.svm']
    processing(trainfile, testfile,train_feature_file, test_feature_file)
| [
"lhd911107@gmail.com"
] | lhd911107@gmail.com |
0ba8d2d20e7f1c7002a5148ac638bbd65f2dc39a | 043c39a95ae03399d830b9656067be9c2cf28030 | /day1/binary.py | 9788d01e40ae68d810a6a1f2e7702017f2639873 | [] | no_license | george-waf/bootcamp-week-1 | 9cc1c796fda3e478d9bbd5ced2ad4ed644ac2c50 | 907465e1ef250154334c54abd0b22ac5d448f61b | refs/heads/master | 2021-01-19T04:44:39.207795 | 2017-04-07T07:01:14 | 2017-04-07T07:01:14 | 87,387,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | def binary_converter(n):
if(n==0):
return "0"
elif(n<0):
return "Enter a positive integer"
else:
ans=""
while(n>0):
v=n%2
ans=str(v)+ans
n=n//2
return ans
| [
"noreply@github.com"
] | noreply@github.com |
d813100dbe311f99b58c56523b449bc958e0692c | 9ffc7823e7de17f2ea7a87dd6b995251203e1977 | /apps/app_GoogleData/appGoogleData.py | 2b9522fb65c2203c6e4d5460d7aebbf57c186b64 | [] | no_license | josedanielcl18/ceo-permitting-processes-timelines | 8203f1055b065dc3fe9979ef567abd3737d0893d | c558f629e4db5e77fc38c18b115340b24d0ca574 | refs/heads/main | 2023-06-26T12:33:40.385047 | 2021-07-26T18:52:30 | 2021-07-26T18:52:30 | 386,749,611 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,136 | py | # Import libraries
# Dash packages
import dash_html_components as html
import dash_table
from dash.dependencies import Input, Output
from dash.exceptions import PreventUpdate
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import plotly.express as px
# Import dataframes
#from data.googleData import PROCESSES_ID1, getData
#df = getData(PROCESSES_ID1)
from db.df_preprocessing import df2
#from templates.app_BP_issued.df_BP_issued import df2_fil
# Work on a copy so the shared preprocessed frame df2 stays untouched.
df = df2.copy()
# --------------------------------------------------------------------------------------------------------------------------------------
# testGoogleData LAYOUT
# layout = html.Div([
# html.H3('Processes Table'),
# dash_table.DataTable(
# id='datatable-interactivity',
# columns=[
# {"name": i, "id": i, "deletable": False, "selectable": True} for i in df.columns
# ],
# data=df.to_dict('records'),
# filter_action='native',
# sort_action='native',
# #sort_mode="single",
# #row_deletable=False,
# #selected_columns=[],
# #page_action='none',
# page_size=15,
# #fixed_rows={'headers': True},
# style_cell={'textAlign':'left', 'padding': '15px'},
# #style_header={},
# #style_table={'height': '700px'},
# style_data={'width': '150px', 'minWidth': '150px', 'maxWidth': '150px',
# 'overflow': 'hidden',
# 'fontSize':'small',
# 'textOverflow': 'ellipsis'}
# )
# ])
# --------------------------------------------------------------------------------------------------------------------------------------
# Functions to Create Bootstrap Cards
def Card(title, subtitle, fig):
    """Wrap a figure in a light Bootstrap card with centred title/subtitle."""
    children = [
        html.H4('{}'.format(title), style={'text-align':'center'}, className="card-title"),
        html.H6('{}'.format(subtitle), style={'text-align':'center'}, className="card-subtitle"),
        dcc.Graph(id='fig', figure=fig),
    ]
    return dbc.Card(children, body=True, color="light", inverse=False)
# Figure 1: bar chart of OBJECTDEFDESCRIPTION value counts from `df`.
# NOTE(review): the old comment described a boxplot from df_duration,
# which does not match the code below; comment updated to match.
df1 = df['OBJECTDEFDESCRIPTION'].value_counts().to_frame()
fig1 = px.bar(df1, x=df1.index, y='OBJECTDEFDESCRIPTION')
card1 = Card('Google Data!', " ", fig1)
# Page layout: a single card group holding the figure card.
layout = html.Div([
    html.Div([
        #Title
        dbc.CardGroup([card1]),
    ])
],
)
# --------------------------------------------------------------------------------------------------------------------------------------
# CALLBACKS
#from app import app
# @app.callback(
# Output('datatable-interactivity', 'style_data_conditional'),
# Input('datatable-interactivity', 'selected_columns')
# )
# def update_styles(selected_columns):
# return [{
# 'if': { 'column_id': i },
# 'background_color': '#D2F3FF'
# } for i in selected_columns] | [
"63196651+josedanielcl18@users.noreply.github.com"
] | 63196651+josedanielcl18@users.noreply.github.com |
fef23173bc7753e9b2b55c0e3bee148722833018 | 4da58b65fd3094c3b0556c7a3108d4cd1ffea0f3 | /policy_gradients/reinforce/hyperparameters.py | 574a7ecb1d0d9a9b994a56235a563aa9c60efbea | [] | no_license | willclarktech/policy-gradient-implementations | b7d6d55910cf6bc25e86368365f58c51b843df24 | 311276053322272319ffac8206f1e41960495ad7 | refs/heads/main | 2023-07-25T22:00:18.445628 | 2023-07-07T13:03:56 | 2023-07-07T13:03:56 | 252,439,207 | 1 | 0 | null | 2023-08-19T11:33:14 | 2020-04-02T11:42:47 | Jupyter Notebook | UTF-8 | Python | false | false | 311 | py | from typing import Any, Dict
def default_hyperparameters() -> Dict[str, Any]:
    """Default training configuration for the REINFORCE agent.

    A fresh dict (with a fresh hidden_features list) is built on every
    call, so callers may mutate the result safely.
    """
    return {
        "algorithm": "reinforce",
        "env_name": "LunarLander-v2",
        "n_episodes": 3000,
        "log_period": 1,
        "hidden_features": [128, 128],
        "alpha": 5e-4,
        "gamma": 0.99,
        "seed": None,
    }
| [
"willclarktech@users.noreply.github.com"
] | willclarktech@users.noreply.github.com |
63ac371425bfee192c11828633b0297f5b879935 | 386d08a9cc997efcf2041881cf67d6fa811f25a5 | /2_Greedy_314p.py | d298ae3ee09f994d7b7ca8b2e6bba58b02fbeae3 | [] | no_license | electricalboy1991/Python_for_CodingTest_Pycharm | 7febd10225494eb3c1d510594e36c2759f0c2652 | df2d1979dd7d6131e319b2e01eeee3fd7043308c | refs/heads/master | 2023-03-12T13:03:17.854003 | 2021-03-04T12:24:10 | 2021-03-04T12:24:10 | 342,104,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | import itertools
n = int(input())
data = list(map(int, input().split()))
# Greedy O(n log n) replacement for the original enumeration of all
# 2^n subset sums: with the coins sorted ascending, if every amount
# below `minimum` is makeable and the next coin is <= minimum, then all
# amounts below minimum + coin become makeable too; the first coin that
# exceeds `minimum` leaves it unmakeable.  Produces the same answer as
# the brute force (assumes coin values are positive, as the problem
# guarantees -- confirm for other inputs).
data.sort()
minimum = 1
for coin in data:
    if coin > minimum:
        break
    minimum += coin
print(minimum)
| [
"worldest0628@gmail.com"
] | worldest0628@gmail.com |
062bc978f5e8732e730f902c6794b247b77d5cd6 | 69155520dbdb6b3fce635aae9374bf7a1de30324 | /EulerP_050.py | 7a06c8fe24c307d636fee4ffdb8abfaaae0807fc | [] | no_license | nickvazz/EulerProblems | c5bb6c2dd8c4d8fdec4e6f323d5204b4807e89e2 | e2ee9a27d90b2e4f28227815e2dcc235278747bf | refs/heads/master | 2021-05-03T23:01:30.377259 | 2018-12-13T08:08:49 | 2018-12-13T08:08:49 | 120,395,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | import numpy as np
import time
start = time.time()
def sievePrimes(n):
    """Sieve of Eratosthenes over [0, n); returns (primes, mask).

    Quirk preserved from the original: the boolean mask marks every even
    index False (including index 2); the prime 2 is prepended separately
    to the returned array.
    """
    mask = np.ones(n, dtype=bool)
    mask[:2] = False   # 0 and 1 are not prime
    mask[::2] = False  # drop all evens; 2 is re-added below
    for odd in range(3, n, 2):
        mask[slice(2 * odd, n, odd)] = False
    return np.concatenate(([2], np.arange(n)[mask]), axis=0), mask
def main():
    """Project Euler 50 exploration: collect sums of consecutive primes
    below n that are themselves prime, then print the five with the
    longest runs."""
    potentials = []
    n = 100000
    primes, sieve = sievePrimes(n)
    counter = 0
    print ('{} primes finished'.format(len(primes)))
    print (primes[-20:])
    for i in range(len(primes)):
        # print (i)
        # `start` fixes the parity of the run end: presumably because a
        # prime sum needs an even run length unless the run starts at 2 --
        # confirm before relying on this.
        if i == 0:
            start = 2
        if i > 0:
            start = 1
        for j in range(i+start,len(primes)+1, 2):
            if j - i == 1:
                continue
            counter += 1
            Sum = np.sum(primes[i:j])
            if Sum > n:
                break
            # NOTE(review): `in` over an ndarray is a linear scan per
            # candidate; a set of primes would make this O(1).
            if Sum in primes:
                potentials.append((Sum, j-i))
    print (counter)
    # Report the five candidates with the longest consecutive runs.
    print (sorted(potentials, key=lambda x: x[1], reverse=True)[:5])
if __name__ == '__main__':
    # Time the whole search (rebinds the module-level `start` set above).
    start = time.time()
    main()
    end = time.time()
    print (end - start)
| [
"nickvazquezz@gmail.com"
] | nickvazquezz@gmail.com |
0050e1d249929d696b14019561b751ab2cc39b52 | 851fba1a861f28783d10cfadc91743b4ea53ce49 | /mytest/input_data.py | 46f1980ec8c74d6fca1c6122e700528dc87666cd | [
"MIT"
] | permissive | javelir/word2vec_pytorch | 005414bfeb95c97792f7eeaf2d7769529499a120 | 389fe578aea7808c0008e586f593b02969d09fca | refs/heads/master | 2020-04-22T15:48:18.625378 | 2019-02-24T11:18:21 | 2019-02-24T11:18:21 | 170,488,347 | 0 | 0 | MIT | 2019-02-13T10:28:07 | 2019-02-13T10:28:07 | null | UTF-8 | Python | false | false | 4,152 | py | """ Input data for word2vec
"""
from collections import defaultdict, deque
import numpy
class InputData:
    """Load a whitespace-tokenised corpus and serve word2vec training pairs.

    Builds a min-count-filtered vocabulary, a negative-sampling table
    (unigram^0.75 distribution) and streams (center, context) id pairs.
    """
    def __init__(self, path, min_count=5):
        # path: corpus file, one sentence per line.
        # min_count: words seen fewer times than this are dropped.
        self.path = path
        self.load_vocabulary(min_count=min_count)
        # FIFO buffer of (center, context) pairs not yet handed out.
        self.word_pair_catch = deque()
        self.init_sample_table()
        print("Finish input data initiation")
        print("sentence_length", self.sentence_length)
        print("sentence_count", self.sentence_count)

    def load_vocabulary(self, min_count):
        """Scan the corpus once and build the vocabulary of words that
        appear at least ``min_count`` times."""
        # NOTE: the stream is intentionally kept open; after this full scan
        # it sits at EOF, and get_batch_pairs() reopens it on first use.
        self.input_stream = open(self.path, encoding="utf-8")
        self.sentence_length = 0   # total kept-token count
        self.sentence_count = 0    # number of lines
        word_frequencies = defaultdict(int)
        for line in self.input_stream:
            self.sentence_count += 1
            words = line.lower().strip().split()
            self.sentence_length += len(words)
            for _word in words:
                word_frequencies[_word] += 1
        self.word2id = dict()
        self.id2word = dict()
        wid = 0
        self.word_frequencies = dict()  # keyed by word id, filtered by min_count
        for _word, _count in word_frequencies.items():
            if _count < min_count:
                # Dropped words no longer count toward the corpus length.
                self.sentence_length -= _count
                continue
            self.word2id[_word] = wid
            self.id2word[wid] = _word
            self.word_frequencies[wid] = _count
            wid += 1

    @property
    def word_count(self):
        """Vocabulary size (number of kept words)."""
        return len(self.word2id)

    def init_sample_table(self):
        """Build the negative-sampling table: each word id is repeated in
        proportion to its frequency raised to the 0.75 power."""
        self.sample_table = []
        table_size = 1e8
        pow_freq = numpy.array(list(self.word_frequencies.values()))**0.75
        pow_freq_sum = sum(pow_freq)
        ratio = pow_freq / pow_freq_sum
        count = numpy.round(ratio * table_size)
        for wid, wcount in enumerate(count):
            self.sample_table += [wid] * int(wcount)
        self.sample_table = numpy.array(self.sample_table)

    def get_batch_pairs(self, batch_size, window_size):
        """Return ``batch_size`` (center_id, context_id) pairs, refilling the
        internal buffer from the corpus stream as needed (wraps around at EOF)."""
        while len(self.word_pair_catch) < batch_size:
            sentence = self.input_stream.readline()
            if sentence is None or not sentence:
                # EOF: reopen the corpus and continue from the top.
                self.input_stream = open(self.path, encoding="utf-8")
                sentence = self.input_stream.readline()
            word_ids = []
            for _word in sentence.lower().strip().split():
                if _word not in self.word2id:
                    continue  # out-of-vocabulary (below min_count)
                word_ids.append(self.word2id[_word])
            for idx1st, wid1st in enumerate(word_ids):
                idx_start = max(idx1st - window_size, 0)
                # NOTE(review): the slice end is idx1st + window_size, which
                # excludes the right-most context position (asymmetric
                # window) — confirm whether idx1st + window_size + 1 was meant.
                neighbors = word_ids[idx_start : idx1st + window_size]
                for idx2nd, wid2nd in enumerate(neighbors, start=idx_start):
                    assert wid1st < self.word_count
                    assert wid2nd < self.word_count
                    if idx1st == idx2nd:
                        continue  # never pair a word with itself
                    self.word_pair_catch.append((wid1st, wid2nd))
        batch_pairs = [self.word_pair_catch.popleft() for _ in range(batch_size)]
        return batch_pairs

    def get_neg_pairs(self, pos_pairs, count):
        """Draw ``count`` negative sample ids for each positive pair.

        Only ``len(pos_pairs)`` is used; the contents of ``pos_pairs`` are
        not consulted, so a negative can coincide with a true context word.
        """
        neg_pairs = numpy.random.choice(
            self.sample_table, size=(len(pos_pairs), count))
        neg_pairs = neg_pairs.tolist()
        return neg_pairs

    def estimate_pair_count(self, window_size):
        """Estimate the total number of training pairs one epoch yields."""
        return (self.sentence_length * (2 * window_size - 1)
                - (self.sentence_count - 1) * (1 + window_size) * window_size)
def test():
    """Smoke-test InputData against a local sample corpus."""
    loader = InputData("./data.txt")
    print("Estimated pair count:", loader.estimate_pair_count(window_size=2))
    idx = 0
    while idx < 10:
        print("batch", idx, loader.get_batch_pairs(batch_size=5, window_size=2))
        idx += 1
if __name__ == "__main__":
    # Run the smoke test when executed directly.
    test()
| [
"jz.mraz@gmail.com"
] | jz.mraz@gmail.com |
a6cf7e5fad2b83a00fd867b3d283b7b570f05c20 | 3b688547595af0fe429d2500a2a53926d3052f56 | /freshquant/factors/util_world_alpha.py | e0212e54aa5b514b2673170e31d1b00610d56251 | [] | no_license | huning2009/FreshQuant | 75b386cbdde26c8edbb15e4faa8f260dd11873d9 | b33aa23bb6cc6e875af3ac93627fb52d7ce84275 | refs/heads/master | 2021-09-07T11:58:59.262887 | 2018-02-22T14:18:12 | 2018-02-22T14:18:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,584 | py | # -*- coding: UTF-8 -*-
import numpy as np
import math
import pandas as pd
import statsmodels.formula.api as smf
class FactorData(object):
def __init__(self, name, stocks, mean_ret, cum_ret):
self.name = name
self.stocks=stocks
self.mean_ret=mean_ret
self.cum_ret=cum_ret
def single_analyze(name,all_data,alpha,top):
df = alpha[name]
stocks = df.groupby(level=0).apply(
lambda x: x.T.nlargest(top, x.T.columns[0]) / x.T.nlargest(top, x.T.columns[0]).sum())
stocks.columns = ['weights']
stocks.index.names=['dt','tick']
inc = all_data['period_inc'].shift(-1)
if isinstance(inc,pd.DataFrame):
inc=inc['period_inc']
inc.name = 'period_inc'
combine = stocks.join(inc)
mean_ret = combine.groupby(level=0).apply(lambda x: (x['weights'] * x['period_inc']).sum())
cum_ret = (mean_ret + 1).cumprod() - 1
factor_data= FactorData(name=name,
stocks=stocks,
mean_ret=mean_ret,
cum_ret=cum_ret)
return factor_data
def risk_rejust(df,fac):
"""风格因子:个股Beta、流通市值、BP;
行业因子:中信一级行业哑变量"""
df.mkt=np.log10((df.mkt+0.000000000001).values.tolist())
cols=[col for col in df.columns if col not in [fac]]
formula='Y~'+'+'.join(cols)
ret=pd.DataFrame(smf.ols(formula.replace('Y', fac), data=df.loc[df[fac].dropna().index, :].fillna(0)).fit().resid,
index=df[fac].dropna().index, columns=[fac]).loc[df.index, :]
return ret | [
"sjj6love@126.com"
] | sjj6love@126.com |
94e544a15e0e29b8f771385dfbdcefcb09413fcd | 30467bd47c29412687a384d824655daa7400cef4 | /examples/dockerbuild.py | d7f0e1ae2176bdb6508f530e8a2f4f6e916f5b3c | [] | no_license | dpedu/shipper | 556409843c6da888338d2a791d4f06b17c709a52 | e5544416c2b0ee818285b9a13761f1c351d7676f | refs/heads/master | 2020-05-17T17:39:45.645549 | 2019-02-03T00:59:34 | 2019-02-03T00:59:34 | 183,860,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,308 | py | from shipper.lib import ShipperJob, SshConnection, GiteaCheckoutTask, LambdaTask, \
DockerBuildTask, DockerTagTask, DockerPushTask
# This job accepts gitea webooks and builds docker images. If the "imagename" parameter is passed, it will be used to
# name the image. Otherwise, a repo named "docker-image-name" would builds/pushes a docker image called "image-name".
# Build the job: connect over SSH with the bundled key and check out the
# pushed commit (master only) into the "code" directory.
job = ShipperJob()
job.default_connection(SshConnection(None, None, key="testkey.pem"))
job.add_task(GiteaCheckoutTask("code", allow_branches=["master"]))
def getimgname(job):
    """Populate job.props with the local Docker image name and the registry
    tag to push, taken from the 'imagename' parameter when present,
    otherwise derived from the webhook payload's repository name."""
    props = job.props
    if "imagename" in props:
        # Explicit "imagename" URL parameter wins.
        image = props["imagename"]
    else:
        # Fall back to the repository name from the Gitea payload,
        # dropping a leading "docker-" prefix ("docker-nginx" -> "nginx").
        image = props["payload"]["repository"]["name"]
        prefix = "docker-"
        if image.startswith(prefix):
            image = image[len(prefix):]
    props["docker_imagename"] = "dpedu/" + image           # built locally as this
    props["docker_tag"] = "apps2reg:5000/dpedu/" + image   # tagged and pushed as this
# Pipeline: run getimgname to compute the image names, then build, tag and
# push the image to the registry.
job.add_task(LambdaTask(getimgname))
job.add_task(DockerBuildTask())
job.add_task(DockerTagTask())
job.add_task(DockerPushTask())
| [
"dave@davepedu.com"
] | dave@davepedu.com |
33e0f6e0f58713cd6b9e0bf434b0190abffc395a | a47e4480d1584c5a2bb4c31ac512c864d0c2c240 | /core/settings.py | 8e44faed4463d4a577291f1a56a01053b9a77cef | [
"MIT"
] | permissive | shaymk1/ke-nako-shop | 014bd960e2048d4e2b5cc77c0b2d99f2058208d4 | 5c6f3dfb6b1e89efe111c1c6daa21434c7843ddc | refs/heads/main | 2023-08-02T08:19:31.702068 | 2021-09-20T19:21:53 | 2021-09-20T19:21:53 | 406,715,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,243 | py |
from pathlib import Path

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from the environment before any deployment.
SECRET_KEY = 'django-insecure-twj=7-)r(w9l5r96^0xf30w$w-id1f3uo=8pqc_d6_o#d!6i!#'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project apps.
    'store',
    'category',
    'accounts.apps.AccountsConfig',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'core.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # Custom processor exposing category menu links to all templates.
                'category.context_processors.menu_links',
            ],
        },
    },
]

WSGI_APPLICATION = 'core.wsgi.application'

# saying we are using custom user model
AUTH_USER_MODEL = 'accounts.Account'

# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = '/static/'
MEDIA_URL = '/media/'

STATICFILES_DIRS = [
    BASE_DIR/'static'
]
# NOTE(review): MEDIA_ROOT points at the same directory as STATICFILES_DIRS,
# so user uploads land next to static assets — confirm this is intended.
MEDIA_ROOT = BASE_DIR/'static'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"mkekae@gmail.com"
] | mkekae@gmail.com |
c999af3d157708b2a0cd590d9f95fbf4f3453e02 | 57e45293c97578a3f69574368534dc090833c6e3 | /test_xCos_all_datasets.py | cb3015248a00f4a3ce7fe92e0da82319a6e33468 | [
"MIT"
] | permissive | ntubiolin/InsightFace_Pytorch | 96f908443050a38adbdc2b35141659e3e1d15524 | 98f712a4111e7bdc5d98944d2af96a324bd27ac7 | refs/heads/master | 2020-07-08T14:34:52.621905 | 2020-05-10T13:43:28 | 2020-05-10T13:43:28 | 203,702,338 | 0 | 0 | MIT | 2019-10-26T03:30:21 | 2019-08-22T02:50:19 | Jupyter Notebook | UTF-8 | Python | false | false | 5,050 | py | import numpy as np
from config import get_config
import argparse
from Learner import face_learner
from data.data_pipe import get_val_pair
from torchvision import transforms as trans
from tqdm import tqdm_notebook as tqdm
from utils import getUnitAttention, getCorrAttention
def testWithDatasets(test_datasets, conf, learner, attention):
    """Evaluate `learner` on each named verification dataset and print the
    10-fold accuracy and best threshold (test-time augmentation enabled)."""
    for test_dataset in test_datasets:
        pair_imgs, pair_issame = get_val_pair(conf.emore_folder, test_dataset)
        accuracy, best_threshold, roc_curve_tensor = \
            learner.evaluate_attention(conf,
                                       pair_imgs,
                                       pair_issame,
                                       nrof_folds=10,
                                       tta=True,
                                       attention=attention)
        # 'accuray' typo is kept: it is part of the emitted log format.
        print(test_dataset +
              ' - accuray:{}, threshold:{}'.format(accuracy, best_threshold))
def testBaselineModel(model_name):
    """Load a baseline (no attention module) checkpoint non-strictly and run
    the benchmark suite with the uniform patch attention.

    Uses the module-level `conf`, `learner` and `unit_attention`.
    """
    print(f'>>> Testing model {model_name}')
    learner.load_state(conf, model_name,
                       model_only=True, from_save_folder=True,
                       strict=False, model_atten=False)
    test_datasets = ['lfw', 'vgg2_fp', 'agedb_30',
                     'calfw', 'cfp_ff', 'cfp_fp', 'cplfw']
    testWithDatasets(test_datasets, conf, learner, unit_attention)
def testMyModel(model_name, test_type='atten_xCos'):
    """Evaluate an xCos checkpoint with one of three attention schemes:

    - 'atten_xCos': the model's own learned attention (attention=None here),
    - 'corr_xCos' : attention derived from the patch-cos / ground-truth-cos
                    correlation measured on LFW,
    - 'patch_xCos': uniform attention over all patches.

    Uses the module-level `conf`, `learner` and `unit_attention`.
    """
    assert test_type in ['patch_xCos', 'corr_xCos', 'atten_xCos']
    print(f'>>> Testing model {model_name} with {test_type}')
    if test_type == 'atten_xCos':
        print('>>> None attention')
        parameters = {
            'attention': None
        }
    elif test_type == 'corr_xCos':
        print('>>> Extracting corr attention')
        # Load once here so the correlation is measured on this checkpoint.
        learner.load_state(conf, model_name,
                           model_only=True,
                           from_save_folder=True,
                           strict=True, model_atten=True)
        lfw, lfw_issame = get_val_pair(conf.emore_folder, 'lfw')
        corrPlot, corr_eff = \
            learner.plot_CorrBtwPatchCosAndGtCos(conf,
                                                 lfw, lfw_issame,
                                                 nrof_folds=10, tta=True,
                                                 attention=None)
        corr_attention = getCorrAttention(corr_eff, conf)
        print('>>> Corr attention extracted')
        parameters = {
            'attention': corr_attention
        }
    elif test_type == 'patch_xCos':
        parameters = {
            'attention': unit_attention
        }
    # The checkpoint is (re)loaded for every test_type; for 'corr_xCos'
    # this is the second load of the same weights.
    learner.load_state(conf, model_name,
                       model_only=True, from_save_folder=True,
                       strict=True, model_atten=True)
    test_datasets = ['lfw', 'vgg2_fp', 'agedb_30',
                     'calfw', 'cfp_ff', 'cfp_fp', 'cplfw']
    testWithDatasets(test_datasets, conf, learner, parameters['attention'])
# Script entry: build config, model and attentions, then evaluate the chosen
# checkpoint with all three attention variants.
conf = get_config(training=False)
# XXX Why bs_size can only be the number that divide 6000 well?
conf.batch_size = 200
# conf.net_depth = 100
unit_attention = getUnitAttention(conf)
learner = face_learner(conf, inference=True)
# Earlier checkpoints kept for reference:
# model_name = '2019-11-12-03-59_accuracy:0.9269999999999999_step:191058_CosFace_ResNet50_detach_False_MS1M_detachedtwcc.pth'
# model_name = 'ir_se50.pth'
# model_name = '2019-11-12-04-06_accuracy:0.9301428571428572_step:172029_CosFace_ResNet100_detach_False_MS1M_detachedtwcc.pth'
# model_name = '2019-11-11-17-07_accuracy:0.9135714285714286_step:132330_CosFace_ResNet100_detach_False_MS1M_detachedtwcc.pth'
# model_name = '2019-11-12-16-09_accuracy:0.8971_step:191058_ArcFace_ResNet50_detach_True_MS1M_detachedreproduce.pth'
# testBaselineModel(model_name)
# model_name = '2019-11-11-18-58_accuracy:0.99533_step:100078_ArcFace_ResNet50_detach_False_MS1M_detachedxCosNoDe.pth'
# NOTE(review): several model_name assignments below override each other;
# only the final one is actually evaluated.
model_name = '2019-09-02-08-21_accuracy:0.9968333333333333_step:436692_CosFace.pth'
# model_name = '2019-09-06-08-07_accuracy:0.9970000000000001_step:1601204_CosFace.pth'
# model_name = '2019-11-15-12-49_accuracy:0.99650_step:327519_CosFace_ResNet50_detach_False_MS1M_detachedxCosNoDeL1.pth'
# model_name = '2019-11-15-15-25_accuracy:0.99583_step:363910_ArcFace_ResNet50_detach_False_MS1M_detachedxCosNoDeL1.pth'
# model_name = '2019-11-15-18-02_accuracy:0.99500_step:400301_ArcFace_ResNet50_detach_False_MS1M_detachedxCosNoDeL1.pth'
# model_name = '2019-11-12-08-13_accuracy:0.99567_step:154666_ArcFace_ResNet50_detach_False_MS1M_detachedxCosNoDe.pth'
model_name = '2019-11-12-16-32_accuracy:0.99183_step:168207_CosFace_ResNet100_detach_False_MS1M_detachedxCosNoDe.pth'
model_name = '2019-08-25-14-35_accuracy:0.9931666666666666_step:218349_None.pth'
model_name = '2019-08-30-07-36_accuracy:0.9953333333333333_step:655047_None.pth'
model_name = '2019-11-12-17-02_accuracy:0.99500_step:191058_ArcFace_ResNet50_detach_False_MS1M_detachedxCosNoDe.pth'
# Run all three attention variants on the selected checkpoint.
test_types = ['atten_xCos', 'corr_xCos', 'patch_xCos']
for test_type in test_types:
    testMyModel(model_name, test_type)
| [
"shex.ck@gmail.com"
] | shex.ck@gmail.com |
7e7ce291c3cfacaa8494620fc55d30f8f51185ba | 3199ff7a6d62f6d69c68d95d04eec83312f3677c | /web/views.py | 0877d7e1e4d0a58df3cd315becffcddbed5c1919 | [] | no_license | ShiqinHuo/University_Admin_app | 7891fd0b630f42140cf7f786346fd4cb449a6eae | 554294f384da0bdef34cacb9fb1cae4208fb8e4d | refs/heads/master | 2022-12-22T01:01:06.355913 | 2019-04-10T07:15:25 | 2019-04-10T07:15:25 | 180,492,070 | 3 | 0 | null | 2022-12-08T02:31:09 | 2019-04-10T03:10:30 | Python | UTF-8 | Python | false | false | 17,324 | py | # -*- coding: utf-8 -*-
import os
import time
import uuid
import datetime
from flask import render_template, send_from_directory, session, redirect, url_for
from flask_login import login_user, logout_user, current_user, login_required
from flask import Markup, request
from app import app
from forms import SchoolForm, PageInfo, InstitutionForm, BulletinForm, AccountForm
from DB import orm
from Utils import Util
from Logic import restful, logic
@app.route('/bd/web/<path:path>')
def rootDir_web(path):
    """Serve a static file from the application root.

    Paths ending in 'py' (i.e. Python sources) are not served; they are
    redirected to the schools listing instead.
    """
    # rfind gives the index of the last 'py'; the comparison is true only
    # when 'py' occurs within the final 3 characters of the path.
    index_key = path.rfind('py')
    if index_key > (len(path) - 4):
        return redirect(url_for('view_schools'))
    # NOTE(review): serves arbitrary paths relative to app.root_path — relies
    # on send_from_directory's traversal protection; confirm that is enough.
    return send_from_directory(os.path.join(app.root_path, '.'), path)
# Absolute directory for uploaded images: 'files/' next to this module.
UPLOAD_PATH = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), 'files/')
@app.route("/")
def rootDir():
return redirect(url_for('view_schools'))
@app.route('/bd/view_school', methods=['GET', 'POST'])
def view_school():
    """Create or edit a school record; also handles an optional image upload.

    GET with ?id= loads an existing school for editing; GET without id shows
    an empty form. POST saves the form; POST with 'upload' also stores the
    submitted image. A ?q= query redirects to the listing page.
    """
    school_id = request.args.get('id')
    q = request.args.get('q')
    if q is not None:
        return redirect(url_for('view_schools', page=1, q=q))
    form = SchoolForm(request.form)
    form.area_id.choices = logic.g_choices_area
    form.schooltype_id.choices = logic.g_choices_schooltype
    form.feature_ids.choices = logic.g_choices_feature
    # form.message = form.data
    if request.method == 'POST' and form.validate():
        print("longitude:", form.longitude.data)
        if form.id.data:
            # Update an existing school.
            school = orm.School.query.get(int(form.id.data))
            school.name = form.name.data
            school.area_id = form.area_id.data
            school.teachdesc = form.teachdesc.data
            school.address = form.address.data
            school.schooltype_id = form.schooltype_id.data
            school.website = form.website.data
            school.distinguish = form.distinguish.data
            school.leisure = form.leisure.data
            school.threashold = form.threashold.data
            school.partner = form.partner.data
            school.artsource = form.artsource.data
            school.feedesc = form.feedesc.data
            school.longitude = form.longitude.data
            school.latitude = form.latitude.data
            orm.db.session.commit()
        else:
            # Create a new school and remember its id on the form.
            school = orm.School(
                form.name.data, form.area_id.data, form.teachdesc.data,
                form.address.data, form.schooltype_id.data, form.website.data,
                form.distinguish.data, form.leisure.data, form.threashold.data,
                form.partner.data, form.artsource.data, form.feedesc.data,
                form.longitude.data, form.latitude.data)
            orm.db.session.add(school)
            orm.db.session.commit()
            form.id.data = school.id
        logic.SetSchoolFeatures(int(form.id.data), form.feature_ids.data)
        if 'upload' in request.form:
            file = request.files['image']
            if file:
                # Store under a uuid-based name to avoid collisions.
                file_server = str(uuid.uuid1()) + Util.file_extension(
                    file.filename)
                pathfile_server = os.path.join(UPLOAD_PATH, file_server)
                file.save(pathfile_server)
                # Size is only checked after saving; files >= 1MB are
                # deleted again rather than rejected up front.
                if os.stat(pathfile_server).st_size < 1 * 1024 * 1024:
                    schoolimage = orm.Schoolimage(school.id, file_server)
                    orm.db.session.merge(schoolimage)
                    orm.db.session.commit()
                else:
                    os.remove(pathfile_server)
        else:
            # Plain save (no upload) returns to an empty edit page.
            return redirect(url_for('view_school'))
    elif request.method == 'GET' and school_id:
        form = logic.GetSchoolFormById(school_id)
        logic.LoadBasePageInfo('修改学校', '输入并确定', form)
    else:
        logic.LoadBasePageInfo('新建学校', '输入并确定', form)
    if form.id.data:
        school = orm.School.query.get(int(form.id.data))
        form.school = school
    if form.school:
        form.schoolimages = form.school.schoolimages
    return render_template('view_school.html', form=form)
@app.route('/bd/view_schools', methods=['GET', 'POST'])
def view_schools():
    """Paged listing of schools with optional search; POST deletes one."""
    page = request.args.get('page', 1)
    q = request.args.get('q')
    schools = restful.GetSchools(int(page), q)
    if restful.ITEM_OBJECTS not in schools:
        # Out-of-range page / bad result: fall back to the first page.
        return redirect(url_for('view_schools'))
    schoolforms = [
        logic.GetSchoolFormById(x[restful.ITEM_ID])
        for x in schools[restful.ITEM_OBJECTS]
    ]
    while None in schoolforms:
        schoolforms.remove(None)
    # form.message = form.data
    if request.method == 'POST':
        form = SchoolForm(request.form)
        if 'delete' in request.form:
            # Remove the school's image files from disk, then the DB row.
            for x in orm.Schoolimage.query.filter_by(
                    school_id=int(form.id.data)).all():
                pathfile_server = os.path.join(UPLOAD_PATH, x.file)
                if os.path.exists(pathfile_server):
                    os.remove(pathfile_server)
            orm.db.session.delete(orm.School.query.get(int(form.id.data)))
            orm.db.session.commit()
        return redirect(url_for('view_schools', page=page, q=q))
    form = PageInfo()
    logic.LoadBasePageInfo('所有学校', '查看', form)
    return render_template(
        'view_schools.html',
        forms=schoolforms,
        form=form,
        paging=restful.GetPagingFromResult(schools))
@app.route('/bd/delete_image', methods=['GET', 'POST'])
def delete_image():
    """Delete an uploaded image by filename from all three image tables and
    from disk, then redirect back to ?backurl (defaults to '/').

    NOTE(review): backurl is taken verbatim from the request — open-redirect
    risk; consider validating it against local routes.
    """
    backurl = request.args.get('backurl', '/')
    print("backurl......", backurl)
    file = request.args.get('file')
    if file:
        # The caller does not say which entity the image belongs to, so the
        # filename is removed from every image table it may appear in.
        for x in orm.Schoolimage.query.filter_by(file=file).all():
            orm.db.session.delete(x)
        for x in orm.Institutionimage.query.filter_by(file=file).all():
            orm.db.session.delete(x)
        for x in orm.Bulletinimage.query.filter_by(file=file).all():
            orm.db.session.delete(x)
        pathfile_server = os.path.join(UPLOAD_PATH, file)
        if os.path.exists(pathfile_server):
            os.remove(pathfile_server)
        orm.db.session.commit()
    return redirect(backurl)
@app.route('/bd/view_institution', methods=['GET', 'POST'])
def view_institution():
    """Create or edit a training institution; also handles image upload.

    GET with ?id= loads an existing record; POST saves the form; a ?q=
    query redirects to the listing page.
    """
    institution_id = request.args.get('id')
    q = request.args.get('q')
    if q is not None:
        return redirect(url_for('view_institutions', page=1, q=q))
    form = InstitutionForm(request.form)
    form.area_id.choices = logic.g_choices_area
    form.feature_ids.choices = logic.g_choices_feature
    form.agespan_id.choices = logic.g_choices_agespan
    form.feetype_id.choices = logic.g_choices_feetype
    # NOTE(review): these defaults are assigned unconditionally, so they
    # overwrite whatever opening hours were submitted in a POST — confirm.
    form.timeopen.data = datetime.time(8, 30)
    form.timeclose.data = datetime.time(22, 00)
    # form.message = form.data
    if request.method == 'POST' and form.validate():
        if form.id.data:
            # Update an existing record.
            institution = orm.Institution.query.get(int(form.id.data))
            institution.name = form.name.data
            institution.agespan_id = form.agespan_id.data
            institution.area_id = form.area_id.data
            institution.address = form.address.data
            institution.location = form.location.data
            institution.website = form.website.data
            institution.telephone = form.telephone.data
            institution.feedesc = form.feedesc.data
            institution.timeopen = form.timeopen.data
            institution.timeclose = form.timeclose.data
            institution.feetype_id = form.feetype_id.data
            institution.longitude = form.longitude.data
            institution.latitude = form.latitude.data
            orm.db.session.commit()
        else:
            # Create a new record and remember its id on the form.
            institution = orm.Institution(
                form.name.data, form.agespan_id.data, form.area_id.data,
                form.address.data, form.location.data, form.website.data,
                form.telephone.data, form.feedesc.data, form.timeopen.data,
                form.timeclose.data, form.feetype_id.data, form.longitude.data,
                form.latitude.data, None)
            orm.db.session.add(institution)
            orm.db.session.commit()
            form.id.data = institution.id
        logic.SetInstitutionFeatures(int(form.id.data), form.feature_ids.data)
        if 'upload' in request.form:
            file = request.files['image']
            if file:
                # Store under a uuid-based name to avoid collisions.
                file_server = str(uuid.uuid1()) + Util.file_extension(
                    file.filename)
                pathfile_server = os.path.join(UPLOAD_PATH, file_server)
                file.save(pathfile_server)
                # Size is only checked after saving; files >= 1MB are
                # deleted again rather than rejected up front.
                if os.stat(pathfile_server).st_size < 1 * 1024 * 1024:
                    institutionimage = orm.Institutionimage(
                        institution.id, file_server)
                    orm.db.session.merge(institutionimage)
                    orm.db.session.commit()
                else:
                    os.remove(pathfile_server)
        else:
            # Plain save (no upload) returns to an empty edit page.
            return redirect(url_for('view_institution'))
    elif request.method == 'GET' and institution_id:
        form = logic.GetInstitutionFormById(institution_id)
        logic.LoadBasePageInfo('修改培训机构', '输入并确定', form)
    else:
        logic.LoadBasePageInfo('新建培训机构', '输入并确定', form)
    if form.id.data:
        institution = orm.Institution.query.get(int(form.id.data))
        form.institution = institution
    if form.institution:
        form.institutionimages = form.institution.institutionimages
    return render_template('view_institution.html', form=form)
@app.route('/bd/view_institutions', methods=['GET', 'POST'])
def view_institutions():
    """Paged listing of institutions with optional search; POST deletes one."""
    page = request.args.get('page', 1)
    q = request.args.get('q')
    institutions = restful.GetInstitutions(int(page), q)
    if restful.ITEM_OBJECTS not in institutions:
        # Out-of-range page / bad result: fall back to the first page.
        return redirect(url_for('view_institutions'))
    institutionforms = [
        logic.GetInstitutionFormById(x[restful.ITEM_ID])
        for x in institutions[restful.ITEM_OBJECTS]
    ]
    while None in institutionforms:
        institutionforms.remove(None)
    # form.message = form.data
    if request.method == 'POST':
        form = InstitutionForm(request.form)
        if 'delete' in request.form:
            # Remove the image files from disk first, then the DB row.
            for x in orm.Institutionimage.query.filter_by(
                    institution_id=int(form.id.data)).all():
                pathfile_server = os.path.join(UPLOAD_PATH, x.file)
                if os.path.exists(pathfile_server):
                    os.remove(pathfile_server)
            orm.db.session.delete(orm.Institution.query.get(int(form.id.data)))
            orm.db.session.commit()
        return redirect(url_for('view_institutions', page=page, q=q))
    form = PageInfo()
    logic.LoadBasePageInfo('所有培训机构', '查看', form)
    return render_template(
        'view_institutions.html',
        forms=institutionforms,
        form=form,
        paging=restful.GetPagingFromResult(institutions))
@app.route('/bd/view_bulletin', methods=['GET', 'POST'])
def view_bulletin():
    """Create or edit a bulletin (announcement); also handles image upload."""
    bulletin_id = request.args.get('id')
    q = request.args.get('q')
    if q is not None:
        return redirect(url_for('view_bulletins', page=1, q=q))
    form = BulletinForm(request.form)
    if request.method == 'POST' and form.validate():
        if form.id.data:
            # Update an existing bulletin.
            bulletin = orm.Bulletin.query.get(int(form.id.data))
            bulletin.dt = form.dt.data
            bulletin.title = form.title.data
            bulletin.content = form.content.data
            bulletin.source = form.source.data
            bulletin.author = form.author.data
            orm.db.session.commit()
        else:
            # Create a new bulletin and remember its id on the form.
            bulletin = orm.Bulletin(form.dt.data, form.title.data,
                                    form.content.data, form.source.data,
                                    form.author.data)
            orm.db.session.add(bulletin)
            orm.db.session.commit()
            form.id.data = bulletin.id
        if 'upload' in request.form:
            file = request.files['image']
            if file:
                # Store under a uuid-based name to avoid collisions.
                file_server = str(uuid.uuid1()) + Util.file_extension(
                    file.filename)
                pathfile_server = os.path.join(UPLOAD_PATH, file_server)
                file.save(pathfile_server)
                # Size is only checked after saving; files >= 1MB are
                # deleted again rather than rejected up front.
                if os.stat(pathfile_server).st_size < 1 * 1024 * 1024:
                    bulletinimage = orm.Bulletinimage(bulletin.id, file_server)
                    orm.db.session.merge(bulletinimage)
                    orm.db.session.commit()
                else:
                    os.remove(pathfile_server)
        else:
            # Plain save (no upload) returns to an empty edit page.
            return redirect(url_for('view_bulletin'))
    elif request.method == 'GET' and bulletin_id:
        form = logic.GetBulletinFormById(bulletin_id)
        logic.LoadBasePageInfo('修改公告', '输入并确定', form)
    else:
        # New bulletin: default the timestamp to now.
        form.dt.data = datetime.datetime.now()
        logic.LoadBasePageInfo('新建公告', '输入并确定', form)
    if form.id.data:
        bulletin = orm.Bulletin.query.get(int(form.id.data))
        form.bulletin = bulletin
    if form.bulletin:
        form.bulletinimages = form.bulletin.bulletinimages
    return render_template('view_bulletin.html', form=form)
@app.route('/bd/view_bulletins', methods=['GET', 'POST'])
def view_bulletins():
    """Paged listing of bulletins with optional search; POST deletes one."""
    page = request.args.get('page', 1)
    q = request.args.get('q')
    bulletins = restful.GetBulletins(int(page), q)
    if restful.ITEM_OBJECTS not in bulletins:
        # Out-of-range page / bad result: fall back to the first page.
        return redirect(url_for('view_bulletins'))
    bulletinforms = [
        logic.GetBulletinFormById(x[restful.ITEM_ID])
        for x in bulletins[restful.ITEM_OBJECTS]
    ]
    while None in bulletinforms:
        bulletinforms.remove(None)
    if request.method == 'POST':
        form = BulletinForm(request.form)
        if 'delete' in request.form:
            # Remove the image files from disk first, then the DB row.
            for x in orm.Bulletinimage.query.filter_by(
                    bulletin_id=int(form.id.data)).all():
                pathfile_server = os.path.join(UPLOAD_PATH, x.file)
                if os.path.exists(pathfile_server):
                    os.remove(pathfile_server)
            orm.db.session.delete(orm.Bulletin.query.get(int(form.id.data)))
            orm.db.session.commit()
        return redirect(url_for('view_bulletins', page=page, q=q))
    form = PageInfo()
    logic.LoadBasePageInfo('所有公告', '查看', form)
    return render_template(
        'view_bulletins.html',
        forms=bulletinforms,
        form=form,
        paging=restful.GetPagingFromResult(bulletins))
@app.route('/bd/view_account', methods=['GET', 'POST'])
def view_account():
    """Create or edit a user account.

    Username and display name are always kept equal to the phone number.
    """
    account_id = request.args.get('id')
    q = request.args.get('q')
    if q is not None:
        return redirect(url_for('view_accounts', page=1, q=q))
    form = AccountForm(request.form)
    if request.method == 'POST' and form.validate():
        if form.id.data:
            # Update an existing account.
            account = orm.Account.query.get(int(form.id.data))
            account.username = form.telephone.data
            account.name = form.telephone.data
            account.telephone = form.telephone.data
            account.role = 0
            account.flag_telephone = 1 if form.flag_telephone.data else 0
            account.checkcode = form.checkcode.data
            account.source = form.source.data
            account.dtcreate = form.dtcreate.data
            orm.db.session.commit()
        else:
            # NOTE(review): new accounts get hard-coded password and
            # checkcode '1234' — confirm this is only for development.
            account = orm.Account(form.telephone.data, '1234',
                                  form.telephone.data, form.telephone.data, 0,
                                  1 if form.flag_telephone.data else 0, '1234',
                                  form.source.data, form.dtcreate.data)
            orm.db.session.add(account)
            orm.db.session.commit()
            form.id.data = account.id
        return redirect(url_for('view_account'))
    elif request.method == 'GET' and account_id:
        form = logic.GetAccountFormById(account_id)
        logic.LoadBasePageInfo('修改用户', '输入并确定', form)
    else:
        logic.LoadBasePageInfo('新建用户', '输入并确定', form)
    if form.id.data:
        account = orm.Account.query.get(int(form.id.data))
        form.account = account
    return render_template('view_account.html', form=form)
@app.route('/bd/view_accounts', methods=['GET', 'POST'])
def view_accounts():
    """Paged listing of accounts with optional search; POST deletes one."""
    page = request.args.get('page', 1)
    q = request.args.get('q')
    accounts = restful.GetAccounts(int(page), q)
    if restful.ITEM_OBJECTS not in accounts:
        # Out-of-range page / bad result: fall back to the first page.
        return redirect(url_for('view_accounts'))
    accountforms = [
        logic.GetAccountFormById(x[restful.ITEM_ID])
        for x in accounts[restful.ITEM_OBJECTS]
    ]
    while None in accountforms:
        accountforms.remove(None)
    if request.method == 'POST':
        form = AccountForm(request.form)
        if 'delete' in request.form:
            orm.db.session.delete(orm.Account.query.get(int(form.id.data)))
            orm.db.session.commit()
        return redirect(url_for('view_accounts', page=page, q=q))
    form = PageInfo()
    logic.LoadBasePageInfo('所有用户', '查看', form)
    return render_template(
        'view_accounts.html',
        forms=accountforms,
        form=form,
        paging=restful.GetPagingFromResult(accounts))
| [
"ShiqinHuo@gmail.com"
] | ShiqinHuo@gmail.com |
463594fe03643d1587a546c3e3785d7a6d8abe7f | bf87df58b73db47d4eee7ac41266b122a26c4166 | /paytmmall.py | 3e2821d5d5fef776c15f2afc5de6064efbbc2219 | [] | no_license | amanjha18/scraping-paytmmall | 4c151e1de4a5c729e7e080668c030f551dcf9847 | 4b907b81b1cdd8341537ce3d74665bc9035f8629 | refs/heads/master | 2020-04-24T16:11:25.831424 | 2019-02-22T16:27:31 | 2019-02-22T16:27:31 | 172,098,070 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,542 | py |
##scrapping in paytmmall.
###redmi note mobiles:
import requests
from bs4 import BeautifulSoup
# url="https://paytmmall.com/shop/search?q=redmi%20note&from=organic&child_site_id=6&site_id=2&category=66781&brand=18550"
# page=requests.get(url)
# parse=BeautifulSoup(page.text,"html.parser")
# main_div=parse.find("div", class_="_3RA-")
# div1=main_div.findAll("div", class_="_1fje")
# # print div1
# index=1
# for i in div1:
# # print i,"\n"
# div2=i.findAll("div", class_="_2i1r")
# # print div2
# for j in div2:
# div3=j.find("div",class_="_3WhJ")
# # print div3
# div4=div3.find("a")
# # print div4
# div5=div4.find("div",class_="pCOS")
# # print div5
# mobile_name=div5.find("div",class_="_2apC")
# # print div6.text
# div7=div5.find("div",class_="_2bo3")
# div8=div7.find("div",class_="_1kMS")
# price=div8.find("span")
# print (index,price.text,mobile_name.text)
# index=index+1
##DELL LAPTOPS
link="https://paytmmall.com/laptops-glpid-6453?use_mw=1&page=1&brand=1758&src=store"
dell=requests.get(link)
# print (dell)
parser=BeautifulSoup(dell.text,"html.parser")
# print (parser)
main_div1=parser.find("div",class_="_3RA-")
# print (main_div1)
lap=main_div1.findAll("div",class_="_1fje")
for k in lap:
lap1=k.findAll("div",class_="_2i1r")
for m in lap1:
lap2=m.find("div",class_="_3WhJ")
a=lap2.find("a")
lap3=a.find("div",class_="pCOS")
name=lap3.find("div",class_="_2apC")
lap4=lap3.find("div",class_="_2bo3")
lap5=lap4.find("div",class_="_1kMS")
print (lap5.text,name.text)
| [
"noreply@github.com"
] | noreply@github.com |
323f0305705be55f1f68dc252f2df08b8fefa4b3 | 49366c279a72ada80bede5648d20baf5eb881acb | /src/mds/mrs/admin.py | 38063e6bb2c5448f0d097f09196edebbedbaa4ce | [
"BSD-3-Clause"
] | permissive | SanaMobile/middleware_mds_v1 | fb28d1c78dc06ad325047d6fabd83a1bd5fcd946 | 7b3d3da7f460deb4981811d7b7592808add0aa4b | refs/heads/master | 2021-01-02T09:34:43.119034 | 2014-10-07T21:43:14 | 2014-10-07T21:43:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,470 | py | """Sana mDS Django admin interface
:Authors: Sana dev team
:Version: 1.1
"""
from django.contrib import admin
from sana.mrs.models import Patient,Procedure,BinaryResource,SavedProcedure,Notification,QueueElement,RequestLog,ClientEventLog
from sana.mrs.api import maybe_upload_procedure
class ProcedureAdmin(admin.ModelAdmin):
    """Default admin for Procedure; placeholder for future customisation."""
    pass
class ClientEventLogAdmin(admin.ModelAdmin):
    """Admin options for ClientEventLog: tabular columns, sidebar
    filters, and date drill-down by event time."""
    list_display = ('client', 'event_time', 'event_type', 'event_value', 'encounter_reference', 'patient_reference', 'user_reference',)
    list_filter = ('event_time', 'event_type', 'encounter_reference', 'patient_reference', 'user_reference',)
    date_hierarchy = 'event_time'
    # exclude = ('created', 'modified',)
def resend(request, queryset):
    """Admin bulk action: re-attempt the POST of selected SavedProcedure
    objects to the backend EMR.

    Checks whether zero, or more, SavedProcedure objects selected
    in the admin interface are ready to POST to the backend EMR and
    executes the POST if they are.  Upload failures are deliberately
    ignored (best effort) so one bad record does not abort the batch.
    """
    for sp in queryset:
        try:
            maybe_upload_procedure(sp)
        except Exception:
            # Best-effort: keep going on failure.  Was a bare "except:",
            # which also swallowed SystemExit/KeyboardInterrupt.
            pass
resend.short_description = "Resend Selected to EMR"
class SavedProcedureAdmin(admin.ModelAdmin):
    # Expose the "Resend Selected to EMR" bulk action on the change list.
    actions = [resend,]
# Register all MRS models with the Django admin site.
admin.site.register(Procedure)
admin.site.register(Patient)
admin.site.register(BinaryResource)
admin.site.register(SavedProcedure, SavedProcedureAdmin)
admin.site.register(Notification)
admin.site.register(QueueElement)
admin.site.register(RequestLog)
admin.site.register(ClientEventLog, ClientEventLogAdmin)
| [
"winkler.em@gmail.com"
] | winkler.em@gmail.com |
8bba7a407baa22d183d9835ce359f0d5da875f90 | ad831f18866784f205ac8f11ca3d27afc9cd8d2a | /venv/bin/easy_install-2.7 | 84d3baed839319d40038b8452dc3029ee29d1723 | [] | no_license | habibrahmanbd/Sent.Mining | 65cf9f2434dfa3a2cf7d305c39cfebd85aad7a4b | a3c1d63710cbc4c68ee039a83d4c53bb5a874bb9 | refs/heads/master | 2023-08-14T08:08:36.091888 | 2021-09-24T04:21:22 | 2021-09-24T04:21:22 | 182,139,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | 7 | #!/home/habib/Documents/Sent.Mining/venv/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # setuptools console-script shim: strip the "-script.py"/".exe"
    # wrapper suffix from argv[0] before delegating to easy_install.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"bdhabib94@gmail.com"
] | bdhabib94@gmail.com |
bfbdcf492e78323b834eff43127e234f25966d15 | f769bfb711e8163e6fdac734847fac1af571148f | /Gaussian Elimination.py | 493cdb614f066369b27ea2e157a19dbf461b69a5 | [] | no_license | WalidElsayed9/Numerical-Methods- | 2c187934d43b4fe4325ebb8d070734bebacca5ba | 03824bb011ce666687ae22349d9b8b0d9db68214 | refs/heads/master | 2022-04-08T21:04:36.355878 | 2020-03-23T23:44:51 | 2020-03-23T23:44:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | import numpy as np
import math
import scipy as sp
from scipy import linalg
import pandas as pd
import matplotlib.pyplot as plt
import sympy
np.random.seed(2)
a = np.ceil(10*np.random.random((4,4)))
#print(a)
b = np.floor(10*np.random.random((4,1)))
#print(b)
A = np.hstack((a,b))
#print(A)
#print(A.shape[0])
N = A.shape[0]
for k in range(N-1):
for i in range (k+1,N):
r = -A[i,k] / A[k,k]
for j in range (k+1,N+1):
A[i,j] = A[i,j] + r*A[k,j]
for j in range(k+1):
A[i,j] = 0
#print(A)
'''BACK SUBSTITUTION'''
A[N-1,N] = A[N-1,-1] / A[N-1, -2]
for i in range(N-2, -1, -1):
sum = 0
for j in range(i+1,N):
sum = sum +A[i,j] * A[j,N]
A[i,N] = (A[i,N]-sum)/A[i,i]
print(A[:,N])
| [
"noreply@github.com"
] | noreply@github.com |
78b752d117f11c2a5a5d056b47227a18ba096e0b | 185b7529d9d439a0d554db2fc7b60a1531a5a836 | /scrappy_settings/asgi.py | 2d14dbc27187113e3031e51b3b38ab18a5531eeb | [] | no_license | cavidanhasanli/Scrappy_price | 8901baeaa40beb7102042d687d405258ae20d7fe | b5cc50010f727ba95686d89cac29f76533d860c2 | refs/heads/main | 2023-03-11T00:14:56.576016 | 2021-02-16T09:58:00 | 2021-02-16T09:58:00 | 338,854,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
ASGI config for scrappy_settings project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at this project's settings before the app is created.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'scrappy_settings.settings')
# Module-level ASGI callable that servers (uvicorn/daphne) import.
application = get_asgi_application()
| [
"cavidan.hasanli@mail.ru"
] | cavidan.hasanli@mail.ru |
da0a0322e7f8ed48fdd52a386ef2ac266993a344 | 61587c1ad4040967edb0a87a1c84d3d61e792939 | /Part1_Array/Code3_Stack_To_Queue.py | caa9e07aa3ff48891bf26ac3e6ee24fd8ce4dd16 | [] | no_license | forvendettaw/algorithm_practice | 2d2ebfc19b40afdfde0f0cf2e8b5394af6f2c0e6 | 01bbde5421bd4894efe69755342fc75b724c45bc | refs/heads/master | 2021-04-27T13:20:38.152516 | 2018-02-06T02:45:20 | 2018-02-06T02:45:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | """使用栈结构实现一个队列结构:栈转换成队列"""
class Stack_To_Queue(object):
    """FIFO queue built from two LIFO stacks.

    ``stackPush`` receives new items; ``stackPop`` holds items in
    reversed order, so its top is always the queue front.
    """
    def __init__(self):
        self.stackPush = []
        self.stackPop = []

    def add(self, item):
        """Enqueue ``item`` at the back of the queue."""
        self.stackPush.append(item)

    def poll(self):
        """Dequeue and return the front item.

        Raises:
            IndexError: if the queue is empty.  (The original raised an
            undefined ``ArrayIndexOutOfBoundsException``, which was a
            NameError in practice.)
        """
        if len(self.stackPop) == 0 and len(self.stackPush) == 0:
            raise IndexError("The queue is empty!")
        if len(self.stackPop) != 0:
            return self.stackPop.pop(-1)
        # Drain the push stack so order reverses and the oldest element
        # ends up on top of the pop stack.  (Original looped
        # "while len(self.stackPush) == 0", which never transferred
        # anything and crashed on a non-empty queue.)
        while len(self.stackPush) != 0:
            self.stackPop.append(self.stackPush.pop(-1))
        return self.stackPop.pop(-1)

    def peak(self):
        """Return (without removing) the front item.

        Raises:
            IndexError: if the queue is empty.
        """
        if len(self.stackPop) != 0:
            return self.stackPop[-1]
        if len(self.stackPush) != 0:
            return self.stackPush[0]
        raise IndexError("The queue is empty!")
"lianyingteng@hotmail.com"
] | lianyingteng@hotmail.com |
d1439964f6c85daa3a40a61cad84441eddf2408b | 670d136ee38f8b6bc1ce8acf20efe1fdf6d3aa63 | /config/config.py | e8c1538aa262dbd469283d57f6b95783ea7f3d51 | [] | no_license | HuiDBK/auto_clear | 1df7c463dc2a522d921db7663f3d9e3303e7c3c3 | 13796725912d60c96de1574a7c7d1e35249b0327 | refs/heads/master | 2023-08-25T01:02:42.655609 | 2021-04-04T15:45:24 | 2021-04-04T15:45:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,697 | py | # -*- coding:utf-8 -*-
"""
配置文件模块
author: Mr Liu
version: 1.0
"""
import os
import sys
import yaml
import common
import traceback
import configparser
import logging.config
import PySimpleGUI as sg
# 日志配置文件路径
LOG_CONF_PATH = os.path.join(common.BASE_DIR, 'config\\log_conf.yaml')
# 程序默认的配置文件
DEF_CONF = os.path.join(common.BASE_DIR, 'config\\config.txt')
def setup_logging(default_path=LOG_CONF_PATH, default_level=logging.INFO, env_key="LOG_CFG"):
    """Configure the ``logging`` package from a YAML file.

    The config path comes from the ``env_key`` environment variable when
    set, otherwise ``default_path``.  When no such file exists, fall
    back to ``logging.basicConfig`` at ``default_level``.
    """
    config_path = os.getenv(env_key, None) or default_path
    if not os.path.exists(config_path):
        logging.basicConfig(level=default_level)
        return
    with open(config_path, "r") as handle:
        logging.config.dictConfig(yaml.load(handle, Loader=yaml.FullLoader))
class Config(object):
    """Base class for configuration parsing (shared INI parser)."""
    conf_parser = configparser.ConfigParser()  # shared config-file parser
    @classmethod
    def parser(cls):
        # Parse the default config file; on any failure, log the
        # traceback, show a GUI error popup and exit the program.
        try:
            cls.conf_parser.read(DEF_CONF)  # parse the config file
        except Exception:
            logging.error(traceback.format_exc())
            sg.PopupError(u'\n无法正常解析配置文件\n\n请勿随意更改配置文件信息!!!\n',
                          title=u'配置文件错误', background_color='#242834', font=45, text_color='WHITE')
            sys.exit()
class TagEraseConf(Config):
    """Configuration for the electronic-tag erase tool (encoder boards,
    telnet credentials, GUI theme), persisted back to the INI file."""
    USER = None
    DEFAULT_IP = None
    ENCODING_SECTION = 'encode'  # section name for encoder-board info
    TELNET_SECTION = 'telnet'  # section name for telnet info
    WIN_SECTION = 'win_style'  # section name for GUI window info
    SPIT_STR = '#'  # separator for the telnet password list
    def __init__(self):
        self.ENCODING_LIST = list()  # encoder boards to erase
        self.TEL_PWD_LIST = list()  # telnet account passwords
        self.WIN_THEME = None
        self.parser()
    def parser(self):
        """Parse all sections of the config file into attributes."""
        super().parser()
        # Parse the encoder-board section into an upper-cased list.
        encoding_options = self.conf_parser.options(self.ENCODING_SECTION)
        for option in encoding_options:
            code_value = self.conf_parser.get(self.ENCODING_SECTION, option)
            self.ENCODING_LIST.append(str(code_value).upper())
        # Parse the telnet section.
        self.USER = self.conf_parser.get(self.TELNET_SECTION, 'user')
        self.TEL_PWD_LIST = str(self.conf_parser.get(self.TELNET_SECTION, 'password')).split(self.SPIT_STR)
        self.DEFAULT_IP = self.conf_parser.get(self.TELNET_SECTION, 'default_ip')
        # Parse the win_style section.
        self.WIN_THEME = self.conf_parser.get(self.WIN_SECTION, 'win_theme')
    def add_code(self, code):
        """Add a new encoder-board entry and persist it to the file."""
        new_code_option = 'code_' + str(len(self.ENCODING_LIST) + 1)
        self.conf_parser.set(self.ENCODING_SECTION, new_code_option, code)
        self.conf_parser.write(open(DEF_CONF, mode='w'))
        self.ENCODING_LIST.append(str(code))
    def remove_code(self, code):
        """Remove the given encoder-board entry and persist the change."""
        # Find the option whose value matches the code to remove.
        items = self.conf_parser.items(self.ENCODING_SECTION)
        option_values = [str(item[1]).upper() for item in items]
        option_index = option_values.index(code)
        self.conf_parser.remove_option(self.ENCODING_SECTION, option=items[option_index][0])
        self.conf_parser.write(open(DEF_CONF, mode='w'))
        self.ENCODING_LIST.remove(str(code))
    def add_tel_pwd(self, add_tel_pwd):
        """Append a telnet password ('#'-separated) and persist it."""
        tel_pwd = self.conf_parser.get(self.TELNET_SECTION, 'password')
        new_tel_pwd = tel_pwd + '#' + add_tel_pwd
        self.conf_parser.set(self.TELNET_SECTION, 'password', new_tel_pwd)
        self.conf_parser.write(open(DEF_CONF, mode='w'))
        self.TEL_PWD_LIST.append(add_tel_pwd)
    def change_win_theme(self, new_theme):
        """
        Change the GUI window theme and persist it.
        :param new_theme: name of the new theme
        """
        self.conf_parser.set(self.WIN_SECTION, 'win_theme', new_theme)
        self.conf_parser.write(open(DEF_CONF, mode='w'))
        self.WIN_THEME = new_theme
def main():
    """Ad-hoc smoke test: parse the config file and dump its fields."""
    cfg = TagEraseConf()
    print(cfg.ENCODING_LIST)
    print(cfg.USER)
    print(cfg.TEL_PWD_LIST)
    print(cfg.DEFAULT_IP)
    print(cfg.conf_parser.items(cfg.ENCODING_SECTION))
    codes = [pair[1] for pair in list(cfg.conf_parser.items(cfg.ENCODING_SECTION))]
    print(codes)
    print(codes.index('0302C1E4'))
if __name__ == '__main__':
main()
| [
"2228157440@qq.com"
] | 2228157440@qq.com |
6b7df363e07c32497d7b6a3ae77012127a2fb79a | 789f108a849be99052f13cdec68953266458e646 | /nfe_mde/nfe_schedule.py | c0e580b66329ba0eeea7f4afc2737145ed796e5a | [] | no_license | rick-romero/odoo-brazil-eletronic-documents | 6ebe1b30deaa854861aa632ee62b022b8eeb2d8a | 2a1f144612ef23b77b57b9edcf2089a2b2b3077a | refs/heads/8.0 | 2021-01-14T14:07:27.728313 | 2016-07-12T22:11:29 | 2016-07-12T22:11:29 | 59,238,349 | 0 | 0 | null | 2016-05-19T19:58:39 | 2016-05-19T19:58:39 | null | UTF-8 | Python | false | false | 7,908 | py | # coding=utf-8
###############################################################################
# #
# Copyright (C) 2015 Danimar Ribeiro www.trustcode.com.br #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
import re
import base64
import logging
from lxml import objectify
from datetime import datetime
from .service.mde import distribuicao_nfe
from openerp import models, api, fields
from openerp.exceptions import Warning as UserError
from openerp.addons.nfe.sped.nfe.validator.config_check import \
validate_nfe_configuration
_logger = logging.getLogger(__name__)
class nfe_schedule(models.TransientModel):
    """Transient model that polls the SEFAZ NFe distribution service for
    every company, storing returned document summaries as ``nfe.mde``
    records plus an audit trail of document events and attachments."""
    _name = 'nfe.schedule'

    # Simple progress flag used by the wizard view.
    state = fields.Selection(
        string="Estado",
        selection=[('init', 'Não iniciado'), ('done', 'Finalizado')],
        default='init'
    )

    @staticmethod
    def _mask_cnpj(cnpj):
        # Format a bare 14-digit CNPJ as XX.XXX.XXX/XXXX-XX; any other
        # input (falsy, wrong length after stripping) is returned as-is.
        if cnpj:
            val = re.sub('[^0-9]', '', cnpj)
            if len(val) == 14:
                cnpj = "%s.%s.%s/%s-%s" % (val[0:2], val[2:5], val[5:8],
                                           val[8:12], val[12:14])
        return cnpj

    @api.model
    def schedule_download(self, raise_error=False):
        """Poll the NFe distribution web service for each company.

        On success (SEFAZ codes 137/138) the response XML is attached,
        one ``nfe.mde`` record is created per document summary, and the
        company's ``last_nsu_nfe`` cursor advances.  Failures are
        logged; when ``raise_error`` is set, a UserError is raised
        instead of continuing silently (used by the manual button).
        """
        companies = self.env['res.company'].search([])
        for company in companies:
            try:
                validate_nfe_configuration(company)
                nfe_result = distribuicao_nfe(company, company.last_nsu_nfe)
                env_events = self.env['l10n_br_account.document_event']
                # 137 = no documents; 138 = documents located.
                if nfe_result['code'] == '137' or nfe_result['code'] == '138':
                    event = {
                        'type': '12', 'company_id': company.id,
                        'response': 'Consulta distribuição: sucesso',
                        'status': nfe_result['code'],
                        'message': nfe_result['message'],
                        'create_date': datetime.now(),
                        'write_date': datetime.now(),
                        'end_date': datetime.now(),
                        'state': 'done', 'origin': 'Scheduler Download'
                    }
                    obj = env_events.create(event)
                    # Attach the raw response XML to the audit event.
                    self.env['ir.attachment'].create(
                        {
                            'name': u"Consulta manifesto - {0}".format(
                                company.cnpj_cpf),
                            'datas': base64.b64encode(
                                nfe_result['file_returned']),
                            'datas_fname': u"Consulta manifesto - {0}".format(
                                company.cnpj_cpf),
                            'description': u'Consulta distribuição: sucesso',
                            'res_model': 'l10n_br_account.document_event',
                            'res_id': obj.id
                        })
                    env_mde = self.env['nfe.mde']
                    for nfe in nfe_result['list_nfe']:
                        # Only NFe summary documents are imported here.
                        if nfe['schema'] == 'resNFe_v1.00.xsd':
                            root = objectify.fromstring(nfe['xml'])
                            cnpj_forn = self._mask_cnpj(('%014d' % root.CNPJ))
                            partner = self.env['res.partner'].search(
                                [('cnpj_cpf', '=', cnpj_forn)])
                            invoice_eletronic = {
                                'chNFe': root.chNFe,
                                'nSeqEvento': nfe['NSU'], 'xNome': root.xNome,
                                'tpNF': str(root.tpNF), 'vNF': root.vNF,
                                'cSitNFe': str(root.cSitNFe),
                                'state': 'pending',
                                'dataInclusao': datetime.now(),
                                'CNPJ': cnpj_forn,
                                'IE': root.IE,
                                'partner_id': partner.id,
                                'dEmi': datetime.strptime(str(root.dhEmi)[:19],
                                                          '%Y-%m-%dT%H:%M:%S'),
                                'company_id': company.id,
                                'formInclusao': u'Verificação agendada'
                            }
                            obj_nfe = env_mde.create(invoice_eletronic)
                            file_name = 'resumo_nfe-%s.xml' % nfe['NSU']
                            self.env['ir.attachment'].create(
                                {
                                    'name': file_name,
                                    'datas': base64.b64encode(nfe['xml']),
                                    'datas_fname': file_name,
                                    'description': u'NFe via manifesto',
                                    'res_model': 'nfe.mde',
                                    'res_id': obj_nfe.id
                                })
                            # Advance the cursor so this NSU is not re-fetched.
                            company.last_nsu_nfe = nfe['NSU']
                else:
                    # Any other code: record the failed query for audit.
                    event = {
                        'type': '12',
                        'response': 'Consulta distribuição com problemas',
                        'company_id': company.id,
                        'file_returned': nfe_result['file_returned'],
                        'file_sent': nfe_result['file_sent'],
                        'message': nfe_result['message'],
                        'create_date': datetime.now(),
                        'write_date': datetime.now(),
                        'end_date': datetime.now(),
                        'status': nfe_result['code'],
                        'state': 'done', 'origin': 'Scheduler Download'
                    }
                    obj = env_events.create(event)
                    self.env['ir.attachment'].create(
                        {
                            'name': u"Consulta manifesto - {0}".format(
                                company.cnpj_cpf),
                            'datas': base64.b64encode(
                                nfe_result['file_returned']),
                            'datas_fname': u"Consulta manifesto - {0}".format(
                                company.cnpj_cpf),
                            'description': u'Consulta manifesto com erro',
                            'res_model': 'l10n_br_account.document_event',
                            'res_id': obj.id
                        })
            except Exception as ex:
                _logger.error("Erro ao consultar Manifesto", exc_info=True)
                if raise_error:
                    raise UserError(
                        u'Atenção',
                        u'Não foi possivel efetuar a consulta!\n Verifique o log')

    @api.one
    def execute_download(self):
        """Manual trigger from the UI: run the poll, surfacing errors."""
        self.schedule_download(raise_error=True)
| [
"danimaribeiro@gmail.com"
] | danimaribeiro@gmail.com |
cddcfff3c99de91ed211eba9f6cdd906c162f1ca | 9c8921b8cc2b442c565fa93d763666f671612d2b | /verify/reduce/array.py | 6391f4153bc4e8bfcdd4074e03f0318199521905 | [] | no_license | iCodeIN/cocass | 09f39f02e58f43e8572a99670106d93b863cab9d | 5f38dd732ecb659fb75c13293d4e44b57d36e2da | refs/heads/master | 2023-08-18T20:26:44.836261 | 2021-01-18T07:08:46 | 2021-01-18T07:08:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | def expect(*args):
res = ""
res += "0x04030201\n"
res += "0x05040302\n"
res += "0x07060504\n"
res += "0x0B0A0908\n"
res += "0x05040302\n" * 2
res += "0x07060504\n" * 2
res += "0x0B0A0908\n" * 2
res += "0x13121110\n" * 2
res += "0x07030502\n"
res += "0x05070305\n"
res += "0x07060507\n"
res += "0x0B0A090B\n"
res += "0x05070305\n"
res += "0x07060507\n"
res += "0x0B0A090B\n"
res += "0x13121112\n"
return (0, res, "")
# Parameter sets for the test harness; a single empty argument list
# means "run the case once with no extra arguments".
data = [
    [],
]
| [
"neven.villani@gmail.com"
] | neven.villani@gmail.com |
3e72b0f6f2e21379659c298b7bafcf50d609e056 | 3e6e661079e5cc08312c9c7623d21a4ccd2b479f | /flickry/imagesbytags/apps.py | 13258623e77b57232c818ac25f3d245502848ba2 | [] | no_license | ruduran/flickry | 1392276b9b41cbc99c3a2b05c78e7cd70c4db795 | 7d4141e7c43ffeb8a93d0a272cf7b89aa1b4b350 | refs/heads/master | 2020-03-22T19:54:26.917697 | 2018-07-13T15:14:57 | 2018-07-13T15:29:43 | 140,559,426 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | import sys
from django.apps import AppConfig
class ImagesbytagsConfig(AppConfig):
    """App config that starts a background worker once Django is ready."""
    name = 'imagesbytags'
    def ready(self):
        # Skip the worker for management commands where it is unwanted
        # (migrations, tests, checks, interactive shell).
        if any(cmd in sys.argv for cmd in ('migrate', 'makemigrations', 'test', 'check', 'shell')):
            return
        # Imported lazily so the app registry is populated before the
        # worker module touches any models.
        from .worker import Worker
        w = Worker()
        w.start()
| [
"ruduran@users.noreply.github.com"
] | ruduran@users.noreply.github.com |
172b1d40c473ddedcf33224aafa3faec54b15beb | 681ef48d8200b408ebc765f39e38ca36f2df9482 | /5.py | dc8e50fcc990040e7e52eaaeb6d54d9e36519ff8 | [] | no_license | code-abil/Python-Garage | cc53b09eefe8899ffc81cd604d70277ed2fdb389 | b886cf62c5b6c6f7dcf0a0920f98a409c9d620de | refs/heads/master | 2020-05-27T09:05:38.650820 | 2019-05-25T11:58:03 | 2019-05-25T11:58:03 | 188,560,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | import os
# #print(dir(os))
# print(type(dir(5)))
# print(type(os.getcwd()))
# os.chdir('/home/abilash/Desktop')
# print(os.getcwd())
# print(os.listdir())
# print(os.getcwd())
# os.chdir('/home/abilash/Desktop')
# os.makedirs('Abias/a')
# os.removedirs('Abias/a') #will
# List the current directory, move one level up, then rename the
# "python" entry to "Py" (mutates the filesystem; fails if absent).
print(os.listdir())
os.chdir('../')
os.rename('python','Py')
"gabilash1104@gmail.com"
] | gabilash1104@gmail.com |
507a7efb3c2ae19dbd1cfb2fa60575893e49856e | 25a263124549cd09f664b9cdb37b3a9b1dd23dc6 | /honcore/exceptions.py | cb6c7affb53e1ce148c66fb2406021a3281b944e | [
"Unlicense",
"LicenseRef-scancode-public-domain"
] | permissive | Joev-/HoNCore | b5896027cac2aa6b5fb200f826186a01384a33eb | 27e76645b06fc31df430fd4f3098686897a97bcd | refs/heads/master | 2021-01-20T05:08:37.154966 | 2012-01-26T19:53:18 | 2012-01-26T19:53:18 | 2,199,889 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,443 | py | """
HoNCore. Python library providing connectivity and functionality
with HoN's chat server.
Custom exceptions to be raised by this library and caught by any program using this library.
Helps debug login/logout events/incorrect data and socket errors (timeout, broken pipes etc).
TODO
* urllib2 sometimes throws BadStatusLine which means the server responded with an unknown HTTP status code.
Needs to be handled correctly, it should throw a MasterServerError 109, but it needs a stronger definition.
"""
class HoNException(Exception):
""" Base exception for all exceptions for this library. """
def __init__(self, code, *args):
self.code = code
self.error = _errormap[code]
def __str__(self):
return repr("Error %d: %s" % (self.code, self.error))
class HoNCoreError(HoNException):
""" Exception to be used for honcore internals such as a socket error which will be handled
by something else inside honcore, or when a client tries to connect a method to an event
that does not exist.
"""
pass
class HoNConfigError(HoNException):
""" Exception relating to the configuration data.
Can be raised if the configuration passed does not satisfy the requirements.
"""
pass
class MasterServerError(HoNException):
""" Exceptions related to the master server.
Can be raised if invalid data is returned or if the connection times out.
"""
pass
class ChatServerError(HoNException):
""" Ecxceptions related to the chat server.
Can be raised if invalid data is received or if the socket times out and the
connection to the server is lost.
"""
pass
_errormap = {
10 : 'Socket error.',
11 : 'Socket timed out.',
12 : 'Unknown packet received',
13 : 'Unknown event ID',
14 : 'Method is not connected to this event ID.',
100 : 'Could not connect to the masterserver.',
101 : 'Could not obtain login data.',
102 : 'Incorrect username/password.',
103 : 'Failed to get login data after 3 attempts.',
104 : 'Connection to the master server timed out.',
105 : 'Connection to the master server was rejected.',
106 : 'Master server failed to receieve logout request, forcing logout.',
107 : 'Requester HTTP error.', # Don't leave this in, expand it to handle each different HTTP/URL Error?
108 : 'Unexpected opcode when parsing PHP serialisation.',
109 : 'Bad HTTP status code.',
110 : 'Connection reset by peer', # Good sign it's down, it's dropping connections?
111 : 'Connection refused', # Very good sign it's down, it's refusing connections?
112 : 'Connection timed out',
113 : 'Message of the day data error',
114 : 'No address associate with hostname',
120 : 'No buddies found',
121 : 'No ban list found',
122 : 'No ignored users found',
123 : 'No clan members found',
200 : 'Chat server did not respond to authentication request.',
201 : 'Connection to the chat server timed out.',
202 : 'Connection to the chat server was rejected.',
203 : 'Failed to connect to the chat server after 3 attempts.',
204 : 'Empty packet received.',
205 : 'No cookie/auth hash provided.',
206 : 'Broken Pipe, is the chat version correct?',
207 : 'Chat server error, connection lost.',
208 : 'Could not connect to the chat server.',
209 : 'Socket was not connected.',
}
| [
"joe.vaughan92@gmail.com"
] | joe.vaughan92@gmail.com |
3bd68f15f1ba900bd732975bf7fe77e8c8d0874c | c4cfce852c59bdd65d5ab5e77021e42cb7b02ff8 | /eng_to_kana_test/test_eng_to_kana.py | c7f683673655332c566e7794109d70e9fe281858 | [
"MIT"
] | permissive | yokolet/transcript | 5749be490a7f53e907b2143696afaa592647dc59 | 4a83cc70d868bb243846ebee8c322c63c2092141 | refs/heads/master | 2020-05-28T09:47:27.771042 | 2019-06-15T21:46:53 | 2019-06-15T21:46:59 | 188,961,209 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import unittest
from eng_to_kana.eng_to_kana import EngToKana
class TestEngToKana(unittest.TestCase):
    """Unit tests for EngToKana English -> katakana transcription."""
    def setUp(self):
        # Fresh converter per test; bind the two public entry points.
        self.list_func = EngToKana().fromWordList
        self.file_func = EngToKana().fromFile
    def test_1(self):
        # Known dictionary words map to one or more katakana readings.
        words = ['what', 'girl', 'cat', 'judge', 'majority']
        expected = [['ワット', 'ホワット'], ['ガール'], ['キャット'], ['ジャッジ'], ['マジョリティー']]
        self.assertEqual(expected, self.list_func(words))
    def test_2(self):
        # Out-of-dictionary words come back as the 'E_DIC' error marker.
        words = ['gaga']
        self.assertEqual([['E_DIC']], self.list_func(words))
"yokolet@gmail.com"
] | yokolet@gmail.com |
360c489c9cbf919e2cb62c14bf5a0f370355366e | 96f9c82d0331a853abb602aa0e214ba10d97e782 | /gcp/plugins/modules/gcp_compute_target_pool_info.py | 904029bfbefcc02c8d4b9c707949d7d57de1ea26 | [] | no_license | gundalow-collections/google | c5a269477dd44e44d50fba3e1145ba3150585ba8 | d4148513bec8926ec617c4450900236adb5e87bf | refs/heads/master | 2020-07-25T02:11:46.852809 | 2019-09-16T20:13:35 | 2019-09-16T20:13:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,814 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: google.gcp.gcp_compute_target_pool_info
description:
- Gather info for GCP TargetPool
- This module was called C(google.gcp.gcp_compute_target_pool_facts) before Ansible 2.9. The
usage has not changed.
short_description: Gather info for GCP TargetPool
version_added: 2.7
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
filters:
description:
- A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
- Each additional filter in the list will act be added as an AND condition (filter1
and filter2) .
type: list
region:
description:
- The region where the target pool resides.
required: true
type: str
extends_documentation_fragment: google.gcp.gcp
'''
EXAMPLES = '''
- name: get info on a target pool
gcp_compute_target_pool_info:
region: us-west1
filters:
- name = test_object
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
backupPool:
description:
- This field is applicable only when the containing target pool is serving a
forwarding rule as the primary pool, and its failoverRatio field is properly
set to a value between [0, 1].
- 'backupPool and failoverRatio together define the fallback behavior of the
primary target pool: if the ratio of the healthy instances in the primary
pool is at or below failoverRatio, traffic arriving at the load-balanced IP
will be directed to the backup pool.'
- In case where failoverRatio and backupPool are not set, or all the instances
in the backup pool are unhealthy, the traffic will be directed back to the
primary pool in the "force" mode, where traffic will be spread to the healthy
instances with the best effort, or to all instances when no instance is healthy.
returned: success
type: dict
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
failoverRatio:
description:
- This field is applicable only when the containing target pool is serving a
forwarding rule as the primary pool (i.e., not as a backup pool to some other
target pool). The value of the field must be in [0, 1].
- 'If set, backupPool must also be set. They together define the fallback behavior
of the primary target pool: if the ratio of the healthy instances in the primary
pool is at or below this number, traffic arriving at the load-balanced IP
will be directed to the backup pool.'
- In case where failoverRatio is not set or all the instances in the backup
pool are unhealthy, the traffic will be directed back to the primary pool
in the "force" mode, where traffic will be spread to the healthy instances
with the best effort, or to all instances when no instance is healthy.
returned: success
type: str
healthCheck:
description:
- A reference to a HttpHealthCheck resource.
- A member instance in this pool is considered healthy if and only if the health
checks pass. If not specified it means all member instances will be considered
healthy at all times.
returned: success
type: dict
id:
description:
- The unique identifier for the resource.
returned: success
type: int
instances:
description:
- A list of virtual machine instances serving this pool.
- They must live in zones contained in the same region as this pool.
returned: success
type: list
name:
description:
- Name of the resource. Provided by the client when the resource is created.
The name must be 1-63 characters long, and comply with RFC1035. Specifically,
the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
sessionAffinity:
description:
- 'Session affinity option. Must be one of these values: - NONE: Connections
from the same client IP may go to any instance in the pool.'
- "- CLIENT_IP: Connections from the same client IP will go to the same instance
in the pool while that instance remains healthy."
- "- CLIENT_IP_PROTO: Connections from the same client IP with the same IP protocol
will go to the same instance in the pool while that instance remains healthy."
returned: success
type: str
region:
description:
- The region where the target pool resides.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible_collections.google.gcp.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
    """Module entry point: parse arguments, default the OAuth scopes,
    fetch the matching target pools and return them as Ansible facts."""
    module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str')))
    # Backward-compat shim for the pre-2.9 "facts" module name.
    if module._name == 'gcp_compute_target_pool_facts':
        module.deprecate("The 'gcp_compute_target_pool_facts' module has been renamed to 'gcp_compute_target_pool_info'", version='2.13')
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
    return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))}
    module.exit_json(**return_value)
def collection(module):
    """Build the regional targetPools collection URL for this module's
    project/region parameters."""
    template = "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetPools"
    return template.format(**module.params)
def fetch_list(module, link, query):
    """List resources at ``link``, passing ``query`` as the API filter
    and unwrapping the paginated 'items' arrays."""
    auth = GcpSession(module, 'compute')
    return auth.list(link, return_if_object, array_name='items', params={'filter': query})
def query_options(filters):
    """Join individual filter strings into one GCE query expression.

    A single filter passes through untouched; multiple filters are each
    wrapped in parentheses (unless already wrapped) and joined with
    spaces, which the API treats as AND.
    """
    if not filters:
        return ''
    if len(filters) == 1:
        return filters[0]
    wrapped = []
    for item in filters:
        # Leave filters alone when they already look parenthesised.
        if item[0] != '(' and item[-1] != ')':
            wrapped.append("(%s)" % item)
        else:
            wrapped.append(item)
    return ' '.join(wrapped)
def return_if_object(module, response):
    """Decode an API response, returning its JSON body or None.

    404 / 204 responses yield None; malformed JSON or an API-level
    'error' payload aborts the module via fail_json.
    """
    # If not found, return nothing.
    if response.status_code == 404:
        return None
    # If no content, return nothing.
    if response.status_code == 204:
        return None
    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)
    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
    return result
if __name__ == "__main__":
main()
| [
"brian.coca+git@gmail.com"
] | brian.coca+git@gmail.com |
7f0e509fd90816fd6fc1ee90569c03579a6cf0c6 | 3fee1411d1ca6d0b4ddfb2c9bf28f5209b3f19fc | /run/src/wrapper.py | 012f6375f12f9e91ea04b1bd587918269d36c239 | [] | no_license | ccohane/WebTrader | ceba675a95e8aa00a9a3436dd82ae84d9d4a27b9 | 09a98e81d15498adf04b06e023dbf954d2a61c85 | refs/heads/master | 2020-04-10T00:56:53.457918 | 2018-12-07T19:45:13 | 2018-12-07T19:45:13 | 160,700,869 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,379 | py |
#!/usr/bin/env python3
import requests
import json
class Markit:
    """Minimal client for the Markit On Demand stock quote/lookup API."""
    def __init__(self, user_input):
        # URL building blocks for dev.markitondemand.com.
        self.scheme= 'http://'
        self.subdomain = 'dev.'
        self.domain = 'markitondemand.'
        self.tld = 'com'
        self.lookup_path = '/MODApis/Api/v2/Lookup/json'
        self.lookup_querystring = '?input='
        self.quote_path = '/MODApis/Api/v2/Quote/json'
        self.quote_querystring = '?symbol='
        self.endpoint=self.scheme \
                + self.subdomain \
                + self.domain \
                + self.tld
        # Symbol or company name supplied by the caller.
        self.user_input=user_input
    def __enter__(self):
        # Context-manager support; nothing to acquire.
        return self
    def __exit__(self, type, value ,traceback):
        # Nothing to release either.
        pass
    def quote(self):
        """Fetch the last traded price for ``user_input`` (a symbol)."""
        response=json.loads(
                requests.get(
                    self.endpoint
                    + self.quote_path
                    + self.quote_querystring
                    + self.user_input)
                .text)
        return response["LastPrice"]
    def lookup(self):
        """Resolve ``user_input`` (a company name) to the first matching
        ticker symbol returned by the API."""
        response=json.loads(
                requests.get(
                    self.endpoint
                    + self.lookup_path
                    + self.lookup_querystring
                    + self.user_input)
                .text)
        return response[0]["Symbol"]
| [
"ccohane@gmail.com"
] | ccohane@gmail.com |
b387a5180e37579818a6be9f9076d862769b9bca | eb8dd326cc0d448bf88eb3557c4bcf6e28416bd7 | /listaCuadrado.py | d2f3c8d5eea2c283f2e5b4e8eb2b007761aa9ea1 | [] | no_license | Sarai-cr/python-programs | 931cc963045844f1544c49ba2abe37d5ccd37179 | c41f39f33276f294cc97bebd9a17cfb13c128095 | refs/heads/master | 2022-12-29T00:44:38.539110 | 2020-10-21T04:04:16 | 2020-10-21T04:04:16 | 290,906,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | cantidadDeNumeros = int (input())
listaDeNumeros = []
listaDeNumerosCuadrado = []
for numero in range (cantidadDeNumeros):
listaDeNumeros.append(int(input()))
print (listaDeNumeros)
for numero in listaDeNumeros:
cuadrado = numero**2
listaDeNumerosCuadrado.append(cuadrado)
print (listaDeNumerosCuadrado) | [
"noreply@github.com"
] | noreply@github.com |
104d482ce82b48b18de798968058e27e632c58a4 | 11f1eb37db10e13e8393bcd68496ff01b7e2e4e2 | /genetic_algorithm.py | fbfb12dd625e50a86e306a35117bb36fef86b631 | [] | no_license | ErnaC-ucl/genetic-algorithm-hyperparams-opt | a8fb635f9e711543da1469108696eaf997183e02 | 24d54694d96b7b718a99b64f63abc77d3e38200f | refs/heads/main | 2023-08-04T11:12:19.831706 | 2021-09-16T16:31:40 | 2021-09-16T16:31:40 | 407,234,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,621 | py |
import random
import numpy as np
from sklearn import preprocessing
from sklearn.model_selection import KFold
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics import mean_squared_error
def generate_data(xmin,xmax, delta, noise):
#Calculate f=sin(x1)+cos(x2)
x1=np.arange(xmin,xmax+delta,delta) #generate x1 values
x2=np.arange(xmin, xmax+delta, delta) #generate x2 values
x1,x2=np.meshgrid(x1,x2) #make x1, x2 grid of values
f=np.sin(x1)+np.cos(x2) #calculate for all (x1,x2) grid
#Add random noise to f
random.seed(123) #set random seed for reproducibility
for i in range(len(f)):
for j in range(len(f[0])):
f[i][j]=f[i][j]+random.uniform(-noise,noise)
return x1,x2,f
#Transform X into a 2D numpy array and y into a 1D numpy array
def prepare_data(x1,x2,f):
X=[]
for i in range(len(f)):
for j in range(len(f)):
X_temp=[]
X_temp.append(x1[i][j])
X_temp.append(x2[i][j])
X.append(X_temp)
y=f.flatten()
X=np.array(X)
y=np.array(y)
return X,y
def KRR_function(hyperparams,X,y, f_provi):
#Assign hyperparams
alpha_value,gamma_value=hyperparams
#Split data into test and train_set: random state fixed for reproducibility
kf=KFold(n_splits=10, shuffle=True,random_state=123)
y_pred=[]
y_test=[]
#KFold cross-validation loop
for train_index, test_index in kf.split(X):
X_train, X_test=X[train_index], X[test_index]
y_train, y_test_temp=y[train_index], y[test_index]
#Scale X_train and X_test
scaler=preprocessing.StandardScaler().fit(X_train)
X_train_scaled=scaler.transform(X_train)
X_test_scaled=scaler.transform(X_test)
# Fit KRR with scaled datasets
KRR=KernelRidge(kernel='rbf',alpha=alpha_value, gamma=gamma_value)
y_pred_temp=KRR.fit(X_train_scaled, y_train).predict(X_test_scaled)
#Append y_pred_temp and y_test_temp of this k-fold step to the list
y_pred.append(y_pred_temp)
y_test.append(y_test_temp)
#Flatten lists with test and predicted values
y_pred=[i for sublist in y_pred for i in sublist]
y_test=[i for sublist in y_test for i in sublist]
#Estimate error metric of test and predicted value
rmse=np.sqrt(mean_squared_error(y_test,y_pred))
print('alpha: %.6f . gamma: %.6f . rmse: %.6f' %(alpha_value,gamma_value,rmse)) # Uncomment to print intermediate results
f_provi.write("%.20f %.20f %.12f\n" %(alpha_value,gamma_value,rmse))
return rmse | [
"noreply@github.com"
] | noreply@github.com |
e1ff8838e74408dffc4bbb3722723ff62a425439 | 5afa0b8e447bb6b1565a64d201ee38adfa406e44 | /rapidsmsrw1000/apps/ubuzima/reports/utils.py | d393926a512649803396bf1457d11c5f819a56f9 | [] | no_license | daaray/rapidsmsrw1000 | 98ad2cb24a4b5cbbd8b496c64ad357c6ff687874 | 013a06a61987b18e61bdb0da8da09140b8b16d9a | refs/heads/master | 2020-12-25T00:38:29.934693 | 2013-03-26T07:15:11 | 2013-03-26T07:15:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,183 | py | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from rapidsmsrw1000.apps.ubuzima.models import *
from rapidsmsrw1000.apps.ambulances.models import *
from rapidsmsrw1000.apps.ubuzima.models import *
from rapidsmsrw1000.apps.chws.models import *
from django.utils.translation import ugettext as _
from django.utils.translation import activate, get_language
from decimal import *
from exceptions import Exception
import traceback
from datetime import *
from time import *
from django.db.models import Q
from django.conf import settings
import re
from random import randint
from rapidsms.router import send
from rapidsms.models import Connection
def forward (message, identity, text):
    """Relay *text* to the phone number *identity* over the same backend that
    *message* arrived on. Returns True when a send was attempted, False when
    the incoming message carries no connection to reuse."""
    if not message.connection:
        return False
    destination = Connection(backend = message.connection.backend, identity = identity)
    send(text, destination)
    return True
def read_weight(code_string, weight_is_mothers=False):
    """Parse a weight action code into a Field.

    The first two characters of *code_string* are the action-code prefix; the
    remainder is the decimal weight value. *weight_is_mothers* selects the
    mother_weight field type instead of child_weight. Returns None when the
    field type is missing or the value is not a valid Decimal.
    """
    try:
        field_type = FieldType.objects.get(key="child_weight" if not weight_is_mothers else "mother_weight")
        value = Decimal(code_string[2:])
        return Field(type=field_type, value=value)
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        return None
def read_height(code_string, height_is_mothers=False):
    """Parse a height action code into a Field.

    The first two characters of *code_string* are the action-code prefix; the
    remainder is the decimal height value. *height_is_mothers* selects the
    mother_height field type instead of child_height. Returns None when the
    field type is missing or the value is not a valid Decimal.
    """
    try:
        field_type = FieldType.objects.get(key="child_height" if not height_is_mothers else "mother_height")
        value = Decimal(code_string[2:])
        return Field(type=field_type, value=value)
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        return None
def read_key(code_string):
    """Look up the FieldType whose key matches *code_string* (case-insensitive)
    and wrap it in a value-less Field. Returns None for unknown codes."""
    try:
        field_type = FieldType.objects.get(key = code_string.lower())
        return Field(type=field_type)
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        return None
def parse_date(dob_string):
    """Parse a loosely-formatted date string into its canonical string form.

    Accepts either a bare year ("87", "1987") or a full "DD.MM.YYYY" date;
    limited-precision dates are kept as strings rather than date objects.
    Returns None when the string matches neither shape, and raises for
    malformed day/month/length. (Mirrors parse_dob below, but performs no
    future-date validation.)
    """
    bare_year = re.search(r"^(\d+)$", dob_string)
    if bare_year:
        digits = bare_year.group(1)
        if len(digits) == 2:
            # guess the century: two-digit years up to the current one are 20xx
            prefix = "20" if int(digits) <= date.today().year % 100 else "19"
            digits = prefix + digits
        if len(digits) == 4:
            return digits
    full_date = re.search(r"^(\d+)\.(\d+)\.(\d+)$", dob_string)
    if full_date:
        dd, mm, yyyy = full_date.groups()
        if len(dd) > 2 or len(mm) > 2 or len(yyyy) > 4:
            raise Exception(_("Invalid date format, must be in the form: DD.MM.YYYY"))
        if int(mm) > 12 or int(mm) < 1:
            raise Exception(_("Invalid date format, must be in the form: DD.MM.YYYY"))
        if int(dd) > 31 or int(dd) < 1:
            raise Exception(_("Invalid date format, must be in the form: DD.MM.YYYY"))
        return "%02d.%02d.%04d" % (int(dd), int(mm), int(yyyy))
    return None
def read_fields(code_string, accept_date=False, weight_is_mothers=False):
    """Tries to parse all the fields according to our set of action and movement codes. We also
    try to figure out if certain fields are dates and stuff them in as well."""
    # split our code string by spaces
    codes = code_string.split()
    fields = []
    invalid_codes = []
    num_mov_codes = 0
    # the dob we might extract from this
    dob = None
    for code in codes:
        try:
            # first try to look up the code in the DB
            field_type = FieldType.objects.get(key=code.lower())
            fields.append(Field(type=field_type))
            # (fix: removed a stray no-op literal `7` that had crept in here)
            # if the action code is a movement code, increment our counter of
            # movement codes -- messages may only have one movement code
            if field_type.category.id == 4:
                num_mov_codes += 1
        # didn't recognize this code? then it is a scalar value,
        # run some regexes to derive what it is
        except FieldType.DoesNotExist:
            m1 = re.search(r"(\d+\.?\d*)(k|kg|kilo|kilogram)", code, re.IGNORECASE)
            m2 = re.search(r"(\d+\.?\d*)(c|cm|cent|centimeter)", code, re.IGNORECASE)
            # this is a weight
            if m1:
                field_type = FieldType.objects.get(key="child_weight" if not weight_is_mothers else "mother_weight")
                value = Decimal(m1.group(1))
                fields.append(Field(type=field_type, value=value))
            # this is a length (stored under the "muac" field type)
            elif m2:
                field_type = FieldType.objects.get(key="muac")
                value = Decimal(m2.group(1))
                fields.append(Field(type=field_type, value=value))
            else:
                # unknown: try to parse as a dob, else record the code as invalid
                # (renamed from `date` to avoid shadowing datetime.date)
                dob_candidate = parse_dob(code)
                if accept_date and dob_candidate:
                    dob = dob_candidate
                else:
                    invalid_codes.append(code)
    # take care of any error messaging
    error_msg = ""
    if len(invalid_codes) > 0:
        error_msg += _("Unknown action code: %(invalidcode)s. ") % \
            { 'invalidcode': ", ".join(invalid_codes)}
    if num_mov_codes > 1:
        error_msg += unicode(_("You cannot give more than one location code"))
    if error_msg:
        error_msg = _("Error. %(error)s") % { 'error': error_msg }
        # there's actually an error, throw it over the fence
        raise Exception(error_msg)
    return (fields, dob)
def get_or_create_patient(reporter, national_id):
    """Takes care of searching our DB for the passed in patient. Equality is determined
    using the national id only (IE, dob doesn't come into play). This will create a
    new patient with the passed in reporter if necessary."""
    try:
        return Patient.objects.get(national_id=national_id)
    except Patient.DoesNotExist:
        # not found -- create the patient at the reporter's health centre
        return Patient.objects.create(national_id=national_id, location=reporter.health_centre)
def create_report(report_type_name, patient, reporter):
    """Convenience for creating a new Report object from a reporter, patient and type """
    report_type = ReportType.objects.get(name=report_type_name)
    return Report(
        patient=patient,
        reporter=reporter,
        type=report_type,
        location=reporter.health_centre,
        village=reporter.village,
    )
def run_triggers(message, report):
    """Called whenever we get a new report. We run our triggers, figuring out if there
    are messages to send out to supervisors. We return the message that should be sent
    to the reporter themselves, or None if there is no matching trigger for the reporter."""
    try:
        # find all matching triggers
        triggers = TriggeredText.get_triggers_for_report(report)
        # the message we'll send back to the reporter
        reporter_message = None
        # for each one
        for trigger in triggers:
            lang = get_language()
            # audit trail: persist that this trigger fired, with full location context
            alert = TriggeredAlert(reporter=report.reporter, report=report, trigger=trigger, location = report.location, village = report.reporter.village,\
                cell = report.reporter.cell, sector = report.reporter.sector, district = report.reporter.district,\
                province = report.reporter.province, nation= report.reporter.nation)
            alert.save()
            curloc = report.location
            # if the destination is the reporter himself, need to respond correctly
            if trigger.destination == TriggeredText.DESTINATION_CHW:
                # calculate our message based on language, we'll return it in a bit
                lang = get_language()
                reporter_message = trigger.message_kw
                if lang == 'en':
                    reporter_message = trigger.message_en
                elif lang == 'fr':
                    reporter_message = trigger.message_fr
            # if we are supposed to tell the district supervisor and our current location
            # is a health clinic, then walk up the tree looking for a hospital
            elif trigger.destination == TriggeredText.DESTINATION_DIS or trigger.destination == TriggeredText.DESTINATION_SUP:
                # find the parent
                location = curloc
                sups = Supervisor.objects.filter(health_centre = location).order_by("pk")
                if trigger.destination == TriggeredText.DESTINATION_DIS:
                    # district destination: alert the referral hospital's supervisors instead
                    location = report.reporter.referral_hospital
                    sups = Supervisor.objects.filter(health_centre = location).order_by("pk")
                # couldn't find it? oh well, we'll alert the normal supervisor
                #print [sup.connection() for sup in sups]
                # for each supervisor
                for sup in sups:
                    # load the connection for it
                    conn = sup.connection()
                    lang = sup.language
                    # get the appropriate message to send, in the supervisor's language
                    text = trigger.message_kw
                    code_lang = trigger.triggers.all()[0].kw
                    if lang == 'en':
                        text = trigger.message_en
                        code_lang = trigger.triggers.all()[0].en
                    elif lang == 'fr':
                        text = trigger.message_fr
                        code_lang = trigger.triggers.all()[0].fr
                    # and send this message to them
                    msg_forward = text % (message.connection.identity, report.patient.national_id, report.reporter.village, code_lang)
                    forward(message, conn.identity, msg_forward)
            elif trigger.destination == TriggeredText.DESTINATION_AMB:
                try:
                    # prefer drivers at the current health centre; fall back to the referral hospital
                    ambs = AmbulanceDriver.objects.filter(health_centre = curloc)
                    if ambs.count() < 1:
                        curloc = report.reporter.referral_hospital
                        ambs = AmbulanceDriver.objects.filter(referral_hospital = curloc)
                    for amb in ambs:
                        amb.send_notification(message, report)
                        forward(message, amb.phonenumber, trigger.message_kw % (message.connection.identity, report.patient.national_id, report.reporter.village, trigger.triggers.all()[0].kw))
                except Exception, e:
                    # best-effort: a failed ambulance notification must not stop other triggers
                    print e
                    continue
        # return our advice texts
        if is_mother_weight_loss(report):
            forward(message, message.connection.identity, "Uyu mubyeyi %s yatakaje ibiro, nukureba uko wamugira inama." % report.patient.national_id)
        elif is_mother_risky(report):
            forward(message, message.connection.identity, "Uyu mubyeyi %s afite uburebure budashyitse, nukureba uko mwamuba hafi kugeza abyaye." \
                % report.patient.national_id)
        return reporter_message
    except Exception, e:
        # swallow everything: trigger processing must never break message handling
        print e
        return None
def cc_supervisor(message, report):
    """ CC's the supervisor of the clinic for this CHW """
    try:
        # now look up to see if we have any reporters in this group with the same location as
        # our reporter
        sups = Supervisor.objects.filter(health_centre = message.reporter.health_centre).order_by("pk")
        # reporter identity
        reporter_ident = message.reporter.connection().identity
        #reporter village (looked up for context; not used in the message below)
        reporter_village = message.reporter.village
        # we have at least one supervisor
        if sups:
            for sup in sups:
                # load the connection for it
                conn = sup.connection()
                # and send this message to them: "<reporter phone>: <verbose report>"
                msg_forward = _("%(phone)s: %(report)s" % { 'phone': reporter_ident, 'report': report.as_verbose_string() })
                forward(message, conn.identity, msg_forward)
    except Exception, e:
        # best-effort: a CC failure must never block handling of the original message
        #print e
        pass
def parse_dob(dob_string):
    """Parse a date-of-birth string into its canonical string form.

    Accepts either a bare year ("87", "1987") or a full "DD.MM.YYYY" date;
    limited-precision dates stay as strings rather than date objects.
    Returns None when neither shape matches; raises for malformed input and
    for dates in the future. NOTE: values that pass the range checks but are
    not real calendar dates (e.g. 31.02.2000) make strptime raise an
    uncaught ValueError.
    """
    bare_year = re.search(r"^(\d+)$", dob_string)
    if bare_year:
        digits = bare_year.group(1)
        if len(digits) == 2:
            # guess the century: two-digit years up to the current one are 20xx
            prefix = "20" if int(digits) <= date.today().year % 100 else "19"
            digits = prefix + digits
        if len(digits) == 4:
            return digits
    full_date = re.search(r"^(\d+)\.(\d+)\.(\d+)$", dob_string)
    if full_date:
        dd, mm, yyyy = full_date.groups()
        if len(dd) > 2 or len(mm) > 2 or len(yyyy) > 4:
            raise Exception(_("Invalid date format, must be in the form: DD.MM.YYYY"))
        if int(mm) > 12 or int(mm) < 1:
            raise Exception(_("Invalid date format, must be in the form: DD.MM.YYYY"))
        if int(dd) > 31 or int(dd) < 1:
            raise Exception(_("Invalid date format, must be in the form: DD.MM.YYYY"))
        if int(yyyy) > int(date.today().year):
            raise Exception(_("Invalid date, cannot be in the future."))
        dob = "%02d.%02d.%04d" % (int(dd), int(mm), int(yyyy))
        if datetime.strptime(dob, "%d.%m.%Y").date() > date.today():
            raise Exception(_("Invalid date, cannot be in the future."))
        return dob
    return None
def read_muac(code_string):
    """Parse a MUAC code: the first four characters are the "muac" prefix, the
    remainder is the decimal measurement. Returns None on any parse or
    lookup failure."""
    try:
        field_type = FieldType.objects.get(key="muac")
        value = Decimal(code_string[4:])
        return Field(type=field_type, value=value)
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        return None
def read_number(code_string):
    """Parse *code_string* as a decimal child_number Field (the whole string
    is the value). Returns None on any parse or lookup failure."""
    try:
        field_type = FieldType.objects.get(key="child_number")
        value = Decimal(code_string)
        return Field(type=field_type, value=value)
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        return None
def read_gravity(code_string):
    """Parse *code_string* as a decimal gravity Field (the whole string is
    the value). Returns None on any parse or lookup failure."""
    try:
        field_type = FieldType.objects.get(key="gravity")
        value = Decimal(code_string)
        return Field(type=field_type, value=value)
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        return None
def read_parity(code_string):
    """Parse *code_string* as a decimal parity Field (the whole string is
    the value). Returns None on any parse or lookup failure."""
    try:
        field_type = FieldType.objects.get(key="parity")
        value = Decimal(code_string)
        return Field(type=field_type, value=value)
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        return None
def read_bmi(report):
    """Compute the mother's body-mass index from the report's weight and
    height fields.

    Returns None (explicitly, instead of falling off a bare except) when
    either field is missing or unreadable.
    """
    try:
        weight = report.fields.get(type__key = 'mother_weight').value
        height = report.fields.get(type__key = 'mother_height').value
        # BMI = weight / height^2; the *100*100 factor implies height is
        # presumably stored in centimetres -- TODO confirm units
        return weight * 100 * 100 / (height * height)
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        return None
def is_mother_weight_loss(report):
    """Return True when the weight on *report* is lower than the mother's most
    recently stored weight; False otherwise, or when either value is missing."""
    try:
        weight = report.fields.get(type__key = 'mother_weight').value
        # newest stored report for this patient (highest id).
        # NOTE(review): if *report* is already saved at this point, this may
        # compare the report against itself -- confirm the expected call order.
        history = Report.objects.filter(patient = report.patient).order_by('-id')[0].fields.get(type__key = 'mother_weight').value
        if weight < history: return True
        else: return False
    except: return False
def is_mother_risky(report):
    """Return True when the mother's recorded height is below 145 cm; False
    otherwise, or when no height field can be read from the report."""
    try:
        return report.fields.get(type__key='mother_height').value < 145
    except:  # any lookup failure (e.g. no height recorded) counts as not risky
        return False
def read_nid(message, nid):
    """Validate that *nid* is exactly 16 characters long.

    On failure, first persist an ErrorNote snapshotting the sender and their
    location hierarchy, then raise; on success return the nid unchanged.
    """
    if len(nid) != 16:
        # record the bad submission before rejecting it
        err = ErrorNote(errmsg = message.text, type = ErrorType.objects.get(name = "Invalid ID"), errby = message.reporter, identity =\
        message.connection.identity, location =message.reporter.health_centre , village=message.reporter.village,\
        cell = message.reporter.cell, sector = message.reporter.sector, district = message.reporter.health_centre.district,\
        province = message.reporter.health_centre.province, nation = message.reporter.health_centre.nation).save()
        raise Exception(_("Error. National ID must be exactly 16 digits, you sent the nid: %(nat_id)s with only %(uburefu)d digits") %
        { "nat_id": nid , "uburefu": len(nid) } )
    else: return nid
def set_date_string(date_string):
    """Parse a "DD.MM.YYYY" string into a datetime.date.

    Returns the parsed date, or None (implicitly) when the string does not
    match the expected format, so callers can keep the raw date_string value.
    """
    try:
        # renamed from `date` to avoid shadowing the datetime.date import;
        # `except ValueError,e` was Python-2-only syntax -- this form works in both
        parsed = datetime.strptime(date_string, "%d.%m.%Y").date()
        return parsed
    except ValueError:
        # no-op, just keep the date_string value
        pass
def message_reporter(message):
    """Resolve the Reporter who sent *message* via the connection's contact
    name (which holds the national id). In TRAINING_ENV a stand-in anonymous
    reporter is fabricated instead of failing; otherwise any lookup failure
    is reported as "register first"."""
    try:
        return Reporter.objects.filter(national_id = message.connection.contact.name )[0]
    except :
        if settings.TRAINING_ENV == True: return anonymous_reporter(message.connection.identity)
        else: raise Exception(_("You need to be registered first, use the REG keyword"))
def anonymous_reporter(identity):
    """Fetch or fabricate an ANONYMOUS Reporter keyed by the sender's phone
    number, attached to the TEST health centre/hospital fixtures. Used when
    settings.TRAINING_ENV is enabled; returns None if anything fails."""
    reporter = None
    try:
        names = "ANONYMOUS"
        telephone = identity
        try:
            hc = HealthCentre.objects.get(name = "TEST")
            hp = Hospital.objects.get(name = "TEST")
            telephone = parse_phone_number(telephone)
            # synthetic national id: phone digits (minus the +25 prefix) + 6 random digits
            nid = "%s%s" % ( telephone[3:] , str(random_with_N_digits(6)))
            try: tester = Reporter.objects.get(telephone_moh = telephone, health_centre = hc, referral_hospital = hp)
            except:
                tester, created = Reporter.objects.get_or_create(telephone_moh = telephone, national_id = nid, health_centre = hc, referral_hospital = hp)
            # fill in boilerplate profile fields for the stand-in reporter
            tester.surname = names
            tester.role = Role.objects.get(code = 'asm')
            tester.sex = Reporter.sex_male
            tester.education_level = Reporter.education_universite
            tester.date_of_birth = datetime.today()
            tester.join_date = datetime.today()
            tester.district = hc.district
            tester.nation = hc.nation
            tester.province = hc.province
            tester.sector = Sector.objects.get(name = 'TEST')
            tester.cell = Cell.objects.get(name = 'TEST')
            tester.village = Village.objects.get(name = 'TEST')
            tester.updated = datetime.now()
            tester.language = Reporter.language_kinyarwanda
            tester.save()
            confirm, created = RegistrationConfirmation.objects.get_or_create(reporter = tester)
            confirm.save()
            reporter = tester
        except Exception, e:
            # missing TEST fixtures or save failure: fall through and return None
            print e
            pass
    except Exception, e:
        print e
        pass
    return reporter
def parse_phone_number(number):
    """Normalize a Rwandan phone number to the "+250XXXXXXXXX" form.

    Accepts strings or numbers; spaces are stripped and a numeric-looking
    value is first squeezed through float/int (which drops a leading zero,
    letting "078..." inputs match the "78..." rule). Inputs that match no
    known prefix are returned as-is; unconvertible input yields False.
    """
    try:
        normalized = str(int(float(number)))
    except:
        try:
            normalized = str(int(number))
        except:
            try:
                normalized = str(number)
            except:
                return False
    normalized = normalized.replace(" ", "")
    try:
        if type(normalized) != str:
            normalized = str(int(normalized))
        if normalized[:3] == "+25" and len(normalized[3:]) == 10:
            return normalized
        if normalized[:3] == "250" and len(normalized[3:]) == 9:
            return "+" + normalized
        if normalized[:3] == "078" and len(normalized[3:]) == 7:
            return "+25" + normalized
        if normalized[:2] == "78" and len(normalized[2:]) == 7:
            return "+250" + normalized
        return normalized
    except:
        return False
def random_with_N_digits(n):
    """Return a uniformly random integer with exactly *n* decimal digits."""
    lower = 10 ** (n - 1)
    upper = 10 ** n - 1
    return randint(lower, upper)
| [
"zigdidier@gmail.com"
] | zigdidier@gmail.com |
ab953ee95eb426992b00d40883863abbb3b98acb | 4912d86bc18cb393f312d9c65042fe3607e3a7c6 | /LinearRegression.py | 6738eaf475de6fc462a73d166242f3bfc5ab6e3f | [] | no_license | ChristineWeitw/Tensorflow-ML | 4bc2a7c5570d277475e572874a596bd1fe6156bb | 33e59a6514e006c0fed42ca67a970c34306ad7ed | refs/heads/master | 2022-11-24T22:49:13.081059 | 2020-08-02T13:39:58 | 2020-08-02T13:39:58 | 277,881,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,312 | py | from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import clear_output
from six.moves import urllib
# NOTE: the line here used to be the shell command "pip install --upgrade
# tensorflow" pasted straight from a notebook; it is not valid Python and made
# this module a SyntaxError. Run the install from a terminal (or a "!pip"
# notebook cell) instead.
import tensorflow.compat.v2.feature_column as fc
import tensorflow as tf
# load dataset: the public Titanic survival CSVs; pop the label column off so
# the dataframes hold features only.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')
ytrain = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
# EDA graphs
dftrain.age.hist(bins=20)
dftrain['class'].value_counts().plot(kind='barh')
# deal with different data types (preprocessing): build one tf feature column
# per input feature, categorical vs numeric.
CATEGORICAL_FEATURES = ['sex','n_siblings_spouses','parch','class','deck','embark_town','alone']
NUMERIC_FEATURES = ['age','fare']
feature_columns = []
for feature_name in CATEGORICAL_FEATURES:
    # vocabulary = the distinct values observed in the training data
    vocabulary = dftrain[feature_name].unique()
    feature_columns.append(tf.feature_column.categorical_column_with_vocabulary_list(feature_name,vocabulary))
# NOTE(review): original indentation was lost in transit; the prints are
# assumed to sit after each loop (inspection output) -- confirm against source.
print(feature_columns)
for feature_name in NUMERIC_FEATURES:
    feature_columns.append(tf.feature_column.numeric_column(feature_name,dtype=tf.float32))
print(feature_columns)
# create proper input_data
def make_input_fn(data_df,label_df,num_epochs=10,shuffle=True,batch_size=32):
    """Return a zero-argument input_fn producing a batched tf.data.Dataset of
    (feature-dict, label) pairs, optionally shuffled, repeated num_epochs times."""
    def input_function():
        # pair the per-column feature dict with the label series
        dataset = tf.data.Dataset.from_tensor_slices((dict(data_df), label_df))
        if shuffle:
            dataset = dataset.shuffle(1000)
        # batch first, then repeat for the requested number of epochs
        return dataset.batch(batch_size).repeat(num_epochs)
    return input_function
# training input: shuffled, 10 epochs (defaults); evaluation input: one
# unshuffled pass over the held-out data.
train_input_fn = make_input_fn(dftrain,ytrain)
test_input_fn = make_input_fn(dfeval,y_eval,num_epochs=1,shuffle=False)
# create the model
## 1. define the model
linear_mdl = tf.estimator.LinearClassifier(feature_columns=feature_columns)
## 2. fit in the training dataset
linear_mdl.train(train_input_fn)
## 3. apply on testing dataset
result = linear_mdl.evaluate(test_input_fn)
print(result['accuracy'])
print(result)
# how to see the results model predicted (per-example dicts; see the
# 'probabilities' print that follows)
results = list(linear_mdl.predict(test_input_fn))
print(results[0]['probabilities']) | [
"hywei1701@gmail.com"
] | hywei1701@gmail.com |
ba41fa13d03b20981772cc3457a2e5162dc15736 | 8035606557a1d9df69846a7e253b152683a2ba63 | /core_logic/RegimeClassifier.py | 855c2a764335f2f52336c5e2ecf8c38493c04d75 | [] | no_license | Kaisaking/DAFD | c0f8ac2858271832c4be64ea651e011ec3e57cdd | 4d0a226913f205f9d884eb6d38733c9c9db1405c | refs/heads/master | 2023-03-01T08:41:19.193247 | 2020-07-13T13:28:19 | 2020-07-13T13:28:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | from models.regime_models.NeuralNetModel_regime import NeuralNetModel_regime
from helper_scripts.ModelHelper import ModelHelper
import sklearn.metrics
import numpy as np
load_model = True # Load the file from disk
class RegimeClassifier:
    """
    Small adapter class that handles regime prediction
    """
    # class-level default; replaced with a NeuralNetModel_regime in __init__
    neuralnet = None
    def __init__(self):
        # singleton helper that owns the (normalised) training data
        self.MH = ModelHelper.get_instance() # type: ModelHelper
        self.neuralnet = NeuralNetModel_regime()
        print("regime classifier")
        if load_model:
            # module-level flag: load pre-trained weights from disk
            print("Loading classifier")
            self.neuralnet.load_model()
        else:
            # train from scratch on the whole-normalised feature set
            print("Training classifier")
            print("Data points: " + str(len(self.MH.train_features_dat_wholenorm)))
            self.neuralnet.train_model(self.MH.train_features_dat_wholenorm, self.MH.train_regime_dat)
            train_features = np.stack(self.MH.train_features_dat_wholenorm)
            train_labels = np.stack(self.MH.train_regime_dat)
            # the -1 suggests labels are 1-based in the data but 0-based for the
            # network's predicted classes -- TODO confirm label encoding
            print("Train accuracy: " + str(sklearn.metrics.accuracy_score(train_labels-1,self.neuralnet.classifier_model.predict_classes(train_features))))
            print()
    def predict(self,features):
        # thin delegation to the wrapped neural-net model
        return self.neuralnet.predict(features)
| [
"chrizrodz@gmail.com"
] | chrizrodz@gmail.com |
477a74afd8d690d5b81e99a291ab6500127ddfe2 | eb8af57967f4b1dd771ca2ae4bf987cb8f46556d | /charset/settings.py | 543a08fd21a125c395ddd28cff12752e5df17ba3 | [] | no_license | glomumo/glowie | 210f4a0f0684ab90bee66c88ad1cbe3cb4fa769a | e307f0ce5d0dca78b8c5cf212f1601bd5f5711f4 | refs/heads/master | 2020-05-02T20:02:55.347044 | 2019-04-01T08:37:29 | 2019-04-01T08:37:29 | 178,177,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,356 | py | """
Django settings for charset project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'xh)p(%u3#x9*vprfe)m!ybvb*_@&v4575v9rus@kz8x43+91%w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'phonebook',
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'charset.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'charset.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# User-uploaded media files
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# third-party: django-crispy-forms rendering pack
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# Auth redirects -- values are named URL patterns in the phonebook app.
LOGIN_URL= 'phonebook:login'
LOGIN_REDIRECT_URL = 'phonebook:contact-index'
# NOTE(review): LOGIN_URL uses 'phonebook:login' while this uses
# 'phonebook:contact-login' -- confirm both route names exist in phonebook/urls.py
LOGOUT_REDIRECT_URL = 'phonebook:contact-login'
| [
"glowmumo@gmail.com"
] | glowmumo@gmail.com |
4094dc115614d752fdc61bd95ecac6cfb7797367 | 7b3711d4c6d7284255ba0270d49d120f984bf7c6 | /problems/2361_minimum_cost_using_the_train_line.py | 546dcff817a1d4ba91ccab3ae95614fb4d2f1ff7 | [] | no_license | loganyu/leetcode | 2d336f30feb55379aaf8bf0273d00e11414e31df | 77c206305dd5cde0a249365ce7591a644effabfc | refs/heads/master | 2023-08-18T09:43:10.124687 | 2023-08-18T00:44:51 | 2023-08-18T00:44:51 | 177,875,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,677 | py | '''
A train line going through a city has two routes, the regular route and the express route. Both routes go through the same n + 1 stops labeled from 0 to n. Initially, you start on the regular route at stop 0.
You are given two 1-indexed integer arrays regular and express, both of length n. regular[i] describes the cost it takes to go from stop i - 1 to stop i using the regular route, and express[i] describes the cost it takes to go from stop i - 1 to stop i using the express route.
You are also given an integer expressCost which represents the cost to transfer from the regular route to the express route.
Note that:
There is no cost to transfer from the express route back to the regular route.
You pay expressCost every time you transfer from the regular route to the express route.
There is no extra cost to stay on the express route.
Return a 1-indexed array costs of length n, where costs[i] is the minimum cost to reach stop i from stop 0.
Note that a stop can be counted as reached from either route.
Example 1:
Input: regular = [1,6,9,5], express = [5,2,3,10], expressCost = 8
Output: [1,7,14,19]
Explanation: The diagram above shows how to reach stop 4 from stop 0 with minimum cost.
- Take the regular route from stop 0 to stop 1, costing 1.
- Take the express route from stop 1 to stop 2, costing 8 + 2 = 10.
- Take the express route from stop 2 to stop 3, costing 3.
- Take the regular route from stop 3 to stop 4, costing 5.
The total cost is 1 + 10 + 3 + 5 = 19.
Note that a different route could be taken to reach the other stops with minimum cost.
Example 2:
Input: regular = [11,5,13], express = [7,10,6], expressCost = 3
Output: [10,15,24]
Explanation: The diagram above shows how to reach stop 3 from stop 0 with minimum cost.
- Take the express route from stop 0 to stop 1, costing 3 + 7 = 10.
- Take the regular route from stop 1 to stop 2, costing 5.
- Take the express route from stop 2 to stop 3, costing 3 + 6 = 9.
The total cost is 10 + 5 + 9 = 24.
Note that the expressCost is paid again to transfer back to the express route.
Constraints:
n == regular.length == express.length
1 <= n <= 105
1 <= regular[i], express[i], expressCost <= 105
'''
class Solution:
    def minimumCosts(self, regular: List[int], express: List[int], expressCost: int) -> List[int]:
        """Dynamic programming over stops.

        Track the cheapest cost of standing at the current stop on the
        regular route and on the express route; boarding the express route
        costs expressCost each time, switching back is free. costs[i] is the
        cheaper of the two at stop i+1.
        """
        cheapest_regular = 0
        cheapest_express = expressCost
        costs = []
        for regular_hop, express_hop in zip(regular, express):
            cheapest_regular, cheapest_express = (
                regular_hop + min(cheapest_regular, cheapest_express),
                express_hop + min(expressCost + cheapest_regular, cheapest_express),
            )
            costs.append(min(cheapest_regular, cheapest_express))
        return costs
| [
"yu.logan@gmail.com"
] | yu.logan@gmail.com |
fcb0036c4b6ffcc7226d569c70e903d2a62098f7 | 6bb5994a760e823262aa2dbced083bc1b4be4c32 | /mundo_TRES/ex083.py | deea3aa86418785a2722877b5395aafe4744f956 | [] | no_license | haell/AulasPythonGbara | b9b489b917b9edb4ff64ab397434ade285719ad4 | b3270765c972bc2c3e73ee55f2b04b63705b529e | refs/heads/master | 2023-06-25T18:08:53.678208 | 2023-06-19T18:05:08 | 2023-06-19T18:05:08 | 292,722,474 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | # Crie um programa onde o usuário digite uma expressão qualquer que use parênteses.
# Seu aplicativo deverá analisar se a expressão passada está com os parênteses abertos e fechados na ordem correta.
# Ex. 83: check that the parentheses in a user-typed expression open and close
# in the correct order, using a running balance instead of two counters.
expressão = input("Digite uma expressão: ")
saldo = 0          # "(" seen so far minus ")" seen so far
equilibrada = True
for caractere in expressão:
    if caractere == "(":
        saldo += 1
    elif caractere == ")":
        saldo -= 1
        if saldo < 0:  # a ")" appeared before its matching "("
            equilibrada = False
            break
if equilibrada and saldo == 0:
    print('Sua expressão está CORRETA!')
else:
    print('Sua expressão está INCORRETA!')
| [
"israelaraujo.emply@gmail.com"
] | israelaraujo.emply@gmail.com |
0b7ff06c8aa9f6a941ff4fe8a749d7d0a028286b | 4da0c8906c9cd671e3a4bee3a6ee801a353e3d9a | /Water/watres/migrations/0012_targetusewo_checkin.py | 82f4465901163fac353bd3219380d8d3cb10db6e | [] | no_license | avpakh/GVK | 2a5a699caa8a986a3fd0dadbe2160fc9da5bf193 | ac8b8d8ad5cd5ef8485e98cd532a29cd420e0cae | refs/heads/master | 2020-06-13T10:35:36.663668 | 2017-01-06T09:01:42 | 2017-01-06T09:01:42 | 75,392,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-02 14:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the boolean `checkin` column to the existing `targetusewo` model.
    # NOTE(review): this migration has presumably been applied already -- do
    # not alter its operations after the fact.

    dependencies = [
        ('watres', '0011_auto_20160816_1523'),
    ]

    operations = [
        migrations.AddField(
            model_name='targetusewo',
            name='checkin',
            # default=1 is a truthy int standing in for True, used to backfill
            # existing rows; preserve_default=False means the default is applied
            # only during this migration, not kept on the field afterwards.
            field=models.BooleanField(default=1),
            preserve_default=False,
        ),
    ]
| [
"aliaksandr.pakhomau@gmail.com"
] | aliaksandr.pakhomau@gmail.com |
324a4ac05fe45323313eabb325470f4e7835dea8 | b0fe44512e3969d47b684929f33269dee0135af3 | /FN_v1/utils/generate_test_splits.py | c13abd64e0fa51a1307ffc61049c9de695a9f73c | [
"Apache-2.0"
] | permissive | zhuyq123/FN_v1 | cd9393b7703474fe51148bb0e1ea64df81085b3e | f72bbdc463309bad7d44539cb04e531df63a2831 | refs/heads/master | 2021-01-21T17:41:17.010004 | 2017-05-21T20:40:27 | 2017-05-21T20:40:27 | 91,981,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,012 | py | import random
import os
from collections import defaultdict
def generate_hold_out_split(dataset, training=0.8, base_dir="spiltnew"):
    """Shuffle the dataset's article ids with a fixed seed and split them
    into a training portion (fraction *training*) and a hold-out portion,
    writing one id per line to ``training_ids.txt`` / ``test_ids.txt``
    inside *base_dir*.
    """
    rng = random.Random()
    rng.seed(1489215)  # fixed seed -> reproducible split across runs
    ids = list(dataset.articles.keys())
    rng.shuffle(ids)
    cut = int(training * len(ids))
    splits = {"training_ids.txt": ids[:cut], "test_ids.txt": ids[cut:]}
    for fname, id_list in splits.items():
        with open(base_dir + "/" + fname, "w+") as out:
            out.write("\n".join(str(article_id) for article_id in id_list))
def read_ids(file, base):
    """Read one integer id per line from ``base/file`` and return them as a list."""
    path = base + "/" + file
    with open(path, "r") as handle:
        return [int(line) for line in handle]
def kfold_split(dataset, training=0.8, n_folds=10, base_dir="split_teaching"):
    """Return (folds, hold_out_ids): the persisted training ids carved into
    *n_folds* roughly equal slices, plus the hold-out ids.

    If the id files are missing in *base_dir*, the split is generated first.
    """
    train_path = base_dir + "/" + "training_ids.txt"
    test_path = base_dir + "/" + "test_ids.txt"
    if not (os.path.exists(train_path) and os.path.exists(test_path)):
        generate_hold_out_split(dataset, training, base_dir)
    training_ids = read_ids("training_ids.txt", base_dir)
    hold_out_ids = read_ids("test_ids.txt", base_dir)
    folds = [
        training_ids[int(k * len(training_ids) / n_folds):int((k + 1) * len(training_ids) / n_folds)]
        for k in range(n_folds)
    ]
    return folds, hold_out_ids
def get_stances_for_folds(dataset, folds, hold_out):
    """Partition ``dataset.stances`` by their 'Body ID'.

    Stances whose body id is in *hold_out* go to ``stances_hold_out``;
    the rest are appended to ``stances_folds[fold_id]`` for each fold that
    contains the id. Returns (stances_folds, stances_hold_out).

    Membership tests use sets built once up front instead of repeated
    list scans, turning the original O(stances * ids) work into O(stances).
    """
    hold_out_set = set(hold_out)
    fold_sets = [set(fold) for fold in folds]
    stances_folds = defaultdict(list)
    stances_hold_out = []
    for stance in dataset.stances:
        body_id = stance['Body ID']
        if body_id in hold_out_set:
            stances_hold_out.append(stance)
        else:
            for fold_id, fold in enumerate(fold_sets):
                if body_id in fold:
                    stances_folds[fold_id].append(stance)
    return stances_folds, stances_hold_out
| [
"noreply@github.com"
] | noreply@github.com |
3421962f015eb96a5e9610002b5965f0e34a3367 | 400c5490bcfe06820dcf2b431eba40639b56d6ed | /ads/kth_lowest/kth_lowest.py | 9ed224a80ea90ef28c12f3f3c2484c2000b68a38 | [] | no_license | Adman/school | 1e84a5fef75a7481711392b164b616e514feaf76 | 10b88201857100ec66ae9484df8c679b1144874d | refs/heads/master | 2020-12-31T07:21:05.722771 | 2020-02-18T14:48:25 | 2020-02-18T14:48:25 | 58,227,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,014 | py | class Node:
def __init__(self, val):
self.left = None
self.right = None
self.val = val
self.lchilds = 1
def add_child(self, node):
if node.val < self.val:
self.lchilds += 1
if self.left is None:
self.left = node
else:
self.left.add_child(node)
else:
if self.right is None:
self.right = node
else:
self.right.add_child(node)
def kth_lowest(self, k):
if self.lchilds > k + 1:
return self.left.kth_lowest(k)
elif self.lchilds == k + 1:
return self.val
return self.right.kth_lowest(k-self.lchilds)
# Driver: read q operations; 'i <v>' inserts v, 'f <k>' prints the k-th lowest.
num_queries = int(input())
root = None
for _ in range(num_queries):
    parts = input().split(' ')
    command, key = parts[0], int(parts[1])
    if command == 'i':
        # First insert creates the root; later inserts descend from it.
        if root is None:
            root = Node(key)
        else:
            root.add_child(Node(key))
    elif command == 'f':
        print(root.kth_lowest(key))
| [
"a.matejov@centrum.sk"
] | a.matejov@centrum.sk |
d9c22fad56843b09972e0829aef2e28f8042033c | 7da493e3f7d96ded64052e98f3631d8f1bf3f468 | /PyBank/main.py | fe95f1fa6efda1162c3ac8fd8e2c89085cc399d2 | [] | no_license | zyx88615/python-challenge | 796cec1af918dfaee85fcdedcbb918ae715cf0d8 | 144db4c7c646465e6aab22ea735de13ca3be2509 | refs/heads/master | 2020-04-27T10:49:11.416903 | 2019-03-12T07:52:27 | 2019-03-12T07:52:27 | 174,270,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,359 | py |
import os
# Module for reading CSV files
import csv
csvpath = os.path.join(os.path.dirname(__file__), 'Resources', 'budget_data.csv')
# Read the budget CSV and summarize totals and month-over-month changes.
with open(csvpath, newline='') as csvfile:
    # CSV reader specifies delimiter and variable that holds contents
    csvreader = csv.reader(csvfile, delimiter=',')
    # Read (and discard) the header row first
    csv_header = next(csvreader)
    # initialize arrays for month labels, raw values, month->change map,
    # and the month-over-month change in profit/loss
    month, value, to, changeval=[],[],[],[]
    count=0
    total = 0
    for row in csvreader: #loop over data rows
        if row[0] != "Jan-2010":
            month.append(row[0]) # skip the first month's label: changes start at month 2
        value.append(row[1]) # profit/loss for every month, including Jan-2010
        count+=1; # count of all data rows (includes Jan-2010)
    # NOTE(review): month has one fewer entry than value by design — the
    # change for month i is labeled with month[i-1]'s successor. Verify the
    # offset is intended before refactoring.
    intvalue= [int(x) for x in value] # convert profit/loss from string to int
    total = sum(intvalue) # total profit/loss over all months
    for x in range(len(value)-1):
        changeval.append(intvalue[x+1]-intvalue[x]) # change vs previous month
    avg = round(sum(changeval) / (len(value) - 1),2) # mean month-over-month change
    to=dict(zip(month,changeval)) # map month label -> change for that month
    # get min and max change (months with greatest increase/decrease)
    maxmonth=(max(to, key=to.get))
    maxval= max(to.values())
    minmonth = (min(to, key=to.get))
    minval = min(to.values())
    # print the summary to the terminal
    def print1():
        print("Financial Analysis \n-----------------------------\nTotal Month: {}\nTotal: ${}".format(count,total))
        print("Average Change: $"+str(avg))
        print("Greatest Increase in Profits: {} (${})".format(maxmonth, maxval))
        print("Greatest Decrease in Profits: {} (${})".format(minmonth, minval))
    print1()
    # save the same summary to a txt file next to this script
    output_file = os.path.join(os.path.dirname(__file__), "final_output_pybank.txt")
    with open(output_file, 'w') as the_file:
        the_file.write("Financial Analysis \n-----------------------------\nTotal Month: {}\nTotal: ${}".format(count,total))
        the_file.write("\nAverage Change: $"+str(avg))
        the_file.write("\nGreatest Increase in Profits: {} (${})".format(maxmonth, maxval)+"\nGreatest Decrease in Profits: {} (${})".format(minmonth, minval))
"zyx88615@gmail.com"
] | zyx88615@gmail.com |
71bdcb4ccd482d037956922b220b0c27958377c8 | 235b8512f0d210f434b1d5a7bc2ce3340016f010 | /mysite/settings.py | 8cfc2e5e03ed96eb1fee900932b29295ffbd9a55 | [] | no_license | Aerin-and-Mom/my-first-blog | 46c32d60400fe710bc5e901828fc1f7ce9a7e131 | 70d66532fef9094585fdfcc61fe49c0c971894d9 | refs/heads/master | 2016-09-01T15:07:26.304607 | 2016-02-27T21:48:04 | 2016-02-27T21:48:04 | 52,684,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,224 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from an environment variable before deploying.
SECRET_KEY = 'pks_xn&)spt+6_u=l=3xo^zgi_y9m5#!yr+3-5tt7asff0b2ue'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine while DEBUG=True; production needs real hostnames here.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog',
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"samantha.zirkin@gmail.com"
] | samantha.zirkin@gmail.com |
cc15539f09c655e2a85fd8d417d67c0477c45e87 | a323fc11db97690c4ea50d92766d9d5db0418aac | /article/migrations/0020_auto_20200719_1016.py | ae9b1b99f0de83746e98a9195dcf7750dc6193a6 | [] | no_license | sparshjaincs/articleplus | ad909f937ebf856b6da87bd623af0776f8faafc3 | 0fa34a5384d8cfc52181be42c130aadd03ad8ef2 | refs/heads/master | 2023-08-10T23:21:44.845993 | 2021-09-30T22:29:13 | 2021-09-30T22:29:13 | 279,252,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | # Generated by Django 2.2.6 on 2020-07-19 04:46
import datetime
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: move the mute/subscribe M2M relations from
    ``Articles`` to ``Profile`` and refresh two TimeField defaults."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('article', '0019_auto_20200719_0951'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='articles',
            name='mute',
        ),
        migrations.RemoveField(
            model_name='articles',
            name='subscribe',
        ),
        migrations.AddField(
            model_name='profile',
            name='mute',
            field=models.ManyToManyField(blank=True, default=None, related_name='mute_title', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='profile',
            name='subscribe',
            field=models.ManyToManyField(blank=True, default=None, related_name='subscribe_title', to=settings.AUTH_USER_MODEL),
        ),
        # NOTE(review): these defaults are fixed timestamps baked in when the
        # migration was generated (the model likely used a non-callable
        # default) — consider django.utils.timezone.now on the model. TODO confirm.
        migrations.AlterField(
            model_name='activity',
            name='activity_time',
            field=models.TimeField(default=datetime.datetime(2020, 7, 19, 10, 16, 18, 77112)),
        ),
        migrations.AlterField(
            model_name='articles',
            name='time',
            field=models.TimeField(default=datetime.datetime(2020, 7, 19, 10, 16, 18, 61070)),
        ),
    ]
| [
"sparshjaincs@gmail.com"
] | sparshjaincs@gmail.com |
22e7a4595b3f330fb0935bd689d8379a884f376b | cfaa945d4ba5cdc58f89533b1ea2a8713a2377b9 | /logroad/CostSurface.py | e7238a3d5f083404d25373752d1979c9a0f22aa8 | [] | no_license | ustroetz/log-road | 60afaefea369266bad6bed876a2eb079af87d2a5 | 0741ff9e8b580fe829c1b407c15c77daae03dda2 | refs/heads/master | 2016-09-11T02:49:17.055727 | 2014-01-14T22:05:27 | 2014-01-14T22:05:27 | 14,438,848 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,543 | py | import gdal, ogr, osr, os
import numpy as np
def raster2array(rasterfn):
    """Read band 1 of the raster at *rasterfn* into a 2-D array."""
    dataset = gdal.Open(rasterfn)
    return dataset.GetRasterBand(1).ReadAsArray()
def shp2raster(inputfn, baseRasterfn):
    """Rasterize the vector file *inputfn* onto the grid of *baseRasterfn*.

    Writes 'rasterized.tif' in the working directory: GDT_Byte, burn value 1,
    NoData 0, projection set to EPSG:32610.
    """
    outputfn = 'rasterized.tif'
    vector_ds = ogr.Open(inputfn)
    vector_layer = vector_ds.GetLayer()
    base = gdal.Open(baseRasterfn)
    # GetGeoTransform() -> (originX, pixelW, rot, originY, rot, pixelH)
    origin_x, pixel_w, _, origin_y, _, pixel_h = base.GetGeoTransform()
    cols = base.RasterXSize
    rows = base.RasterYSize
    target_ds = gdal.GetDriverByName('GTiff').Create(outputfn, cols, rows, 1, gdal.GDT_Byte)
    target_ds.SetGeoTransform((origin_x, pixel_w, 0, origin_y, 0, pixel_h))
    band = target_ds.GetRasterBand(1)
    band.SetNoDataValue(0)
    band.FlushCache()
    gdal.RasterizeLayer(target_ds, [1], vector_layer, burn_values=[1])
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(32610)
    target_ds.SetProjection(srs.ExportToWkt())
def array2raster(rasterfn, costSurfaceArray):
    """Write *costSurfaceArray* to 'testdata/CostSurface.tif' (GDT_Float32),
    copying grid geometry and projection from the raster at *rasterfn*."""
    newRasterfn = 'testdata/CostSurface.tif'
    template = gdal.Open(rasterfn)
    # GetGeoTransform() -> (originX, pixelW, rot, originY, rot, pixelH)
    origin_x, pixel_w, _, origin_y, _, pixel_h = template.GetGeoTransform()
    cols = template.RasterXSize
    rows = template.RasterYSize
    out_ds = gdal.GetDriverByName('GTiff').Create(newRasterfn, cols, rows, 1, gdal.GDT_Float32)
    out_ds.SetGeoTransform((origin_x, pixel_w, 0, origin_y, 0, pixel_h))
    out_band = out_ds.GetRasterBand(1)
    out_band.WriteArray(costSurfaceArray)
    srs = osr.SpatialReference()
    srs.ImportFromWkt(template.GetProjectionRef())
    out_ds.SetProjection(srs.ExportToWkt())
    out_band.FlushCache()
def main(riverfn, slopefn):
    """Build CostSurface.tif from the slope raster, raising cost on steep
    slopes and on cells covered by the rasterized river layer."""
    shp2raster(riverfn, slopefn)
    river_mask = raster2array('rasterized.tif')
    os.remove('rasterized.tif')  # the temp rasterization is no longer needed
    cost = raster2array(slopefn)
    cost[cost > 50] **= 2        # square the cost where slope exceeds 50
    cost[river_mask == 1] **= 5  # heavily penalize river cells
    # change value of 0 to 1 otherwise they mix up with 0 values of roads
    cost[cost < 0.001] = 1
    array2raster(slopefn, cost)
if __name__ == "__main__":
    # Demo inputs for running this module directly as a script.
    main(riverfn='rivers.shp', slopefn='Slope.tif')
"ustroetz@gmail.com"
] | ustroetz@gmail.com |
2e0fad46c16958cbd3582723916a0ac1dda5a23e | 0bd14d7590db43af015433edc95c101b325f2b45 | /simple_sso/sso_server/admin.py | e21b92dc68b986e617291c935f1c31c63bf08af1 | [
"BSD-3-Clause"
] | permissive | chrisglass/django-simple-sso | 21f390535c012af4bba9a1b78a23b298592611df | b63d37ac64450ff5a506e6b1c2e34e42109b8cd8 | refs/heads/master | 2020-12-25T03:22:00.352693 | 2011-08-10T15:40:09 | 2011-08-10T15:40:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | # -*- coding: utf-8 -*-
from django.contrib import admin
from simple_sso.sso_server.models import Client
# Register the SSO Client model with the default ModelAdmin so it can be
# managed from the Django admin site.
admin.site.register(Client)
| [
"jonas.obrist@divio.ch"
] | jonas.obrist@divio.ch |
b691a775217c0d8fb8af26e7711d140e68d08694 | ef8c6b71648907407d771288eb63ef4582e0c7d9 | /main/main/settings.py | 65cb25e751ed53584d75d9edd4bcb5a5961d6fb7 | [] | no_license | zaapen/unknown | 21f5bf2c485149b77b30708105e20cc67711e590 | 552956edbafd67d9a7fd7847dac1fe8cada68137 | refs/heads/master | 2021-08-23T03:32:25.156065 | 2017-12-02T23:13:31 | 2017-12-02T23:13:31 | 112,884,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,111 | py | """
Django settings for main project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from an environment variable before deploying.
SECRET_KEY = 'ngmxm*jt2nnym68^+ywhk)qfk=mjmax3k+u!i9fc=pdo7ix*0y'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine while DEBUG=True; production needs real hostnames here.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'apps.travelbuddy'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'main.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'main.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"dannyhua@Dannys-MBP.socal.rr.com"
] | dannyhua@Dannys-MBP.socal.rr.com |
f52a21600fcadee5be4dfbc92203f8087fb6df5c | 157a0ee44720ea65f3d3ef38c9e500307588a171 | /lesson 2.1.1.py | 016ab72819396eed55e52bcba249cb39be4626fc | [] | no_license | Tobkirill/stepik_selenium | cb6550c30e99715b30be4d57e559763f3bfb1677 | b155fd80397d0d7a18cfdb782c2404f594fbee4a | refs/heads/main | 2023-01-29T13:13:23.517384 | 2020-12-12T16:59:57 | 2020-12-12T16:59:57 | 320,875,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 908 | py | from selenium import webdriver
import time
import math
def calc(x):
    """Return log(|12 * sin(int(x))|) formatted with str()."""
    result = math.log(abs(12 * math.sin(int(x))))
    return str(result)
page_url = "http://suninjuly.github.io/math.html"
try:
    browser = webdriver.Chrome()
    browser.get(page_url)
    # Read x from the page, compute the answer with calc() and fill the form.
    x = browser.find_element_by_id("input_value").text
    browser.find_element_by_id("answer").send_keys(calc(x))
    browser.find_element_by_css_selector("[for='robotCheckbox']").click()
    browser.find_element_by_css_selector("[for='robotsRule']").click()
    browser.find_element_by_css_selector("[type='submit']").click()
finally:
    # Keep the result page open for 30 seconds (time to copy the code),
    # then close the browser regardless of what happened above.
    time.sleep(30)
    browser.quit()
"tobkirill@gmail,.com"
] | tobkirill@gmail,.com |
2445f62695bc503243d90b47fd380b81e2c25e92 | 3528abad46b15133b2108c237f926a1ab252cbd5 | /Core/ableton/v2/control_surface/elements/optional.py | 86a72e77d342aa7dee2a59777b27af577769514a | [] | no_license | scottmudge/MPK261_Ableton | 20f08234f4eab5ba44fde6e5e745752deb968df2 | c2e316b8347367bd157276f143b9f1a9bc2fe92c | refs/heads/master | 2020-03-20T10:56:32.421561 | 2018-06-14T19:12:47 | 2018-06-14T19:12:47 | 137,389,086 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,241 | py | # Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/ableton/v2/control_surface/elements/optional.py
# Compiled at: 2018-04-23 20:27:04
from __future__ import absolute_import, print_function, unicode_literals
from ...base import listens
from .combo import ToggleElement
class ChoosingElement(ToggleElement):
    u"""
    An Element wrapper that enables one of the nested elements based on
    the value of the given flag.
    """
    def __init__(self, flag=None, *a, **k):
        super(ChoosingElement, self).__init__(*a, **k)
        # Subscribe the @listens slot to the flag's 'value' event, then seed
        # the toggle state from the flag's current value.
        # NOTE(review): the flag=None default would crash here (flag.value);
        # callers are evidently required to pass a flag — confirm.
        self.__on_flag_changed.subject = flag
        self.__on_flag_changed(flag.value)
    @listens('value')
    def __on_flag_changed(self, value):
        # Truthy flag selects the "on" element, falsy selects the "off" one.
        self.set_toggled(value)
class OptionalElement(ChoosingElement):
    u"""
    An Element wrapper that enables the nested element only when the given
    flag is set to the given value.
    """
    def __init__(self, control=None, flag=None, value=None, *a, **k):
        # A truthy *value* means the control is active while the flag is on;
        # otherwise it is active while the flag is off.
        if value:
            on_control, off_control = control, None
        else:
            on_control, off_control = None, control
        super(OptionalElement, self).__init__(on_control=on_control, off_control=off_control, flag=flag, *a, **k)
        return
| [
"mail@scottmudge.com"
] | mail@scottmudge.com |
e1e7e5926e41eed4f9c7838703feb1f72bc2a637 | 8d78d4e63223d7ab810cbe71ac1bd3590099f1b9 | /game_vars/map_vars.py | 0c6f4afd128e3b9ce291f26c197c6fc1da62dab3 | [] | no_license | KirylShakh/Roguelike-ROP | d780901cdd4dba9649835a75284d7f05f8ed3fef | 01374b33599cf12e6d8f50f6a8892c4ae0450d47 | refs/heads/master | 2023-01-10T11:18:38.088381 | 2020-08-25T14:11:57 | 2020-08-25T14:11:57 | 312,552,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | world_width = 10
# Height of the world map grid (its width is defined just above) — units are
# presumably world-map cells; TODO confirm against the map generator.
world_height = 10
# Dimensions of the current map/view — 80x43 looks like a console viewport;
# TODO confirm units against the renderer.
width = 80
height = 43
| [
"wm167@yandex.ru"
] | wm167@yandex.ru |
d7c7808b30cf10716b5bf15cc2e0b385d8a79d93 | b9d93fcdef3e0e09d7bbc41284484c997b3e19ec | /main/data_loader.py | 760d6a8872d8d9af9a32b54fb724b05d1bc9d780 | [] | no_license | Mohammad-Af/BannerService | 79e40e7e2724aeded34968155b06f168ea493516 | f5fbfeae6c062b7c8f5cfddf230e554f2ba211a9 | refs/heads/main | 2023-05-30T07:12:21.979602 | 2021-06-14T06:40:31 | 2021-06-14T06:40:31 | 375,035,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,229 | py | import csv
import os
import django
import logging
from django.db import IntegrityError, transaction
from BannerService.settings import BASE_DIR
def load(csv_dir='csv'):
    """Load click/impression/conversion CSVs for quarters 1-4 into the DB.

    Each row becomes one model instance; duplicate rows (IntegrityError)
    are logged as warnings and skipped, one atomic transaction per row.
    """
    logger = logging.getLogger("data_loader")
    # Django settings must be configured before any model import.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "BannerService.settings")
    django.setup()
    from main.models import Click, Impression, Conversion
    templates = {
        Click: os.path.join(BASE_DIR, csv_dir, '{}/clicks_{}.csv'),
        Impression: os.path.join(BASE_DIR, csv_dir, '{}/impressions_{}.csv'),
        Conversion: os.path.join(BASE_DIR, csv_dir, '{}/conversions_{}.csv'),
    }
    for quarter in (1, 2, 3, 4):
        for model, template in templates.items():
            with open(template.format(quarter, quarter), 'r') as handle:
                for record in csv.DictReader(handle):
                    try:
                        # Per-row transaction so one duplicate doesn't abort the rest.
                        with transaction.atomic():
                            model.objects.create(quarter=quarter, **record)
                    except IntegrityError:
                        logger.warning(f"Duplicate record found: quarter={quarter}, {record}")


if __name__ == '__main__':
    load()
| [
"mohammad@mohammads-MacBook-Pro.local"
] | mohammad@mohammads-MacBook-Pro.local |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.