blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
002efcfadbaaadc2cad01fb292a4a4ad65565a90 | ca3a1b6386e44f8222b0f9d93dcb382027d26018 | /choix/ep.py | a457f2efc0712505ba7e9b6db3a815a10f126aac | [
"MIT"
] | permissive | pkrouth/choix | 80ef6fceaffbbc618fb6496217f4e077b3d8e6d4 | 05a57a10bb707338113a9d91601ca528ead7a881 | refs/heads/master | 2020-04-27T15:34:29.404796 | 2018-08-08T13:17:01 | 2018-08-08T13:17:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,314 | py | import functools
import numpy as np
import numpy.random as nprand
from numpy.linalg import norm
from math import exp, log, pi, sqrt # Faster than numpy equivalents.
from scipy.misc import logsumexp
from .utils import normal_cdf, inv_posdef, SQRT2, SQRT2PI
# EP-related settings.
# Convergence threshold on the inf-norm change of the natural parameters.
THRESHOLD = 1e-4
# Rank-one template for a pairwise comparison's precision contribution.
MAT_ONE = np.array([[1.0, -1.0], [-1.0, 1.0]])
MAT_ONE_FLAT = MAT_ONE.ravel()
# Some magic constants for a stable computation of _log_phi(z).
# CS: polynomial coefficients (consumed in Horner order) for the small-|z|
# branch of _log_phi.
CS = [
    0.00048204, -0.00142906, 0.0013200243174, 0.0009461589032, -0.0045563339802,
    0.00556964649138, 0.00125993961762116, -0.01621575378835404,
    0.02629651521057465, -0.001829764677455021, 2*(1-pi/3), (4-pi)/3, 1, 1,]
# RS / QS: numerator / denominator coefficients of the rational approximation
# used in the deep-lower-tail branch of _log_phi.
RS = [
    1.2753666447299659525, 5.019049726784267463450, 6.1602098531096305441,
    7.409740605964741794425, 2.9788656263939928886,]
QS = [
    2.260528520767326969592, 9.3960340162350541504, 12.048951927855129036034,
    17.081440747466004316, 9.608965327192787870698, 3.3690752069827527677,]
def ep_pairwise(n_items, data, alpha, model="logit", max_iter=100,
        initial_state=None):
    """Compute a distribution of model parameters using the EP algorithm.

    Approximates the Bayesian posterior over model parameters given
    pairwise-comparison data (see :ref:`data-pairwise`), using expectation
    propagation as presented, e.g., in [CG05]_.

    The prior is an isotropic Gaussian with variance ``1 / alpha``; the
    posterior approximation is a full multivariate Gaussian described by a
    mean vector and a covariance matrix.

    Two observation models are supported: ``logit`` (default) assumes
    Bradley-Terry outcomes, while ``probit`` assumes Thurstone outcomes.

    Parameters
    ----------
    n_items : int
        Number of distinct items.
    data : list of lists
        Pairwise-comparison data.
    alpha : float
        Inverse variance of the (isotropic) prior.
    model : str, optional
        Observation model, either "logit" or "probit".
    max_iter : int, optional
        Maximum number of iterations allowed.
    initial_state : tuple of array_like, optional
        Natural parameters used to initialize the EP algorithm.

    Returns
    -------
    mean : numpy.ndarray
        Mean vector of the approximate Gaussian posterior.
    cov : numpy.ndarray
        Covariance matrix of the approximate Gaussian posterior.

    Raises
    ------
    ValueError
        If the observation model is not "logit" or "probit".
    """
    # Dispatch table instead of an if/elif chain.
    moment_matchers = {
        "logit": _match_moments_logit,
        "probit": _match_moments_probit,
    }
    if model not in moment_matchers:
        raise ValueError("unknown model '{}'".format(model))
    match_moments = moment_matchers[model]
    return _ep_pairwise(
            n_items, data, alpha, match_moments, max_iter, initial_state)
def _ep_pairwise(
        n_items, comparisons, alpha, match_moments, max_iter, initial_state):
    """Compute a distribution of model parameters using the EP algorithm.

    Sweeps over all comparisons in random order, refining one approximate
    factor per comparison, until the natural parameters stop moving.

    Raises
    ------
    RuntimeError
        If the algorithm does not converge after ``max_iter`` iterations.
    """
    # Static variable that allows to check the # of iterations after the call.
    _ep_pairwise.iterations = 0
    m = len(comparisons)
    prior_inv = alpha * np.eye(n_items)
    if initial_state is None:
        # Initially, mean and covariance come from the prior.
        mean = np.zeros(n_items)
        cov = (1 / alpha) * np.eye(n_items)
        # Initialize the natural params in the function space.
        tau = np.zeros(m)
        nu = np.zeros(m)
        # Initialize the natural params in the space of thetas.
        prec = np.zeros((n_items, n_items))
        xs = np.zeros(n_items)
    else:
        tau, nu = initial_state
        mean, cov, xs, prec = _init_ws(
                n_items, comparisons, prior_inv, tau, nu)
    for _ in range(max_iter):
        _ep_pairwise.iterations += 1
        # Keep a copy of the old parameters for convergence testing.
        tau_old = np.array(tau, copy=True)
        nu_old = np.array(nu, copy=True)
        # Visit the sites in a fresh random order on every sweep.
        for i in nprand.permutation(m):
            a, b = comparisons[i]
            # Update mean and variance in function space.
            f_var = cov[a,a] + cov[b,b] - 2 * cov[a,b]
            f_mean = mean[a] - mean[b]
            # Cavity distribution: remove site i's factor from the marginal.
            tau_tot = 1.0 / f_var
            nu_tot = tau_tot * f_mean
            tau_cav = tau_tot - tau[i]
            nu_cav = nu_tot - nu[i]
            cov_cav = 1.0 / tau_cav
            mean_cav = cov_cav * nu_cav
            # Moment matching.
            logpart, dlogpart, d2logpart = match_moments(mean_cav, cov_cav)
            # Update factor params in the function space.
            tau[i] = -d2logpart / (1 + d2logpart / tau_cav)
            delta_tau = tau[i] - tau_old[i]
            nu[i] = ((dlogpart - (nu_cav / tau_cav) * d2logpart)
                    / (1 + d2logpart / tau_cav))
            delta_nu = nu[i] - nu_old[i]
            # Update factor params in the weight space.
            prec[(a, a, b, b), (a, b, a, b)] += delta_tau * MAT_ONE_FLAT
            xs[a] += delta_nu
            xs[b] -= delta_nu
            # Update mean and covariance (rank-one update, only needed when
            # the site's precision actually changed).
            if abs(delta_tau) > 0:
                phi = -1.0 / ((1.0 / delta_tau) + f_var) * MAT_ONE
                upd_mat = cov.take([a, b], axis=0)
                cov = cov + upd_mat.T.dot(phi).dot(upd_mat)
                mean = cov.dot(xs)
        # Recompute the global parameters for stability.
        cov = inv_posdef(prior_inv + prec)
        mean = cov.dot(xs)
        if _converged((tau, nu), (tau_old, nu_old)):
            return mean, cov
    raise RuntimeError(
        "EP did not converge after {} iterations".format(max_iter))
def _log_phi(z):
    """Numerically stable log of the standard normal CDF, and its derivative."""
    # Adapted from the GPML function `logphi.m`.
    if z * z < 0.0492:
        # Branch 1: z close to zero -- polynomial expansion (Horner scheme).
        coef = -z / SQRT2PI
        poly = 0
        for c in CS:
            poly = coef * (c + poly)
        res = -2 * poly - log(2)
        dres = exp(-(z * z) / 2 - res) / SQRT2PI
    elif z < -11.3137:
        # Branch 2: deep lower tail -- rational approximation.
        num = 0.5641895835477550741
        for r in RS:
            num = -z * num / SQRT2 + r
        den = 1.0
        for q in QS:
            den = -z * den / SQRT2 + q
        res = log(num / (2 * den)) - (z * z) / 2
        dres = abs(den / num) * sqrt(2.0 / pi)
    else:
        # Branch 3: direct evaluation is safe here.
        res = log(normal_cdf(z))
        dres = exp(-(z * z) / 2 - res) / SQRT2PI
    return res, dres
def _match_moments_logit(mean_cav, cov_cav):
    """Moment matching for the logit (Bradley-Terry) observation model.

    Returns the log-partition function of the tilted distribution and its
    first two derivatives w.r.t. the cavity mean.
    """
    # Adapted from the GPML function `likLogistic.m`.
    # First use a scale mixture.
    lambdas = sqrt(2) * np.array([0.44, 0.41, 0.40, 0.39, 0.36]);
    cs = np.array([
        1.146480988574439e+02,
        -1.508871030070582e+03,
        2.676085036831241e+03,
        -1.356294962039222e+03,
        7.543285642111850e+01
    ])
    arr1, arr2, arr3 = np.zeros(5), np.zeros(5), np.zeros(5)
    # Evaluate each probit component of the mixture at its own scale.
    for i, x in enumerate(lambdas):
        arr1[i], arr2[i], arr3[i] = _match_moments_probit(
            x * mean_cav, x * x * cov_cav)
    # NOTE(review): `logsumexp` is imported from scipy.misc at the top of the
    # module; it was removed in SciPy >= 1.3 and now lives in scipy.special.
    logpart1 = logsumexp(arr1, b=cs)
    dlogpart1 = (np.dot(np.exp(arr1) * arr2, cs * lambdas)
            / np.dot(np.exp(arr1), cs))
    d2logpart1 = (np.dot(np.exp(arr1) * (arr2 * arr2 + arr3),
            cs * lambdas * lambdas)
            / np.dot(np.exp(arr1), cs)) - (dlogpart1 * dlogpart1)
    # Tail decays linearly in the log domain (and not quadratically).
    exponent = -10.0 * (abs(mean_cav) - (196.0 / 200.0) * cov_cav - 4.0)
    if exponent < 500:
        # Blend the mixture approximation with the linear-tail correction.
        lambd = 1.0 / (1.0 + exp(exponent))
        logpart2 = min(cov_cav / 2.0 - abs(mean_cav), -0.1)
        dlogpart2 = 1.0
        if mean_cav > 0:
            logpart2 = log(1 - exp(logpart2))
            dlogpart2 = 0.0
        d2logpart2 = 0.0
    else:
        lambd, logpart2, dlogpart2, d2logpart2 = 0.0, 0.0, 0.0, 0.0
    logpart = (1 - lambd) * logpart1 + lambd * logpart2
    dlogpart = (1 - lambd) * dlogpart1 + lambd * dlogpart2
    d2logpart = (1 - lambd) * d2logpart1 + lambd * d2logpart2
    return logpart, dlogpart, d2logpart
def _match_moments_probit(mean_cav, cov_cav):
    """Moment matching for the probit (Thurstone) observation model."""
    # Adapted from the GPML function `likErf.m`.
    scale = sqrt(1 + cov_cav)
    z = mean_cav / scale
    logpart, dphi = _log_phi(z)
    dlogpart = dphi / scale  # 1st derivative w.r.t. mean.
    d2logpart = -dphi * (z + dphi) / (1 + cov_cav)
    return logpart, dlogpart, d2logpart
def _init_ws(n_items, comparisons, prior_inv, tau, nu):
    """Build weight-space natural parameters from function-space ones."""
    prec = np.zeros((n_items, n_items))
    xs = np.zeros(n_items)
    for idx, (winner, loser) in enumerate(comparisons):
        # Rank-one precision contribution of comparison `idx`.
        rows = (winner, winner, loser, loser)
        cols = (winner, loser, winner, loser)
        prec[rows, cols] += tau[idx] * MAT_ONE_FLAT
        xs[winner] += nu[idx]
        xs[loser] -= nu[idx]
    cov = inv_posdef(prior_inv + prec)
    mean = cov.dot(xs)
    return mean, cov, xs, prec
def _converged(new, old, threshold=THRESHOLD):
    """True iff every parameter moved by at most `threshold` in inf-norm."""
    return all(
        norm(p_new - p_old, ord=np.inf) <= threshold
        for p_new, p_old in zip(new, old))
| [
"lucas@maystre.ch"
] | lucas@maystre.ch |
126873c0fcc19baf57489e7490480bbd0db04f65 | 89479ecbe1d983931f70a889b5dfb2a5754d7cc6 | /prob1346/check_if_n_and_its_double_exist.py | ab28700e26eeb23198ea28265ec603f7d4df8de7 | [] | no_license | sharath28/leetcode | 29e1f08841df3332e40fec6c99a253e72c8c5957 | 2f53c4e16d244c83aad9b4d67a249f669b9da92a | refs/heads/master | 2021-06-16T04:26:00.061353 | 2021-03-21T01:05:48 | 2021-03-21T01:05:48 | 176,925,136 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | class Solution(object):
def checkIfExist(self, arr):
"""
:type arr: List[int]
:rtype: bool
"""
temp_dict = {}
for i in range(len(arr)):
temp_dict[arr[i]] = i
for i in range(len(arr)):
if 2*arr[i] in temp_dict:
if temp_dict[2*arr[i]] != i:
return True
return False
| [
"sharath0628@gmail.com"
] | sharath0628@gmail.com |
02921030cae9656b9b594b06b3e10f6019ea509c | 95d23a68a2b4179a8bfa3ef8b5989ff5c122c4df | /nastyGalCrawler/nastyGalCrawler/middlewares.py | 41ded5094100691fd3ee5313d8b1a12c4caac9c2 | [] | no_license | giovannamascarenhas/django-scrapy | 87d06180d2350bfb2686f3fa0161149264b83cfd | dc3854d67d4da0996fc7f446d7ce96c445685297 | refs/heads/master | 2022-12-05T23:20:44.825713 | 2020-08-19T15:39:05 | 2020-08-19T15:39:05 | 288,480,011 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,666 | py | # Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
class NastygalcrawlerSpiderMiddleware:
    """Spider middleware for the nastyGal crawler.

    Scrapy only calls the hooks that are defined; anything omitted is
    treated as a pass-through, so every hook below is a no-op default.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Create the middleware and subscribe to the spider_opened signal."""
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        """Called for each response entering the spider; None lets it through."""
        return None

    def process_spider_output(self, response, result, spider):
        """Pass every item/request produced by the spider along unchanged."""
        yield from result

    def process_spider_exception(self, response, exception, spider):
        """Exceptions raised by the spider are not handled here."""
        pass

    def process_start_requests(self, start_requests, spider):
        """Forward the spider's start requests untouched (no response yet)."""
        yield from start_requests

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class NastygalcrawlerDownloaderMiddleware:
    """Downloader middleware for the nastyGal crawler.

    Scrapy only invokes the hooks that are defined; the defaults below
    leave every request and response untouched.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Create the middleware and subscribe to the spider_opened signal."""
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        """Returning None lets the request continue down the chain."""
        return None

    def process_response(self, request, response, spider):
        """Responses pass through unchanged."""
        return response

    def process_exception(self, request, exception, spider):
        """Download exceptions are not handled here (implicit None)."""
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| [
"giovanna.mascarenhas.1@gmail.com"
] | giovanna.mascarenhas.1@gmail.com |
82e5f74cb9e4d564e4c9db40175c77111f664934 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5630113748090880_1/Python/Hichamdz38/b.py | ec503006a405070048db8b02218200889a3eaef9 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | import numpy as np
for i in xrange(1,input()+1):
N=input()
z=np.array(N**2*2-N)
SS=[]
for j in xrange(N*2-1):
S=map(int,raw_input().split())
SS.extend(S)
f=[]
for j in SS:
if SS.count(j)%2!=0:
if j not in f:
f.append(j)
f.sort()
print "Case #{}:".format(i),
for j in f:
print j,
print | [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
1b8f3b0e355280d5ba0828cdf4f84dbcc5059fa7 | 79a1608d18fbda833842f25b49fa53f039c1c3ba | /Formal Interfaces/abcRegister.py | 32d7803afa2cb94d99a32a7ddcd3c93ef0090702 | [] | no_license | andrey136/Python-OOP-Interfaces | 22b08b8b11d467b907242351fe22bb650912ffdf | 51f1fcecc4a7472644d8e400fadaf189452af603 | refs/heads/master | 2023-06-20T10:26:19.721415 | 2021-07-20T21:11:53 | 2021-07-20T21:11:53 | 387,912,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | import abc
def main():
    """Demonstrate `abc` virtual-subclass registration with three probes."""
    probes = [
        ('issubclass(float, Double)', lambda: issubclass(float, Double)),
        ('isinstance(1.53453, Double)', lambda: isinstance(1.53453, Double)),
        ('issubclass(Double64, Double)', lambda: issubclass(Double64, Double)),
    ]
    for label, probe in probes:
        print('>>> ' + label)
        print(probe())
        print('')
class Double(metaclass=abc.ABCMeta):
    # Empty ABC used purely as a "virtual interface" marker.
    ...
# Register float as a virtual subclass: issubclass(float, Double) becomes True.
Double.register(float)
# register() returns its argument, so it also works as a class decorator.
@Double.register
class Double64:
    ...
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    main()
| [
"andrey.mardash@gmail.com"
] | andrey.mardash@gmail.com |
8f94e5779efa70a5d2aedfc42da9575fba54cdff | 3f132d187ba9065ea7b45cf4c732fb8179497a90 | /member/migrations/0006_remove_member_access_token.py | 25f7dfadc13ac516ed3978bd9110b388b01987ba | [] | no_license | aibop/tianmimi | 537763840ba97413246d1e4a241653d2ca885e02 | abe100b6ab174f7c88dfcd47766d22128a3da3b5 | refs/heads/master | 2020-06-14T14:54:54.851106 | 2016-12-02T10:28:44 | 2016-12-02T10:28:44 | 75,170,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-02 06:20
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django; removes the `access_token` field from Member.
    # Keep the literal structure intact -- Django's migration state depends
    # on it.
    dependencies = [
        ('member', '0005_auto_20161202_1110'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='member',
            name='access_token',
        ),
    ]
| [
"testgit@testgit.cn"
] | testgit@testgit.cn |
b8bfbea2644ffc3c047a9a81d7b34414257703be | 6d29d9435d2c86848200ebc495ef78560e275eaf | /First Missing Positive.py | 65b02766e15164ab8a1d01296ae6ae5ee0ae5f95 | [] | no_license | xukaiyuan/leetcode-medium | d7a54929a329987e00a1b809808eb4d5d60a419b | 48196dedf60076bbc3769e067f1ecbaa36ca0b5f | refs/heads/master | 2021-07-21T07:27:43.512774 | 2018-11-12T04:45:58 | 2018-11-12T04:45:58 | 138,909,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | class Solution:
def firstMissingPositive(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
n = len(nums)
for i in range(n):
while(nums[i] > 0 and nums[i] < n and nums[i] != nums[nums[i] - 1]):
tmp = nums[nums[i] - 1]
nums[nums[i] - 1] = nums[i]
nums[i] = tmp
for i in range(n):
if(nums[i] != i + 1):
return i + 1
return n + 1 | [
"kyxu95@gmail.com"
] | kyxu95@gmail.com |
d06656ea7e56513fb0c358a10ab4b943f4065459 | 60ffab7cebf811a2e95b1f1245a52bc2baebae1d | /news/migrations/0001_initial.py | e8a1ce2c57384480b2e2171663ebd86433d392c5 | [] | no_license | mstarck8/FSV | 557e1fb24eee8ea681c467c584eb6d59cce953ef | a48ea5115cf1d440611b031e07459c6329d631f3 | refs/heads/master | 2020-05-13T16:08:31.045454 | 2019-04-16T20:39:49 | 2019-04-16T20:39:49 | 169,286,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | # Generated by Django 2.1.5 on 2019-02-03 11:19
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ArtikelPost',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('überschrift', models.TextField(max_length=50)),
('inhalt', models.TextField(max_length=10000)),
('created_at', models.DateTimeField(default=django.utils.timezone.now, max_length=10)),
],
),
]
| [
"noreply@github.com"
] | mstarck8.noreply@github.com |
f5d568c111f11d16f41c88a8e54432ea19eaf893 | 89c85fdb2e66fe80d8d06fe1538292b48f0dc559 | /blog/migrations/0005_auto_20200215_0811.py | 224f42cfa7362e39127b7fdaad4cd09fcd57e606 | [] | no_license | ayush-srivastava99/hack36-1 | 256d49dd98de8f63a007f633faea6e9476c29bd7 | a0e1d9189154d6345244a2393e7768e5651b50de | refs/heads/master | 2021-07-10T20:51:12.431636 | 2020-12-27T11:22:46 | 2020-12-27T11:22:46 | 240,576,508 | 0 | 0 | null | 2020-02-14T18:44:28 | 2020-02-14T18:44:27 | null | UTF-8 | Python | false | false | 407 | py | # Generated by Django 3.0.3 on 2020-02-15 08:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0004_post_image'),
]
operations = [
migrations.AlterField(
model_name='post',
name='image',
field=models.ImageField(default='images/rose2.jpg', upload_to='images/'),
),
]
| [
"harshit02gangwar@gmail.com"
] | harshit02gangwar@gmail.com |
60f5c9f2ba7d058e620dcae958fd5a90609389ec | de8b2b0de2ba522493e2c86fa055df8c7c40aa69 | /sorting-algorithms/s02_insertion_sort.py | e8715c5307aace5f4742f9a219ff3c4932a8c39d | [
"Unlicense"
] | permissive | bayramcicek/mini-programs | 56edbd2013704813d6730ecaf684baf9042d21ab | 3f876e3274b7beeb5e7413ac9c5275813d9f0d2d | refs/heads/master | 2021-07-03T03:57:02.874127 | 2020-09-27T11:09:07 | 2020-09-27T11:09:07 | 138,440,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | #!/usr/bin/python3.6
# created by cicek on 21.03.2019 18:09
print("Enter list: ")
# unsortedList = input().split() # string
unsortedList = [int(num) for num in input().split()] # integer
"""90 42 45 2 18 47 6 99 23 11"""
print("unsorted list:\n" + str(unsortedList) + "\n")
# if len(unsortedList) == 1:
# print(unsortedList)
def sortMeInsertion(unsortedList):
size = len(unsortedList)
for index in range(1, size):
while (index > 0 and unsortedList[index - 1] > unsortedList[index]):
unsortedList[index - 1], unsortedList[index] = unsortedList[index], unsortedList[index - 1]
index -= 1
print(unsortedList)
return unsortedList
print("\nsorted list:\n" + str(sortMeInsertion(unsortedList)))
"""
Enter list:
90 42 45 2 18 47 6 99 23 11
unsorted list:
[90, 42, 45, 2, 18, 47, 6, 99, 23, 11]
[42, 90, 45, 2, 18, 47, 6, 99, 23, 11]
[42, 45, 90, 2, 18, 47, 6, 99, 23, 11]
[2, 42, 45, 90, 18, 47, 6, 99, 23, 11]
[2, 18, 42, 45, 90, 47, 6, 99, 23, 11]
[2, 18, 42, 45, 47, 90, 6, 99, 23, 11]
[2, 6, 18, 42, 45, 47, 90, 99, 23, 11]
[2, 6, 18, 42, 45, 47, 90, 99, 23, 11]
[2, 6, 18, 23, 42, 45, 47, 90, 99, 11]
[2, 6, 11, 18, 23, 42, 45, 47, 90, 99]
sorted list:
[2, 6, 11, 18, 23, 42, 45, 47, 90, 99]
Process finished with exit code 0
"""
| [
"mail@bayramcicek.com.tr"
] | mail@bayramcicek.com.tr |
3250fe07dd9649b7dacf63edf8cc4d2172f633fb | ec367cdb13545b4ae8469a9373b475c3bd4fe6cc | /blog/views.py | c20f3b58b034285204c1bf320cf1cf8931b112b0 | [] | no_license | MarcMQC/firstDjango | 0d0ef42133caa4f5986f109946e36e7c25ec2466 | 3b252bd7b515adfca57d558c5bbd24c5f7efaa4e | refs/heads/master | 2020-03-14T17:09:12.803517 | 2018-05-01T13:27:27 | 2018-05-01T13:27:27 | 131,713,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def hello(request):
    """Return a minimal static HTML greeting (smoke-test view)."""
    body = '<html>hello world</html>'
    return HttpResponse(body)
| [
"marceemeng@outlook.com"
] | marceemeng@outlook.com |
bbc173a76f3c9f09e624e6a49758ef186cad7ec3 | bc16dd3fbc9db84812c753da7df4af4f6529bfec | /guided-backprop.py | ce6e4cb3dff777f155536138d031b1f15bfbb70c | [] | no_license | Hrehory/pytorch-grad-cam | b3cb1a030deb9d4c6904ebf7d60302d0e5072b14 | cd843b5bbd0a5b81c0c59fe530b90d030a7705f6 | refs/heads/master | 2021-01-21T14:27:50.582294 | 2017-07-09T09:49:24 | 2017-07-09T09:49:24 | 95,282,167 | 0 | 0 | null | 2017-06-24T07:16:14 | 2017-06-24T07:16:14 | null | UTF-8 | Python | false | false | 1,406 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# project modules
from io_utils import preprocess_image
# torch modules
import torch
import torchvision
from torch.autograd import Variable
import torch.nn as nn
from torchvision.models import VGG
# science modules
import cv2
import numpy as np
import matplotlib.pyplot as plt
# misc
import sys
'''
def backward(gradient=None, retain_graph=None, create_graph=None, retain_variables=None):
print (gradient)
torch.autograd.backward(self, gradient, retain_graph, create_graph, retain_variables)
'''
# ImageNet-pretrained VGG-19; the hook registered below runs on its backward pass.
model = torchvision.models.vgg19(pretrained=True)
#model.backward = backward
def guided_hook(grad):
    """Backward hook that keeps only positive gradient signal.

    Prints the gradient's shape, zeroes every negative entry in place
    (guided-backprop style rectification) and returns the same tensor.
    """
    print(grad.size())
    negative = grad < 0
    grad[negative] = 0.0
    return grad
# Attach the clamp hook to every parameter's gradient.
# NOTE(review): classic guided backprop clamps ReLU *output* gradients, not
# parameter gradients -- confirm this is intentional.
for name, param in model.named_parameters():
    param.register_hook(guided_hook)
# Load the input image (path from the command line) and scale to [0, 1].
img = cv2.imread(sys.argv[1], 1)
img = np.float32(cv2.resize(img, (224, 224))) / 255
input = preprocess_image(img)
output = model(input)
values, indices = torch.max(output, 0)
# Index of the highest-scoring class.
winning_class = np.argmax(values.data.numpy())
target = Variable(torch.zeros(1000))
target[winning_class] = 1
# Backprop an MSE loss against the one-hot target to get input gradients.
criterion = nn.MSELoss()
loss = criterion(output, target)
loss.backward()
#for name, param in model.named_parameters():
#    print (param.grad)
# Visualize the input gradient as a grayscale image.
gradient_img = input.grad.data.numpy().reshape(224,224,3)
gray = cv2.cvtColor(gradient_img, cv2.COLOR_BGR2GRAY)
plt.imshow(gray, cmap='gray')
plt.show()
| [
"grzegorz.gwardys@gmail.com"
] | grzegorz.gwardys@gmail.com |
ffda37ececaa2d150eb974bb033faf76012f625f | dc6e8aa6ae9e9519e4c3dca2ae599e78e5aeefee | /Packages/Package Control/package_control/unicode.py | d9d340d02205eedcfded8cd015fed2a564deb549 | [
"MIT"
] | permissive | zeke/sublime-setup | d570f28b8411e88905de85483edeeddfb9c146a2 | 5ae704400e8e2a6ee03afd5836983534741a6535 | refs/heads/master | 2022-06-18T06:21:08.924549 | 2013-07-15T05:07:17 | 2013-07-15T05:07:17 | 11,414,888 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,293 | py | import os
import locale
import sublime
def unicode_from_os(e):
    """
    This is needed as some exceptions coming from the OS are
    already encoded and so just calling unicode(e) will result
    in an UnicodeDecodeError as the string isn't in ascii form.

    :param e:
        The exception to get the value of

    :return:
        The unicode version of the exception message
    """
    import sys

    fallback_encodings = ['utf-8', 'cp1252']

    # Sublime Text on OS X does not seem to report the correct encoding
    # so we hard-code that to UTF-8. Bug fix: the old code compared
    # `os.name` against 'darwin', which is never true (os.name is 'posix'
    # on OS X) -- the platform identifier lives in sys.platform.
    encoding = 'UTF-8' if sys.platform == 'darwin' else locale.getpreferredencoding()

    if int(sublime.version()) > 3000:
        return str(e)

    try:
        if isinstance(e, Exception):
            e = e.message
        if isinstance(e, unicode):
            return e
        if isinstance(e, int):
            e = str(e)
        return unicode(e, encoding)

    # If the "correct" encoding did not work, try some defaults, and then just
    # obliterate characters that we can't seem to decode properly
    except UnicodeDecodeError:
        for encoding in fallback_encodings:
            try:
                return unicode(e, encoding, errors='strict')
            # Narrowed from a bare `except:`; only a decoding failure should
            # move us on to the next fallback encoding.
            except UnicodeDecodeError:
                pass
        return unicode(e, errors='replace')
| [
"zeke@sikelianos.com"
] | zeke@sikelianos.com |
2581bc06418e89922799e3df503a7348a21c752a | 914b12f9c29656afe7533301369c2e0b8c07ecc0 | /parsers/getGenomesWithMarkers.py | 9e01188c8baf3b5dd4cd3633407d7d8f287f989a | [] | no_license | ashishjain1988/Scripts | 73c53b972223b8949de5aa002f1725fc75efd870 | 5c0395d1ada4039f0d235f80f1e8b71ab164daed | refs/heads/master | 2021-01-21T12:40:23.037845 | 2016-04-06T19:45:32 | 2016-04-06T19:45:32 | 42,592,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | #!/usr/bin/env python
import os
import sys
def filter(handle,trueFile,falseFile):
    """Split genome accessions into two files based on the marker flag.

    Each input line looks like "<flag> <accession>", e.g. "True ABC123".
    Accessions flagged "True" go to `trueFile`, all others to `falseFile`.
    NOTE: the name shadows the builtin `filter`; kept for caller compatibility.
    """
    for line in handle:
        # split() (instead of split(" ")) also strips the trailing newline
        # that used to stay attached to the accession and produced blank
        # lines in the output files.
        parts = line.split()
        if len(parts) < 2:
            # Skip blank or malformed lines instead of crashing.
            continue
        flag, accession = parts[0], parts[1]
        if(flag == "True"):
            trueFile.write(accession+"\n")
        else:
            falseFile.write(accession+"\n")
def main():
    """Read the flag file and write marker / no-marker accession lists."""
    base = "/home/jain/Gram_Positive_Bacteria_Study/Organisms_Lists_from_PATRIC/Firmicutes/"
    # `with` guarantees all three files are closed even if filter() raises;
    # the previous explicit close() calls leaked on error.
    with open(base + "Firmicutes_Genome_Flag.txt", 'r') as handle, \
            open(base + "firmicutes_with_marker.txt", "w") as trueFile, \
            open(base + "firmicutes_without_marker.txt", "w") as falseFile:
        filter(handle, trueFile, falseFile)
if __name__ == "__main__":
main() | [
"jain.ashishjain1@gmail.com"
] | jain.ashishjain1@gmail.com |
ddebf90774be84b529ceff3dc4606e94e81e7a91 | 58b97f4a24884f793e8fe992c2266190c38faefd | /Airbnb-Lite/myproject/__init__.py | ac53b16e1880695b30d656366c93380b153f5e29 | [] | no_license | jrong525/Airbnb-Lite | 53757526be51f42f862737e568627dd783f20914 | 12e3984d74c1fd0b3a2c34ceb7baefd5cdcc0f96 | refs/heads/master | 2020-07-30T02:04:17.608697 | 2019-09-21T20:33:18 | 2019-09-21T20:33:18 | 210,048,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
# Flask-Login manager; bound to the app further down.
login_manager = LoginManager()
app = Flask(__name__)
app.config['SECRET_KEY'] = 'mysecretkey'
# Absolute path of this package; the SQLite file lives next to it.
basedir = os.path.abspath(os.path.dirname(__file__))
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.sqlite')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
Migrate(app,db)
login_manager.init_app(app)
# Endpoint name users are redirected to when login is required.
login_manager.login_view = "login"
| [
"rong@dhcp-wifi-8021x-155-41-21-178.bu.edu"
] | rong@dhcp-wifi-8021x-155-41-21-178.bu.edu |
a1a2b8a9308f7f485a674bb9d3967bc31bac6bed | d77c12a37a647acd20366c345fbab25384ea0bb1 | /TAKE_A_BREAK/BREAK.py | eb686fa6d7dc1cf0dbfd22e5760cc4982e56c3c7 | [] | no_license | JPatrick9793/Udacity_Fullstack | 4681d7d014cab82a6aa908ec5109f01e0b54554c | aa8905f0b5a6f1e62451b306709aeb6bb5adf64f | refs/heads/master | 2020-12-02T19:31:35.896033 | 2017-07-10T23:06:35 | 2017-07-10T23:06:35 | 96,353,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | import webbrowser
import time
print("This program started on " + time.ctime())
# Open the video three times, five seconds apart.
for _ in range(3):
    time.sleep(5)
    webbrowser.open("https://www.youtube.com/watch?v=dQw4w9WgXcQ")
| [
"johnnyboy9793@JohnConwaysMBP.home"
] | johnnyboy9793@JohnConwaysMBP.home |
4570d38cc342698b9f5e6bcaaca77a8459c0408c | b385fc2f18bbb43ec1bca1606b62ae83f33dcb2d | /Programming-Basics/While Loop/Sequence 2k+1/Sequence 2k+1.py | d1b977f06e673eb316811f75cd6f44e4799f9e5f | [] | no_license | rishinkaku/Software-University---Software-Engineering | d9bee36de12affc9aed7fcc0b8b6616768340e51 | b798a0c6927ef461491c8327451dd00561d836e4 | refs/heads/master | 2023-06-10T19:52:51.016630 | 2021-07-08T00:45:06 | 2021-07-08T00:45:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | n = int(input())
# Print the sequence 1, 3, 7, 15, ... (next = 2*term + 1). The first term
# is always printed; further terms are emitted while they stay <= n.
term = 1
print(term)
while 2 * term + 1 <= n:
    term = 2 * term + 1
    print(term)
| [
"66394357+DimAntDim@users.noreply.github.com"
] | 66394357+DimAntDim@users.noreply.github.com |
75cacb0e97a551d8646a1e672dac3bee1fe1a301 | 12611e896fd526358e8f4f8a4d9f7ec41d7b2d42 | /Group_13_Ravichandran/Group_13_Ravichandran_Code/create_database/global_variables.py | 06952815e5551bbbfbf846e965351b0a4dee16ff | [] | no_license | narendrakumar92/Yelp-Trend-propagation---NetGel | 18359f44ded454d93f69e5d4941dcb0dbf9a28b3 | 5f895a4673e4d745c93e15f659cadc48b338feca | refs/heads/master | 2021-06-28T17:11:35.486374 | 2017-06-09T03:07:40 | 2017-06-09T03:07:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | #!/usr/bin/python
# coding=utf-8
# SQLite database file names (production / test / formatted variants).
sqlLite_database_name = 'smlProject.db'
sqlLite_database_name_test = 'smlProject_test.db'
sqlLite_database_name_formatted = 'smlProject_formatted_v1.db'
# Local directory holding the raw Yelp dataset CSV exports.
file_path = '/Users/ravichandran/Documents/Misc/Docs/ASU_Subjects/Spring_17/SML/Project/Code/Yelp-Trend-propagation---NetGel/data/'
# Files already ingested into the database (populated at runtime).
added_files = []
#added_files = ['yelp_academic_dataset_user.csv','yelp_academic_dataset_business.csv','yelp_academic_dataset_checkin.csv','yelp_academic_dataset_tip.csv']
file_names = ['yelp_academic_dataset_user.csv','yelp_academic_dataset_business.csv','yelp_academic_dataset_checkin.csv','yelp_academic_dataset_tip.csv','yelp_academic_dataset_review.csv'] | [
"akhilravichandran11@gmail.com"
] | akhilravichandran11@gmail.com |
93c45bbc181002e52b0d0a7f8d48ceb22faed466 | 922c4f9203d5bdde7c471de393e5726d96d82209 | /app/gql_client/downvote_answer.py | c33a55c1396abe9ccd9b3089e60a5d4be29d422b | [] | no_license | bhavidhingra/AcadOverflow | e9ebc1c115ea0c29f90c175c37263939c191e14c | 8d2ba583b89e1395f947f5da7c1d9697f1412259 | refs/heads/master | 2022-08-25T18:47:41.175761 | 2020-05-23T18:46:12 | 2020-05-23T18:46:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | from graphql_client import gql, client
def downvote_answer(aId):
    """Decrement the VoteCount of answer `aId` via a GraphQL mutation.

    Failures from the GraphQL client are printed rather than propagated
    (best-effort semantics, matching the rest of the client helpers).
    """
    mutation = gql('''
    mutation {
        update_Answers(
            where: {
                Id: { _eq: ''' + str(aId) + ''' }
            },
            _inc: { VoteCount: -1 }
        ) {
            returning { VoteCount }
        }
    }
    ''')
    try:
        result = client.execute(mutation)
        print(result)
    except Exception as err:
        print(err)
if __name__ == "__main__":
aId = 2
downvote_answer(aId) | [
"bhavi.dhingra@gmail.com"
] | bhavi.dhingra@gmail.com |
dc4c76d92552dd6f6afe5feda18891dd318e2c84 | 8ee8ed465c1aaab72a26f6bce0c9ea7c144f003b | /main/authentication/migrations/0001_initial.py | 0776f6b80b1bb7dc8f23c166153eb214b41212b0 | [] | no_license | teja0404/Httponly_JWT_Authentication | 45ef0c14a083b2e5cf8d3eb1279b7bf2ea4be78d | ac6d8e25d02ab385e121f099ae98ac5094c89d65 | refs/heads/main | 2023-04-19T08:59:13.301287 | 2021-05-15T10:34:31 | 2021-05-15T10:34:31 | 367,597,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,953 | py | # Generated by Django 3.1.1 on 2021-05-12 10:16
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration for the authentication app.

    Auto-generated by ``manage.py makemigrations``; creates the CustomUser
    model, which mirrors Django's stock AbstractUser fields and adds one
    project-specific field, ``fav_color``.
    """
    # First migration of this app.
    initial = True
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='CustomUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                # Project-specific addition on top of the standard user fields.
                ('fav_color', models.CharField(blank=True, max_length=120)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| [
"tejailla0404@gmail.com"
] | tejailla0404@gmail.com |
6e8626b6d1ab6278796b1458b5d9a42efe0e48e3 | d384c2d81ce575cc87efd28ecfa9ae5698ef3cb2 | /bolig/migrations/0016_auto_20160328_1436.py | 4035022c1cddbce0be17746ae059aabd0bf64ce1 | [] | no_license | tbrygge/bolig | be9bc19d1b0502edd62b9bfcd8aab61dc437f97e | 0827979502455d0bd8a30d33ae664ed5949193e4 | refs/heads/master | 2021-01-01T05:43:21.367351 | 2016-05-08T14:05:14 | 2016-05-08T14:05:14 | 58,315,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-28 12:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: relaxes Person.roller to an optional (blank) M2M to Rolle."""
    dependencies = [
        ('bolig', '0015_auto_20160328_0308'),
    ]
    operations = [
        migrations.AlterField(
            model_name='person',
            name='roller',
            field=models.ManyToManyField(blank=True, to='bolig.Rolle'),
        ),
    ]
| [
"pgsoi@online.no"
] | pgsoi@online.no |
36b3ff2616e80e43666cf2eb969d0edb15fcff5a | 5cd0945b55f88ae6fd602b9ddb24e479afec78a0 | /helloWorld.py | da6a3726caf3374b5f19e4228756535e50a432b7 | [] | no_license | lupetimayaraturbiani/EstudoPython | b1b44c4c17e8791b001088532b288736d9d60b3f | 4d88087fb44e222d4a22b332e3a5c7f5da67c6ee | refs/heads/master | 2023-07-01T14:13:47.881340 | 2021-08-12T20:30:50 | 2021-08-12T20:30:50 | 395,436,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47 | py | num = 2
print("Hello World!")
print(num * num) | [
"mayaralupetiturbiani@gmail.com"
] | mayaralupetiturbiani@gmail.com |
9f2797f7ec61e1beba2df83d4671a275d44af30c | a0c53168a4bdcfb0aa917d6d2c602f0999443a10 | /projexui/widgets/xcalendarwidget/xcalendarscene.py | 6f9462a72e6858bc49065498b0206a2d26eb0e3a | [] | no_license | kanooshka/DPS_PIPELINE | 8067154c59ca5c8c9c09740969bb6e8537021903 | df2fcdecda5bce98e4235ffddde1e99f334562cc | refs/heads/master | 2021-05-24T04:32:03.457648 | 2018-09-07T13:25:11 | 2018-09-07T13:25:11 | 29,938,064 | 3 | 2 | null | 2020-07-23T23:06:37 | 2015-01-27T22:26:01 | Python | UTF-8 | Python | false | false | 19,573 | py | #!/usr/bin/python
"""
Defines a calendar widget similar to the ones found in outlook or ical.
"""
# define authorship information
__authors__ = ['Eric Hulser']
__author__ = ','.join(__authors__)
__credits__ = []
__copyright__ = 'Copyright (c) 2011, Projex Software'
__license__ = 'LGPL'
# maintanence information
__maintainer__ = 'Projex Software'
__email__ = 'team@projexsoftware.com'
#------------------------------------------------------------------------------
from PyQt4.QtCore import Qt,\
QDate, \
QLine,\
QRectF,\
QDateTime,\
QTime
from PyQt4.QtGui import QGraphicsScene,\
QPalette,\
QCursor
from projex.enum import enum
from projexui.widgets.xcalendarwidget.xcalendaritem import XCalendarItem
from projexui.widgets.xpopupwidget import XPopupWidget
class XCalendarScene(QGraphicsScene):
    """Graphics scene that lays out and paints a calendar (day/week/month).

    XCalendarItem instances added to the scene are positioned against a
    date / date-time grid that is rebuilt lazily before painting.
    """
    # Display modes; NOTE(review): Agenda is declared but the rebuild logic
    # below only implements Day, Week and Month.
    Mode = enum('Day', 'Week', 'Month', 'Agenda')
    # Granularity used when rendering in timeline mode.
    TimelineScale = enum('Day', 'Week', 'Month', 'Year')
    def __init__( self, parent = None ):
        """
        Initializes the scene with today's date and month mode.
        
        :param      parent | <QObject> || None
        """
        super(XCalendarScene, self).__init__( parent )
        # define custom properties
        self._currentDate = QDate.currentDate()
        self._currentMode = XCalendarScene.Mode.Month
        self._timelineScale = XCalendarScene.TimelineScale.Week
        self._minimumDate = QDate()
        self._maximumDate = QDate()
        # maps QDate julian day -> ((row, col), QRectF) of that day's cell
        self._dateGrid = {}
        # maps QDateTime epoch seconds -> ((row, col), QRectF) of that slot
        self._dateTimeGrid = {}
        # cached drawing primitives (grid lines, text entries, highlight rects)
        self._buildData = {}
        self._rebuildRequired = False
        # set default properties
        # create connections
def addCalendarItem( self ):
"""
Adds a new calendar item to the scene.
:return <XCalendarItem>
"""
item = XCalendarItem()
self.addItem(item)
return item
def addItem( self, item ):
"""
Adds the item to the scene and redraws the item.
:param item | <QGraphicsItem>
"""
result = super(XCalendarScene, self).addItem(item)
if ( isinstance(item, XCalendarItem) ):
item.rebuild()
return result
    def currentDate( self ):
        """
        Returns the current date displayed with this calendar widget
        (defaults to today; updated by setCurrentDate / mouse clicks).
        
        :return     <QDate>
        """
        return self._currentDate
    def currentMode( self ):
        """
        Returns what calendar mode this calendar is currently displaying
        (one of the XCalendarScene.Mode values).
        
        :return     <XCalendarScene.Mode>
        """
        return self._currentMode
def dateAt( self, point ):
"""
Returns the date at the given point.
:param point | <QPoint>
"""
for date, data in self._dateGrid.items():
if ( data[1].contains(point) ):
return QDate.fromJulianDay(date)
return QDate()
def dateTimeAt( self, point ):
"""
Returns the date time at the inputed point.
:param point | <QPoint>
"""
for dtime, data in self._dateTimeGrid.items():
if ( data[1].contains(point) ):
return QDateTime.fromTime_t(dtime)
return QDateTime()
def dateRect( self, date ):
"""
Returns the rect that is defined by the inputed date.
:return <QRectF>
"""
data = self._dateGrid.get(date.toJulianDay())
if ( data ):
return QRectF(data[1])
return QRectF()
def dateTimeRect( self, dateTime ):
"""
Returns the rect that is defined by the inputed date time.
:return <QRectF>
"""
data = self._dateTimeGrid.get(dateTime.toTime_t())
if ( data ):
return QRectF(data[1])
return QRectF()
    def drawBackground( self, painter, rect ):
        """
        Paints the cached calendar chrome (highlights, grid lines, labels)
        behind the scene's items.
        
        :param      painter | <QPainter>
                    rect    | <QRectF>
        """
        # lazily rebuild the layout caches on the first paint after a change
        if ( self._rebuildRequired ):
            self.rebuild()
        super(XCalendarScene, self).drawBackground(painter, rect)
        palette = self.palette()
        # draw custom options
        # translucent highlight behind the currently selected date cell
        if ( 'curr_date' in self._buildData ):
            clr = palette.color(QPalette.Highlight)
            clr.setAlpha(40)
            painter.setBrush(clr)
            painter.setPen(Qt.NoPen)
            painter.drawRect(self._buildData['curr_date'])
            painter.setBrush(Qt.NoBrush)
        # softer highlight behind today's cell
        if ( 'today' in self._buildData ):
            painter.setPen(Qt.NoPen)
            clr = palette.color(QPalette.AlternateBase)
            clr.setAlpha(120)
            painter.setBrush(clr)
            painter.drawRect(self._buildData['today'])
            painter.setBrush(Qt.NoBrush)
        # draw the grid
        painter.setPen(palette.color(QPalette.Mid))
        painter.drawLines(self._buildData.get('grid', []))
        # draw text fields
        painter.setPen(palette.color(QPalette.Text))
        for data in self._buildData.get('regular_text', []):
            painter.drawText(*data)
        # draw mid text fields
        painter.setPen(palette.color(QPalette.Mid))
        for data in self._buildData.get('mid_text', []):
            painter.drawText(*data)
def helpEvent( self, event ):
"""
Displays a tool tip for the given help event.
:param event | <QHelpEvent>
"""
item = self.itemAt(event.scenePos())
if ( item and item and item.toolTip() ):
parent = self.parent()
rect = item.path().boundingRect()
point = event.scenePos()
point.setY(item.pos().y() + rect.bottom())
point = parent.mapFromScene(point)
point = parent.mapToGlobal(point)
XPopupWidget.showToolTip(item.toolTip(),
point = point,
parent = parent)
event.accept()
else:
super(XCalendarScene, self).helpEvent(event)
    def markForRebuild( self, state = True ):
        """
        Flags this scene as needing a layout rebuild; the rebuild itself is
        deferred until the next paint pass (see drawBackground).
        
        :param      state | <bool>
        """
        self._rebuildRequired = state
        self.invalidate()
    def maximumDate( self ):
        """
        Returns the maximum date for this widget.  This value will be used
        when in timeline mode to determine the end for the date range to
        search for; it is refreshed by the rebuild* methods.
        
        :return     <QDate>
        """
        return self._maximumDate
    def mousePressEvent( self, event ):
        """
        Changes the current date to the clicked on date (hiding any popup
        tool tip first), then continues default scene handling.
        
        :param      event | <QMousePressEvent>
        """
        XPopupWidget.hideToolTip()
        # update the current date
        self.setCurrentDate(self.dateAt(event.scenePos()))
        super(XCalendarScene, self).mousePressEvent(event)
    def minimumDate( self ):
        """
        Returns the minimum date for this widget.  This value will be used
        when in timeline mode to determine the start for the date range to
        search for; it is refreshed by the rebuild* methods.
        
        :return     <QDate>
        """
        return self._minimumDate
    def rebuild( self ):
        """
        Rebuilds the information for this scene: clears the cached grids,
        delegates to the mode-specific layout routine, re-lays-out the
        calendar items, and notifies the parent if the date range moved.
        """
        self._buildData.clear()
        self._dateGrid.clear()
        self._dateTimeGrid.clear()
        # remember the previous range so a change signal can be emitted below
        curr_min = self._minimumDate
        curr_max = self._maximumDate
        self._maximumDate = QDate()
        self._minimumDate = QDate()
        self.markForRebuild(False)
        # rebuilds the month view
        if ( self.currentMode() == XCalendarScene.Mode.Month ):
            self.rebuildMonth()
        elif ( self.currentMode() in (XCalendarScene.Mode.Week,
                                      XCalendarScene.Mode.Day)):
            self.rebuildDays()
        # rebuild the items in the scene
        items = sorted(self.items())
        for item in items:
            item.setPos(0, 0)
            item.hide()
        for item in items:
            if ( isinstance(item, XCalendarItem) ):
                item.rebuild()
        if ( curr_min != self._minimumDate or curr_max != self._maximumDate ):
            parent = self.parent()
            if ( parent and not parent.signalsBlocked() ):
                parent.dateRangeChanged.emit(self._minimumDate,
                                             self._maximumDate)
    def rebuildMonth( self ):
        """
        Rebuilds the month for this scene: lays out a 7-column grid of day
        cells (Sunday-first) covering the weeks that span the current month
        and caches the grid lines, labels and per-date rectangles.
        """
        # make sure we start at 0 for sunday vs. 7 for sunday
        day_map = dict([(i+1, i+1) for i in range(7)])
        day_map[7] = 0
        today = QDate.currentDate()
        curr = self.currentDate()
        # expand [first, last] outward to full Sunday..Saturday weeks
        first = QDate(curr.year(), curr.month(), 1)
        last = QDate(curr.year(), curr.month(), curr.daysInMonth())
        first = first.addDays(-day_map[first.dayOfWeek()])
        last = last.addDays(6-day_map[last.dayOfWeek()])
        cols = 7
        # NOTE(review): written for Python 2 — under Python 3 this division
        # yields a float and range(rows + 1) below would fail; use // there.
        rows = (first.daysTo(last) + 1) / cols
        hlines = []
        vlines = []
        padx = 6
        pady = 6
        header = 24
        w = self.width() - (2 * padx)
        h = self.height() - (2 * pady)
        dw = (w / cols) - 1
        dh = ((h - header) / rows) - 1
        x0 = padx
        y0 = pady + header
        x = x0
        y = y0
        for row in range(rows + 1):
            hlines.append(QLine(x0, y, w, y))
            y += dh
        for col in range(cols + 1):
            vlines.append(QLine(x, y0, x, h))
            x += dw
        self._buildData['grid'] = hlines + vlines
        # draw the date fields
        date = first
        row = 0
        col = 0
        # draw the headers
        x = x0
        y = pady
        regular_text = []
        mid_text = []
        self._buildData['regular_text'] = regular_text
        self._buildData['mid_text'] = mid_text
        for day in ('Sun', 'Mon','Tue','Wed','Thu','Fri','Sat'):
            regular_text.append((x + 5,
                                 y,
                                 dw,
                                 y0,
                                 Qt.AlignLeft | Qt.AlignVCenter,
                                 day))
            x += dw
        for i in range(first.daysTo(last) + 1):
            top = (y0 + (row * dh))
            left = (x0 + (col * dw))
            rect = QRectF(left - 1, top, dw, dh)
            # mark the current date on the calendar
            if ( date == curr ):
                self._buildData['curr_date'] = rect
            # mark today's date on the calendar
            elif ( date == today ):
                self._buildData['today'] = rect
            # determine how to draw the calendar
            # NOTE(review): `format` shadows the builtin (harmless locally)
            format = 'd'
            if ( date.day() == 1 ):
                format = 'MMM d'
            # determine the color to draw the text
            if ( date.month() == curr.month() ):
                text = regular_text
            else:
                text = mid_text
            # draw the text
            text.append((left + 2,
                         top + 2,
                         dw - 4,
                         dh - 4,
                         Qt.AlignTop | Qt.AlignLeft,
                         date.toString(format)))
            # update the limits
            if ( not i ):
                self._minimumDate = date
            self._maximumDate = date
            self._dateGrid[date.toJulianDay()] = ((row, col), rect)
            if ( col == (cols - 1) ):
                row += 1
                col = 0
            else:
                col += 1
            date = date.addDays(1)
    def rebuildDays( self ):
        """
        Rebuilds the interface as a week display: a 24h column of 30-minute
        slots with hour labels on the left, and one column per day (seven in
        Week mode, one in Day mode).
        """
        # NOTE(review): written for Python 2 — the plain / divisions below
        # are integer divisions there.
        time = QTime(0, 0, 0)
        hour = True
        x = 6
        y = 6 + 24
        w = self.width() - 12 - 25
        dh = 48
        indent = 58
        text_data = []
        vlines = []
        hlines = [QLine(x, y, w, y)]
        time_grids = []
        # 48 half-hour rows; full-width line + "hap" label on each hour
        for i in range(48):
            if ( hour ):
                hlines.append(QLine(x, y, w, y))
                text_data.append((x,
                                  y + 6,
                                  indent - 6,
                                  dh,
                                  Qt.AlignRight | Qt.AlignTop,
                                  time.toString('hap')))
            else:
                hlines.append(QLine(x + indent, y, w, y))
            time_grids.append((time, y, dh / 2))
            # move onto the next line
            hour = not hour
            time = time.addSecs(30 * 60)
            y += dh / 2
        hlines.append(QLine(x, y, w, y))
        h = y
        y = 6 + 24
        # load the grid
        vlines.append(QLine(x, y, x, h))
        vlines.append(QLine(x + indent, y, x + indent, h))
        vlines.append(QLine(w, y, w, h))
        today = QDate.currentDate()
        curr_date = self.currentDate()
        # load the days
        if ( self.currentMode() == XCalendarScene.Mode.Week ):
            date = self.currentDate()
            # normalize so Sunday (Qt day 7) maps to column offset 0
            day_of_week = date.dayOfWeek()
            if ( day_of_week == 7 ):
                day_of_week = 0
            min_date = date.addDays(-day_of_week)
            max_date = date.addDays(6-day_of_week)
            self._minimumDate = min_date
            self._maximumDate = max_date
            dw = (w - (x + indent)) / 7.0
            vx = x + indent
            date = min_date
            for i in range(7):
                vlines.append(QLine(vx, y, vx, h))
                text_data.append((vx + 6,
                                  6,
                                  dw,
                                  24,
                                  Qt.AlignCenter,
                                  date.toString('ddd MM/dd')))
                self._dateGrid[date.toJulianDay()] = ((0, i),
                                                      QRectF(vx, y, dw, h - y))
                # create the date grid for date time options
                for r, data in enumerate(time_grids):
                    time, ty, th = data
                    dtime = QDateTime(date, time)
                    key = dtime.toTime_t()
                    self._dateTimeGrid[key] = ((r, i), QRectF(vx, ty, dw, th))
                # NOTE(review): the 29px trim presumably accounts for the
                # header/padding band — confirm before touching
                if ( date == curr_date ):
                    self._buildData['curr_date'] = QRectF(vx, y, dw, h - 29)
                elif ( date == today ):
                    self._buildData['today'] = QRectF(vx, y, dw, h - 29)
                date = date.addDays(1)
                vx += dw
        # load a single day
        else:
            date = self.currentDate()
            self._maximumDate = date
            self._minimumDate = date
            text_data.append((x + indent,
                              6,
                              w,
                              24,
                              Qt.AlignCenter,
                              date.toString('ddd MM/dd')))
            self._dateGrid[date.toJulianDay()] = ((0, 0),
                                                  QRectF(x, y, w - x, h - y))
            # create the date grid for date time options
            for r, data in enumerate(time_grids):
                time, ty, th = data
                dtime = QDateTime(date, time)
                key = dtime.toTime_t()
                rect = QRectF(x + indent, ty, w - (x + indent), th)
                self._dateTimeGrid[key] = ((r, 0), rect)
        self._buildData['grid'] = hlines + vlines
        self._buildData['regular_text'] = text_data
        # persist the computed content height (see setSceneRect)
        rect = self.sceneRect()
        rect.setHeight(h + 6)
        super(XCalendarScene, self).setSceneRect(rect)
    def setCurrentDate( self, date ):
        """
        Sets the current date displayed by this calendar widget and emits
        the parent's currentDateChanged / titleChanged signals.
        
        :param      date | <QDate>
        """
        # ignore no-op changes and invalid (null) dates
        if ( date == self._currentDate or not date.isValid() ):
            return
        self._currentDate = date
        self.markForRebuild()
        parent = self.parent()
        if ( not parent.signalsBlocked() ):
            parent.currentDateChanged.emit(date)
            parent.titleChanged.emit(self.title())
    def setCurrentMode( self, mode ):
        """
        Sets the current mode that this calendar will be displayed in and
        schedules a lazy layout rebuild.
        
        :param      mode | <XCalendarScene.Mode>
        """
        self._currentMode = mode
        self.markForRebuild()
    def setSceneRect( self, *args ):
        """
        Updates the scene rect for this item, then flags a rebuild.  In the
        day/week modes the previous height is restored so resizing the view
        does not squash the 24h time grid computed by rebuildDays().
        
        :param      *args
        """
        h = self.height()
        super(XCalendarScene, self).setSceneRect(*args)
        if ( self.currentMode() != XCalendarScene.Mode.Month ):
            rect = self.sceneRect()
            rect.setHeight(h)
            super(XCalendarScene, self).setSceneRect(rect)
        self.markForRebuild()
    def setTimelineScale( self, timelineScale ):
        """
        Sets the timeline scale that will be used when rendering a calendar
        in timeline mode.
        
        :param      timelineScale | <XCalendarScene.TimelineScale>
        """
        self._timelineScale = timelineScale
def title( self ):
"""
Returns the title for this scene based on its information.
:return <str>
"""
if ( self.currentMode() == XCalendarScene.Mode.Day ):
return self.currentDate().toString('dddd, MMMM dd, yyyy')
elif ( self.currentMode() == XCalendarScene.Mode.Week ):
title = str(self.minimumDate().toString('dddd, MMMM dd'))
title += ' - '
title += str(self.maximumDate().toString('dddd, MMMM dd, yyyy'))
return title
elif ( self.currentMode() == XCalendarScene.Mode.Month ):
return self.currentDate().toString('MMMM yyyy')
else:
return ''
def timelineScale( self ):
"""
Returns the timeline scale that will be used when rendering a calendar \
in timeline mode.
:return <XCalendarScene.TimelineScale>
"""
return self._timelineScale | [
"kanooshka@gmail.com"
] | kanooshka@gmail.com |
b9be26523a79e0ed4ebc0819a2cf4003d2b1ee59 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02838/s053367568.py | d3c49f223dc225bd9cca1700aa01ef3296ab9707 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | # coding: utf-8
import sys
#from operator import itemgetter
sysread = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
#from heapq import heappop, heappush
#from collections import defaultdict
sys.setrecursionlimit(10**7)
#import math
#from itertools import product, accumulate, combinations, product
#import bisect
#import numpy as np
#from copy import deepcopy
#from collections import deque
#from decimal import Decimal
#from numba import jit
# Large sentinel "infinity" that still fits comfortably in integer arithmetic.
INF = 1 << 50
# Tolerance for floating-point comparisons.
EPS = 1e-8
# Contest-standard prime modulus.
mod = 10 ** 9 + 7
def mapline(t = int):
    """Read one stdin line and map its whitespace-separated tokens through t."""
    return map(t, sysread().split())
def mapread(t = int):
    """Read all remaining stdin and map its whitespace-separated tokens through t."""
    return map(t, read().split())
def generate_inv(n, mod):
    """Build a table of modular inverses.

    Returns a list ``inv`` with ``inv[i] * i % mod == 1`` for every ``i`` in
    ``1..n`` (``inv[0]`` is a 0 placeholder), computed in O(n) via the
    recurrence ``inv[i] = -(mod // i) * inv[mod % i] (mod mod)``.

    Note: ``mod`` must be a prime number and ``n >= 1``.
    """
    inv = [0, 1]
    for i in range(2, n + 1):
        # mod = (mod // i) * i + (mod % i); rearranging modulo `mod` gives
        # i**-1 == -(mod // i) * (mod % i)**-1
        inv.append(-inv[mod % i] * (mod // i) % mod)
    return inv
def run():
    """Print the sum over all pairs i<j of (A[i] XOR A[j]), modulo 10**9+7.

    Uses a XOR b == a + b - 2*(a AND b): pairwise AND-sums are accumulated
    per bit via ``subs``, and the final halving (ordered -> unordered pairs)
    is done with the modular inverse of 2.
    """
    # input format: N followed by N integers
    N, *A = mapread()
    maxA = max(A)
    L = maxA.bit_length()
    # subs[k] = sum of 2**k over all elements that have bit k set
    subs = [0] * L
    for k in range(L):
        # NOTE(review): `sum` shadows the builtin within this function
        sum = 0
        for a in A:
            if (a >> k) & 1:
                sum += 1 << k
                sum %= mod
        subs[k] = sum
    sumA = 0
    for a in A:
        sumA += a
        sumA %= mod
    # ret = 2*N*sumA == sum over ordered pairs (i, j) of A[i] + A[j]
    ret = 0
    ret += (sumA * N) % mod
    ret += (sumA * N) % mod
    # sub_sum = 2 * sum over ordered pairs of (A[i] AND A[j]): for fixed a,
    # summing subs[k] over a's set bits yields sum_j (a AND A[j])
    sub_sum = 0
    for a in A:
        sums = 0
        for k in range(L):
            if (a >> k) & 1:
                sums += subs[k] * 2
                sums %= mod
        sub_sum += sums
        sub_sum %= mod
    ret -= sub_sum
    ret %= mod
    # halve via the modular inverse of 2
    inv = generate_inv(2, mod)
    ret *= inv[2]
    ret %= mod
    print(ret)
# Solve only when executed as a script (not when imported).
if __name__ == "__main__":
    run()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4fd3ad15ddd33c92cdffecb72052595b15ddd601 | 4beabdb5089e3284251dcaf046366c35d3afe02f | /rectangles.py | 06768e5dd0cb13903384183826b1e5920a411701 | [] | no_license | AndrewFendrich/Mandelbrot | c3fa2b1463d6e01b91ac0a3c53ef88c8e1716641 | 074ebd9028c13a9f840c2436ab2c8c3d2275dbf6 | refs/heads/master | 2021-01-13T00:52:24.060863 | 2017-05-08T14:30:02 | 2017-05-08T14:30:02 | 50,623,517 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 27 23:25:48 2015
@author: User
"""
import pygame
pygame.init()
rectangle = pygame.Rect(50,50,100,100)
print(rectangle)
rectangle.inflate_ip(2,2)
print(rectangle) | [
"admin@admin.com"
] | admin@admin.com |
db3ab44121cab89eb49405a33887cb45c038dfcc | 488d2c6ff5306f8b5e84e1629a320ddb13d6e32a | /run_api.py | cb26001c046a50f808fd6f7aa1b2015b53dc0bc9 | [
"MIT"
] | permissive | PraveenKumarSridhar/Random-learning | da26a4285beb47b7a053c8036da002816ba2697d | 726fc855c26b0efab8ad870e1cf95847a4027fd3 | refs/heads/main | 2023-01-02T00:27:42.352457 | 2020-10-20T19:48:21 | 2020-10-20T19:48:21 | 305,799,369 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | from src.api.random_article_api import *
if __name__ == "__main__":
RandomArticle.start_rest_api() | [
"prasri.pk@gmail.com"
] | prasri.pk@gmail.com |
cb05c3d122bc3c321df4d6ab0d22c20210c1f236 | c2e281fad181befbe711159a4a84e2d949439da2 | /movieratings_app/migrations/0007_newreviews.py | 474d1f73560415a47a08a1c49de056b772c632b3 | [] | no_license | benhigh22/real_movie_ratings | e50c3b23742314acc6db3d5d031426fbe88437ca | e76e0a6ab4f299bef579bda8eb27b3c651baa1c1 | refs/heads/master | 2021-01-10T14:07:30.944585 | 2016-03-15T19:54:32 | 2016-03-15T19:54:32 | 52,557,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 735 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-28 02:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('movieratings_app', '0006_auto_20160227_1632'),
]
operations = [
migrations.CreateModel(
name='NewReviews',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating', models.IntegerField()),
('movie', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movieratings_app.Movie')),
],
),
]
| [
"benjaming.high@gmail.com"
] | benjaming.high@gmail.com |
bc7bb44a3249e33689a43cac7d40c1af489113c8 | ed12e58e5eef42f10455a5792892aa79c3bb6f4c | /2015/18/day18.py | 25caf0810380f9c63c3555fdec939da7306dd81f | [] | no_license | mzarecky/advent-of-code | 3c968f3c9021a9d08db2a7f3ebdbe853c28ea4d7 | 88901a8cee42b934f72df1a3f5db5d55cd855d6e | refs/heads/master | 2021-12-15T01:13:01.230262 | 2021-12-10T00:38:17 | 2021-12-10T00:38:17 | 227,000,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,234 | py |
import itertools
class ConwayLights:
    """Conway's Game of Life on a rectangular grid of lights.

    Implements Advent of Code 2015 day 18: a lit light stays on with 2 or 3
    lit neighbours, an unlit light turns on with exactly 3.  When
    ``force_corners_on`` is set, the four corner lights are stuck on (part 2).
    """

    def __init__(self, initial_state, force_corners_on=False):
        """initial_state: list of equal-length strings of '#' (on) / '.' (off)."""
        self.x_dim = len(initial_state[0])
        self.y_dim = len(initial_state)
        self.force_corners_on = force_corners_on
        self.state = {(x, y): False for x in range(self.x_dim) for y in range(self.y_dim)}
        self.corners = [(0, 0), (0, self.y_dim-1), (self.x_dim-1, 0), (self.x_dim-1, self.y_dim-1)]
        self.next_state = {}
        # the 8 neighbour offsets around a cell
        self.adj = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]
        self.step = 0
        self.set_initial_state(initial_state)

    def __str__(self):
        # Bugfix: this previously returned "" unconditionally; render the
        # grid in the same '#'/'.' notation used by the input instead.
        return "\n".join(
            "".join("#" if self.state[(x, y)] else "." for x in range(self.x_dim))
            for y in range(self.y_dim))

    def is_valid_pos(self, pos):
        """True when pos == (x, y) lies inside the grid."""
        return 0 <= pos[0] < self.x_dim and 0 <= pos[1] < self.y_dim

    def set_initial_state(self, initial_state):
        """Switch on every light marked '#' (plus the corners in part-2 mode)."""
        for y, row in enumerate(initial_state):
            for x, c in enumerate(row):
                if c == "#":
                    self.state[(x, y)] = True
        if self.force_corners_on:
            for pos in self.corners:
                self.state[pos] = True

    def get_num_adjacent(self, pos):
        """Count lit lights among the (up to 8) in-bounds neighbours of pos."""
        neighbours = ((pos[0] + dx, pos[1] + dy) for dx, dy in self.adj)
        return sum(self.state[p] for p in neighbours if self.is_valid_pos(p))

    def get_total_on(self):
        """Total number of lights currently on."""
        return sum(self.state.values())

    def next(self):
        """Advance the grid by one animation step."""
        self.step += 1
        for pos in self.state:
            n = self.get_num_adjacent(pos)
            if (self.state[pos] and 2 <= n <= 3) or (not self.state[pos] and n == 3):
                self.next_state[pos] = True
            else:
                self.next_state[pos] = False
        self.state = self.next_state.copy()
        # stuck corners are re-asserted after every step in part-2 mode
        if self.force_corners_on:
            for pos in self.corners:
                self.state[pos] = True
# Parse Input
with open("./2015/18/input.txt") as f:
    data = [d.strip() for d in f.readlines()]
# sample grid from the puzzle statement (kept for manual experimentation;
# not used by the two parts below)
test_data = [".#.#.#", "...##.", "#....#", "..#...", "#.#..#", "####.."]
# Part 1
cl = ConwayLights(data)
while cl.step < 100:
    cl.next()
print(f"Total lights on: {cl.get_total_on()}")
# Part 2
cl = ConwayLights(data, force_corners_on=True)
while cl.step < 100:
    cl.next()
print(f"Total lights on: {cl.get_total_on()}")
| [
"mdzarecky@gmail.com"
] | mdzarecky@gmail.com |
5ea61961f4522d0dc70427058f1ccd7098700d47 | b07fac938ebe0f44e1984b9e3315401d931be9b4 | /script/discover_auhors.py | 2b4a041130adac0dd83c74d5a60b55d51c0b2c60 | [] | no_license | datalogism/lifranum-carto | d31402bdc44c450b89f52b891610e28af7dc43f0 | b3945d2aa7aabc0d69aeddf3d627e369db5d183d | refs/heads/master | 2023-02-27T13:34:12.675281 | 2021-02-05T16:38:45 | 2021-02-05T16:38:45 | 211,826,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,455 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 3 09:20:49 2019
@author: Celian
"""
import json
def normalize_names(rw_name):
    """Normalise a raw author name into title-cased name parts.

    Trailing "(...)" / "[...]" annotations are removed, then the name is
    split into at most two components:

    * ``"LAST, First"``            -> ``["Last", "First"]``
    * fewer than 3 space-separated words -> one entry per word
    * 3+ words: all-uppercase words are treated as the family name, the
      rest as given names -> ``[given names, family name]``

    :param rw_name: raw author string as found in the source corpora.
    :return: list of one or more cleaned, title-cased name parts.
    """
    name = rw_name.strip()
    # Drop trailing bracketed annotations.  Bugfix: the original sliced
    # name[0:idx-1], silently chopping the final character of the name
    # whenever no space preceded the bracket (e.g. "Hugo(V)" -> "Hug").
    for opener in ("(", "["):
        idx = name.find(opener)
        if idx > 0:
            name = name[:idx].strip()
    comma_parts = name.split(",")
    if len(comma_parts) == 2:
        # "LAST, First" style entry
        return [part.title().strip() for part in comma_parts]
    if len(comma_parts) == 1:
        space_parts = name.split(" ")
        if len(space_parts) < 3:
            return [part.title().strip() for part in space_parts]
        # 3+ words: words made entirely of uppercase characters are
        # family-name candidates (same test as the original code)
        first_name_cand = [p for p in space_parts
                           if len(p) != sum(1 for c in p if c.isupper())]
        last_name_cand = [p for p in space_parts
                          if len(p) == sum(1 for c in p if c.isupper())]
        if first_name_cand and last_name_cand:
            return [" ".join(first_name_cand).title().strip(),
                    " ".join(last_name_cand).title().strip()]
        # no usable split: fall back to the space-split words
        return [part.title().strip() for part in space_parts]
    # more than one comma: return every comma-separated chunk
    return [part.title().strip() for part in comma_parts]
#### SPLA FILE
file='C:/Users/Celian/Desktop/lifranum_carto/data/spla_haiti_final.json'
with open(file, encoding='utf-8') as json_file:
data = json.load(json_file)
corpus_haiti_spla=[]
for d in data:
if("country" in d.keys() and d["country"]=="Haiti"):
corpus_haiti_spla.append(d)
if("desc" in d.keys() and "content" in d["desc"].keys() and "haiti" in d["desc"]["content"].lower()):
corpus_haiti_spla.append(d)
list_author_spla=list(set([d['nom auteur'] for d in corpus_haiti_spla if d['nom auteur']!=""]))
list_author_spla_norm=["_".join(normalize_names(n)) for n in list_author_spla]
#### ILE EN ILE FILE
file='C:/Users/Celian/Desktop/lifranum_carto/data/ile_en_ile.json'
with open(file, encoding='utf-8') as json_file:
data = json.load(json_file)
corpus_haiti_ile_en_ile=[]
for d in data.keys():
corpus_haiti_ile_en_ile.append(d)
list_author_ile_en_ile_norm=["_".join(normalize_names(n)) for n in corpus_haiti_ile_en_ile]
# COTE CORPUS
corpus_haiti_cote=[]
import csv
file='C:/Users/Celian/Desktop/lifranum_carto/data/Corpus_Haitiv3.csv'
with open(file, encoding='utf-8') as csvfile:
readCSV = csv.reader(csvfile, delimiter=';')
header=next(readCSV)
for row in readCSV:
current={header[i]:row[i] for i in range(len(header))}
current["URL"]=current["URL"][0:len(current["URL"])-1]
corpus_haiti_cote.append(current)
list_url_cote=list(set([d['URL'] for d in corpus_haiti_cote if d['URL']!=""]))
list_author_cote=list(set([d['Auteur'] for d in corpus_haiti_cote if d['Auteur']!=""]))
list_author_cote_norm=["_".join(normalize_names(n)) for n in list_author_cote]
both=set(list_author_ile_en_ile_norm).intersection(set(list_author_spla_norm))
every=set(list_author_ile_en_ile_norm).union(set(list_author_spla_norm))
with open('C:/Users/Celian/Desktop/lifranum_carto/authors_all_SES2', 'w') as f:
for name in every:
ok=name.replace("_"," ")
f.write(ok+'\n')
name="Willems Edouard"
name2="_".join(normalize_names(name))
#### CORPUS RDF
from rdflib.graph import Graph
file_path='C:/Users/Celian/Desktop/lifranum_carto/data/LIFRANUM.rdf'
g = Graph()
from urllib import parse
g.parse(file_path)
meta_by_domain={}
for subj, pred, obj in g:
url=str(subj)
p=pred.replace("http://purl.org/dc/elements/1.1/","")
if url not in meta_by_domain.keys():
meta_by_domain[url]={}
if p not in meta_by_domain[url].keys():
meta_by_domain[url][p]=[]
if obj not in meta_by_domain[url][p]:
meta_by_domain[url][p].append(str(obj))
##### RECOUPEMENT DES URLS RDF-COTE
common_url=set(list_url_cote).intersection(set(meta_by_domain.keys()))
print("nb url common:",len(common_url))
print("nb url cote:",len(list_url_cote))
print("nb url rdf:",len(meta_by_domain.keys()))
#### GET INTERESTING DATA FROM RDF
list_url_from_rdf=[]
for k in meta_by_domain.keys():
if('haiti' in k):
print("haiti link")
list_url_from_rdf.append(k)
if("creator" in meta_by_domain[k].keys()):
if("haiti" in ' '.join(meta_by_domain[k]["creator"])):
print("haiti authors")
list_url_from_rdf.append(k)
for creator in meta_by_domain[k]["creator"]:
normalized="_".join(normalize_names(creator))
if(normalized in list_author_cote_norm or normalized in list_author_spla_norm):
print("cited authors")
list_url_from_rdf.append(k)
if("title" in meta_by_domain[k].keys()):
if("haiti" in ' '.join(meta_by_domain[k]["title"]).lower()):
print("haiti title")
list_url_from_rdf.append(k)
if("rights" in meta_by_domain[k].keys()):
if("haiti" in ' '.join(meta_by_domain[k]["rights"]).lower()):
print("haiti rights")
list_url_from_rdf.append(k)
if("publisher" in meta_by_domain[k].keys()):
if("haiti" in ' '.join(meta_by_domain[k]["publisher"]).lower()):
print("haiti publisher")
list_url_from_rdf.append(k)
if("contributor" in meta_by_domain[k].keys()):
if("haiti" in ' '.join(meta_by_domain[k]["contributor"]).lower()):
print("haiti contributor")
list_url_from_rdf.append(k)
# /!\ UN SITE SANS CREATEUR
#list_rdf_author=[meta_by_domain[k]["creator"] for k in list_url_from_rdf]
list_rdf_author=list(set([meta_by_domain[k]["creator"] for k in list_url_from_rdf if "creator" in meta_by_domain[k].keys()]))
list_rdf_author_norm=["_".join(normalize_names(aut[0])) for aut in list_rdf_author]
###### SEARCH DATA BNF VIA AUTHOR LIST
import requests
url = 'https://data.bnf.fr/sparql?'
every_author_norm=list(set(list_author_cote_norm+list_author_spla_norm+list_author_ile_en_ile_norm))
bfn_found={}
for auth in every_author_norm:
splitted=auth.split("_")
combinaisons=[]
if(len(splitted)>1):
for i in range(len(splitted)):
if i >0:
first_part=splitted[0:i]
first_part=' '.join(first_part)
second_part=splitted[i:len(splitted)]
second_part=' '.join(second_part)
query = """
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX rdagroup2elements: <http://rdvocab.info/ElementsGr2/>
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX isni: <http://isni.org/ontology#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX dcterms: <http://purl.org/dc/terms/>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
PREFIX bio: <http://vocab.org/bio/0.1/>
SELECT ?identity ?type ?genre ?country ?lang ?family ?name ?link
WHERE {
?concept foaf:focus ?identity.
?identity foaf:familyName ?family.
?identity foaf:givenName ?name.
?identity rdf:type ?type.
?identity foaf:gender ?genre.
OPTIONAL{
?identity owl:sameAs ?link.
?identity rdagroup2elements:countryAssociatedWithThePerson ?country.
?identity rdagroup2elements:languageOfThePerson ?lang.
}
FILTER ((?family='"""+first_part+"""' && ?name='"""+second_part+"""') || (?family='"""+second_part+"""' && ?name='"""+first_part+"""'))
}
"""
r = requests.get(url, params = {'format': 'json', 'query': query})
try:
data = r.json()
vars_found=data["head"]["vars"]
res=data["results"]["bindings"]
if(len(res)>0):
print("FOUND SOMETHING for >"+first_part+"_"+second_part)
bfn_found[first_part+"_"+second_part]=data
break
except:
print("PB")
file='C:/Users/Celian/Desktop/lifranum_carto/data/bnf_data_for_authors.json'
with open(file, encoding='utf-8') as json_file:
data = json.load(json_file)
with open(file, 'w', encoding='utf-8') as f:
json.dump(bfn_found, f, ensure_ascii=False, indent=4)
viaf_found={}
for auth in bfn_found.keys():
res=bfn_found[auth]["results"]["bindings"]
for r in res:
if "link" in r.keys():
if "viaf" in r["link"]["value"]:
r=requests.get(r["link"]["value"]+"/viaf.xml")
data=r.text
viaf_found[auth]=data
with open(file, encoding='utf-8') as json_file:
data = json.load(json_file)
file='C:/Users/Celian/Desktop/lifranum_carto/data/viaf_data_for_authors.json'
with open(file, encoding='utf-8') as json_file:
data = json.load(json_file)
with open(file, 'w', encoding='utf-8') as f:
json.dump(viaf_found, f, ensure_ascii=False, indent=4)
| [
"celian.ringwald@hotmail.fr"
] | celian.ringwald@hotmail.fr |
3ce5238e09417f7485d1ba0b45638d66493d8afd | e1df9bdf8394e91d8ff63bdc7eab102ab8146960 | /django_project_boilerplate-master/djecommerce/urls.py | 3a07ed24efa22a02a5c845bfac7293cad675518a | [] | no_license | ameerarx/Django_1 | 5cfcbaaf1957c6cdb53699401122387aa3166deb | 5e0973070ea6f46abc443dd57815fde8f60d21f6 | refs/heads/master | 2022-12-17T20:12:02.714025 | 2020-04-01T13:50:04 | 2020-04-01T13:50:04 | 235,368,689 | 0 | 0 | null | 2022-12-08T03:57:03 | 2020-01-21T14:59:58 | Python | UTF-8 | Python | false | false | 591 | py | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    # Grappelli must come before the admin URLs so it can skin the admin UI.
    path('grappelli/', include('grappelli.urls')),
    path('admin/', admin.site.urls),
    path('accounts/', include('allauth.urls')),
    path('', include('core.urls', namespace='core')),
]

# During development only, let Django itself serve static and media files.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"ameerarx@outlook.com"
] | ameerarx@outlook.com |
eed5bdee529131d4bd81b9b6314053f0a2bf8e86 | c94fd4ed66e76406a6d3ecc79bda890bf3571378 | /vcd/image_iter.py | d7eeebb724f0f8b4c52c5c59ecac835cd692f118 | [] | no_license | c-connors/variational-change-detection | 5d12ba314c59e56a92909b98957e50d5c9e39350 | 20ed5fc0d1bd5a6b3ef4c1351cd35a590f5afef0 | refs/heads/master | 2021-01-20T21:11:47.050757 | 2017-08-30T22:17:56 | 2017-08-30T22:17:56 | 101,755,342 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,634 | py | import theano
import numpy as np
from six.moves import cPickle
# Convenience class to hold image datasets.
class ImageDataset:
def __init__(self, images, patch_shape, batch_size=None, shuffle=False):
self.images, self.patch_shape, self.batch_size, self.shuffle = images, patch_shape, batch_size, shuffle
self.patch_space_shape = ((images.shape[2] - patch_shape[0] + 1), (images.shape[3] - patch_shape[1] + 1))
self.patch_count = np.prod(self.patch_space_shape)
# Convenience class to hold labeled datasets.
class LabeledDataset:
    """Container for labeled patches plus batching/augmentation settings.

    augment enables random flips/rotations; aug_noise_std is the standard
    deviation of additive Gaussian noise applied during augmentation.
    """

    def __init__(self, patches, labels, batch_size=None, shuffle=False,
                 augment=False, aug_noise_std=0):
        self.patches = patches
        self.labels = labels
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.augment = augment
        self.aug_noise_std = aug_noise_std
# Sweeps a function over patches taken from a set of images. This is the main training and evaluation loop.
def image_iter(iter_fn, image_dataset,
labeled_dataset=None,
eval_fn=None,
network_functions=None,
valid_labeled_dataset=None,
early_stop_rate=1,
test_labeled_dataset=None,
n_iter=None,
give_results=False,
history_rate=None,
print_rate=None,
metric_names=None):
# Sweep iter_fn over patches taken from image_dataset (optionally mixed with a
# labeled batch), with optional validation/test evaluation, early stopping,
# result collection, per-batch history snapshots and console printing.
# NOTE(review): iter_fn/eval_fn appear to be compiled Theano functions that
# take a batch (and labels) and return metrics -- confirm against callers.
# Initialize and calculate default values
if n_iter == None: n_iter = (image_dataset.patch_count - 1) // image_dataset.batch_size + 1
combined_batch_size = image_dataset.batch_size + (0 if labeled_dataset == None else labeled_dataset.batch_size)
batch = np.empty((combined_batch_size,) + image_dataset.images.shape[:2] + image_dataset.patch_shape, theano.config.floatX)
if labeled_dataset != None:
batch_labels = np.zeros((combined_batch_size,) + labeled_dataset.labels.shape[1:], theano.config.floatX)
# 0/1 mask marking which slots of the combined batch carry labels (the tail).
batch_label_present = (np.arange(combined_batch_size) >= image_dataset.batch_size).astype(theano.config.floatX)
if history_rate != None: history = [None] * (1 + (n_iter - 1) // history_rate)
results = None
has_valid_or_test = valid_labeled_dataset != None or test_labeled_dataset != None
best_valid_xent, test_at_best_valid_xent, best_network_functions_str = None, None, None
# Run requested number of batches
# One extra iteration is run when a validation/test set exists so the final
# model state can be evaluated after the last training batch.
for i in range(n_iter + (1 if has_valid_or_test else 0)):
# Evaluate on validation and test labeled sets if they were given
eval_labeled_datasets = (valid_labeled_dataset, test_labeled_dataset)
eval_xents = [None] * len(eval_labeled_datasets)
for ii, eval_labeled_dataset in enumerate(eval_labeled_datasets):
if eval_labeled_dataset != None:
eval_xent = 0.
for eval_batch_start in range(0, eval_labeled_dataset.patches.shape[0], eval_labeled_dataset.batch_size):
eval_batch_stop = min(eval_labeled_dataset.patches.shape[0], eval_batch_start + eval_labeled_dataset.batch_size)
# Size-weighted sum so the final division yields a mean over samples.
eval_xent += (eval_batch_stop - eval_batch_start) * eval_fn(eval_labeled_dataset.patches[eval_batch_start:eval_batch_stop], eval_labeled_dataset.labels[eval_batch_start:eval_batch_stop])
eval_xents[ii] = eval_xent / eval_labeled_dataset.patches.shape[0]
valid_xent, test_xent = eval_xents
# Early stopping with validation set
# Snapshot the network (pickled) whenever the validation score improves.
if i % early_stop_rate == 0 and (best_valid_xent == None or valid_xent < best_valid_xent):
best_network_functions_str = cPickle.dumps(network_functions)
best_valid_xent = valid_xent
test_at_best_valid_xent = test_xent
if i == n_iter:
if print_rate != None:
# If training has ended, print the final validation and test evaluations
if test_xent != None: print('End test result: %g' % test_xent)
if best_valid_xent != None: print('Best validation result: %g' % best_valid_xent)
if test_at_best_valid_xent != None: print('Test at best validation result: %g' % test_at_best_valid_xent)
else:
# Extract current batch from images
batch_start = i * image_dataset.batch_size
for ii in range(image_dataset.batch_size):
idx = (batch_start + ii) % image_dataset.patch_count
if image_dataset.shuffle:
# Re-shuffle each time a full pass over the patch space restarts.
if idx == 0: perm = np.random.permutation(image_dataset.patch_count)
idx = perm[idx]
# Convert the flat patch index into a (row, col) top-left corner.
r_ind, c_ind = idx // image_dataset.patch_space_shape[1], idx % image_dataset.patch_space_shape[1]
batch[ii] = image_dataset.images[:, :, r_ind:(r_ind + image_dataset.patch_shape[0]), c_ind:(c_ind + image_dataset.patch_shape[1])]
batch_filled = min(image_dataset.patch_count, batch_start + image_dataset.batch_size) - batch_start
# Extract current batch of labeled samples if labeled set was given
if labeled_dataset == None: batch_args = (batch,)
else:
# aug picks one of 6 augmentations (5 geometric + identity) per sample.
if labeled_dataset.augment: aug = np.random.randint(6, size=labeled_dataset.batch_size)
for ii in range(labeled_dataset.batch_size):
idx = (batch_start + ii) % labeled_dataset.patches.shape[0]
if labeled_dataset.shuffle:
if idx == 0: labeled_perm = np.random.permutation(labeled_dataset.patches.shape[0])
idx = labeled_perm[idx]
patch = labeled_dataset.patches[idx]
batch_labels[image_dataset.batch_size + ii] = labeled_dataset.labels[idx]
# Apply data augmentation
if labeled_dataset.augment:
if aug[ii] == 0: patch = patch[:, :, ::-1] # Flip rows
elif aug[ii] == 1: patch = patch[:, :, :, ::-1] # Flip columns
elif aug[ii] == 2: patch = np.rot90(patch, axes=(2, 3)) # Rotate 90
elif aug[ii] == 3: patch = np.rot90(patch, k=2, axes=(2, 3)) # Rotate 180
elif aug[ii] == 4: patch = np.rot90(patch, k=3, axes=(2, 3)) # Rotate 270
patch = patch + np.random.normal(scale=labeled_dataset.aug_noise_std, size=patch.shape) # Add noise
batch[image_dataset.batch_size + ii] = patch
batch_args = (batch, batch_labels, batch_label_present)
# Apply function to current batch
result = iter_fn(*batch_args)
if give_results:
# Allocate the full patch-space result array lazily, on first use.
if not isinstance(results, np.ndarray): results = np.empty(image_dataset.patch_space_shape + result.shape[1:], theano.config.floatX)
# NOTE(review): the destination slice spans combined_batch_size entries but
# the source is result[:batch_filled] -- verify these lengths always agree.
results.reshape((results.shape[0] * results.shape[1],) + results.shape[2:])[batch_start:(batch_start + combined_batch_size)] = result[:batch_filled]
# Optionally save and display results from current batch
if history_rate != None and i % history_rate == 0: history[i // history_rate] = result
if print_rate != None and i % print_rate == 0:
print('Batch %d/%d' % (i + 1, n_iter))
if metric_names != None:
for k, v in zip(metric_names, result): print('%s: %g' % (k, v.mean() if isinstance(v, np.ndarray) else v))
print('-' * 16)
# Return a subsequence of [best_network_functions_str, results, history]
ret_value = []
if has_valid_or_test: ret_value.append(best_network_functions_str)
if give_results: ret_value.append(results)
if history_rate != None: ret_value.append(history)
return ret_value[0] if len(ret_value) == 1 else ret_value
| [
"connors.clay@gmail.com"
] | connors.clay@gmail.com |
80a1c18f8e69671ebde216c7d4f3665ff8b2181b | b281dd9e711d737579745914c6611d8cfaddb07d | /phones_media_files_demo/phones_media_files_demo/phones/migrations/0001_initial.py | c64a49f588e99ab5a1c3d237694ae76464f853d7 | [
"MIT"
] | permissive | Beshkov/Python-web-fundamentals | daf76f3765cb56e02bdaba8ea7df675990dd3885 | 6b0e9cc9725ea80a33c2ebde6e29f2ab585ab8d9 | refs/heads/main | 2023-08-03T07:04:22.238320 | 2021-09-12T18:57:36 | 2021-09-12T18:57:36 | 392,644,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | # Generated by Django 3.2.6 on 2021-08-06 20:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: creates the Phone table."""

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name='Phone',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('manufacturer', models.CharField(max_length=30)),
                ('model', models.CharField(max_length=15)),
                # Optional photo, stored under MEDIA_ROOT/phones.
                ('image', models.ImageField(blank=True, upload_to='phones')),
            ],
        ),
    ]
| [
"alexander.beshkov@gmail.com"
] | alexander.beshkov@gmail.com |
7633c8e23a54248d3fec4e19704989074a7f9009 | ab3cffa97e11c841c3d7fefca0b303a23fbdef3d | /models/access_request.py | 80554cf746aab36d6d0a5d656fa5d31016062b74 | [] | no_license | mpopadic/animal-center-api | 1ef749b83bd9a3b2433d7f716636610a3aec378c | fbce052d1f8a540e7d81f13aa502f484ce26a523 | refs/heads/master | 2022-05-28T07:26:18.885396 | 2020-04-09T18:26:25 | 2020-04-09T18:26:25 | 253,509,247 | 0 | 0 | null | 2022-05-25T03:55:08 | 2020-04-06T13:41:27 | Python | UTF-8 | Python | false | false | 1,406 | py | from datetime import datetime
from database import db
class AccessRequest(db.Model):
    """ORM model for the access_requests table.

    One row is stored each time a center requests a token: which center asked,
    and when.
    """
    __tablename__ = "access_requests"

    id = db.Column(db.Integer, primary_key=True)
    # Stored under the public column name 'center_id'; the leading underscore
    # lets the property below validate every assignment.
    _center_id = db.Column('center_id', db.Integer, db.ForeignKey("centers.id"))
    timestamp = db.Column(db.TIMESTAMP)

    def __init__(self, center_id):
        self.center_id = center_id  # routed through the validating setter
        self.timestamp = datetime.now()

    @property
    def center_id(self):
        return self._center_id

    @center_id.setter
    def center_id(self, value):
        # Rejects anything that is not exactly int (the strict type() check
        # also excludes bool and other int subclasses).
        if not isinstance(value, int) or type(value) is not int:
            raise TypeError('center_id must be a int')
        self._center_id = value

    @staticmethod
    def json(ac):
        """Serialize an AccessRequest row into a plain dict."""
        return {'id': ac.id, 'center_id': ac.center_id, 'timestamp': ac.timestamp}

    @staticmethod
    def get_all_access_requests():
        """Return every row of access_requests as a list of dicts."""
        return [AccessRequest.json(ac) for ac in AccessRequest.query.all()]

    @staticmethod
    def add_access_request(_center_id):
        """Insert a new access-request row and commit the session."""
        request_row = AccessRequest(_center_id)
        db.session.add(request_row)
        db.session.commit()
"mpopadic@griddynamics.com"
] | mpopadic@griddynamics.com |
2c0b1bd21ed3b1488a4df9c8597c6f088513dad0 | 97f7049050ab1eb8be379ea98cc20ff498e051ab | /zhCMS/common/request1.py | d4840fd0e2e2e3596e18859e7438e24433ef0ab0 | [] | no_license | xxzhaoxu/zhcms | 78d2940b6cd1c1888e974d86ce7fd34745bbc3d4 | 5fd121bd905e83a05dad763ae66e4781555d0160 | refs/heads/master | 2021-04-02T02:17:03.752566 | 2020-10-10T07:18:46 | 2020-10-10T07:18:46 | 248,233,638 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | def post(param):
from django.http import HttpRequest
print(param)
return HttpRequest.POST.get(param)
| [
"350861641@qq.com"
] | 350861641@qq.com |
aa9e0d80176cab68f0c1defae6efef96522ec18a | 440baf27ea3c1c71300afcd5b3ac5f9f45452abd | /Code/myapp/admin.py | 6fd652feb9df1e47f1c97f4733b875bd9c6cb5e3 | [] | no_license | batham630/Uber_rider_predictor | 636f63acc5053da717ec8c8550e2cd2e639c04ee | 61299f6d17e8b54ac5dd5a729e261a2b179a3400 | refs/heads/master | 2023-08-15T08:31:11.137926 | 2021-10-21T17:55:51 | 2021-10-21T17:55:51 | 419,821,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | from django.contrib import admin
from . models import Prediction
# Register your models here.
admin.site.register(Prediction)
| [
"bathamabhishek955@gmail.com"
] | bathamabhishek955@gmail.com |
54b5f81c202a4a9d48f25271d4ba743e2e4d049f | 4015e9d9cc72889b3494ae8b58e81dc507ae8d31 | /venv/Lib/site-packages/celery/bin/beat.py | faddd256a6bad3001f11a3074518b1a34db1463b | [] | no_license | Unlimit78/Test_For_DevelopsToday | 675676d3a477f590485722019bc1b1e1412b3926 | dc4e4ae887edf243adaca3a03c5fd3209ee60300 | refs/heads/master | 2022-12-17T18:41:33.511674 | 2020-09-15T18:13:53 | 2020-09-15T18:13:53 | 295,706,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,625 | py | # -*- coding: utf-8 -*-
"""The :program:`celery beat` command.
.. program:: celery beat
.. seealso::
See :ref:`preload-options` and :ref:`daemon-options`.
.. cmdoption:: --detach
Detach and run in the background as a daemon.
.. cmdoption:: -s, --schedule
Path to the schedule database. Defaults to `celerybeat-schedule`.
The extension '.db' may be appended to the filename.
Default is {default}.
.. cmdoption:: -S, --scheduler
Scheduler class to use.
Default is :class:`{default}`.
.. cmdoption:: --max-interval
Max seconds to sleep between schedule iterations.
.. cmdoption:: -f, --logfile
Path to log file. If no logfile is specified, `stderr` is used.
.. cmdoption:: -l, --loglevel
Logging level, choose between `DEBUG`, `INFO`, `WARNING`,
`ERROR`, `CRITICAL`, or `FATAL`.
.. cmdoption:: --pidfile
File used to store the process pid. Defaults to `celerybeat.pid`.
The program won't start if this file already exists
and the pid is still alive.
.. cmdoption:: --uid
User id, or user name of the user to run as after detaching.
.. cmdoption:: --gid
Group id, or group name of the main group to change to after
detaching.
.. cmdoption:: --umask
Effective umask (in octal) of the process after detaching. Inherits
the umask of the parent process by default.
.. cmdoption:: --workdir
Optional directory to change to after detaching.
.. cmdoption:: --executable
Executable to use for the detached process.
"""
from __future__ import absolute_import, unicode_literals
from functools import partial
from celery.bin.base import Command, daemon_options
from celery.platforms import detached, maybe_drop_privileges
__all__ = ("beat",)
HELP = __doc__
class beat(Command):
    """Start the beat periodic task scheduler.

    Examples:
        .. code-block:: console

            $ celery beat -l info
            $ celery beat -s /var/run/celery/beat-schedule --detach
            $ celery beat -S django

    The last example requires the :pypi:`django-celery-beat` extension
    package found on PyPI.
    """

    doc = HELP
    enable_config_from_cmdline = True
    supports_args = False

    def run(self, detach=False, logfile=None, pidfile=None, uid=None,
            gid=None, umask=None, workdir=None, **kwargs):
        # When staying in the foreground, drop privileges here; detached mode
        # drops them inside the detached() context instead.
        if not detach:
            maybe_drop_privileges(uid=uid, gid=gid)
        kwargs.pop("app", None)
        beat = partial(self.app.Beat, logfile=logfile, pidfile=pidfile, **kwargs)
        if detach:
            with detached(logfile, pidfile, uid, gid, umask, workdir):
                return beat().run()
        return beat().run()

    def add_arguments(self, parser):
        conf = self.app.conf
        bopts = parser.add_argument_group("Beat Options")
        bopts.add_argument("--detach", action="store_true", default=False)
        bopts.add_argument("-s", "--schedule", default=conf.beat_schedule_filename)
        bopts.add_argument("--max-interval", type=float)
        bopts.add_argument("-S", "--scheduler", default=conf.beat_scheduler)
        bopts.add_argument("-l", "--loglevel", default="WARN")
        daemon_options(parser, default_pidfile="celerybeat.pid")
        # Expose any user-registered extra options for the beat command.
        user_options = self.app.user_options["beat"]
        if user_options:
            uopts = parser.add_argument_group("User Options")
            self.add_compat_options(uopts, user_options)
def main(app=None):
    """Console entry point for the ``celery beat`` command."""
    beat(app=app).execute_from_commandline()


if __name__ == "__main__":  # pragma: no cover
    main()
| [
"taras_ilyashchuk161@ukr.net"
] | taras_ilyashchuk161@ukr.net |
936b7b10e86cdeeaefe0e6f870ba20839b804f3d | 04a0614b8c2a893dab29bc4ffb0aaf82364fdf3f | /53. Maximum Subarray.py | 2fd8f5e141461dfc091e452ab1ffef6fc179a75e | [] | no_license | sharmaji27/Leetcode-Problems | 716bcb4a36b9e4f45274c4d551967e15c40ddbd2 | 0f878933b17df170c18f0b67b7200cec76c276e0 | refs/heads/master | 2021-10-20T17:35:35.175757 | 2021-10-20T05:33:17 | 2021-10-20T05:33:17 | 218,299,755 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | class Solution:
def maxSubArray(self, nums: List[int]) -> int:
    """Return the largest sum of any contiguous subarray (Kadane's algorithm).

    Runs in O(n) time and O(1) extra space.  Bug fix: the original printed
    nums[0] to stdout for single-element inputs instead of returning early.
    """
    # Seed both trackers with the first element so all-negative inputs work.
    best = current = nums[0]
    for value in nums[1:]:
        # Either extend the running subarray or restart at the current value.
        current = max(current + value, value)
        best = max(best, current)
    return best
"asharma70420@gmail.com"
] | asharma70420@gmail.com |
a133b51478e530f03ad6149dc43190341777cc14 | b5516dda2d1680f2fe4a7a8f19a39a919e44fa47 | /tripodscode/source/vectorize.py | c3641c677d082fac1600306eae8ed07ca99620a1 | [
"ECL-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Valparaiso-Data-Science/general-course-relevance-discovery | 0ad3c26ec8969138917ca3dcb3c8f6f746e10aea | cfbd34031fd8fcbc6aeff44a1262b32d9a99873c | refs/heads/main | 2023-02-20T15:36:50.441239 | 2022-12-18T18:25:51 | 2022-12-18T18:25:51 | 187,892,694 | 2 | 1 | ECL-2.0 | 2023-02-10T23:10:41 | 2019-05-21T18:28:59 | Python | UTF-8 | Python | false | false | 2,343 | py | import pandas as pd
from nltk.stem.porter import *
ps = PorterStemmer()
#Reader /output/Full csvs and convert to dataframe of CourseID and
# Domain stopword lists removed from course descriptions before stemming.
# Department/course-code abbreviations that carry no topical meaning.
first_stops = ['cr','ul','ii','cog','pp','ps','geog','cosc','biol','el','sesp',
'eecs','oba','phys','phy','mth','cmsc','nur','ce','cs','iii'] #unknown/unnecessary abbreviations
# Generic verbs that add no topical signal.
second_stops = ['make','impact','apply','change','involve','reside','vary','may',
'meet','use','include','pertain','tell','cover','devote',
'recognize','carry'] #verbs that are not useful
# Generic descriptors and qualifiers.
third_stops = ['new','minimum','useful','mainly','large','liberal','formerly',
'especially','absolutely','graduate','odd','one','throughout',
'weekly','least','well','hour','common','require','along','least',
'long','related','prior','open','sophomore','junior','single',
'necessary'] #unuseful descriptors
# Miscellaneous course-catalogue boilerplate tokens.
fourth_stops = ['treat','prereq','prerequisite','creditsprerequisite',
'corequisite','either','assignment','major','none','arts','core',
'andor','semester','hoursprereq','student','instructor','threehour',
'within','lecturescover','satisfactoryno','summer','yifat',
'givenfor','term','classroom','area','inquiry','researchintensive',
'year','via','teacher','ofhow'] #other unuseful words
def newClean(df):
    """Return a DataFrame of (School, CourseID, Descriptions) with cleaned text.

    Cleaning strips punctuation, lower-cases, drops the domain stopword lists,
    and Porter-stems every remaining token of each course description.
    """
    import string
    stopwords = ['credits', 'spring', 'fall', 'course', 'students', 'offered',
                 'hours', 'credit', 'grade', 'typically']
    stopwords += first_stops + second_stops + third_stops + fourth_stops
    schools, courses, descriptions = [], [], []
    for _, row in df.iterrows():
        text = row['Descriptions']
        # Map every punctuation character to a sentinel, then strip it out.
        text = text.translate(text.maketrans(string.punctuation, "\\" * len(string.punctuation)))
        text = text.replace("\\", '')
        text = ' '.join(ps.stem(word.lower()) for word in text.split()
                        if word.lower() not in stopwords)
        schools.append(row['School'])
        courses.append(row['CourseID'])
        descriptions.append(text)
    cleanDF = pd.DataFrame(list(zip(schools, courses, descriptions)),
                           columns=['School', 'CourseID', 'Descriptions'])
    print(cleanDF.head())
    return cleanDF
"80072593+carolynnguyen614@users.noreply.github.com"
] | 80072593+carolynnguyen614@users.noreply.github.com |
21d3e4137877c5c962c0a372bcf51516c5d5cab3 | 32233acff831abdd290f4168a982b694f9f95393 | /src/util.py | 5aa9b1fda0b49ea8cf671313917c63a379a0a966 | [] | no_license | CarsonScott/onlinestats | e2dfa7ceca21e99b828eb1fd0149fc34e2b9c2ce | 48a8024a4c5c0f8ddbc7ec4f1b1eef3485ae95e7 | refs/heads/master | 2020-04-26T22:36:06.471611 | 2019-03-06T22:02:45 | 2019-03-06T22:02:45 | 173,877,356 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | import math
from copy import copy
def iterable(X):
    """Return True when X is a list or tuple.

    Note: this deliberately does NOT test general iterability — strings,
    sets, dicts and generators all return False.  Uses the idiomatic
    tuple-of-types form of isinstance instead of a chained `or`.
    """
    return isinstance(X, (list, tuple))
"carsonjscott14@gmail.com"
] | carsonjscott14@gmail.com |
d1f2a5a0ce19f01c58d9de12eb25fcc714910455 | 77d0b2fe45af81bcea1b7e9e7b0560f24a0e9448 | /postProcessing.py | f290ecaa4c1d68be4ad1144a126935afc173e81a | [
"MIT",
"CC-BY-4.0"
] | permissive | marz869/FrontLearning | a4992301203babe198a39844cd32290e3bf5ee46 | 70f0e4c2991ff5ba585e20fbc6aa9e7b82ca312c | refs/heads/master | 2023-05-03T18:08:49.117678 | 2021-03-30T06:57:12 | 2021-03-30T06:57:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,968 | py | #!/usr/bin/env python
u"""
postProcessing.py
by Michael Wood (Last Updated by Yara Mohajerani 10/2018)
find path of least resistance through an image
Update History
11/2018 - Yara: Don't separate train or test inputs based on glacier. Input subdir
and get glacier name from spreadsheet
10/2018 - Yara: Change input folder to be consistent with
other scripts
09/2018 - Yara: Clean up and add user input
09/2018 - Michael: written
"""
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from skimage.graph import route_through_array
import shapefile
import os
import sys
import getopt
from osgeo import ogr
from osgeo import osr
import urllib
from pyproj import Proj,transform
#############################################################################################
#############################################################################################
#This function to make a list of the labels with threshold label
def generateLabelList_threshold(indir):
    """Return scene labels for every '<label>_nothreshold.png' file in indir."""
    suffix = '_nothreshold.png'
    return [name.replace(suffix, '') for name in os.listdir(indir)
            if name.endswith(suffix)]
#This function to make a list of the labels without threshold label
def generateLabelList_sobel(indir):
    """Return labels (file name minus '.png') for Landsat band-8/band-2 images."""
    labels = []
    for name in os.listdir(indir):
        # Same test as comparing the last six characters to the two suffixes.
        if name.endswith(('B8.png', 'B2.png')):
            labels.append(name[:-4])
    return labels
#############################################################################################
# These functions are to create a list of indices used to find the line label
# get glacier names
def getGlacierList(labelList, glaciersFolder):
    """Map each scene ID in labelList to its glacier name.

    Looks the IDs up in <glaciersFolder>/Scene_Glacier_Dictionary.csv
    (rows of 'sceneID,glacierName').  Scenes with no dictionary entry are
    silently skipped, so the result may be shorter than labelList.
    """
    dictionary_path = os.path.join(glaciersFolder, 'Scene_Glacier_Dictionary.csv')
    with open(dictionary_path, 'r') as handle:
        rows = handle.read().split('\n')
    glacierList = []
    for sceneID in labelList:
        for row in rows:
            fields = row.split(',')
            if fields[0] == sceneID:
                glacierList.append(fields[1])
    return glacierList
def obtainSceneCornersProjection(sceneID, glaciersFolder, glacier):
    """Look up a scene's corner coordinates and EPSG code.

    Reads <glaciersFolder>/<glacier>/'<glacier> Image Data.csv' and returns
    (corners, projection) from the first row whose second column (minus its
    4-character extension) equals sceneID: corners are the eight floats in
    columns 4-11, projection is the int EPSG code in column 2.  Returns None
    implicitly when no row matches.
    """
    metadata_path = os.path.join(glaciersFolder, glacier, '%s Image Data.csv' % glacier)
    with open(metadata_path, 'r') as handle:
        rows = handle.read().split('\n')
    for row in rows:
        fields = row.split(',')
        if fields[1][:-4] == sceneID:
            projection = int(fields[2])
            corners = [float(fields[i]) for i in range(4, 12)]
            return (corners, projection)
def geoCoordsToImagePixels(coords, corners, projection, imageSize):
    """Convert EPSG:3413 geographic coordinates to image-pixel coordinates.

    Fits a bilinear mapping fx(x, y) = a*x + b*y + c*x*y + d from the four
    scene corners to the pixel corners, applies it to every point, and keeps
    only points strictly inside the image bounds.
    """
    coords = reprojectPolygon(coords, 3413, projection)
    # Design matrix: one row per corner
    # (lower-left, lower-right, upper-right, upper-left).
    A = np.array([[corners[0], corners[1], corners[0] * corners[1], 1],
                  [corners[2], corners[3], corners[2] * corners[3], 1],
                  [corners[4], corners[5], corners[4] * corners[5], 1],
                  [corners[6], corners[7], corners[6] * corners[7], 1]])
    bx = np.array([[0], [imageSize[0]], [imageSize[0]], [0]])
    by = np.array([[imageSize[1]], [imageSize[1]], [0], [0]])
    Cx = np.dot(np.linalg.inv(A), bx)
    Cy = np.dot(np.linalg.inv(A), by)
    imagePixels = []
    for coord in coords:
        pixelX = Cx[0] * coord[0] + Cx[1] * coord[1] + Cx[2] * coord[0] * coord[1] + Cx[3]
        pixelY = Cy[0] * coord[0] + Cy[1] * coord[1] + Cy[2] * coord[0] * coord[1] + Cy[3]
        # Keep only pixels strictly inside the image frame.
        if 0 < pixelX < imageSize[0] - 1 and 0 < pixelY < imageSize[1] - 1:
            imagePixels.append([round(pixelX), round(pixelY)])
    return np.array(imagePixels)
def reprojectPolygon(polygon, inputCRS, outputCRS):
    """Reproject a sequence of (x, y) points between two EPSG systems.

    Parameters
    ----------
    polygon : iterable of (x, y) pairs expressed in EPSG:<inputCRS>
    inputCRS, outputCRS : int EPSG codes

    Returns
    -------
    numpy.ndarray of shape (N, 2) with the reprojected points.

    Fix: removed a leftover hard-coded test transform whose result was
    immediately discarded — it performed a needless conversion on every call.
    """
    inProj = Proj(init='epsg:' + str(inputCRS))
    outProj = Proj(init='epsg:' + str(outputCRS))
    outputPolygon = []
    for point in polygon:
        x2, y2 = transform(inProj, outProj, point[0], point[1])
        outputPolygon.append([x2, y2])
    return np.array(outputPolygon)
def seriesToNPoints(series, N):
    """Resample a polyline to N points evenly spaced by arc length.

    The first and last input points are preserved; the N-2 interior points
    are placed at equal along-path distance intervals.
    """
    # Total arc length of the input polyline.
    total = 0
    for s in range(len(series[:, 0]) - 1):
        total += ((series[s, 0] - series[s + 1, 0]) ** 2 +
                  (series[s, 1] - series[s + 1, 1]) ** 2) ** 0.5
    interval = total / (N - 1)
    resampled = series[0, :]
    seg = 0
    p1 = series[seg, :]
    p2 = series[seg + 1, :]
    for _ in range(N - 2):
        accrued = 0
        while accrued < interval:
            segLength = ((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2) ** 0.5
            if segLength < interval - accrued:
                # Consume the rest of this segment and advance to the next.
                accrued += segLength
                seg += 1
                p1 = series[seg, :]
                p2 = series[seg + 1, :]
            else:
                # The next sample lies inside this segment: interpolate it.
                remaining = interval - accrued
                newX = p1[0] + (remaining / segLength) * (p2[0] - p1[0])
                newY = p1[1] + (remaining / segLength) * (p2[1] - p1[1])
                accrued = interval + 1  # terminate the while loop
                resampled = np.vstack([resampled, np.array([newX, newY])])
                p1 = np.array([newX, newY])
    resampled = np.vstack([resampled, series[-1, :]])
    return resampled
def fjordBoundaryIndices(glaciersFolder, glacier, corners, projection, imageSize):
    """Load both fjord boundary traces, resample each to 1000 points, and
    convert them to image-pixel coordinates.

    Returns (boundary1pixels, boundary2pixels).
    """
    boundaryDir = os.path.join(glaciersFolder, glacier, 'Fjord Boundaries')
    pixelBoundaries = []
    for side in (1, 2):
        path = os.path.join(boundaryDir, '%s Boundary %d V2.csv' % (glacier, side))
        trace = np.genfromtxt(path, delimiter=',')
        trace = seriesToNPoints(trace, 1000)
        pixelBoundaries.append(geoCoordsToImagePixels(trace, corners, projection, imageSize))
    return (pixelBoundaries[0], pixelBoundaries[1])
def plotImageWithBoundaries(image, boundary1pixels, boundary2pixels):
    """Display the image as a filled contour with both fjord boundaries overlaid."""
    plt.contourf(np.asarray(image))
    for boundary in (boundary1pixels, boundary2pixels):
        plt.plot(boundary[:, 0], boundary[:, 1], 'w-')
    plt.gca().set_aspect('equal')
    plt.show()
def testBoundaryIndices():
    """Return synthetic fjord-boundary pixel columns (x=40 and x=160).

    Useful for exercising the least-cost-path search without real boundaries.
    """
    ys = range(30, 180, 10)
    side1 = np.array([[40, j] for j in ys])
    side2 = np.array([[160, j] for j in ys])
    return (side1, side2)
#############################################################################################
# These functions are to find the most probable front based on the NN solution
def plotImageWithSolutionAndEndpoints(image, solution, startPoint, endPoint, boundary1pixels, boundary2pixels):
    """Display the image with the candidate front, its endpoints, and both
    fjord boundaries overlaid."""
    contours = plt.contourf(np.asarray(image))
    plt.colorbar(contours)
    for point in (startPoint, endPoint):
        plt.plot(point[0], point[1], 'w.', markersize=20)
    for boundary in (boundary1pixels, boundary2pixels):
        plt.plot(boundary[:, 0], boundary[:, 1], 'w-')
    plt.plot(solution[:, 0], solution[:, 1], 'g-')
    plt.gca().set_aspect('equal')
    plt.show()
def leastCostSolution(imgArr, boundarySide1indices, boundarySide2indices, step):
    """Find the least-cost path across imgArr between the two fjord boundaries.

    Every `step`-th point on each boundary is tried as a start/end pair with
    skimage's route_through_array; the cheapest path found is returned as an
    array of (x, y) pixel pairs (or [] when nothing beats the initial weight).
    """
    bestWeight = 1e22
    bestPath = []
    for b1 in range(len(boundarySide1indices)):
        if b1 % step != 0:
            continue
        start = np.array(boundarySide1indices[b1], dtype=int)
        for b2 in range(len(boundarySide2indices)):
            if b2 % step != 0:
                continue
            end = np.array(boundarySide2indices[b2], dtype=int)
            # route_through_array works in (row, col); swap to (x, y) after.
            path, pathWeight = route_through_array(imgArr, (start[1], start[0]),
                                                   (end[1], end[0]), geometric=True,
                                                   fully_connected=True)
            rowsCols = np.array(path)
            path = np.hstack([np.reshape(rowsCols[:, 1], (np.shape(rowsCols)[0], 1)),
                              np.reshape(rowsCols[:, 0], (np.shape(rowsCols)[0], 1))])
            if pathWeight < bestWeight:
                bestWeight = pathWeight
                bestPath = path
    return bestPath
def plotImageWithSolution(image, solution):
    """Display the image with the calving-front solution drawn in white."""
    plt.contourf(np.asarray(image))
    plt.plot(solution[:, 0], solution[:, 1], 'w-')
    plt.gca().set_aspect('equal')
    plt.show()
def outputSolutionIndicesPng(imgArr, solutionIndices, outputFolder, label):
    """Rasterize the front solution as a PNG: black 3x3 dots on a white canvas.

    The image is mirrored left-right before saving and written to
    <outputFolder>/<label>_Solution.png.

    Fix: the original bounds check tested solutionIndices[i,1] > 1 twice and
    never tested solutionIndices[i,0] > 1, so x values of 0 or 1 let the 3x3
    stamp wrap around the image's left edge via negative indexing.
    """
    solutionArr = 255 * np.ones_like(imgArr)
    rows = np.shape(solutionArr)[0]
    cols = np.shape(solutionArr)[1]
    for i in range(len(solutionIndices)):
        x = solutionIndices[i, 0]
        y = solutionIndices[i, 1]
        if 1 < y < rows - 1 and 1 < x < cols - 1:
            # Stamp a 3x3 black square centred on the solution pixel.
            solutionArr[y - 1:y + 2, x - 1:x + 2] = 0
    outIm = Image.fromarray(solutionArr)
    outIm = outIm.transpose(Image.FLIP_LEFT_RIGHT)
    outIm.save(outputFolder + '/' + label + '_Solution.png')
#############################################################################################
# These functions are to construct a shapefile from the geometric coordinates
def imagePixelsToGeoCoords(pixels, corners, projection, imageSize):
    """Convert image-pixel coordinates to EPSG:3413 geographic coordinates.

    Inverse of geoCoordsToImagePixels: fits fx(x, y) = a*x + b*y + c*x*y + d
    from the pixel corners to the scene corners, applies it to every pixel,
    then reprojects from the scene CRS into EPSG:3413.
    """
    # Design matrix over the pixel-space corners
    # (lower-left, lower-right, upper-right, upper-left).
    A = np.array([[0, 0, 0 * 0, 1],
                  [imageSize[0], 0, imageSize[0] * 0, 1],
                  [imageSize[0], imageSize[1], imageSize[0] * imageSize[1], 1],
                  [0, imageSize[1], 0 * imageSize[1], 1]])
    bx = np.array([[corners[0]], [corners[2]], [corners[4]], [corners[6]]])
    by = np.array([[corners[1]], [corners[3]], [corners[5]], [corners[7]]])
    Cx = np.dot(np.linalg.inv(A), bx)
    Cy = np.dot(np.linalg.inv(A), by)
    geoCoords = []
    for pixel in pixels:
        geoX = Cx[0] * pixel[0] + Cx[1] * pixel[1] + Cx[2] * pixel[0] * pixel[1] + Cx[3]
        geoY = Cy[0] * pixel[0] + Cy[1] * pixel[1] + Cy[2] * pixel[0] * pixel[1] + Cy[3]
        geoCoords.append([round(geoX), round(geoY)])
    return np.array(reprojectPolygon(geoCoords, projection, 3413))
def getPrj(epsg):
    """Fetch the WKT projection definition for an EPSG code from spatialreference.org.

    Returns the "pretty" WKT collapsed onto a single line (spaces and newlines
    removed), suitable for writing into a shapefile .prj sidecar file.

    Fix: urllib.urlopen is Python 2 only; use a py2/py3-compatible urlopen and
    decode the Python 3 bytes response before the string replacements.
    """
    try:
        from urllib.request import urlopen  # Python 3
    except ImportError:
        from urllib import urlopen  # Python 2
    url = "http://spatialreference.org/ref/epsg/{0}/prettywkt/".format(str(epsg))
    wkt = urlopen(url).read()
    if isinstance(wkt, bytes):  # Python 3 returns bytes
        wkt = wkt.decode('utf-8')
    return wkt.replace(" ", "").replace("\n", "")
def solutionToShapefile(glacierList, labels, frontIndices, shapefileOutputFolder, cornersList, projectionList, imageSizeList):
    """Write every front solution into one polyline shapefile (EPSG:3413).

    Uses the pyshp 1.x API (Writer() with no shape type, .save()).  A .prj
    sidecar file is generated from spatialreference.org via getPrj.
    """
    outputFile = 'Front Profiles'
    writer = shapefile.Writer()
    writer.field('Glacier', 'C')
    writer.field('Scene', 'C')
    for ll in range(len(labels)):
        front = imagePixelsToGeoCoords(frontIndices[ll], cornersList[ll],
                                       projectionList[ll], imageSizeList[ll])
        writer.record(glacierList[ll], labels[ll])
        writer.line(parts=[[[front[c, 0], front[c, 1]] for c in range(len(front))]])
    writer.save(shapefileOutputFolder + '/' + outputFile)
    # Companion .prj with the polar-stereographic (EPSG:3413) definition.
    with open(os.path.join(shapefileOutputFolder, outputFile + ".prj"), "w") as prj:
        prj.write(getPrj(3413))
def solutionToCSV(glacierList, labels, frontIndices, csvOutputFolder, cornersList, projectionList, imageSizeList):
    """Save each front solution as '<glacier> <scene> Profile.csv' in
    EPSG:3413 geographic coordinates."""
    for ll in range(len(labels)):
        front = imagePixelsToGeoCoords(frontIndices[ll], cornersList[ll],
                                       projectionList[ll], imageSizeList[ll])
        outputFile = glacierList[ll] + ' ' + labels[ll] + ' Profile.csv'
        rows = np.array([[front[c, 0], front[c, 1]] for c in range(len(front))])
        np.savetxt(csvOutputFolder + '/' + outputFile, rows, delimiter=',')
def pixelSolutionToCSV(glacierList, labels, frontIndices, pixelOutputFolder, cornersList, projectionList, imageSizeList):
    """Save each front solution as '<glacier> <scene> Pixels.csv' in raw
    pixel coordinates.

    cornersList, projectionList and imageSizeList are accepted only for
    signature symmetry with solutionToCSV; they are not used here.
    """
    for ll in range(len(labels)):
        front = frontIndices[ll]
        outputFile = glacierList[ll] + ' ' + labels[ll] + ' Pixels.csv'
        rows = np.array([[front[c, 0], front[c, 1]] for c in range(len(front))])
        np.savetxt(pixelOutputFolder + '/' + outputFile, rows, delimiter=',')
#############################################################################################
# All of the functions are run here
#-- main function to get user input and make training data
def main():
#-- Read the system arguments listed after the program
long_options = ['subdir=','method=','step=','indir=']
optlist,arglist = getopt.getopt(sys.argv[1:],'=D:M:S:I:',long_options)
subdir= 'all_data2_test'
method = ''
step = 50
indir = ''
for opt, arg in optlist:
if opt in ('-D','--subdir'):
subdir = arg
elif opt in ('-M','--method'):
method = arg
elif opt in ('-S','--step'):
step = np.int(arg)
elif opt in ('-I','--indir'):
indir = os.path.expanduser(arg)
#-- directory setup
#- current directory
current_dir = os.path.dirname(os.path.realpath(__file__))
headDirectory = os.path.join(current_dir,'..','FrontLearning_data')
glaciersFolder=os.path.join(headDirectory,'Glaciers')
#-- if user input not given, set label folder
if indir == '':
#-- first create ourdifr directory
outdir = os.path.join(headDirectory,'Results',subdir,method)
#-- make input directory
indir= os.path.join(outdir,method)
#-- if input directory is given, then set the method based on that
else:
method = os.path.basename(indir)
if method=='':
sys.exit("Please do not put '/' at the end of indir.")
#-- then make output directory based on method
outdir = os.path.join(headDirectory,'Results',subdir,method)
if (not os.path.isdir(outdir)):
os.mkdir(outdir)
print('input directory:%s'%indir)
print('method:%s'%method)
postProcessedOutputFolder = os.path.join(outdir,method+' Post-Processed '+str(step))
csvOutputFolder = os.path.join(outdir,method+' Geo CSVs '+str(step))
pixelOutputFolder = os.path.join(outdir,method+' Pixel CSVs '+str(step))
shapefileOutputFolder = os.path.join(outdir,method+' Shapefile '+str(step))
#-- make output folders
if (not os.path.isdir(postProcessedOutputFolder)):
os.mkdir(postProcessedOutputFolder)
if (not os.path.isdir(csvOutputFolder)):
os.mkdir(csvOutputFolder)
if (not os.path.isdir(pixelOutputFolder)):
os.mkdir(pixelOutputFolder)
if (not os.path.isdir(shapefileOutputFolder)):
os.mkdir(shapefileOutputFolder)
if method == 'Sobel':
labelList=generateLabelList_sobel(indir)
else:
labelList=generateLabelList_threshold(indir)
glacierList = getGlacierList(labelList,glaciersFolder)
print(len(labelList))
print(len(glacierList))
frontIndicesList=[]
cornersList=[]
projectionList=[]
imageSizeList=[]
for ind,label in enumerate(labelList):
glacier = glacierList[ind]
print('%i of %i'%(ind+1,len(labelList)))
print('Working on label '+label)
print('Glacier: '+glacier)
if ('sobel' in method) or ('Sobel' in method):
im = Image.open(indir + '/' + label + '.png').transpose(Image.FLIP_LEFT_RIGHT)
else:
im=Image.open(indir+'/'+label+'_nothreshold.png').transpose(Image.FLIP_LEFT_RIGHT)
corners,projection=obtainSceneCornersProjection(label,glaciersFolder,glacier)
cornersList.append(corners)
projectionList.append(projection)
imageSizeList.append(im.size)
boundary1pixels,boundary2pixels=fjordBoundaryIndices(glaciersFolder,glacier,corners,projection,im.size)
# plotImageWithBoundaries(im,boundary1pixels,boundary2pixels)
solutionIndices = leastCostSolution(im,boundary1pixels,boundary2pixels,step)
frontIndicesList.append(solutionIndices)
outputSolutionIndicesPng(im,solutionIndices,postProcessedOutputFolder,label)
# plotImageWithSolution(im,solutionIndices)
solutionToCSV(glacierList, labelList, frontIndicesList, csvOutputFolder, cornersList, projectionList,imageSizeList)
pixelSolutionToCSV(glacierList, labelList, frontIndicesList, pixelOutputFolder, cornersList, projectionList, imageSizeList)
solutionToShapefile(glacierList, labelList, frontIndicesList, shapefileOutputFolder, cornersList, projectionList, imageSizeList)
if __name__ == '__main__':
main()
| [
"ymohajer@uci.edu"
] | ymohajer@uci.edu |
fcfdfb2bf143fbabd9e7882777ff096eaec7745c | eeee18e2769766c550fb5e0948977a016b48e15a | /Creational/abstract-factory.py | 72d108161a2fa85440dac2ece3f9d6bf79735986 | [] | no_license | MoeinGhbh/DesignPattern | 19aff7bd09f4161d11af2662b1be7962fb692989 | b543a5c4eaf9da1341f95e9c777310d4f25ddeaf | refs/heads/master | 2022-11-18T17:49:56.101880 | 2020-07-22T07:54:23 | 2020-07-22T07:54:23 | 266,117,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,740 | py | """
Abstract Factory
Car => Benz, Bmw => Suv, Coupe
benz suv => gla, glc
bmw suv => x1, x2
benz coupe => cls, E-class
bmw coupe => m2, m4
"""
from abc import ABC,abstractclassmethod
class Car(ABC):
@abstractclassmethod
def call_suv(self):
pass
@abstractclassmethod
def call_coupe(self):
pass
#---------------------------------------------
class Benz(Car):
def call_suv(self):
return Gla()
def call_coupe(self):
return Cls()
#---------------------------------------------
class Bmw(Car):
def call_suv(self):
return X1()
def call_coupe(self):
return M2()
#---------------------------------------------
class SUV(ABC):
@abstractclassmethod
def create_suv(self):
pass
class Coupe(ABC):
@abstractclassmethod
def create_coupe(self):
pass
#------------------------------------------------
# Benz
class Gla(SUV):
def create_suv(self):
print("this is your Gla SUV Benz...")
class Cls(Coupe):
def create_coupe(self):
print("this is your cls coupe Benz...")
#---------------------------------------------------
# BMW
class X1(SUV):
def create_suv(self):
print("this is your X1 SUV BMW .... ")
class M2(Coupe):
def create_coupe(self):
print("this is your me coupe BMW ....")
#------------------------------------------------------
def client_suv_order(order):
suv = order.call_suv()
suv.create_suv()
def client_coupe_order(order):
coupe= order.call_coupe()
coupe.create_coupe()
#----------------------------------------------------------
client_coupe_order(Benz())
client_coupe_order(Bmw())
client_suv_order(Benz())
client_suv_order(Bmw())
| [
"="
] | = |
99a2478cea3c8d541d34c24dfcb9bc4ca59b0605 | 73b8aba05ee1424f38a8598a9f1305185588075f | /0x04-python-more_data_structures/9-multiply_by_2.py | 6a475a580fe3f50723c6e049968a98f01637a6dd | [] | no_license | nicolasportela/holbertonschool-higher_level_programming | 0d176c0e56f4f703c1e9a98b430fc6120f22f675 | e1537b81f21118456e5cfa0e4ed89520b232adb6 | refs/heads/master | 2023-04-20T21:30:22.693434 | 2021-05-13T01:47:30 | 2021-05-13T01:47:30 | 319,397,633 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | #!/usr/bin/python3
def multiply_by_2(a_dictionary):
new_dic = {}
for k, v in a_dictionary.items():
new_dic[k] = v * 2
return new_dic
| [
"2103@holbertonschool.com"
] | 2103@holbertonschool.com |
5c7a8d7040abb291de25c0a76b697062bba801b0 | f7f482f8039b35ee1efe07d3e373d5b63f335ce1 | /base_structure/models/individual_nalog_numbers.py | cf8279201becc773b47877aefe59e827a39f3d08 | [] | no_license | 3ython/kkmbase | 6fbb9590a1e8ce4b4f2c33eb839062ad4d8b5cdb | b4605ea6b15d9a7281cd768dca4f73a2eb91b108 | refs/heads/master | 2021-01-13T01:27:10.851514 | 2012-10-03T22:03:55 | 2012-10-03T22:03:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | # coding= utf-8
from django.db import models
class Individual_nalog_numbers(models.Model):
inn = models.CharField(u'ИНН',
max_length=255,
unique=True,
blank=True)
def __unicode__(self):
return u'%s' % (self.inn)
class Meta:
app_label = 'base_structure'
verbose_name = u"ИНН"
verbose_name_plural = u"ИНН"
#ordering = ('',)
| [
"3ython@gmail.com"
] | 3ython@gmail.com |
aa2ad8ba0ff14340d3d7d30cd9b8fb24c00f071c | 6820e74ec72ed67f6b84a071cef9cfbc9830ad74 | /plans/tasks.py | 22acd6cb6cb911b7571adefb4585bd699ce306c6 | [
"MIT"
] | permissive | AppforallHQ/f5 | 96c15eaac3d7acc64e48d6741f26d78c9ef0d8cd | 0a85a5516e15d278ce30d1f7f339398831974154 | refs/heads/master | 2020-06-30T17:00:46.646867 | 2016-11-21T11:41:59 | 2016-11-21T11:41:59 | 74,357,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,047 | py | from celery import task
from django.utils import timezone
from datetime import timedelta
import requests
import json
class EndpointNotAvailabe(Exception):
pass
def call_external_endpoint_to_update_status(the_task, action, subscription):
payload = {"uuid": subscription.uuid,
"plan": subscription.plan.pk,
"activate": (action == "activate"),
}
response = requests.put(
subscription.plan.interaction_endpoint_url % payload,
data=json.dumps(payload))
if response.status_code != 200:
e = EndpointNotAvailabe()
raise the_task \
.retry(args=[subscription], exc=e)
else:
return True
@task
def send_invoice_notification(invoice, email_type, **kwargs):
return
import requests
payload = {
"invoice_payment_url": invoice.payment_url,
"email_type": email_type,
"uuid": invoice.subscription.uuid,
"plan": invoice.subscription.plan.pk,
}
mail_body_response = requests.post(
invoice.subscription.plan.mail_endpoint_url % payload,
data=json.dumps(payload))
params = json.loads(mail_body_response.text)
from .actions import send_mail
send_mail(invoice, params, email_type)
@task(default_retry_delay=3*60)
def activate_subscription(subscription, **kwargs):
pass#return call_external_endpoint_to_update_status(activate_subscription, "activate", subscription)
@task(default_retry_delay=3*60)
def deactivate_subscription(subscription, **kwargs):
return call_external_endpoint_to_update_status(deactivate_subscription, "deactivate", subscription)
@task
def send_preinvoice():
from plans.models import Subscription
# FIXME
for subscription in Subscription.objects.filter():
if subscription.due_date < timezone.now() + timedelta(days=subscription.plan.preinvoice_length) \
and subscription.status == Subscription.ACTIVE:
subscription.status = Subscription.PREINVOICE
subscription.full_clean()
subscription.save()
@task
def mark_subscriptions_as_overdue():
from plans.models import Subscription
# FIXME
for subscription in Subscription.objects.filter():
if subscription.due_date < timezone.now() and subscription.status == Subscription.PREINVOICE:
subscription.status = Subscription.OVERDUE
subscription.full_clean()
subscription.save()
@task
def end_gracetime_for_fucking_users():
from plans.models import Subscription
# FIXME
for subscription in Subscription.objects.filter():
if subscription.due_date + timedelta(days=subscription.plan.overdue_length) < timezone.now():
subscription.status = Subscription.DEACTIVE
subscription.full_clean()
subscription.save()
@task
def invalidate_invoices():
from plans.models import Invoice
# FIXME
for invoice in Invoice.objects.filter():
if invoice.expires_at < timezone.now():
invoice.mark_as_invalid()
| [
"hi@appforall.ir"
] | hi@appforall.ir |
2c0be1a3aeba1599588929b8d9baf36785eb76a0 | 4be5afa1f93aba004a2823e5fa196bbfb11f56a4 | /Advanced_Section/advanced_class_views/manage.py | f83fe6ef9b4da680a6400d0bec3a6047aaed518e | [] | no_license | MajaZule/Django-stuff-Udemy | 1b42b9c1f8315eb7dfbaa34f7b6f6ed37f698362 | e2388e23b9785a2d6d13d684265e8d0d6d39fb07 | refs/heads/master | 2020-04-10T12:41:35.782702 | 2019-01-02T19:31:23 | 2019-01-02T19:31:23 | 161,029,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'advanced_class_views.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"44669001+MajaZule@users.noreply.github.com"
] | 44669001+MajaZule@users.noreply.github.com |
6049872fd0845b5d393848dd64e262e4be94813e | 27719e4d31b1b4da628c1abd146132e1d55e9c5e | /bot/bot/settings.py | 35c425ae8fd7c71e9c5eb010e45eef3d123d9740 | [] | no_license | bpiyush/covid-whatsapp-bot | c1f27f1fce090fb8825b1193faab2263c31f2f28 | e1526574c25eff881ba05e858dd73e83f53b688c | refs/heads/main | 2023-04-23T04:43:06.060341 | 2021-05-05T15:53:08 | 2021-05-05T15:53:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,251 | py | """
Django settings for bot project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-2du^q&*u&%0z@&)nh^70^+2!+d@lg7u3^a#%p7p*1dil4%1!)8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['6506ca6b502f.ngrok.io']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bot.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bot.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"piyush@wadhwaniai.org"
] | piyush@wadhwaniai.org |
a7ede928d362fa9da4e4116742bcf7ca4127fe20 | 6b53c31acd42269807d90ef0db3edc9860e98e87 | /Código/funcionarios.py | 56771bda38fe249b919de901b3c2ff0def4c1fdf | [] | no_license | danieldebiasi/cinema-python | f12f81b5f1611389cb08dabfca415c1b20601cf1 | 22d2b1b281ee9d78b5f365f794ceeac9679a0c86 | refs/heads/master | 2021-06-18T11:10:08.258201 | 2017-06-09T02:28:19 | 2017-06-09T02:28:19 | 86,069,255 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 10,628 | py | from tkinter import *
from functools import partial
from tkinter import messagebox
import gestor
import sqlite3
def voltar_click(frame, ctrl):
frame.destroy()
if ctrl==0:
gestor.show_frame()
else:
show_frame()
def registrar(frame, nome, rg, entrada_h, entrada_min, saida_h, saida_min, user, pwd):
if nome.get()=="" or rg.get()=="" or user.get()=="" or pwd.get()=="":
messagebox.showinfo("Erro", "Preencha todas as informações!")
else:
if not all(x.isalpha() or x.isspace() for x in nome.get()) or not rg.get().isdigit():
messagebox.showinfo("Erro", "Nome ou RG inválido!")
else:
conn = sqlite3.connect('dados/database.db')
c = conn.cursor()
c.execute('SELECT * FROM funcionarios WHERE rg=?', (rg.get(),))
result = c.fetchone()
if result is not None:
messagebox.showinfo("Erro", "RG já cadastrado!")
else:
entrada = entrada_h.get()+":"+entrada_min.get()
saida = saida_h.get()+":"+saida_min.get()
c.execute('INSERT INTO funcionarios (nome, rg, entrada, saida) VALUES(?, ?, ?, ?)',
(nome.get().upper(), rg.get(), entrada, saida))
conn.commit()
c.execute('INSERT INTO usuarios (user, password, acesso, rg) VALUES(?, ?, ?, ?)',
(user.get(), pwd.get(), 0, rg.get()))
conn.commit()
messagebox.showinfo("Cadastro de Funcionários", "Funcionário cadastrado com sucesso!")
voltar_click(frame, 1)
def consultar(rg, nome, entrada, saida):
nome["text"] = ""
entrada["text"] =""
saida["text"] = ""
conn = sqlite3.connect('dados/database.db')
c = conn.cursor()
c.execute('SELECT * FROM funcionarios WHERE rg=?', (rg.get(),))
result = c.fetchone()
if result is not None:
nome["text"] = result[0]
entrada["text"] = result[2]
saida["text"] = result[3]
return 1
else:
messagebox.showinfo("Consulta de Funcionários", "Nenhum funcionário encontrado!")
return 0
def deletar(rg, nome, entrada, saida, excluir):
conn = sqlite3.connect('dados/database.db')
c = conn.cursor()
result = messagebox.askyesno("Exclusão de Funcionários", "Confirmar exclusão do funcionário?")
if result:
c.execute('DELETE FROM funcionarios WHERE rg=?', (rg.get(),))
conn.commit()
c.execute('DELETE FROM usuarios WHERE rg=?', (rg.get(),))
conn.commit()
messagebox.showinfo("Exclusão de Funcionários", "Funcionário excluído com sucesso!")
else:
messagebox.showinfo("Exclusão de Funcionários", "Exclusão cancelada")
rg["text"] = ""
nome["text"] = ""
entrada["text"] = ""
saida["text"] = ""
excluir["state"] = DISABLED
def encontrar(rg, nome, entrada, saida, excluir):
if consultar(rg, nome, entrada, saida) == 1:
excluir["state"] = NORMAL
else:
excluir["state"] = DISABLED
def cadastrar_click(frame):
frame.destroy()
action = Tk()
Label(action, text="Cadastrar Funcionário", font=("Arial", 24)).grid(row=0, column=1, columnspan=3, ipadx=30, pady=20)
# Nome
nome_label = Label(action, text="Nome:", font=("Arial", 12))
nome_label.grid(row=1, column=0, pady=5, sticky=E)
nome = Entry(action, font=("Arial", 12))
nome.grid(row=1, column=1, columnspan=3, sticky=W + E)
# RG
rg_label = Label(action, text="RG:", font=("Arial", 12))
rg_label.grid(row=2, column=0, pady=(5, 10), sticky=E)
rg = Entry(action, font=("Arial", 12))
rg.grid(row=2, column=1, columnspan=3, sticky=W + E)
# Horário de Entrada
entrada_label = Label(action, text="Entrada:", font=("Arial", 12))
entrada_label.grid(row=8, column=0, pady=5, sticky=E)
entrada_h = Spinbox(action, font=("Arial", 12), from_=0, to=23, format="%02.0f", state="readonly", width=2)
entrada_h.grid(row=8, column=1, sticky=W)
entrada_min = Spinbox(action, font=("Arial", 12), from_=0, to=59, format="%02.0f", state="readonly", width=2)
entrada_min.grid(row=8, column=1)
# Horário de Saída
saida_label = Label(action, text="Saída:", font=("Arial", 12))
saida_label.grid(row=9, column=0, pady=5, sticky=E)
saida_h = Spinbox(action, font=("Arial", 12), from_=0, to=23, format="%02.0f", state="readonly", width=2)
saida_h.grid(row=9, column=1, sticky=W)
saida_min = Spinbox(action, font=("Arial", 12), from_=0, to=59, format="%02.0f", state="readonly", width=2)
saida_min.grid(row=9, column=1)
# Usuario
user_label = Label(action, text="Usuário:", font=("Arial", 12))
user_label.grid(row=10, column=0, pady=5, sticky=E)
user = Entry(action, font=("Arial", 12))
user.grid(row=10, column=1, columnspan=3, sticky=W)
# Senha
pwd_label = Label(action, text="Senha:", font=("Arial", 12))
pwd_label.grid(row=11, column=0, pady=(10, 5), sticky=E)
pwd = Entry(action, font=("Arial", 12))
pwd.grid(row=11, column=1, columnspan=3, sticky=W)
# Botão confirmar
confirmar = Button(action, bg="gray75", text="Confirmar", font=("Arial", 12))
confirmar["command"] = partial(registrar, action, nome, rg, entrada_h, entrada_min, saida_h, saida_min, user, pwd)
confirmar.grid(row=12, column=1, pady=5, sticky=W)
# Botão Voltar
voltar = Button(action, bg="gray75", text="Cancelar", font=("Arial", 12))
voltar["command"] = partial(voltar_click, action, 1)
voltar.grid(row=13, column=1, sticky=W, ipadx=3)
action.title("Gerenciamento de Cinema")
action.geometry("490x380+500+150")
action.iconbitmap(r'icones/icon.ico')
action.mainloop()
def consultar_click(frame):
frame.destroy()
action = Tk()
Label(action, text="Consultar Funcionário", font=("Arial", 24)).grid(row=0, column=1, padx=30, pady=20)
# RG
rg_label = Label(action, text="RG:", font=("Arial", 12))
rg_label.grid(row=1, column=0, pady=5, sticky=E)
rg = Entry(action, font=("Arial", 12))
rg.grid(row=1, column=1, sticky=W+E)
# Nome
nome_label = Label(action, text="Nome:", font=("Arial", 12))
nome_label.grid(row=2, column=0, pady=5, sticky=E)
nome = Label(action, text="", font=("Arial", 12))
nome.grid(row=2, column=1, sticky=W)
# Horário de Entrada
entrada_label = Label(action, text="Entrada:", font=("Arial", 12))
entrada_label.grid(row=3, column=0, pady=5, sticky=E)
entrada = Label(action, text="", font=("Arial", 12))
entrada.grid(row=3, column=1, sticky=W)
# Horário de Saída
saida_label = Label(action, text="Saída:", font=("Arial", 12))
saida_label.grid(row=4, column=0, pady=5, sticky=E)
saida = Label(action, text="", font=("Arial", 12))
saida.grid(row=4, column=1, sticky=W)
# Botão Consultar
consultar_bt = Button(action, bg="gray75", text="Consultar", font=("Arial", 12))
consultar_bt["command"] = partial(consultar, rg, nome, entrada, saida)
consultar_bt.grid(row=5, column=1, pady=10, sticky=W)
#Botão Voltar
voltar = Button(action, bg="gray75", text="Voltar", font=("Arial", 12))
voltar["command"] = partial(voltar_click, action, 1)
voltar.grid(row=6, column=1, sticky=W, ipadx=12)
action.title("Gerenciamento de Cinema")
action.geometry("490x310+500+150")
action.iconbitmap(r'icones/icon.ico')
action.mainloop()
def excluir_click(frame):
frame.destroy()
action = Tk()
Label(action, text="Excluir Funcionário", font=("Arial", 24)).grid(row=0, column=1, padx=35, pady=20)
# RG
rg_label = Label(action, text="RG:", font=("Arial", 12))
rg_label.grid(row=1, column=0, sticky=E)
rg = Entry(action, font=("Arial",12))
rg.grid(row=1, column=1, sticky=W+E)
# Nome
nome_label = Label(action, text="Nome:", font=("Arial", 12))
nome_label.grid(row=2, column=0, sticky=E)
nome = Label(action, text="", font=("Arial", 12))
nome.grid(row=2, column=1, sticky=W)
# Entrada
entrada_label = Label(action, text="Entrada:", font=("Arial", 12))
entrada_label.grid(row=3, column=0, sticky=E)
entrada = Label(action, text="", font=("Arial", 12))
entrada.grid(row=3, column=1, sticky=W)
# Saida
saida_label = Label(action, text="Saida:", font=("Arial", 12))
saida_label.grid(row=4, column=0, sticky=E)
saida = Label(action, text="", font=("Arial", 12))
saida.grid(row=4, column=1, sticky=W)
# Botão Excluir
excluir = Button(action, bg="gray75", text="Excluir", font=("Arial", 12), state=DISABLED)
excluir["command"] = partial(deletar, rg, nome, entrada, saida, excluir)
excluir.grid(row=6, column=1, pady=10, sticky=W, ipadx=6)
# Botão Encontrar
encontrar_bt = Button(action, bg="gray75", text="Encontrar", font=("Arial", 12))
encontrar_bt["command"] = partial(encontrar, rg, nome, entrada, saida, excluir)
encontrar_bt.grid(row=1, column=2, padx=5)
# Botão Voltar
voltar = Button(action, bg="gray75", text="Voltar", font=("Arial", 12))
voltar["command"] = partial(voltar_click, action, 1)
voltar.grid(row=7, column=1, sticky=W, ipadx=8)
action.title("Gerenciamento de Cinema")
action.geometry("520x290+500+150")
action.iconbitmap(r'icones/icon.ico')
action.mainloop()
def show_frame():
frame = Tk()
Label(frame, text="Funcionários", font=("Arial", 24)).grid(row=0, column=0, padx=155, pady=25)
#Botão Cadastrar
cadastrar = Button(frame, text="Cadastrar", font=("Arial", 14))
cadastrar["command"] = partial(cadastrar_click, frame)
cadastrar.grid(row=1, column=0, pady=20, sticky=W+E)
#Botão Consultar
consultar = Button(frame, text="Consultar", font=("Arial", 14))
consultar["command"] = partial(consultar_click, frame)
consultar.grid(row=2, column=0, pady=20, sticky=W+E)
#Botão Excluir
excluir = Button(frame, text="Excluir", font=("Arial", 14))
excluir["command"] = partial(excluir_click, frame)
excluir.grid(row=3, column=0, pady=20, sticky=W+E)
#Botão Voltar
voltar = Button(frame, bg="gray75", text="Voltar", font=("Arial", 14))
voltar["command"] = partial(voltar_click, frame, 0)
voltar.grid(row=4, column=0, pady=15)
#Botão Sair
sair = Button(frame, bg="gray75", text="Sair", font=("Arial", 14), command=frame.destroy)
sair.grid(row=5, column=0, ipadx=8)
frame.title("Gerenciamento de Cinema")
frame.geometry("490x450+500+150")
frame.iconbitmap(r'icones/icon.ico')
frame.mainloop() | [
"daniel.biasi@hotmail.com"
] | daniel.biasi@hotmail.com |
0d687ce06de3d811cf49ed90bc5ddba8c30a58fb | 39a22f938e4fcf83b61dcf71d7f533224612ec45 | /LSTM_BERTweet_model.py | a894aa64e55e2bcca7c46387a75580d14eadcaee | [] | no_license | quocthai9120/W-NUT-2020-Shared-Task-2 | 190e53784b415b603da0f3cd12dea8ab7deeb4f2 | a42deb3d2723ea6239207416f84e6afb4a284cc0 | refs/heads/master | 2022-12-18T14:36:24.763232 | 2020-09-23T08:10:03 | 2020-09-23T08:10:03 | 277,787,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,459 | py | from transformers import RobertaConfig, RobertaModel, BertPreTrainedModel
from torch.nn import CrossEntropyLoss
from torch import nn
from typing import Tuple
import torch
class BERTweetForClassification(BertPreTrainedModel):
base_model_prefix = "roberta"
def __init__(self):
self.num_labels: int = 2
config: RobertaConfig = RobertaConfig.from_pretrained(
"./BERTweet_base_transformers/config.json",
output_hidden_states=True,
)
super().__init__(config)
self.model: RobertaModel = RobertaModel.from_pretrained(
"./BERTweet_base_transformers/model.bin",
config=config
)
self.lstm_layer = nn.LSTM(input_size=768,
hidden_size=256,
num_layers=2,
bias=True,
bidirectional=True
)
self.dense = nn.Linear(in_features=256,
out_features=128,
)
self.dropout = nn.Dropout(p=0.10)
self.classifier = nn.Linear(in_features=128,
out_features=self.num_labels,
)
def forward(
self,
input_ids=None,
attention_mask=None,
labels=None,
):
outputs = self.model(
input_ids,
attention_mask=attention_mask,
)
# Take <CLS> token for Native Layer Norm Backward
hidden_states: Tuple[torch.tensor] = outputs[2]
sequence_output: torch.tensor = hidden_states[-1][:, 0, :]
# second_to_last_sequence_output: torch.tensor = hidden_states[-2][:, 0, :]
# third_to_last_sequence_output: torch.tensor = hidden_states[-3][:, 0, :]
# fourth_to_last_sequence_output: torch.tensor = hidden_states[-3][:, 0, :]
sequence_output = self.lstm_layer(sequence_output)
sequence_output = self.dense(sequence_output)
sequence_output = self.dropout(sequence_output)
logits: torch.tensor = self.classifier(sequence_output)
outputs = (logits,)
if labels is not None:
loss_function = CrossEntropyLoss()
loss = loss_function(
logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # loss, logits
| [
"quocthai9120@gmail.com"
] | quocthai9120@gmail.com |
753271955f78deae3afbada6c0d93276ade8e340 | 03bca281c8bb3ba69c3a01252cc7c9e35cd675bd | /django/DCC/dccApp/migrations/0001_initial.py | 63c611642310e159928a36aac3c2066355be6090 | [] | no_license | satish15625/pythonwork | 380fef04170064aef8aeb919a4e30f65db9a097f | 12d776152689a84f1560d08f35987f8ca4ea3fb0 | refs/heads/master | 2023-07-07T15:12:48.355226 | 2021-08-13T06:33:13 | 2021-08-13T06:33:13 | 374,058,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | # Generated by Django 3.0.7 on 2020-12-15 12:25
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='UploadImages',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('image_Img', models.ImageField(upload_to='images/')),
],
),
]
| [
"kumarsatish@dryicelabs.com"
] | kumarsatish@dryicelabs.com |
f589b7d4729e6a65c709ff5e51ff8f0732879fcd | 68c90ef853ea415a3c43ca10ebc9c23656907e10 | /challenge/07_rotate.py | 0c26a55897778a522911b19846bd76b46ba545db | [] | no_license | NJ-zero/LeetCode_Answer | 73889f46f4cd0c08f60a1e556c29114495062b2b | 1040b5dbbe509abe42df848bc34dd1626d7a05fb | refs/heads/master | 2021-08-06T04:33:39.890568 | 2020-04-14T09:59:39 | 2020-04-14T09:59:39 | 145,671,777 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,484 | py | # coding=utf-8
# @Time : 2020/4/7 8:06 下午
# @Author : 'Shichao-Dong'
'''
给你一幅由 N × N 矩阵表示的图像,其中每个像素的大小为 4 字节。请你设计一种算法,将图像旋转 90 度。
不占用额外内存空间能否做到?
示例 1:
给定 matrix =
[
[1,2,3],
[4,5,6],
[7,8,9]
],
原地旋转输入矩阵,使其变为:
[
[7,4,1],
[8,5,2],
[9,6,3]
]
示例 2:
给定 matrix =
[
[ 5, 1, 9,11],
[ 2, 4, 8,10],
[13, 3, 6, 7],
[15,14,12,16]
],
原地旋转输入矩阵,使其变为:
[
[15,13, 2, 5],
[14, 3, 4, 1],
[12, 6, 8, 9],
[16, 7,10,11]
]
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/rotate-matrix-lcci
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
思路:
1.延对角线 交换
在反转每一个子数组
2.matrix[i][j] ---> new_matrix[j][n-i-1]
'''
class Solution(object):
def rotate(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: None Do not return anything, modify matrix in-place instead.
"""
n = len(matrix)
new_matrix=[[]]*n
for i in range(n):
new_matrix[i] = [ _ for _ in range(n)]
# new_matrix = [[ _ for _ in range(n)]]
for i in range(n):
for j in range(n):
new_matrix[j][n-i-1] = matrix[i][j]
return (new_matrix)
s=Solution()
s.rotate([[1,2,3],[4,5,6],[7,8,9]])
| [
"dongshichao.1224@bytedance.com"
] | dongshichao.1224@bytedance.com |
471ba3b26b26a880e066cecc208d75505bbd726f | af96ca5ce99350bce9fb52203c7db2e997a90c0e | /ledgbook/views.py | 82999d03ca653912e4294f592f29e3948acb4516 | [] | no_license | masima305/mySTOKA | f725447f18baa7f2ce1a13e0d800e9a6fed09d2d | f72275eeb6ed5eefba0d7cda5f46f396e8bff1e1 | refs/heads/master | 2023-04-13T05:24:03.420991 | 2021-04-09T07:26:42 | 2021-04-09T07:26:42 | 350,264,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,322 | py | # selenium code
from django.core import serializers
from django.db.models import FloatField
from django.db.models import Max
from django.shortcuts import render, get_object_or_404
from django.shortcuts import redirect
from django.db.models.functions import Substr, Cast
from django.http import HttpResponse
import simplejson as json
from .forms import PostForm
from .stoka_scrapper import *
from .stoka_insert_tool import *
from datetime import datetime
# Create your views here.
#====================================================================================================
#---------------------------------------------- BASE ------------------------------------------------
#====================================================================================================
# 사이드바 페이지
def sidebar(request):
return render(request, "ledgbook/ledgbook_list.html")
# 베이스 페이지
def base(request):
return render(request, 'ledgbook/base.html')
#====================================================================================================
#---------------------------------------------- LEDGBOOK --------------------------------------------
#====================================================================================================
# 개인수익률 설정 변경
def savePersonalSetting_update(request):
form = json.loads(request.body)
personalSetting = PersonalSetting.objects.get(user=request.user)
personalSetting.setting_add_deposit = form['setting_add_deposit']
personalSetting.setting_change_rate = form['setting_change_rate']
personalSetting.save()
context = "수익률 설정이 정상적으로 변경되었습니다."
return HttpResponse(json.dumps(context), content_type="application/json")
# 수익설계 페이지
def ledgbook_rich(request):
    """Render the profit-planning page.

    Builds the template context: every monthly ledger row, whether the most
    recent month still needs closing, and the defaults (carried-over balance,
    next year/month) used when registering a new month.
    """
    # Current date (original TODO: factor this out as a shared helper).
    now = datetime.now()
    now_year = now.year
    now_month = now.month
    # All monthly rows, oldest first.
    # NOTE(review): not filtered by request.user — every user sees every
    # row; confirm whether that is intended.
    bookmains = LedgbookMain.objects.filter().order_by('id')
    # Work out the close-state of the most recent row.
    if bookmains.count() == 0:
        # No rows yet: this is the very first entry.
        last_finished_yn = "S" # S for Start
    else:
        # NOTE(review): `and` binds tighter than `or`, so the finished_yn
        # check only guards the month comparison — verify the intended
        # grouping of this condition.
        if ( int(now_year) > int(bookmains[bookmains.count()-1].year)
                or int(now_month) > int(bookmains[bookmains.count()-1].month)
                and bookmains[bookmains.count() - 1].finished_yn != "Y" ) :
            last_finished_yn = "F" # F for need to finish
        else:
            last_finished_yn = bookmains[bookmains.count() - 1].finished_yn # else = Y or N
    # Defaults needed when registering a new month.
    if bookmains.count() > 0:
        last_prvBalance = bookmains[bookmains.count() - 1].cur_balance
        last_month = int(bookmains[bookmains.count() - 1].month)+1
        last_year = int(bookmains[bookmains.count() - 1].year)
    else :
        last_prvBalance = 0
        last_month = 0
        last_year = 0
    # Roll over December: year +1, month back to 1.
    if(last_month>12) :
        last_month = 1
        last_year += 1
    # Personal settings (may not exist yet for this user).
    try :
        personalSetting = PersonalSetting.objects.get(user=request.user)
    except Exception :
        personalSetting = []
    # statistics (placeholder in original)
    # timestamp (placeholder in original)
    # ----- send_data -------
    send_data = {
        'bookmains': bookmains
        , 'last_finished_yn': last_finished_yn
        , 'last_prvBalance' : last_prvBalance
        , 'last_month' : ""
        , 'last_year' : ""
        , 'personalSetting' : personalSetting
        , 'total_year' : last_year
        , 'total_month' : last_month
    }
    # Only expose the next year/month when the latest row is closed.
    if last_finished_yn == "Y":
        send_data['last_month'] = last_month
        send_data['last_year'] = last_year
    return render(request, 'ledgbook/ledgbook_rich.html', send_data)
# 수익현황 신규등록
def saveledg_new(request):
    """Create a new monthly ledger row from an AJAX POST JSON body.

    Always responds with JSON "new".
    NOTE(review): a non-AJAX POST leaves ``form == ""`` and then crashes on
    ``form['year']``; a GET skips creation but still answers "new" — verify
    both are acceptable.
    """
    if request.method == "POST":
        # TODO (translated): on update, make sure the record belonging to
        # the matching user id is the one updated.
        form = ""
        if request.is_ajax():
            if request.method == 'POST':
                # form = json.loads(request.body.decode("utf-8"))
                form = json.loads(request.body)
        post = LedgbookMain(
            year=form['year']
            , month=form['month']
            , degree=int(form['degree'])
            , user=request.user
            , prv_balance=form['prv_balance']
            , add_deposit=form['add_deposit']
            , revenue=form['revenue']
            , change=form['change']
            , change_rate=form['change_rate']
            , trgt_add_deposit=form['trgt_add_deposit']
            , trgt_change_rate=form['trgt_change_rate']
            , trgt_revenue=form['trgt_revenue']
            , trgt_cur_balance=form['trgt_cur_balance']
            , cur_balance=form['cur_balance']
            , achievement_rate=form['achievement_rate']
            , finished_yn=form['finished_yn']
        )
        post.save()
    context = "new"
    return HttpResponse(json.dumps(context), content_type="application/json")
# 수익현황 기존 업데이트
def saveledg_update(request):
    """Update an existing monthly ledger row (matched by year/month/degree/user).

    The row's ``degree`` (revision counter) is incremented on each update.
    Always responds with JSON "update".
    NOTE(review): ``intdegree`` is computed but never used; a non-AJAX POST
    leaves ``form == ""`` and crashes on ``form.get`` — verify.
    """
    if request.method == "POST":
        # TODO (translated): on update, make sure the record belonging to
        # the matching user id is the one updated.
        form = ""
        if request.is_ajax():
            if request.method == 'POST':
                #form = json.loads(request.body.decode("utf-8"))
                form = json.loads(request.body)
        intdegree = int(form.get('degree'))
        post = LedgbookMain.objects.get(year=form.get('year'), month=form.get('month'), degree=form.get('degree'), user_id=request.user)
        if post:
            post.degree = int(form['degree'])+1
            post.add_deposit = form['add_deposit']
            post.revenue = form['revenue']
            post.change = form['change']
            post.change_rate = form['change_rate']
            post.trgt_add_deposit = form['trgt_add_deposit']
            post.trgt_change_rate = form['trgt_change_rate']
            post.trgt_revenue = form['trgt_revenue']
            post.trgt_cur_balance = form['trgt_cur_balance']
            post.cur_balance = form['cur_balance']
            post.achievement_rate = form['achievement_rate']
            post.finished_yn = form['finished_yn']
            post.save()
    context = "update"
    return HttpResponse(json.dumps(context), content_type="application/json")
def ledgbook_list(request):
    """Render the list of all ledgbook posts."""
    # Original note kept: study querysets — a published-date filter like the
    # commented line below may be wanted here.
    # posts = LedgbookPost.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
    posts = LedgbookPost.objects.all()
    return render(request, 'ledgbook/ledgbook_list.html', {'posts': posts})
def post_detail(request, pk):
    """Render a single post's detail page; 404 when *pk* does not exist."""
    post = get_object_or_404(LedgbookPost, pk=pk)
    return render(request, 'ledgbook/ledgbook_detail.html', {'post': post})
def post_new(request):
    """Create a new ledgbook post.

    On a valid POST, stamps the author and publish time, saves and redirects
    to the detail page.  On GET (or an invalid POST falling through),
    renders the edit form.
    """
    if request.method == "POST":
        form = PostForm(request.POST)
        if form.is_valid():
            # commit=False so author/published_date can be filled in first.
            post = form.save(commit=False)
            post.author = request.user
            # NOTE(review): `timezone` is not imported explicitly in this
            # file — presumably provided by one of the star imports; verify.
            post.published_date = timezone.now()
            post.save()
            return redirect('ledgbook_detail', pk=post.pk)
    else:
        # Bug fix: the original assigned the PostForm *class* (no parentheses)
        # instead of an instance, so the template received an uninstantiated
        # form on GET.
        form = PostForm()
    return render(request, 'ledgbook/post_edit.html', {'form': form})
#====================================================================================================
#---------------------------------------------- STOKA -----------------------------------------------
#====================================================================================================
#---------------------------------------------- pages -----------------------------------------------
# 메인페이지
def stock_stat_cathe(request):
    """Render today's stock statistics page, ordered by PER then estimated PER."""
    now = datetime.now().strftime('%Y-%m-%d')
    # Condition 1 (translated): rows where both PER fields are 'N/A' carry
    # no information, so exclude them.
    stock_info_list = \
        StockDailyInfo.objects.filter(stock_updt = now ).exclude(stock_suspct_per = 'N/A', stock_per = 'N/A')
    # Cast the string PER columns to floats so ordering is numeric.
    stock_info_list = stock_info_list.annotate(stock_per2=Cast('stock_per',FloatField()))\
        .annotate(stock_suspct_per2=Cast('stock_suspct_per',FloatField()))
    stock_info_list = stock_info_list.order_by('stock_per2','stock_suspct_per2')
    send_data = {
        'stock_info_list' : stock_info_list
    }
    return render(request, 'ledgbook/stoka_stock_stat_cathe.html',send_data)
# 환경설정
def stoka_setting(request):
    """Render the settings page listing the user's active stock categories."""
    cathe_list = StockCathe.objects.filter(user=request.user, use_yn="Y").order_by('cathe_num')
    send_data = {
        'cathe_list': cathe_list
    }
    return render(request, 'ledgbook/stoka_setting.html',send_data)
# 주식들의 싱크를 맞춰서 DB에 저장해준다.
def resync_stocks(request):
    """Refresh the daily stock table from the scraper and answer with JSON."""
    # Fetch the list of listed companies (translated; done via HTTP request).
    tool_stock_insert_request()
    context = "새로고침이 완료되었습니다."
    stock_info_list = stoka_scrap()
    StockDailyInfo.objects.all().delete()
    now = datetime.now().strftime('%Y-%m-%d')
    print(now) # format 2015-04-19
    # Deliberately update one row at a time (translated).
    # NOTE(review): all rows were just deleted above, so this filter can
    # only match if something inserted concurrently — the update branch
    # looks like dead code; verify.
    for stock in stock_info_list:
        saved_stock_info = StockDailyInfo.objects.filter(stock_num=stock["stock_num"], stock_updt=now)
        if saved_stock_info:
            for tmp_info in saved_stock_info:
                tmp_info.stock_name = stock["stock_name"] # stock name
                tmp_info.stock_price = stock["stock_price"] # stock price
                tmp_info.stock_month_date = stock["stock_month_date"] # PER reference date
                tmp_info.stock_per = stock["stock_per"] # PER
                tmp_info.stock_suspct_per = stock["stock_suspct_per"] # estimated PER
                tmp_info.last_updt = datetime.now() # last update timestamp
                tmp_info.save()
        else:
            saved_stock_info = StockDailyInfo(
                stock_name = stock["stock_name"] # stock name
                , stock_num = stock["stock_num"] # ticker number
                , stock_price = stock["stock_price"] # stock price
                , stock_month_date = stock["stock_month_date"] # PER reference date
                , stock_per = stock["stock_per"] # PER
                , stock_suspct_per = stock["stock_suspct_per"] # estimated PER
                , stock_updt = now
                , last_updt = datetime.now() # last update timestamp
            )
            saved_stock_info.save()
    return HttpResponse(json.dumps(context), content_type="application/json")
#---------------------------------------------- ajax -----------------------------------------------
# 주식검색
def sch_stock_list(request) :
    """Search stocks whose name contains the posted keyword; return JSON.

    NOTE(review): a non-AJAX or non-POST request falls off the end and
    implicitly returns None (Django will raise) — verify callers.
    """
    if request.method == "POST":
        if request.is_ajax():
            form = json.loads(request.body.decode("utf-8"))
            saved_stock_info = StockInfo.objects.filter(stock_name__icontains=form["sch_stock_nm"])
            stock_list = serializers.serialize('json',saved_stock_info)
            return HttpResponse(stock_list, content_type="application/json")
# 카테고리 추가
def add_cathe(request) :
    """Create a new stock category for the current user.

    The new category gets the next free ``cathe_num`` for this user.
    NOTE(review): on a non-POST request ``context`` is never assigned and
    the final return raises NameError — verify.
    """
    if request.method == 'POST':
        form = json.loads(request.body.decode("utf-8"))
        user = request.user
        cathe_info = StockCathe.objects.filter(user = user)
        if cathe_info.count() == 0 :
            max_cathe_num = 0
        else :
            max_cathe_num = cathe_info.aggregate(Max('cathe_num'))['cathe_num__max']+1
        cathe_info = StockCathe(
            cathe_name = form["cathe_name"]
            , cathe_num = max_cathe_num
            , cathe_keyword = form["cathe_keyword"]
            , user = request.user
        )
        context = "등록이 완료되었음"
        cathe_info.save()
    return HttpResponse(json.dumps(context), content_type="application/json")
# 카테고리 삭제
def delete_cathe(request) :
    """Soft-delete a category (sets use_yn="N") matched by user/num/name.

    NOTE(review): on a non-POST request ``context`` is unassigned and the
    return raises NameError — verify.
    """
    if request.method == 'POST':
        form = json.loads(request.body.decode("utf-8"))
        user = request.user
        cathe_name = form["cathe_name"]
        cathe_num = form["cathe_num"]
        cathe_info = StockCathe.objects.filter(user = user, cathe_num = cathe_num, cathe_name = cathe_name)
        for cathe in cathe_info :
            cathe.use_yn = "N"
            cathe.save()
        context = "삭제가 완료되었음"
    return HttpResponse(json.dumps(context), content_type="application/json")
# 카테고리 수정
def update_cathe(request) :
    """Rename a category / change its keyword, matched by user + cathe_num.

    NOTE(review): uses .get(), so a missing category raises DoesNotExist;
    on a non-POST request ``context`` is unassigned — verify both.
    """
    if request.method == 'POST':
        form = json.loads(request.body.decode("utf-8"))
        user = request.user
        cathe_num = form["cathe_num"]
        cathe_info = StockCathe.objects.get(user = user, cathe_num = cathe_num)
        cathe_info.cathe_name = form["cathe_name"]
        cathe_info.cathe_keyword = form["cathe_keyword"]
        cathe_info.save()
        context = "수정완료"
    return HttpResponse(json.dumps(context), content_type="application/json")
# 카테고리에 주식 추가
def add_stock_cathe(request) :
    """Add a stock to one of the user's categories (duplicate-safe).

    NOTE(review): on a non-POST request ``context`` is unassigned — verify.
    """
    if request.method == 'POST':
        form = json.loads(request.body.decode("utf-8"))
        stock_num = form["stock_num"]
        stock_name = form["stock_name"]
        cathe_num = form["cathe_num"]
        user = request.user
        # Reject duplicates: same stock already present in this category.
        stock_cathe_info = StockCatheCd.objects.filter(cathe_num = cathe_num , stock_num = stock_num, user= user)
        if stock_cathe_info:
            context = "이미 해당 주식이 등록되어있습니다."
        else:
            stock_cathe_info = StockCatheCd(
                cathe_num=cathe_num # category number
                , stock_num=stock_num # ticker number
                , stock_name = stock_name # stock name
                , user=user # owner
            )
            context = "등록이 완료되었음"
            stock_cathe_info.save()
    return HttpResponse(json.dumps(context), content_type="application/json")
# 카테고리에 주식 삭제
def delete_stock_cathe(request):
    """Remove a stock from a category (hard delete).

    NOTE(review): uses .get(), so a missing row raises DoesNotExist — verify.
    """
    context = "삭제호출"
    if request.method == 'POST':
        form = json.loads(request.body.decode("utf-8"))
        stock_num = form["stock_num"]
        cathe_num = form["cathe_num"]
        user = request.user
        stock_cathe_info = StockCatheCd.objects.get(cathe_num = cathe_num , stock_num = stock_num, user= user)
        stock_cathe_info.delete()
    return HttpResponse(json.dumps(context), content_type="application/json")
# 카테고리에 주식 불러오기
def reload_cathe_stocks(request):
    """Return all stocks in the requested category as serialized JSON.

    NOTE(review): a non-POST request falls off the end and returns None —
    verify callers only use POST.
    """
    if request.method == 'POST':
        form = json.loads(request.body.decode("utf-8"))
        cathe_num = form["cathe_num"]
        user = request.user
        cathe_stocks = StockCatheCd.objects.filter(cathe_num=cathe_num, user=user)
        cathe_stocks = serializers.serialize('json', cathe_stocks)
        return HttpResponse(cathe_stocks, content_type="application/json")
| [
"masima305@gmail.com"
] | masima305@gmail.com |
e15d76022e1bf4a6ec4ce357706ddfe41a76b487 | bea6da3ed6cfe5f2a408f7cd3b66cca5234dc354 | /app.py | 44e362d1778a6efb26ea93e51d1a89ec49dc15df | [] | no_license | singtoloco/project-3-beer-recommender | aaae5f8e9b74c77430f0a7f7c255abbcfd53c705 | 256290556e75e5b54cbbb175e2c158aad2cdaa87 | refs/heads/master | 2022-12-09T15:33:33.709926 | 2019-08-13T18:07:46 | 2019-08-13T18:07:46 | 202,192,181 | 0 | 0 | null | 2022-09-30T18:35:59 | 2019-08-13T17:23:35 | JavaScript | UTF-8 | Python | false | false | 3,279 | py | import numpy as np
import pandas as pd
from app_functions import fill_df, queryString, splitSearch, threeBeers
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, desc, inspect
from sklearn.cluster import KMeans
import os
import pickle
import joblib
import json
from flask import Flask, jsonify, render_template, session
from flask import Flask, request, url_for, redirect
from flask_sqlalchemy import SQLAlchemy
# Flask Setup ----------------------------
app = Flask(__name__)
# NOTE(review): hard-coded secret key checked into source — rotate and load
# from the environment before deploying.
app.secret_key=b'_5#y2L"F4Q8z\n\xec]/'

# Hack to deal w/ relative references: resolve the sqlite file next to this
# module so the app works regardless of the working directory.
dir_path = os.path.dirname(os.path.realpath(__file__))
abs_db_path = os.path.join(dir_path,"db", "dbfinal.sqlite3")
# NOTE(review): "postgres:///INSERT LINK" is used as an *environment
# variable name* here — it looks like a placeholder; the sqlite fallback is
# what actually takes effect.  Verify.
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get("postgres:///INSERT LINK", "") or f"sqlite:///{abs_db_path}"
db = SQLAlchemy(app)
inspector = inspect(db.engine)
print("Check db table name: ")
print(inspector.get_table_names())
# Routes ----------------------------
@app.route("/",endpoint ="new")
def index():
"""Return the homepage."""
return redirect('/find')
@app.route("/find")
def find():
"""Return the homepage."""
return render_template("index.html")
@app.route("/list", methods=['GET'])
def handle_data2():
print("MADE IT")
"""Handles inputs."""
beers = []
beers.append(request.args.get("beer1"))
beers.append(request.args.get("beer2"))
beers.append(request.args.get("beer3"))
new_beers = []
for beer in beers:
if beer:
new_beers.append(splitSearch(beer))
print(beer)
print(len(new_beers))
if len(new_beers) == 0:
cluster = [0]
new_beers = ['null', 'null', 'null']
else:
loaded_model = pickle.load(open('model.sav', 'rb'))
cluster = loaded_model.predict(fill_df(new_beers))
new_beers = threeBeers(new_beers)
print(cluster[0])
prediction = cluster[0]
session['prediction'] = f'{prediction}'
return render_template("list.html", prediction = prediction, beer1 = new_beers[0], beer2 = new_beers[1], beer3 = new_beers[2])
@app.route("/run")
def run():
"""Runs the model."""
cluster_no = session['prediction']
print(f'Now: {int(cluster_no)}')
results = pd.read_sql(queryString(cluster_no, inspector.get_table_names()[1]), con=db.engine)
dict_results = results.to_json(orient='records')
session.clear()
"""Returns the json results."""
return dict_results
@app.route("/group")
def group():
"""Return the group page."""
return render_template("group.html")
@app.route("/all")
def all():
results1 = pd.read_sql(f"""SELECT * FROM {inspector.get_table_names()[0]} """, con=db.engine)
dict_results = results1.to_json(orient='records')
"""Return all cluster data."""
return dict_results
@app.route("/top")
def top():
results2 = pd.read_sql(f"""SELECT * FROM {inspector.get_table_names()[1]} """, con=db.engine)
beers = results2.to_json(orient='records')
"""Return the top beers."""
return beers
@app.route("/model")
def model():
"""Return the model page."""
return render_template("model.html")
if __name__ == "__main__":
app.run(debug=False)
| [
"vanich.vacharussiriyuth@gmail.com"
] | vanich.vacharussiriyuth@gmail.com |
5525a95aac3d74b4cade468d072818758457df21 | 30416dcfc8adac3d2fb2fa77ca5d793b2bc90bbc | /endpoints/google.py | b0e8bbc74e1c311254cc1e03a7cdb97950424d7f | [] | no_license | sx4-discord-bot/Image-Webserver | 82787ba736fb6e086bb332c5484a0293168219f4 | e214dc457c94b3a9e17c94ad5db7ed937248627c | refs/heads/master | 2023-09-03T17:14:49.189158 | 2023-08-27T12:29:34 | 2023-08-27T12:29:34 | 172,701,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,125 | py | from PIL import ImageDraw
from handlers.handler import Handler
from utility.image import get_image_asset, get_font_asset, get_image_response
class GoogleHandler(Handler):
    """Render an animated GIF of a query being typed into a Google search box."""

    def __init__(self, app):
        super().__init__(app)
        # The query text is accepted as either ?query= or its alias ?q=.
        self.queries = [(["query", "q"], str)]

    def on_request(self):
        query = self.query("q") or self.query("query")
        length = len(query)

        background = get_image_asset("google.png")
        font = get_font_asset("arialuni.ttf", 16)

        frames = []
        # One frame per typed character, plus 24 trailing frames so the
        # cursor keeps blinking after the text is complete.
        for i in range(0, length + 24):
            text = query[:i]

            copy = background.copy()

            draw = ImageDraw.Draw(copy)
            # (378, 319) is the position of the search box in google.png.
            draw.text((378, 319), text, 0, font)
            if i < length:
                # Still typing: cursor always visible right after the text.
                draw.text((378 + font.getsize(text)[0], 319), "|", 0, font)
            else:
                # Done typing: blink the cursor on for 3 of every 6 frames.
                remaining = (i - length) % 6
                if remaining == 3 or remaining == 4 or remaining == 5:
                    draw.text((378 + font.getsize(text)[0], 319), "|", 0, font)

            frames.append(copy)

        return get_image_response(frames, transparency=255)
| [
"sc4gaming@gmail.com"
] | sc4gaming@gmail.com |
a08fc269628f84e2422525a919a06e3bf7582034 | 3b59671657c15e5cbc9c1d503f984be28e0a659a | /app/migrations/0003_delete_app.py | 967f57683181d6edb77450e133c9a773bc666023 | [] | no_license | anirudhsingla8/instaclone | ac7c0b33274b70df559a103c086bf57d0c060133 | fdf26e127bd400e418f7a624209990fd867acab9 | refs/heads/master | 2021-01-02T08:22:23.239144 | 2017-08-09T16:57:26 | 2017-08-09T16:57:26 | 98,996,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-21 13:01
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the ``App`` model."""

    dependencies = [
        ('app', '0002_auto_20170721_1349'),
    ]

    operations = [
        migrations.DeleteModel(
            name='App',
        ),
    ]
| [
"anirudhsingla8@gmail.com"
] | anirudhsingla8@gmail.com |
c09f1481e98d4014b1cc311d5f71ed8e750ff7f5 | bbb1ec554a9c4e9a789398c0a2e826cc694e2dc1 | /0x11-python-network_1/2-post_email.py | c222e31920ffc48e4e6d22014e1cef20fab216a4 | [] | no_license | andreshugueth/holbertonschool-higher_level_programming | 83a8b410f0c2d89103b23e6c01399b28d803f105 | dc56e02eb8021187a7bf67f43faf188b5a7ad4ff | refs/heads/master | 2022-12-22T04:21:03.831711 | 2020-09-24T21:35:38 | 2020-09-24T21:35:38 | 259,480,344 | 0 | 7 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | #!/usr/bin/python3
"""
script that takes in a URL and an email,
sends a POST request to the passed URL with the email as a parameter,
and displays the body of the response (decoded in utf-8)
"""
import sys
import urllib.parse
import urllib.request
if __name__ == "__main__":
url = sys.argv[1]
values = {"email": sys.argv[2]}
data = urllib.parse.urlencode(values)
data = data.encode('ascii')
req = urllib.request.Request(url, data)
with urllib.request.urlopen(req) as response:
print(response.read().decode("utf-8"))
| [
"andreshugueth@gmail.com"
] | andreshugueth@gmail.com |
4eec63edb5849bedfb3c1094f0944238a960f578 | a81d84fdb57e1b90812fc5b5b523685ba5b663c0 | /python/2021_04/Question0769.py | 648c3a58f644d02e661df59e3decc996ad812c3d | [] | no_license | KujouNozom/LeetCode | 1919081001126924daa7549493a0823702631a37 | 4de1e601274de1336d669e41f732a8cb056880b9 | refs/heads/master | 2023-07-17T12:17:45.156451 | 2021-09-04T11:57:40 | 2021-09-04T11:57:40 | 268,075,373 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | # 769. 最多能完成排序的块 [双指针]
from typing import List
class Solution:
    """LeetCode 769: split a permutation of 0..n-1 into the maximum number of
    chunks that, sorted independently and concatenated, give a sorted array."""

    def maxChunksToSorted(self, arr: List[int]) -> int:
        """Return the maximum number of sortable chunks.

        A prefix ``arr[start..i]`` can end a chunk exactly when the maximum
        value seen so far equals the index ``i`` (the prefix then holds
        precisely the values ``start..i``), so tracking the running maximum
        suffices.

        Bug fix / generalisation: the original seeded its running minimum
        with the magic constant 10, which silently undercounts for arrays
        longer than 11 elements (values >= 11 could never satisfy the
        minimum check).  This version works for any length, including the
        empty array.
        """
        chunks = 0
        max_so_far = -1
        for index, value in enumerate(arr):
            max_so_far = max(max_so_far, value)
            # Chunk boundary: everything up to `index` is exactly 0..index
            # of the current segment.
            if max_so_far == index:
                chunks += 1
        return chunks
| [
"438767738@qq.com"
] | 438767738@qq.com |
64fc8d14e57a5910de152b4a5ad07c00c39c7e77 | b43f41aae4311933caa96457a225e7074efd537f | /p2.py | 2fcedc6845290de6264cdfcce4bf2feaba6f841c | [] | no_license | jackbrucesimpson/NeuralNetworksFromScratch | 94e62200cb391f5d39cab833f5ad50be8611b8cc | 5400b1d0852fdcad029c1b83cbd67f7f214ff990 | refs/heads/main | 2023-01-05T12:18:19.251008 | 2020-10-30T13:17:00 | 2020-10-30T13:17:00 | 308,022,782 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | inputs = [1, 2, 3, 2.5]
weights1 = [0.2, 0.8, -0.5, 1]
weights2 = [0.5, -0.91, 0.26, -0.5]
weights3 = [-0.26, -0.27, 0.17, 0.87]
bias1 = 2
bias2 = 3
bias3 = 0.5
output = [
inputs[0] * weights1[0] + inputs[1] * weights1[1] + inputs[2] * weights1[2] + inputs[3] * weights1[3] + bias1,
inputs[0] * weights2[0] + inputs[1] * weights2[1] + inputs[2] * weights2[2] + inputs[3] * weights2[3] + bias2,
inputs[0] * weights3[0] + inputs[1] * weights3[1] + inputs[2] * weights3[2] + inputs[3] * weights3[3] + bias3
]
print(output)
| [
"jackbrucesimpson@gmail.com"
] | jackbrucesimpson@gmail.com |
8e902e4e628a8d138844e6ee81c87d0dc785a0b1 | 4674b8088ffdf55905d44995f08a0792a3e4cd5c | /tests/hwsim/test_monitor_interface.py | bfc9a1562ff2e5c9fb7ee4dc4b08dfa12334195f | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | vanhoefm/krackattacks-scripts | 41daca791638a92aa4cfa68a582e46119037560e | 4b78669686f74efe664c6543b1b5b1616b22f902 | refs/heads/research | 2022-10-29T20:21:11.512335 | 2022-10-16T18:44:41 | 2022-10-16T18:44:41 | 107,408,514 | 2,184 | 577 | NOASSERTION | 2021-07-06T12:43:49 | 2017-10-18T12:58:08 | C | UTF-8 | Python | false | false | 3,271 | py | # AP mode using the older monitor interface design
# Copyright (c) 2013, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
from remotehost import remote_compatible
import logging
logger = logging.getLogger()
import time
import hwsim_utils
import hostapd
from wpasupplicant import WpaSupplicant
def test_monitor_iface_open(dev, apdev):
    """Open connection using cfg80211 monitor interface on AP"""
    wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
    # use_monitor=1 selects the older monitor-interface based AP design.
    wpas.interface_add("wlan5", drv_params="use_monitor=1")
    id = wpas.add_network()
    wpas.set_network(id, "mode", "2")  # mode 2 = AP
    wpas.set_network_quoted(id, "ssid", "monitor-iface")
    wpas.set_network(id, "key_mgmt", "NONE")  # open network
    wpas.set_network(id, "frequency", "2412")
    wpas.connect_network(id)

    dev[0].connect("monitor-iface", key_mgmt="NONE", scan_freq="2412")
def test_monitor_iface_wpa2_psk(dev, apdev):
    """WPA2-PSK connection using cfg80211 monitor interface on AP"""
    wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
    # use_monitor=1 selects the older monitor-interface based AP design.
    wpas.interface_add("wlan5", drv_params="use_monitor=1")
    id = wpas.add_network()
    wpas.set_network(id, "mode", "2")  # mode 2 = AP
    wpas.set_network_quoted(id, "ssid", "monitor-iface-wpa2")
    wpas.set_network(id, "proto", "WPA2")
    wpas.set_network(id, "key_mgmt", "WPA-PSK")
    wpas.set_network_quoted(id, "psk", "12345678")
    wpas.set_network(id, "pairwise", "CCMP")
    wpas.set_network(id, "group", "CCMP")
    wpas.set_network(id, "frequency", "2412")
    wpas.connect_network(id)

    dev[0].connect("monitor-iface-wpa2", psk="12345678", scan_freq="2412")
def test_monitor_iface_multi_bss(dev, apdev):
    """AP mode mmonitor interface with hostapd multi-BSS setup"""
    params = { "ssid": "monitor-iface", "driver_params": "use_monitor=1" }
    hapd = hostapd.add_ap(apdev[0], params)
    # Second BSS on the same radio; both must be reachable.
    hostapd.add_bss(apdev[0], apdev[0]['ifname'] + '-2', 'bss-2.conf')
    dev[0].connect("monitor-iface", key_mgmt="NONE", scan_freq="2412")
    dev[1].connect("bss-2", key_mgmt="NONE", scan_freq="2412")
@remote_compatible
def test_monitor_iface_unknown_sta(dev, apdev):
    """AP mode monitor interface and Data frame from unknown STA"""
    ssid = "monitor-iface-pmf"
    passphrase = "12345678"
    params = hostapd.wpa2_params(ssid=ssid, passphrase=passphrase)
    params["wpa_key_mgmt"] = "WPA-PSK-SHA256"
    params["ieee80211w"] = "2"  # PMF required
    params['driver_params'] = "use_monitor=1"
    hapd = hostapd.add_ap(apdev[0], params)
    bssid = apdev[0]['bssid']
    addr = dev[0].p2p_interface_addr()
    dev[0].connect(ssid, psk=passphrase, ieee80211w="2",
                   key_mgmt="WPA-PSK-SHA256", proto="WPA2",
                   scan_freq="2412")
    # Drop the STA's security association so the AP's protected Deauth is
    # ignored by the STA below.
    dev[0].request("DROP_SA")

    # This protected Deauth will be ignored by the STA
    hapd.request("DEAUTHENTICATE " + addr)

    # But the unprotected Deauth from TX frame-from-unassoc-STA will now be
    # processed
    dev[0].request("DATA_TEST_CONFIG 1")
    dev[0].request("DATA_TEST_TX " + bssid + " " + addr + " 0")
    dev[0].request("DATA_TEST_CONFIG 0")

    ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=5)
    if ev is None:
        raise Exception("No disconnection")
| [
"j@w1.fi"
] | j@w1.fi |
2ecae8f6b820153021a5e26c49b0f027a2c16e6e | 6eaf4549c6a231e49b8aa642becbb0fa1121159a | /autorunsvm.py | 6120db1d679156a624c18df38854e3a9fb38ad43 | [] | no_license | jimmykim84/autorunsvm | ab633ae9858add32b0ad7b5a552bf0b661c8b34b | 311fa76dbfbd5e89372277c2b55e829cc008e5a1 | refs/heads/master | 2016-09-13T13:16:31.494169 | 2016-04-25T16:41:00 | 2016-04-25T16:41:00 | 57,049,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,870 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
' Cross platform automatic batch execution LibSVM under Python '
__author__ = 'Jimmykim'
import glob, os, time, sys
os.chdir('/Users/jimmykim/Documents/libsvm-3.21/tools/test/')
import subprocess
import platform
sysstr = platform.system()
if(sysstr=="Windows"):
# Windows
filesRead = r"C:\libsvm\tools\test\*.txt" # your libsvm data path
subsetpath = r"C:\libsvm\tools\test\subset.py"
easypath = r"C:\libsvm\tools\test\easy.py"
else:
# Mac
filesRead = r"/Users/jimmykim/Documents/libsvm-3.21/tools/test/*.txt"
subsetpath = r"/Users/jimmykim/Documents/libsvm-3.21/tools/test/subset.py"
easypath = r"/Users/jimmykim/Documents/libsvm-3.21/tools/test/easy.py"
m = 11 # 10 times validations
ts = 600 # train sample num
# NOTE: this file uses Python 2 print statements — it will not run under
# Python 3 without conversion.
if __name__ == '__main__':
    # NOTE(review): `list` shadows the builtin name.
    list = glob.glob(filesRead)
    start = time.time()
    # print list
    for name in list:
        txtFileName = name[0:-4]  # strip the ".txt" extension
        showname =os.path.basename(txtFileName)
        # First pass: generate m-1 random train/test splits per data file.
        # NOTE(review): shell=True with interpolated file names — unsafe if
        # paths ever contain shell metacharacters.
        for j in range(1,m,1):
            trainName = '%s_train_%d.txt'%(txtFileName,j)
            testName = '%s_test_%d.txt'%(txtFileName,j)
            #resultName = '%s_result_%d.txt'%(txtFileName,j)
            subprocess.call("python %s %s %d %s %s"%(subsetpath,name,ts,trainName,testName),shell=True)
        # Second pass: run easy.py on every split, appending to a result file.
        for j in range(1,m,1):
            print '第 %2d 次对 %s 数据进行分类'%(j,showname)
            trainName = '%s_train_%d.txt'%(txtFileName,j)
            testName = '%s_test_%d.txt'%(txtFileName,j)
            resultName = '%s_result_%d.txt'%(txtFileName,j) # output
            #print "python %s %s %s >> %s"%(easypath,trainName,testName,resultName)
            subprocess.call("python %s %s %s >> %s"%(easypath,trainName,testName,resultName),shell=True)
    end = time.time()
    print "spend time: %f s" % (end - start)
| [
"jimmykim84@users.noreply.github.com"
] | jimmykim84@users.noreply.github.com |
efc0ff16e064e56e714719076065e0481806106e | 951e433b25a25afeea4d9b45994a57e0a6044144 | /LeetCode/Q187_HM_findRepeatedDnaSequences.py | 4be93e56f1f5ce25527e7b244bc6bc2c45797d72 | [] | no_license | EricaEmmm/CodePython | 7c401073e0a9b7cd15f9f4a553f0aa3db1a951a3 | d52aa2a0bf71b5e7934ee7bff70d593a41b7e644 | refs/heads/master | 2020-05-31T14:00:34.266117 | 2019-09-22T09:48:23 | 2019-09-22T09:48:23 | 190,318,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,045 | py | # 重复的DNA序列
# 所有 DNA 由一系列缩写为 A,C,G 和 T 的核苷酸组成,例如:“ACGAATTCCG”。在研究 DNA 时,识别 DNA 中的重复序列有时会对研究非常有帮助
# 编写一个函数来查找 DNA 分子中所有出现超多一次的10个字母长的序列(子串)。
#
# 示例:
# 输入: s = "AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT"
# 输出: ["AAAAACCCCC", "CCCCCAAAAA"]
class Solution(object):
    """LeetCode 187: find every 10-letter DNA sequence occurring more than once."""

    def findRepeatedDnaSequences(self, s):
        """
        :type s: str
        :rtype: List[str]

        Slides a width-10 window over *s*, counting each substring; every
        substring seen more than once is reported in first-seen order.

        Bug fix: the original returned an empty *dict* (not a list) for
        inputs shorter than 10 characters, violating the declared list
        return type.
        """
        if len(s) < 10:
            return []
        counts = {}
        for i in range(len(s) - 9):
            window = s[i:i + 10]
            counts[window] = counts.get(window, 0) + 1
        # dicts preserve insertion order, so results come out in the order
        # each repeated sequence was first seen.
        return [seq for seq, n in counts.items() if n > 1]
if __name__ == '__main__':
s = Solution()
tmp = "AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT"
print(s.findRepeatedDnaSequences(tmp))
# st = "abc"
# t = [1,2,3]
# print(st[0:3]) | [
"1016920795@qq.com"
] | 1016920795@qq.com |
6c172d1d135b205d3134c570b5fea04025c05ba2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02578/s304732693.py | fd8ac3cded43971a72e3cf659d1486c121afa2f5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | n = int(input())
# AtCoder ABC 176 C "Step": everyone must be at least as tall as the person
# in front; sum the minimum stand heights required.
# (The preceding line of the script reads n = int(input()).)
input_line = input().split()
member = [int(input_line[i]) for i in range(n)]  # effective heights
stands = 0  # total stand height handed out so far
for i in range(1,n):
    # Height deficit relative to the person directly in front.
    stand = member[i-1] - member[i]
    if stand > 0:
        stands += stand
        member[i] += stand  # raise this person to match
print(stands)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
b594ea5d9c012feedfb6dd74228118ce0300906b | 8d9318a33afc2c3b5ca8ac99fce0d8544478c94a | /Books/Casandra DB/opscenter-5.1.0/lib/py/html5lib/treebuilders/__init__.py | 50c8deeb08c187d8b51fcfdcb742e414c6ee52ab | [] | no_license | tushar239/git-large-repo | e30aa7b1894454bf00546312a3fb595f6dad0ed6 | 9ee51112596e5fc3a7ab2ea97a86ec6adc677162 | refs/heads/master | 2021-01-12T13:48:43.280111 | 2016-11-01T22:14:51 | 2016-11-01T22:14:51 | 69,609,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:b2a0b8030b4449b4e227ef389f48544f1136f27d0fc657128ee58812e529f7d3
size 4478
| [
"tushar239@gmail.com"
] | tushar239@gmail.com |
c31bbc04e9795a79303e1f16b21e6edf147b419b | 98d7c0714c9585e76118c7cfadbc7644757a6ecc | /script/extract_Chairman.py | 181f1424e0a686f93cab1f6b64a5d44aa2dea232 | [] | no_license | zhen8838/business_analytics | 06453efc03c58451daa832802d28eb16cc8e28bb | 00325d369989607019ec15b5441ee6099d1f8e29 | refs/heads/master | 2023-02-26T21:15:32.306805 | 2021-02-08T02:35:35 | 2021-02-08T02:35:35 | 325,265,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py | import os
import sys

sys.path.insert(0, os.getcwd())
import numpy as np
import re
import pandas as pd
from datetime import datetime
from tools.cleanfucs import maskfuc, work, institution

# NOTE(review): the backslash paths ('tmp\...') only work on Windows — use
# os.path.join / forward slashes for portability.
df1: pd.DataFrame = pd.read_csv('tmp\Chairman_tenure.csv')# read career-history (tenure) records
series1: pd.Series = df1.set_index(['person_id', 'start_tenure', 'end_tenure']).index
person_group1 = df1.groupby('person_id')
# Per-person features derived from the tenure history.
promote_time = person_group1.apply(lambda s: maskfuc(s, 'chairman'))
work_before = person_group1.apply(lambda s: work(s, 'chairman'))

df2: pd.DataFrame = pd.read_csv('tmp\Chairman_rank.csv')
series2: pd.Series = df2.set_index(['person_id', 'top30', 'top200']).index
person_group2 = df2.groupby('person_id')
# Whether the person attended a top-ranked institution.
top_sch = person_group2.apply(institution)
df3: pd.DataFrame = pd.read_csv('tmp\Chairman_info.csv')
promote_time = list(promote_time)
work_before = list(work_before)
top_sch = list(top_sch)
# Assemble the derived features into one frame aligned with df3.
# NOTE(review): alignment is positional (axis=1 concat of lists) — assumes
# groupby order matches df3 row order; verify.
c = {"promote_time": promote_time,
     "work_beforeChairman": work_before,
     "top_sch": top_sch}
c = pd.DataFrame(c)
sample = pd.concat([df3, c], axis=1)
sample.to_pickle('tmp/sample_Chairman.pkl') | [
"18000635@smail.cczu.edu.cn"
] | 18000635@smail.cczu.edu.cn |
ffca56ee08907af241e2909e99b96a3e28e41b59 | 7304a3dc3f331a4c5f85be5470f45c5bb0ec8467 | /day7.py | 28aa2f313bf2fa06783a3c99b7bb90e9d41bc9e0 | [] | no_license | Kcedrone/advent_of_code_2020 | 431d43a9091a15de459f61681ccb17cd43e8926c | 23ea30169b16c10d00b32b9e32bbd9be4eb084c6 | refs/heads/master | 2023-01-30T06:16:32.989551 | 2020-12-14T20:29:54 | 2020-12-14T20:29:54 | 317,558,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,577 | py | # possible_contents={}
# possible_contents['light red'] = ['1_bright white', '2_muted yellow']
# possible_contents['dark orange'] = ['3_bright white', '4_muted yellow']
# possible_contents['bright white'] = ['1_shiny gold']
# possible_contents['muted yellow'] = ['2_shiny gold', '9_faded blue']
# possible_contents['shiny gold'] = ['1_dark olive', '2_vibrant plum']
# possible_contents['dark olive'] = ['3_faded blue', '4_dotted black']
# possible_contents['vibrant plum'] = ['5_faded blue', '6_dotted black']
# possible_contents['faded blue'] = []
# possible_contents['dotted black'] = []
import re
input_file = 'day7_part1_test_input.txt'
# input_file = 'day7_part1_input.txt'
with open(input_file, 'r') as f:
lines = f.readlines()
rules = []
possible_contents = {}
for line in lines:
outer_bag_colour, rule = line.split(' bags contain ')
if 'no other bags' in rule:
possible_contents[outer_bag_colour] = ['0 other bag',]
else:
rule = rule.replace('bags', 'bag')
rule = rule.replace('bag.', 'bag')
rule = rule.replace(', ', ',')
rule = rule.replace('\n', '')
rule = rule.replace(' bag', '')
rules = rule.split(',')
possible_contents[outer_bag_colour] = rules
# print(rules)
def find_containers(test_bag):
    """Return the colours of every bag that can directly contain *test_bag*.

    Scans the module-level ``possible_contents`` rule table; a rule entry
    such as ``"2 muted yellow"`` matches when *test_bag* appears as a
    substring of the entry.  A colour is appended once per matching entry.

    Cleanup: the original also parsed the leading count of each matching
    entry into ``num_of_bags``, which was never used (and could raise if an
    entry had no leading digits); that dead computation is removed.  An
    explicit truthiness guard around the inner loop was redundant — an
    empty list simply iterates zero times.
    """
    usable_bags = []
    for outer_bag_colour, allowable_contents in possible_contents.items():
        for bag_spec in allowable_contents:
            if test_bag in bag_spec:
                usable_bags.append(outer_bag_colour)
    return usable_bags
test_bag = 'shiny gold'
usable_bags = find_containers(test_bag)
print(usable_bags)

# Breadth-first expansion: keep testing newly-discovered container colours
# until none are left untested.
# Bug fix: the original looped `while len(other_test_bags)`, but that list
# only ever grows — once every colour had been tested the inner `for`
# found nothing to do and the `while` spun forever.  Terminate when the
# untested frontier is empty instead.
other_test_bags = ['shiny gold',]
all_usable_bags = []
colors_tested = []
while True:
    untested = [bag for bag in other_test_bags if bag not in colors_tested]
    if not untested:
        break
    test_bag = untested[0]
    colors_tested.append(test_bag)
    usable_bags = find_containers(test_bag)
    all_usable_bags += usable_bags
    other_test_bags += usable_bags

# De-duplicate: a colour may be reachable through several chains.
all_usable_bags = list(set(all_usable_bags))
print("Part 1: ", input_file, all_usable_bags)
print("Part 1: ", len(all_usable_bags))
print("*" * 60)

# Part 2
test_bag = 'shiny gold'
def count_bags_inside(test_bag):
    """Parse the rule entries for *test_bag* into ``[count, colour]`` pairs.

    Each entry in ``possible_contents[test_bag]`` looks like
    ``"3 faded blue"``: the leading integer is the count and the remainder
    (after the separating space) is the colour.
    """
    rule_entries = possible_contents[test_bag]
    parsed = []
    if rule_entries:
        for entry in rule_entries:
            match = re.match(r"[0-9]+", entry)
            quantity = int(match.group(0))
            colour = entry[match.end() + 1:]
            parsed.append([quantity, colour])
    return parsed
# print(count_bags_inside(test_bag))
def count_bags(bag):
    """Collect the direct contents of *bag* as a {colour: count} dict.

    NOTE(review): despite the worklist setup, the while loop runs exactly
    one pass in practice -- the lines that would enqueue the newly found
    colours are commented out, so after the first iteration
    other_test_bags == colors_tested and the loop exits.  Only *bag*'s
    immediate contents end up in the result; confirm whether the deeper
    traversal was meant to be finished.
    """
    other_test_bags = [bag,]
    colors_tested = []
    # colour -> count of that colour directly inside `bag`.
    bag_count_dict = {}
    # Runs until every queued colour has been tested (one pass, see above).
    while sorted(other_test_bags) != sorted(colors_tested):
        for test_bag in other_test_bags:
            if test_bag not in colors_tested:
                if 'other' not in test_bag:
                    new_bags = count_bags_inside(test_bag)
                    bags = [b for a,b in new_bags]
                    # counts = [a for a,b in new_bags]
                    # NOTE(review): dict.update overwrites duplicate keys, so
                    # a colour listed twice would keep only the last count.
                    bag_count_dict.update({b:a for a,b in new_bags})
                    # other_test_bags += bags
                    # other_test_bags = list(set(other_test_bags))
                else:
                    # Sentinel 'other bag' entries contribute nothing.
                    counts = [0,]
                    bags = ['none']
                colors_tested += [test_bag,]
                # print(test_bag, ": ", bags, counts)
    return bag_count_dict
def sum_next_level_down(bag_count_dict):
    """Return the total number of bags across every colour in the dict.

    Fix: the original accumulated `total` but returned `count` -- i.e. the
    value of whichever entry the loop visited last, and a NameError for an
    empty dict.  Summing the values returns the intended total (0 when the
    dict is empty).
    """
    return sum(bag_count_dict.values())
def count_the_bags(starting_bag, contents=None):
    """Recursively count how many bags *starting_bag* must contain in total.

    Parameters:
        starting_bag: colour to expand, e.g. 'shiny gold'.
        contents: optional {colour: [spec, ...]} mapping; defaults to the
            module-level possible_contents so the original call shape
            count_the_bags('shiny gold') still works.

    Fix: the original called count_the_bags() with no argument (TypeError on
    the first recursion) and multiplied by the aggregate of all direct
    counts; the correct total recurses per inner colour:
    sum(n + n * count_the_bags(colour)).
    """
    bag_map = possible_contents if contents is None else contents
    total = 0
    for bag_spec in bag_map[starting_bag]:
        # Each spec is "<count> <colour>"; split it the same way the rest
        # of the file does.
        match = re.match(r"[0-9]+", bag_spec)
        num_of_bags = int(match[0])
        inner_colour = bag_spec[match.end() + 1:]
        # Sentinel "0 other bag" specs mark an empty bag -- nothing to add.
        if 'other' not in inner_colour:
            total += num_of_bags + num_of_bags * count_the_bags(inner_colour, bag_map)
    return total
| [
"kevin@cedrone.com"
] | kevin@cedrone.com |
3a8131ad969a7bb969367e34d99f221e490b03e7 | a9096b8d24321c48ee09de151d674ddde0b7f9d9 | /music.py | 20f0da0ce9f4b19e6dbd142896b12033dd5c1604 | [] | no_license | Rutujalad/music-player | 9edd61ec008172a099e38f2978a4aaf2ee57c4b6 | 1342cd354d5ef25a2f75e8f73ea033d0fab75c22 | refs/heads/main | 2023-06-01T01:01:28.950954 | 2021-07-02T18:05:47 | 2021-07-02T18:05:47 | 370,728,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | from pygame import mixer
import time
# NOTE(review): `time` is imported but unused in this script; `mixer` comes
# from the pygame import above -- confirm pygame is installed where this runs.
mixer.init()
# Load the track and start playback at 70% volume.
mixer.music.load("Dilwale.mp3")
mixer.music.set_volume(0.7)
mixer.music.play()
# Command loop: pause/resume the track, or stop it and exit on 'e'.
# Any other input just redisplays the menu.
while True:
    print("Press 'p' to pause, 'r' to resume")
    print("Press 'e' to exit the program")
    query = input(" ")
    if query == 'p':
        # Pausing the music
        mixer.music.pause()
    elif query == 'r':
        # Resuming the music
        mixer.music.unpause()
    elif query == 'e':
        # Stop the mixer
        mixer.music.stop()
        break
"84790054+Rutujalad@users.noreply.github.com"
] | 84790054+Rutujalad@users.noreply.github.com |
5951031d6db26d99cd69d588453cb88362a79190 | 2fac959f8b1c78afbe3f7013363e24818a203fee | /GCSE Holiday Work 2.py | b97a7a6a908600837d07dc5e51461e7d82d4d651 | [] | no_license | jaketbailey/oldpython | dd7f084ed0252fcdbaf620080160d45344b1a43c | 944325901bb08bcb021295da7542cf20e5bab8fd | refs/heads/main | 2023-08-02T23:28:50.179583 | 2021-09-29T20:32:45 | 2021-09-29T20:32:45 | 411,820,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65,228 | py |
#Imports time
import time
#Imports sys
import sys
#Imports os.path
import os.path
# Show the introduction banner one line at a time, pausing between lines.
for intro_line in (
    "Welcome to the Holiday project program by Jake Thomas Bailey",
    "This program contains a series of different programs, these include:",
    "A BMI Calculator",
    "A Calculator",
    "A Guess my Number Game",
):
    time.sleep(1)
    print(intro_line)
# Final pause, then ask whether the user wants to run anything at all.
time.sleep(1)
use = input("Would you like to use this program? ")
#While the variable use is equal to 'yes'
while use == "yes":
#Waits a second
time.sleep(1)
#Asks the user which program they want to use
which = input("Which program would you like to use? ")
#Defines the subroutine BMI
def BMI():
#Welcomes the user to the BMI calculator
print("Welcome to the BMI calculator!!")
#Waits a second
time.sleep(1)
#Asks the user to input their weight
w = float(input("Please input your weight: "))
#Prints a gap
print("")
#Waits a second
time.sleep(1)
#Asks the user to input their weight
kgorstone = input("If you input your weight in kilograms type 'yes' if not type 'no': ")
#Prints a space
print("")
#Waits a second
time.sleep(1)
#If the variable kgorstone equals 'yes'
if kgorstone == "yes":
#The variable w stays the same
w = w
#If kgorstone equals 'no'
elif kgorstone == "no":
#Waits a second
time.sleep(1)
#Asks the user to input the stone value
stone = int(input("Please input the stone value: "))
#Prints a space
print("")
#Waits a second
time.sleep(1)
#Asks the user to input the pounds value
lb = int(input("Please input the pounds value: "))
#Prints a space
print("")
#Sets the variable stonekg to stone * 6.35029318
stonekg = stone * 6.35029318
#Sets the variable lbkg to lb * 0.45359237
lbkg = lb * 0.45359237
#Sets the variable kilo to stonekg + lbkg
kilo = stonekg + lbkg
#Sets the variable w equal to kilo
w = kilo
#Waits a second
time.sleep(1)
#Asks the user to input their height
h = float(input("Please input your height: "))
#Prints a blank space
print("")
#Waits a second
time.sleep(1)
#Asks the user to input their height
metersorfeet = input("If you input your height in meters type 'yes' if not type 'no': ")
#Prints a blank space
print("")
#If the variable metersorfeet is equal to 'yes'
if metersorfeet == "yes":
#The variable h stays the same
h = h
#If metersorfeet is equal to 'no'
elif metersorfeet == "no":
#Waits a second
time.sleep(1)
#Asks the user to input the foot value
feet = int(input("Please enter the foot value: "))
#Prints a blank space
print("")
# Waits a second
time.sleep(1)
#Asks the user to input the inch value
inch = int(input("Please enter the inch value: "))
#Prints a blank space
print("")
#Sets the variable feetm to feet * 0.3
feetm = feet * 0.3
#Sets the variable inchm to inch * 0.025
inchm = inch * 0.025
#Sets the variable m to feetm + inchm
m = feetm + inchm
#Sets the variable h equal to m
h = m
#Sets the variable BMI equal to w / (h*h)
BMI = w / (h * h)
#If BMI is less than 18.5
if BMI < 18.5:
#Waits a second
time.sleep(1)
#Tells the user that they're BMI suggests they're underweight and gives them some advice
print("Your BMI is", BMI,
"this means that you're underweight, perhaps try to eat some more food to supply your body with the nutrients it needs and to reduce your risK of anemia and osteoporosis")
#If the 18.5 is greater than or equal to BMI and BMI is less than 25
elif 18.5 <= BMI < 25.0:
#Waits a second
time.sleep(1)
#Tells the user that they're BMI suggests they're a normal weight and gives them some advice
print("Your BMI is", BMI,
"this means that you're at a normal, healthy weight, keep eating how you usually do and you should stay at your healthy weight")
#If 25 is greater than or equal to BMI and BMI is less than 30
elif 25.0 <= BMI < 30.0:
#Waits a second
time.sleep(1)
# Tells the user that they're BMI suggests they're overweight and gives them some advice
print("Your BMI is", BMI,
"this means that you're overweight and you should start to eat less food to reduce the risk of heart problems")
#If 30 is greater than or equal to BMI
elif 30.0 <= BMI:
#Waits a second
time.sleep(1)
# Tells the user that they're BMI suggests they're obese and gives them some advice
print("Your BMI is", BMI,
"this means that you're obese and you should start to eat less for to reduce the risk of heart problems")
#Prints a blank space
print("")
#Waits a second
time.sleep(1)
#Thanks the user for using the BMI calculator
print("Thank you for using this BMI calculator")
#Asks the user what program they want to use next and if not to type 'end'
which = input("Which program would you like to use next, if not any type 'end' ")
#Defines the subroutine calc
def calc():
#Welcomes the user to the Calculator
print("Welcome to the Calculator")
#Asks the user how many numbers they wish to input
howmany = input(
"How many numbers to you wish to times, divide, subtract or add? 2 - 4 or do you not want to use this, if so type 'no': ")
#While the variable howmany is not equal to 'no'
while howmany != "no":
#If howmany is equal to '2'
if howmany == "2":
#Asks the user if they want to times, divide, add or subtract
calculation = input("Do you wish to times, divide, add or subtract? ")
#If the variable calculation is equal to 'times'
if calculation == "times":
#Waits a second
time.sleep(1)
#Asks the user to input the first number
num1 = float(input("Please enter the first number: "))
#Waits a second
time.sleep(1)
#Asks the user to input the second number
num2 = float(input("Please enter the second number: "))
#Tells the user the answer is being processed
print("Your answer is being processed")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Sets the variable answer to num1 * num2
answer = num1 * num2
#Tells the user the answer to their answer and thanks them for using the calculator
print("Your answer is", answer, "Thank you for using the calculator.")
#Waits for a second
time.sleep(1)
#Asks the user how many numbers they wish to use in their calculation
howmany = input(
"How many numbers to you wish to times, divide, subtract or add? 2 - 4 or do you not want to use this, if so type 'no': ")
# Waits for a second
time.sleep(1)
#If calculation is equal to 'divide'
elif calculation == "divide":
# Waits for a second
time.sleep(1)
#Asks the user to input their first number
num1 = float(input("Please enter the first number: "))
# Waits for a second
time.sleep(1)
#Asks the user to input their second number
num2 = float(input("Please enter the second number: "))
#Tells the user their answer is being processed
print("Your answer is being processed")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Sets the variable answer to num1 / num2
answer = num1 / num2
#Tells the user their answer and thanks them for using the calculator
print("Your answer is", answer, "Thank you for using the calculator.")
# Waits for a second
time.sleep(1)
#Asks how many numbers the user wishes to use in their calculation
howmany = input(
"How many numbers to you wish to times, divide, subtract or add? 2 - 4 or do you not want to use this, if so type 'no': ")
#Waits for a second
time.sleep(1)
#If the variable calculation is equal to 'add'
elif calculation == "add":
# Waits for a second
time.sleep(1)
#Asks the user to enter their first number
num1 = float(input("Please enter the first number: "))
# Waits for a second
time.sleep(1)
#Asks the user to enter their second number
num2 = float(input("Please enter the second number: "))
#Tells the user answer is being processed
print("Your answer is being processed")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
# Waits for half a second
time.sleep(0.5)
#Sets the variable answer to num1 + num2
answer = num1 + num2
#Tells the user their answer and thanks them for using the calculator
print("Your answer is", answer, "Thank you for using the calculator.")
#Waits for a second
time.sleep(1)
#Asks the user how many numbers they wish to use in the calculation
howmany = input(
"How many numbers to you wish to times, divide, subtract or add? 2 - 4 or do you not want to use this, if so type 'no': ")
#Waits for a second
time.sleep(1)
#If the variable calculation is equal to 'substact'
elif calculation == "subtract":
#Waits for a second
time.sleep(1)
#Asks the user to input their first number
num1 = float(input("Please enter the first number: "))
#Waits for second
time.sleep(1)
#Asks the user to input their second number
num2 = float(input("Please enter the second number: "))
#Tells the user the answer is being processed
print("Your answer is being processed")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Sets the variable answer to num1 - num2
answer = num1 - num2
#Tells the user the answer and thanks them for using the calculator
print("Your answer is", answer, "Thank you for using the calculator.")
#Waits for a second
time.sleep(1)
#Asks the user how many numbers they wish to use in the calculation
howmany = input(
"How many numbers to you wish to times, divide, subtract or add? 2 - 4 or do you not want to use this, if so type 'no': ")
#Waits for a second
time.sleep(1)
#Else
else:
#Waits for a second
time.sleep(1)
#Tells the user to try again
print("Please try again...")
#Waits for a second
time.sleep(1)
#Asks the user is they wish to times, divide, add or subtract
calculation = input("Do you wish to times, divide, add or subtract?")
#If howmany is equal to '3'
elif howmany == "3":
#Asks the user if the wish to times, divide, add or subtract
calculation = input("Do you wish to times, divide, add or subtract? ")
#If calculation is equal to 'times'
if calculation == "times":
#Waits for a second
time.sleep(1)
#Asks the user to input their first number
num1 = float(input("Please enter the first number: "))
#Waits for a second
time.sleep(1)
#Asks the user to input their second number
num2 = float(input("Please enter the second number: "))
#Waits for a second
time.sleep(1)
#Asks the user to input their third number
num3 = float(input("Please enter the third number: "))
#Tells the user their answer is being processed
print("Your answer is being processed")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Sets the variable answer to num1* num2 * num3
answer = num1 * num2 * num3
#Tells the user their answer and thanks them for using the calculator
print("Your answer is", answer, "Thank you for using the calculator.")
#Waits for a second
time.sleep(1)
#Asks the user how many numbers they wish to use in their calculation
howmany = input(
"How many numbers to you wish to times, divide, subtract or add? 2 - 4 or do you not want to use this, if so type 'no': ")
#Waits for a second
time.sleep(1)
#If calculation is equal to divide
elif calculation == "divide":
#Waits for a second
time.sleep(1)
#Asks the user to input their first number
num1 = float(input("Please enter the first number: "))
#Waits for a second
time.sleep(1)
#Asks the user to input their second number
num2 = float(input("Please enter the second number: "))
#Waits for a second
time.sleep(1)
#Asks the user to input their third number
num3 = float(input("Please enter the third number: "))
#Tells the user their answer is being processed
print("Your answer is being processed")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Sets the variable answer to num1 / num2 / num3
answer = num1 / num2 / num3
#Tells the user their answer and thanks them for using the calculator
print("Your answer is", answer, "Thank you for using the calculator.")
#Waits for a second
time.sleep(1)
#Asks the user how many numbers they wish to use in their calculation
howmany = input(
"How many numbers to you wish to times, divide, subtract or add? 2 - 4 or do you not want to use this, if so type 'no': ")
#Waits for a second
time.sleep(1)
#If the variable calculation is equal to 'add'
elif calculation == "add":
#Waits for a second
time.sleep(1)
#Asks the user for their first number
num1 = float(input("Please enter the first number: "))
#Waits for a second
time.sleep(1)
#Asks the user to input their second number
num2 = float(input("Please enter the second number: "))
#Waits for a second
time.sleep(1)
#Asks the user to input their third number
num3 = float(input("Please enter the third number: "))
#Tells the user their answer is being processed
print("Your answer is being processed")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Sets the variable answer to num1 + num2 + num3
answer = num1 + num2 + num3
#Tells the user their answer and thanks them for using the calculator
print("Your answer is", answer, "Thank you for using the calculator.")
#Waits for a second
time.sleep(1)
#Asks the user how many numbers they wish to use in their calculation
howmany = input(
"How many numbers to you wish to times, divide, subtract or add? 2 - 4 or do you not want to use this, if so type 'no': ")
#Waits for a second
time.sleep(1)
#If calculation is equal to 'subtract'
elif calculation == "subtract":
#Waits for a second
time.sleep(1)
#Asks the user to input their first number
num1 = float(input("Please enter the first number: "))
#Waits for a second
time.sleep(1)
#Asks the user to input their second number
num2 = float(input("Please enter the second number: "))
#Waits for a second
time.sleep(1)
#Asks the user to input their third number
num3 = float(input("Please enter the third number: "))
#Tells the user their answer is being processed
print("Your answer is being processed")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Sets the variable answet to num1 - num2 - num3
answer = num1 - num2 - num3
#Tells the user their answer and thanks them for using the calculator
print("Your answer is", answer, "Thank you for using the calculator.")
#Waits for a second
time.sleep(1)
#Asks the user how many numbers they want to use in their calculation
howmany = input(
"How many numbers to you wish to times, divide, subtract or add? 2 - 4 or do you not want to use this, if so type 'no': ")
#Waits for a second
time.sleep(1)
#Else
else:
#Waits for a second
time.sleep(1)
#Tells the user to try again
print("Please try again...")
#Waits for a second
time.sleep(1)
#Asks the user if they wish to times, divide, add or subtract
calculation = input("Do you wish to times, divide, add or subtract?")
#If howmany is equal to '4'
elif howmany == "4":
#Asks the user if they wish to times, divide, add or subtract
calculation = input("Do you wish to times, divide, add or subtract? ")
#If calculation is equal to 'times'
if calculation == "times":
#Waits for a second
time.sleep(1)
#Tells the user to input their first number
num1 = float(input("Please enter the first number: "))
#Waits for a second
time.sleep(1)
#Tells the user to input their second number
num2 = float(input("Please enter the second number: "))
#Waits for a second
time.sleep(1)
#Tells the user to input their third number
num3 = float(input("Please enter the third number: "))
#Waits for a second
time.sleep(1)
#Tells the user to input their fourth number
num4 = float(input("Please enter the fourth number: "))
#Tells the user their answer is being processed
print("Your answer is being processed")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Sets answer to num1 * num2 * num3 * num4
answer = num1 * num2 * num3 * num4
#Tells the user their answer and thanks them for using the calculator
print("Your answer is", answer, "Thank you for using the calculator.")
#Waits for a second
time.sleep(1)
#Asks the user how many numbers they wish to use in their calculation
howmany = input(
"How many numbers to you wish to times, divide, subtract or add? 2 - 4 or do you not want to use this, if so type 'no': ")
#Waits for a second
time.sleep(1)
#If calculation is equal to 'divide
elif calculation == "divide":
#Waits for a second
time.sleep(1)
#Asks the user to input their first number
num1 = float(input("Please enter the first number: "))
#Waits for a second
time.sleep(1)
#Asks the user to input their second number
num2 = float(input("Please enter the second number: "))
#Waits for a second
time.sleep(1)
#Asks the user to input their third number
num3 = float(input("Please enter the third number: "))
#Waits for a second
time.sleep(1)
#Asks the user to input their fourth number
num4 = float(input("Please enter the fourth number: "))
#Tells the user the answer is being processes
print("Your answer is being processed")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Sets the variable answer to num1 / num2 / num3 / num4
answer = num1 / num2 / num3 / num4
#Tells the user their answer and thanks them for using the calculator
print("Your answer is", answer, "Thank you for using the calculator.")
#Waits for a second
time.sleep(1)
#Asks the user how many numbers they wish to use in their calculation
howmany = input(
"How many numbers to you wish to times, divide, subtract or add? 2 - 4 or do you not want to use this, if so type 'no': ")
#Waits for a second
time.sleep(1)
#If the variable calculation is equal to 'add'
elif calculation == "add":
#Waits for a second
time.sleep(1)
#Asks the user to input their first number
num1 = float(input("Please enter the first number: "))
#Waits for a second
time.sleep(1)
#Asks the user to input their second number
num2 = float(input("Please enter the second number: "))
#Waits for a second
time.sleep(1)
#Asks the user to input their third number
num3 = float(input("Please enter the third number: "))
#Waits for a second
time.sleep(1)
#Asks the user to input their fourth number
num4 = float(input("Please enter the fourth number: "))
#Tells the user the answer is being processed
print("Your answer is being processed")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Sets the variable answer to num1 + num2 + num3 + num4
answer = num1 + num2 + num3 + num4
#Tells the user their answer and thanks them for using the calculator
print("Your answer is", answer, "Thank you for using the calculator.")
#Waits for a second
time.sleep(1)
#Asks the user how many numbers they wish to use in their calculation
howmany = input(
"How many numbers to you wish to times, divide, subtract or add? 2 - 4 or do you not want to use this, if so type 'no': ")
#Waits for a second
time.sleep(1)
#If calculation is equal to 'subtract'
elif calculation == "subtract":
#Waits for a second
time.sleep(1)
#Asks the user to input their first number
num1 = float(input("Please enter the first number: "))
#Waits for a second
time.sleep(1)
#Asks the user to input their second number
num2 = float(input("Please enter the second number: "))
#Waits for a second
time.sleep(1)
#Asks the user to input their third number
num3 = float(input("Please enter the third number: "))
#Waits for a second
time.sleep(1)
#Asks the user to input their fourth number
num4 = float(input("Please enter the fourth number: "))
#Tells the user their answer is being processed
print("Your answer is being processed")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Prints a dot
print(".")
#Waits for half a second
time.sleep(0.5)
#Sets the variable answer to num1 - num2 - num3 - num4
answer = num1 - num2 - num3 - num4
#Tells the user their answer and thanks them for using the calculator
print("Your answer is", answer, "Thank you for using the calculator.")
#Waits for a second
time.sleep(1)
#Asks the user how many numbers they wish to use in their calculation
howmany = input(
"How many numbers to you wish to times, divide, subtract or add? 2 - 4 or do you not want to use this, if so type 'no': ")
#Waits for a second
time.sleep(1)
#Else
else:
#Waits for a second
time.sleep(1)
#Tells the user to try again
print("Please try again...")
#Waits for a second
time.sleep(1)
#Asks the user whether they wish to times, divide, add or subtract
calculation = input("Do you wish to times, divide, add or subtract?")
#Else
else:
#Waits for a second
time.sleep(1)
#Thanks the user for using the calculator
print("Thank you for using the calculator")
#Asks the user if they want to use the program
use = input("Would you like to use this program? ")
#Defines the subroutine guessnum
def guessnum():
#Imports random
import random
#Welcomes the user to the guess my number game
print("Welcome to the guess my random number game!")
#Waits for half a second
time.sleep(0.5)
#Tells the user the rules are simple
print("The rules are quite simple: ")
#Waits for half a secondos
time.sleep(0.5)
#Tellse the user that they have 10 lives to begin with and this decreases throughout the levels
print(
"You have 10 lives, this means you have 10 guesses to get my number at the start and throughout the levels the amount of lives decreases")
#Waits for half a second
time.sleep(0.5)
#Tells the user that is the number is guessed then the user will move to the next level
print("If you succeed and guess my number within those guesses then you will move to the next level")
#Waits for half a second
time.sleep(0.5)
#Tells the user that if not, you loose the game
print("If not.. then you have lost the game")
#Waits for half a second
time.sleep(0.5)
#Tells the user that if you guess the number you move up a level, the max level is 5
print("Every one you get right you move onto the next, harder level in which the maximum level is 5")
#Waits for half a second
time.sleep(0.5)
#Tells the user that if you guess the number at level 5 you win
print("And if you reach level 5 and guess the number correct then you will win the game")
#Waits for half a second
time.sleep(0.5)
#Tellse the user they're ready to go
print("And you're now ready to go")
#Waits for half a second
time.sleep(0.5)
#Tells the user they can begin
print("So lets begin")
#Asks the user to enter their name
name = input("So.. player, please enter your name: ")
#If the file "game.txt" exists
if os.path.exists("game.txt"):
#Appends the file
f = open("game.txt", "a")
#Closes the file
f.close()
#Else
else:
#Opens the file
f = open("game.txt", "w")
#Closes the file
f.close()
#Opens the file to append
f = open("game.txt", "a")
#Writes 'The User's name is: ' to the file
f.write("The User's name is: ")
#Writes the variable name to the file and creates a new line
f.write(name + "\n")
#Writes a blank space to the file
f.write("")
#Closes the file
f.close()
#Defines the subroutine openfile
def openfile():
#Opens the file to append
f = open("game.txt", "a")
#Writes 'The user reached level ' to the file
f.write("The user reached level ")
#Writes the variable level as string to the file
f.write(str(level))
#Writes a new line to the file and 'And they finished on '
f.write("\n And they finished on ")
#Writes the variable lives as string to the file
f.write(str(lives))
#Writes ' lives.' and two new lines to the file
f.write(" lives." + "\n\n")
#Closes the file
f.close()
#Sets the variable num1 and a random number between 0 and 20
num1 = random.randint(0, 20)
#Sets the variable level to 1
level = 1
#While level is less than 6
while level < 6:
#Sets the variable lives to 10
lives = 10
#Waits for second
time.sleep(1)
#Welcomes the user to level 1
print("Hello", name, "Welcome to level 1!")
#Waits for second
time.sleep(1)
#Tellse the user how many lives they have
print("In this level you have", lives, "lives to guess my number")
#Waits for second
time.sleep(1)
#Tellse the user that the number is being generated
print("I'm thinking of a number between 0 and 20, can you guess it?")
#Waits for second
time.sleep(1)
#Asks the user to make a guess
guess = int(input("Please enter your guess: "))
#While guess is not equal to num1
while guess != num1:
#Sets the variable lives to itself - 1
lives = lives - 1
#If guess is greater than num1
if guess > num1:
#Waits for second
time.sleep(1)
#Tells the user that their guess was wrong and how many lives they have left
print("Your guess was wrong so you have", lives, "lives left")
#Waits for second
time.sleep(1)
#Tells the user the number they guessed is greater than the number
print("However your guess is greater than my number")
#Waits for second
time.sleep(1)
#Asks the user to take a guess
guess = int(input("Please enter your guess: "))
#If lives equals 0
if lives == 0:
#Waits for second
time.sleep(1)
#Tells the user that they've lost the game and thanks them for playing
print("You've now lost the game, thank you for playing!")
#Calls the subroutine openfile()
openfile()
#Asks the user if they want to use the program
use = input("Would you like to use this program? ")
#Else
else:
#The program continues
continue
#If guess is less than num1
elif guess < num1:
#Waits for second
time.sleep(1)
#Tells the user that their guess was wrong and how many lives they have
print("Your guess was wrong so you have", lives, "lives left")
#Waits for second
time.sleep(1)
#Tells the user their guess is less than the number
print("However your guess is less than my number")
#Waits for second
time.sleep(1)
#Tells the user to make a guess
guess = int(input("Please enter your guess: "))
#If lives equals 0
if lives == 0:
#Waits for second
time.sleep(1)
#Tells the user they've lost the game and thanks them for playing
print("You've now lost the game, thank you for playing!")
#Calls the subrouting openfile()
openfile()
#Asks the user to make a guess
use = input("Would you like to use this program? ")
#Else
else:
#The program continues
continue
#If guess is equal to num1
if guess == num1:
#If 0 is less than or equal to lives and lives us less than 11
if 0 <= lives < 11:
#Sets the variable level to itself + 1
level = level + 1
#Waits for second
time.sleep(1)
#Tells the user well done
print("Well done!!")
#Waits for second
time.sleep(1)
#Tells the user that they've guessed the number
print("You've guessed my number!!")
#Waits for second
time.sleep(1)
#Tells the user they will move onto the next level
print("You'll now move onto level", level, "well done!!")
#Else
else:
#Waits for second
time.sleep(1)
#Tells the user that they guessed the number, but in too many guesses so they have lost
print("You guessed the number, but you took too many guesses so you've lost, sorry")
#Calls the subroutine openfile()
openfile()
#Sets the variable lives to 9
lives = 9
#Waits for second
time.sleep(1)
#Welcomes the user to level 2
print("Hello", name, "Welcome to level 2!")
#Waits for second
time.sleep(1)
#Tells them how many lives they have
print("In this level you have", lives, "lives to guess my number")
#Waits for second
time.sleep(1)
#Tells the user that the number is being made
print("I'm thinking of a number between 10 and 40, can you guess it?")
#Sets the variable num1 to a random number between 10 and 40
num1 = random.randint(10, 40)
#Waits for second
time.sleep(1)
#Asks the user to make a guess
guess = int(input("Please enter your guess: "))
#While guess is not equal to num1
while guess != num1:
#Sets the variable lives to itself - 1
lives = lives - 1
#If guess is greater than num1
if guess > num1:
#Waits for second
time.sleep(1)
#Tells the user how many lives they have
print("Your guess was wrong so you have", lives, "lives left")
#Waits for second
time.sleep(1)
#Tells the user the guess was greater than the number
print("However your guess is greater than my number")
#Waits for second
time.sleep(1)
#Asks the user to make a guess
guess = int(input("Please enter your guess: "))
#If lives equals 0
if lives == 0:
#Waits for second
time.sleep(1)
#Tells the user they've lost the game and thanks them for playing
print("You've now lost the game, thank you for playing!")
#Calls the subroutine openfile()
openfile()
#Asks the user if they'd like to use the program
use = input("Would you like to use this program? ")
#Else
else:
#The program continues
continue
#If guess is less than num1
elif guess < num1:
#Waits for second
time.sleep(1)
#Tells the user how many lives they have
print("Your guess was wrong so you have", lives, "lives left")
#Waits for second
time.sleep(1)
#Tells the user their guess was less than the number
print("However your guess is less than my number")
#Waits for second
time.sleep(1)
#Asks the user to make a guess
guess = int(input("Please enter your guess: "))
#If lives equals 0
if lives == 0:
#Waits for second
time.sleep(1)
#Tells the user they've lost the game and thanks them for playing
print("You've now lost the game, thank you for playing!")
                #Calls the subroutine openfile()
openfile()
#Asks the user if they'd like to use the program still
use = input("Would you like to use this program? ")
#Else
else:
#The program continues
continue
#If guess is equal to num1
if guess == num1:
#If 0 is less than or equal to lives and lives is less than 11
if 0 <= lives < 11:
#The variable level is set to itself + 1
level = level + 1
#Waits for second
time.sleep(1)
#Tells the user well done
print("Well done!!")
#Waits for second
time.sleep(1)
#Tells the user they've guessed the number
print("You've guessed my number!!")
#Waits for second
time.sleep(1)
#Tells the user they'll now move onto level 3
print("You'll now move onto level", level, "well done!!")
#Else
else:
#Waits for second
time.sleep(1)
#Tells the user they guessed the number in too many guesses so they lost
print("You guessed the number, but you took too many guesses so you've lost, sorry")
#Calls the subroutine openfile()
openfile()
#Sets the variable lives to 8
lives = 8
#Waits for second
time.sleep(1)
#Welcomes the user to level 3
print("Hello", name, "Welcome to level 3!")
#Waits for second
time.sleep(1)
#Tells the user how many lives they have
print("In this level you have", lives, "lives to guess my number")
#Waits for second
time.sleep(1)
#Tells the user the number is being thought of
print("I'm thinking of a number between 30 and 60, can you guess it?")
#Sets the variable num1 to a random number between 30 and 60
num1 = random.randint(30, 60)
#While guess is not equal to num1
while guess != num1:
#Sets the variable to itself - 1
lives = lives - 1
#If guess is greater than num1
if guess > num1:
#Waits for second
time.sleep(1)
#Tells the user how many lives they have
print("Your guess was wrong so you have", lives, "lives left")
#Waits for second
time.sleep(1)
#Tells the user that the guess was greater than the number
print("However your guess is greater than my number")
time.sleep(1)
#Tells the user to make a guess
guess = int(input("Please enter your guess: "))
#If lives equals 0
if lives == 0:
#Waits for second
time.sleep(1)
#Tells the user that they've lost the game and thanks them for playing
print("You've now lost the game, thank you for playing!")
#Calls the subroutine openfile()
openfile()
#Asks the user if they would like to use this program
use = input("Would you like to use this program? ")
#Else
else:
#The program will continue
continue
#If guess is less than num1
elif guess < num1:
#Waits for second
time.sleep(1)
#Tells the user how many lives they have left
print("Your guess was wrong so you have", lives, "lives left")
#Waits for second
time.sleep(1)
#Tells the user that their guess is less than the number
print("However your guess is less than my number")
#Waits for second
time.sleep(1)
#Asks the user to make a guess
guess = int(input("Please enter your guess: "))
#If lives equals 0
if lives == 0:
#Waits for a second
time.sleep(1)
#Tells the user they've lost the game and thanks them for playing
print("You've now lost the game, thank you for playing!")
#Calls the subroutine openfile()
openfile()
#Asks the user if they would like to use this program
use = input("Would you like to use this program? ")
#Else
else:
#The program continues
continue
#If guess equals num1
if guess == num1:
            #If 0 is less than or equal to lives and lives is less than 11
if 0 <= lives < 11:
#Sets the variable level to itself + 1
level = level + 1
#Waits for a second
time.sleep(1)
#Tells the user well done
print("Well done!!")
#Waits for a second
time.sleep(1)
#Tells the user they've guessed the number
print("You've guessed my number!!")
#Waits for a second
time.sleep(1)
#Tells the user they will move onto level 4
print("You'll now move onto level", level, "well done!!")
#Else
else:
#Waits for a second
time.sleep(1)
#Tells the user they guessed the number but took too many guesses so they lost
print("You guessed the number, but you took too many guesses so you've lost, sorry")
#Calls the subroutine openfile()
openfile()
#Waits for a second
time.sleep(1)
#Asks the user to enter the guess
guess = int(input("Please enter your guess: "))
#Sets the variable lives to 7
lives = 7
#Waits for a second
time.sleep(1)
#Welcomes the user to level 4
print("Hello", name, "Welcome to level 4!")
#Waits for a second
time.sleep(1)
#Tells the user how many lives they have
print("In this level you have", lives, "lives to guess my number")
#Waits for a second
time.sleep(1)
#Tells the user the number is being made
print("I'm thinking of a number between 40 and 80, can you guess it?")
#Sets the variable num1 to a random number between 40 and 80
num1 = random.randint(40, 80)
#Waits for a second
time.sleep(1)
#Asks the user to make a guess
guess = int(input("Please enter your guess: "))
#While guess is greater than num1 or guess is less than num1
while guess > num1 or guess < num1:
#Sets the variable lives to itself - 1
lives = lives - 1
#If guess is greater than num1
if guess > num1:
#Waits for a second
time.sleep(1)
#Tells the user how many lives they have left
print("Your guess was wrong so you have", lives, "lives left")
#Waits for a second
time.sleep(1)
#Tells the user their guess is greater than the number
print("However your guess is greater than my number")
#Waits for a second
time.sleep(1)
#Asks to make a guess
guess = int(input("Please enter your guess: "))
#If lives equals 0
if lives == 0:
#Waits for a second
time.sleep(1)
#Tells the user they've lost them and thanks them for playing
print("You've now lost the game, thank you for playing!")
#Calls the subroutine openfile()
openfile()
#Asks the user if they'd like to use this program
use = input("Would you like to use this program? ")
#Else
else:
#The program continues
continue
#If guess is less than num1
elif guess < num1:
#Waits for a second
time.sleep(1)
#Tells the user how many lives they have left
print("Your guess was wrong so you have", lives, "lives left")
#Waits for a second
time.sleep(1)
#Tells the user the guess is less than the number
print("However your guess is less than my number")
#Waits for a second
time.sleep(1)
#Asks the user to make a guess
guess = int(input("Please enter your guess: "))
#If lives equals 0
if lives == 0:
#Waits for a second
time.sleep(1)
#Tells the user they've lost the game
print("You've now lost the game, thank you for playing!")
#Calls the subroutine openfile()
openfile()
#Asks the user if they'd like to use the program
use = input("Would you like to use this program? ")
#Else
else:
#The program continues
continue
#Else
else:
#If 0 <= lives <11:
if 0 <= lives < 11:
#The variable level is set to itself + 1
level = level + 1
#Waits for a second
time.sleep(1)
#Tells the user well done
print("Well done!!")
#Waits for a second
time.sleep(1)
#Tells the user they guessed the number
print("You've guessed my number!!")
#Waits for a second
time.sleep(1)
#Tells the user they'll move onto level 5
print("You'll now move onto level", level, "well done!!")
#Else
else:
#Waits for a second
time.sleep(1)
#Tells the user they guessed the number but took too many guesses so they lost
print("You guessed the number, but you took too many guesses so you've lost, sorry")
#Calls the subroutine openfile()
openfile()
#Waits for a second
time.sleep(1)
#Asks the user to make a guess
guess = int(input("Please enter your guess: "))
        #Sets the variable lives to 6
lives = 6
#Waits for a second
time.sleep(1)
#Welcomes the user to level 5
print("Hello", name, "Welcome to level 5!")
#Waits for a second
time.sleep(1)
#Tells the user how many lives they have
print("In this level you have", lives, "lives to guess my number")
#Waits for a second
time.sleep(1)
#Tells the user the number is being made
print("I'm thinking of a number between 50 and 100, can you guess it?")
#The variable num1 is set to a random number between 50 and 100
num1 = random.randint(50, 100)
#Waits for a second
time.sleep(1)
#Asks the user to make a guess
guess = int(input("Please enter your guess: "))
#While guess is not equal to num1
while guess != num1:
#Sets the variable lives to itself - 1
lives = lives - 1
#If guess is greater than num1
if guess > num1:
#Waits for a second
time.sleep(1)
#Tells the user how many lives they have
print("Your guess was wrong so you have", lives, "lives left")
#Waits for a second
time.sleep(1)
                #Tells the user their guess is greater than the number
print("However your guess is greater than my number")
#Waits for a second
time.sleep(1)
#Asks the user to enter a guess
guess = int(input("Please enter your guess: "))
#If lives equals 0
if lives == 0:
#Waits for a second
time.sleep(1)
#Tells the user they have lost the game and thanks them for playing
print("You've now lost the game, thank you for playing!")
                #Calls the subroutine openfile()
openfile()
                #Asks the user if they want to use this program
use = input("Would you like to use this program? ")
#Else
else:
#The program continues
continue
#If guess is less than num1
elif guess < num1:
#Waits for a second
time.sleep(1)
#Tells the user how many lives they have
print("Your guess was wrong so you have", lives, "lives left")
#Waits for a second
time.sleep(1)
#Tells the user their guess is less than the number
print("However your guess is less than my number")
#Waits for a second
time.sleep(1)
#Tells the user to make a guess
guess = int(input("Please enter your guess: "))
                #If lives equals 0
if lives == 0:
#Waits for a second
time.sleep(1)
#Tells the user they lost and thanks them for playing
print("You've now lost the game, thank you for playing!")
#Calls the subroutine openfile()
openfile()
#Asks the user if they would like to use this program
use = input("Would you like to use this program? ")
#Else
else:
#The program will continue
continue
#Else
else:
#If 0 is less than or equal to lives and lives is less than 11
if 0 <= lives < 11:
#Sets the variable level to itself + 1
level = level + 1
#Waits for a second
time.sleep(1)
#Tells the user well done
print("Well done!!")
#Waits for a second
time.sleep(1)
#Tells the user they've guessed the number
print("You've guessed my number!!")
#Waits for a second
time.sleep(1)
#Tells the user they won
print("You won!!")
#Else
else:
#Waits for a second
time.sleep(1)
#Tells the user they guessed the number but took too many guesses so they lost the game
print("You guessed the number, but you took too many guesses so you've lost, sorry")
#Calls the subroutine openfile()
openfile()
#Else
else:
#Waits for a second
time.sleep(1)
#Tells the user they've won the game
print("You've won the game!!")
#Waits for a second
time.sleep(1)
#Tells them congratulations
print("Congratulations!!")
#Waits for a second
time.sleep(1)
#Thanks the user for playing
print("Thank you for playing the game!!")
#Calls the subroutine openfile()
openfile()
#Asks the user if they would like to use this program
use = input("Would you like to use this program? ")
#If which is equal to 'bmi'
if which == "bmi":
#The BMI() subroutine is called
BMI()
#If which is equal to 'calculator'
elif which == "calculator":
#The calc() subroutine is called
calc()
#If which is equal to 'guess my number'
elif which == "guess my number":
#The guessnum() subroutine is called
guessnum()
#Else
else:
#Waits for a second
time.sleep(1)
#Asks the user which program they would like to use
which = input("Which program would you like to use?")
#Else
else:
#Waits for a second
time.sleep(1)
#Thanks the user for using the program
print("Thank you for using this program, the program will now end.")
#The program ends
sys.exit(1)
| [
"jake.bailey2801@gmail.com"
] | jake.bailey2801@gmail.com |
c0c66eaea9dfe2425afa77f6df0f941293a1ae4f | 71a8f91e25a6b2910d70bfc48fa8ac55c39b9924 | /algorithms/find_max_1.py | 5b3cd85e685e68366c777d62c2f9f9040f5868c9 | [] | no_license | zongzeliunt/Python_experiments | 7184f5da4c5d914054b60994f6ef0221ea462d57 | a36d5b89115aaa0e31efb69dad1dbf7371c45c0e | refs/heads/master | 2021-12-09T15:35:50.478810 | 2021-08-02T20:49:24 | 2021-08-02T20:49:24 | 201,525,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | #a = [1,2,3,4,5,6,7,5,3,1]
#a = [1,3,20,17,15,14,13,12,6,4,2]  # alternative unimodal test input
a = [1, 3, 5, 7, 8, 15, 20, 17, 6, 4, 2]

def find_max(a):
    """Return the peak (maximum) element of a unimodal list *a*.

    The list is assumed to strictly increase and then strictly decrease;
    the peak is located by binary search in O(log n) recursive steps.
    Returns -1 for an empty list (kept for backward compatibility).

    Fixes vs. the original: '//' replaces '/' so the midpoint stays an
    integer under Python 3, 'print' statements become function calls,
    and the leftover debug prints of intermediate state are removed.
    """
    length = len(a)
    if length == 0:
        return -1
    if length == 1:
        return a[0]
    # Compare the two middle neighbours to decide which half holds the peak.
    mid = length // 2 - 1
    if a[mid] < a[mid + 1]:
        # Still rising here: the peak is strictly to the right of mid.
        return find_max(a[mid + 1: length])
    else:
        # Falling (or at the peak): keep the left half, including mid.
        return find_max(a[0: mid + 1])

print(find_max(a))
| [
"zongzeli2@my.unt.edu"
] | zongzeli2@my.unt.edu |
eb342f478e17e53e7432ec941a888fbc30b223a4 | b5fbd7600a33c6231af9856560053d9051f6781e | /api/category/serializers.py | e7255e05ee956d8b31bd61212a78b5e01e1d858c | [] | no_license | ShourovRoy/django-backend | e311c21365209573d786b0d0274901ba4faae05e | e5664e2e940c98b0aadf6973370b3fa7ca4d7da7 | refs/heads/main | 2023-01-28T14:56:33.105804 | 2020-12-04T07:46:47 | 2020-12-04T07:46:47 | 318,530,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | from rest_framework import serializers
from .models import Category
class CategorySerializer(serializers.HyperlinkedModelSerializer):
    """DRF serializer for the Category model.

    Exposes the fields listed in ``Meta.fields`` below; the hyperlinked
    base class renders related objects as URLs rather than primary keys.
    """

    class Meta:
        # Model class this serializer reads from / writes to.
        model = Category
fields = ['name', 'description', 'created_at', 'updated_at'] | [
"shourovroy2389@gmail.com"
] | shourovroy2389@gmail.com |
107a88bd95231f9b662b36f5797794f7224e7e21 | 02c352e42f2fabcb655f3e7472cbe9a19844ad78 | /tutorials/phydyn-week11/relabel_fasta.py | c24b5062955e40157e65f8df177f1356e98bffcc | [] | no_license | davidrasm/MolEpi | 542387a69989d7b13309383e85f36d3eae50dac0 | 5f2195b93e331a6742005a91f5ed0cc50646034a | refs/heads/gh-pages | 2023-05-01T02:22:44.961544 | 2022-04-18T14:48:03 | 2022-04-18T14:48:03 | 233,446,759 | 3 | 1 | null | 2023-04-12T05:49:58 | 2020-01-12T19:31:28 | Python | UTF-8 | Python | false | false | 598 | py | """
Filter sequences in fasta file
Created on Wed May 8 09:59:01 2019
@author: david
"""
from Bio import SeqIO
from Bio import Entrez
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import random
fasta_file = 'WAhalf_cov2020_aligned.fasta'
records = SeqIO.parse(fasta_file, "fasta")
records_out = [];
for record in records:
record.id = record.id.replace('|', '_') + '_Il'
record.id = record.id.replace('2020_EPI_ISL_', '')
print(record.id)
record.name = ''
record.description = ''
records_out.append(record)
SeqIO.write(records_out, "WAhalf_cov2020_relabeled.fasta", "fasta") | [
"davidrasm@gmail.com"
] | davidrasm@gmail.com |
b9810ff31175bdadcc791279425c342667486090 | 4d80969ec8a7def1de9835aac8be63b5f360f28b | /just_do_it/what.py | 53a6fbfd9c57cb8fd4e92af2c3055b6ff6391231 | [] | no_license | OlgaVorokh/cnn_project | 081a883781cd286d648904f4b54df48facd45a57 | 181ff577b8479409afef14614b31b297620e6a73 | refs/heads/master | 2021-01-20T08:57:05.079153 | 2017-08-27T19:58:13 | 2017-08-27T19:58:13 | 101,576,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,926 | py | # -*- coding: utf-8 -*-
import numpy as np
import codecs
import re
import itertools
from collections import Counter
# SynTagRus treebank in CoNLL-U format; DATA_PATH is rebound at runtime by
# get_data() to point at the train or test file.
DATA_TRAIN_PATH = './data/ru_syntagrus-ud-train.conllu'
DATA_TEST_PART = './data/ru_syntagrus-ud-test.conllu'
DATA_PATH = None
# Constants for the column indices of a CoNLL-U token line.
COLCOUNT = 10
ID, FORM, LEMMA, UPOSTAG, XPOSTAG, FEATS, HEAD, DEPREL, DEPS, MISC = range(COLCOUNT)
COLNAMES = u"ID,FORM,LEMMA,UPOSTAG,XPOSTAG,FEATS,HEAD,DEPREL,DEPS,MISC".split(u",")
# If it is necessary to pad a sentence/window, this fake word is the filler.
FAKE_WORD = 'aaaaaaaaaaa'
WORD2VEC_VECTOR_SIZE = 40
# Per-token class labels; note that get_data() reassigns several of these
# globals depending on the requested data format.
MARK_SUBJECT = 1
MARK_PREDICATE = 2
MARK_ENOTHER = 0
NUM_CLASSES = 3
def tree_generator(inp):
    """Yield sentences from CoNLL-U formatted lines.

    Each sentence is a list of token rows; each row is the list of
    tab-separated columns of one token line.  Comment lines ('#') are
    skipped and a blank line terminates the current sentence.

    Fixes vs. the original: (a) the final sentence is now flushed even
    when the input does not end with a blank line (it used to be silently
    dropped); (b) the stray-line branch that yielded the accumulator
    without resetting it (letting later tokens mutate an already-yielded
    sentence) is gone — non-token, non-comment lines are simply ignored.
    """
    word_lines = []  # token rows of the sentence being accumulated
    for line in inp:
        line = line.rstrip(u"\n")
        if not line:  # blank line: sentence done
            if word_lines:
                yield word_lines
                word_lines = []
            continue
        if line[0] == u"#":  # comment/metadata line
            continue
        if line[0].isdigit():
            word_lines.append(line.split("\t"))
    # Flush the trailing sentence.
    if word_lines:
        yield word_lines
def get_words_from_tree(tree):
    """Return the lower-cased word forms of one sentence.

    Punctuation tokens (UPOSTAG == 'POINT') are mapped to the single
    placeholder token instead of their surface form.
    """
    return [u'препинание' if row[UPOSTAG] == 'POINT' else row[FORM].lower()
            for row in tree]
def make_sentence_graph(tree):
    """Build the dependency graph of a sentence.

    Returns a list of dicts, one per vertex (index 0 is the virtual root):
    graph[head][deprel] == 1-based index of the dependent token.  Note that
    a head keeps only one dependent per relation label — a later token with
    the same (head, deprel) pair overwrites an earlier one.
    """
    graph = [{} for _ in xrange(len(tree) + 1)]
    for position, row in enumerate(tree, start=1):
        head = int(row[HEAD])
        graph[head][row[DEPREL]] = position
    return graph
# =================================== REWRITE ALL ABOVE ============================================
# sent_id = 2
# text = About ANSI SQL query mode
# ID, FORM, LEMMA, UPOSTAG, XPOSTAG, FEATS, HEAD, DEPREL, DEPS, MISC = range(COLCOUNT)
# 1 About _ ADP _ _ 5 case _ _
# 2 ANSI _ PROPN SG-NOM _ 5 compound _ _
# 3 SQL _ PROPN SG-NOM _ 2 flat _ _
# 4 query _ NOUN SG-NOM _ 2 flat _ _
# 5 mode _ NOUN _ _ 0 root _ _
def get_answer_from_tree(ver, edge, prev, g, tree, answer):
    """Recursively mark subject/predicate tokens in ``answer``.

    Walks the dependency graph ``g`` starting at vertex ``ver`` (reached
    from ``prev`` through relation ``edge``) and writes MARK_SUBJECT /
    MARK_PREDICATE into ``answer`` (token i -> answer[i - 1], 0-based).

    Fixes vs. the original: the part-of-speech tag lives in the UPOSTAG
    column of a CoNLL-U row, so comparisons against the LEMMA column could
    never match; the morphological features were parsed into ``verb_info``
    but then the raw row was indexed with a string key
    (``ver_info['VerbForm']``), which would raise TypeError whenever that
    branch ran; and FEATS == '_' (no features) no longer crashes dict().
    """
    ver_edges = g[ver]
    if edge == 'root':
        if 'nsubj' in ver_edges or 'nsubjpass' in ver_edges:
            # A root that governs a subject is the predicate.
            answer[ver - 1] = MARK_PREDICATE
        else:
            ver_info = tree[ver - 1]
            if ver_info[UPOSTAG] == 'NOUN' or ver_info[UPOSTAG] == 'PRON':
                answer[ver - 1] = MARK_SUBJECT
            else:
                answer[ver - 1] = MARK_PREDICATE
    elif edge.startswith('nsubj'):
        if 'nsubj' in ver_edges or 'nsubjpass' in ver_edges:
            # A subject that itself governs a subject acts as a predicate.
            answer[ver - 1] = MARK_PREDICATE
        else:
            answer[ver - 1] = MARK_SUBJECT
    elif edge == 'xcomp' or edge.startswith('aux') or edge.startswith('conj'):
        # These relations inherit the role of the governing token.
        answer[ver - 1] = answer[prev - 1]
    elif edge.startswith('acl') or edge.startswith('advcl'):
        if 'nsubj' in ver_edges or 'nsubjpass' in ver_edges:
            answer[ver - 1] = MARK_PREDICATE
        else:
            ver_info = tree[ver - 1]
            if ver_info[UPOSTAG] == 'VERB':
                # FEATS is a '|'-separated list of Key=Value pairs, or '_'
                # when the token carries no features.
                feats = ver_info[FEATS]
                if '=' in feats:
                    verb_info = dict(kv.split('=', 1) for kv in feats.split('|'))
                else:
                    verb_info = {}
                vform = verb_info.get('VerbForm')
                if not (vform == 'Trans' or vform == 'Part'):
                    answer[ver - 1] = MARK_PREDICATE
    for e in ver_edges:
        get_answer_from_tree(ver_edges[e], e, ver, g, tree, answer)
def data_generator(inp):
    """Yield one (words, labels) pair per sentence of the CoNLL-U input."""
    for sentence in tree_generator(inp):
        graph = make_sentence_graph(sentence)
        tokens = get_words_from_tree(sentence)
        labels = [MARK_ENOTHER for _ in tokens]
        # Marks subjects/predicates in-place, starting from the virtual root.
        get_answer_from_tree(0, 'start', -1, graph, sentence, labels)
        yield tokens, labels
def load_data_and_labels():
    """Read the corpus and return (sentences, labels) as parallel lists.

    Uses DATA_PATH when get_data() has selected it (train vs. test) and
    falls back to DATA_TRAIN_PATH otherwise.  Fix vs. the original: the
    file path was hard-coded to DATA_TRAIN_PATH, so the ``status`` switch
    in get_data() never had any effect.
    """
    path = DATA_PATH if DATA_PATH else DATA_TRAIN_PATH
    print ('Load data from file...')
    with codecs.getreader("utf-8")(open(path, mode='U')) as inp:
        sentences = []
        labels = []
        for words, answers in data_generator(inp):
            sentences.append(words)
            labels.append(answers)
        return sentences, labels
# Updaters
# ==================================================
def updater_all_sentence(x, y, len_seq):
    """Join each sentence into one string and pad/trim its labels.

    Returns (x_update, y_update) where x_update[i] is the space-joined
    sentence and y_update[i] is y[i] truncated/zero-padded to ``len_seq``.

    Fix vs. the original: it shallow-copied ``y`` and then popped/extended
    the inner label lists in place, silently mutating the caller's data;
    fresh lists are built instead.
    """
    x_update = [' '.join(words) for words in x]
    y_update = []
    for labels in y:
        trimmed = list(labels[:len_seq])
        trimmed.extend([0] * (len_seq - len(trimmed)))
        y_update.append(trimmed)
    return x_update, y_update
def updater_both_k_words(x, y, k=3):
    """Build one k-word context window per token plus its one-hot label.

    For token j of sentence i the window covers offsets -k//2 .. k//2;
    positions outside the sentence are filled with FAKE_WORD.  The label
    is a NUM_CLASSES-wide one-hot vector for y[i][j].

    Fixes vs. the original: (a) the inner loop reused the parameter ``k``
    as its loop variable, so the window size collapsed after the first
    token; (b) the window list was re-created on every offset, so each
    example kept only the last word of its window.
    """
    x_update = []
    y_update = []
    half = k // 2
    for i in xrange(len(x)):
        for j in xrange(len(x[i])):
            window = []
            for offset in xrange(-half, half + 1):
                pos = j + offset
                if pos < 0 or pos >= len(x[i]):
                    window.append(FAKE_WORD)
                else:
                    window.append(x[i][pos])
            x_update.append(' '.join(window))
            one_hot = [0.] * NUM_CLASSES
            one_hot[y[i][j]] = 1.
            y_update.append(one_hot)
    return x_update, y_update
def updater_both_pairs(x, y):
    """Build every token pair (i <= j) of each sentence with a 6-class label.

    The class encodes the unordered combination of the two token labels:
    0: none/none, 1: none+subject, 2: none+predicate,
    3: subject+predicate, 4: subject+subject, 5: predicate+predicate.
    A pair whose labels fall outside {0, 1, 2} keeps an all-zero vector,
    exactly as before.
    """
    pair_class = {
        (0, 0): 0, (0, 1): 1, (0, 2): 2,
        (1, 2): 3, (1, 1): 4, (2, 2): 5,
    }
    x_update = []
    y_update = []
    for num_seq in xrange(len(x)):
        seq = x[num_seq]
        label_seq = y[num_seq]
        for i in xrange(len(seq)):
            for j in xrange(i, len(seq)):
                x_update.append(seq[i] + ' ' + seq[j])
                one_hot = [0.] * NUM_CLASSES
                cls = pair_class.get(tuple(sorted((label_seq[i], label_seq[j]))))
                if cls is not None:
                    one_hot[cls] = 1.
                y_update.append(one_hot)
    return x_update, y_update
def updater_main_pairs(x, y):
    """Build every token pair (i <= j) of each sentence with a 4-class label.

    The class encodes which of the two tokens carries a non-zero mark:
    0: neither, 1: only the second, 2: only the first, 3: both.
    """
    x_update = []
    y_update = []
    for num in xrange(len(x)):
        seq = x[num]
        for i in xrange(len(seq)):
            for j in xrange(i, len(seq)):
                x_update.append(seq[i] + ' ' + seq[j])
                # 2 * first-marked + second-marked enumerates the 4 cases.
                cls = 2 * bool(y[num][i]) + bool(y[num][j])
                one_hot = [0.] * NUM_CLASSES
                one_hot[cls] = 1.
                y_update.append(one_hot)
    return x_update, y_update
# ==================================================
def get_data(data_format, len_seq=None, status='TRAIN'):
    ''' Return (x_update, y_update) in the representation named by data_format.

    Side effects: rebinds the module-level MARK_* / NUM_CLASSES globals to
    match the chosen format and points DATA_PATH at the train or test corpus
    (status == 'TRAIN' selects the training file, anything else the test one).
    If data_format matches none of the known names, the function fails with
    NameError because x_update is never bound.
    The inline comments after each format are recorded accuracy figures.
    '''
    global MARK_ENOTHER
    global MARK_SUBJECT
    global MARK_PREDICATE
    global NUM_CLASSES
    global DATA_PATH
    if status == 'TRAIN':
        DATA_PATH = DATA_TRAIN_PATH
    else:
        DATA_PATH = DATA_TEST_PART
    # Whole sentence per example; subject and predicate share one label.
    if data_format == 'ALL_SENTENCE':
        MARK_ENOTHER = 0
        MARK_SUBJECT = 1
        MARK_PREDICATE = 1
        # NOTE(review): this branch leaves NUM_CLASSES at its previous value.
        x, y = load_data_and_labels()
        x_update, y_update = updater_all_sentence(x, y, len_seq)
    # 3-word windows; subject and predicate are distinct classes.
    if data_format == 'BOTH_THREE_WORDS': # 0.845 полчаса
        MARK_ENOTHER = 0
        MARK_PREDICATE = 2
        MARK_SUBJECT = 1
        NUM_CLASSES = 3
        x, y = load_data_and_labels()
        x_update, y_update = updater_both_k_words(x, y, k=3)
    # 3-word windows; subject and predicate merged into one class.
    if data_format == 'MAIN_THREE_WORDS': # 0.848
        MARK_ENOTHER = 0
        MARK_SUBJECT = 1
        MARK_PREDICATE = 1
        NUM_CLASSES = 2
        x, y = load_data_and_labels()
        x_update, y_update = updater_both_k_words(x, y, k=3)
    # 5-word windows; distinct classes.
    if data_format == 'BOTH_FIVE_WORDS': # 0.838
        MARK_ENOTHER = 0
        MARK_SUBJECT = 1
        MARK_PREDICATE = 2
        NUM_CLASSES = 3
        x, y = load_data_and_labels()
        x_update, y_update = updater_both_k_words(x, y, k=5)
    # 5-word windows; merged classes.
    if data_format == 'MAIN_FIVE_WORDS': # 0.845
        MARK_ENOTHER = 0
        MARK_SUBJECT = 1
        MARK_PREDICATE = 1
        NUM_CLASSES = 2
        x, y = load_data_and_labels()
        x_update, y_update = updater_both_k_words(x, y, k=5)
    # 7-word windows; merged classes.
    if data_format == 'MAIN_SEVEN_WORDS': # 0.845
        MARK_ENOTHER = 0
        MARK_SUBJECT = 1
        MARK_PREDICATE = 1
        NUM_CLASSES = 2
        x, y = load_data_and_labels()
        x_update, y_update = updater_both_k_words(x, y, k=7)
    # All token pairs; six classes over the unordered label combination.
    if data_format == 'BOTH_PAIRS': # 0.827
        MARK_ENOTHER = 0
        MARK_PREDICATE = 2
        MARK_SUBJECT = 1
        NUM_CLASSES = 6
        x, y = load_data_and_labels()
        x_update, y_update = updater_both_pairs(x, y)
    # All token pairs; four classes over which of the two tokens is marked.
    if data_format == 'MAIN_PAIRS': # 0.827
        MARK_ENOTHER = 0
        MARK_PREDICATE = 1
        MARK_SUBJECT = 1
        NUM_CLASSES = 4
        x, y = load_data_and_labels()
        x_update, y_update = updater_main_pairs(x, y)
    # 9-word windows; merged classes.
    if data_format == 'olyaolya': # 0.845
        MARK_ENOTHER = 0
        MARK_SUBJECT = 1
        MARK_PREDICATE = 1
        NUM_CLASSES = 2
        x, y = load_data_and_labels()
        x_update, y_update = updater_both_k_words(x, y, k=9)
    # Free the raw corpus before returning the converted examples.
    del x
    del y
    return x_update, y_update
def batch_iter(data, batch_size, num_epochs, shuffle=True):
    """Yield (optionally shuffled) mini-batches over ``data`` for several epochs.

    Fix vs. the original: the batch count was int(len(data) / batch_size) + 1,
    which yields a trailing *empty* batch whenever len(data) is an exact
    multiple of batch_size; a true ceiling division is used instead.
    """
    data = np.array(data)
    data_size = len(data)
    # Ceiling division: every example is covered, no empty trailing batch.
    num_batches_per_epoch = (data_size + batch_size - 1) // batch_size
    for epoch in range(num_epochs):
        # Reshuffle at the start of each epoch.
        if shuffle:
            shuffle_indices = np.random.permutation(np.arange(data_size))
            shuffled_data = data[shuffle_indices]
        else:
            shuffled_data = data
        for batch_num in range(num_batches_per_epoch):
            start_index = batch_num * batch_size
            end_index = min((batch_num + 1) * batch_size, data_size)
            yield shuffled_data[start_index:end_index]
if __name__ == '__main__':
x, y = load_data_and_labels()
count_predicate = 0
count_subject = 0.
count_all = 0
for line in y:
count_predicate += line.count(MARK_PREDICATE)
count_subject += line.count(MARK_SUBJECT)
count_all += len(line)
print 'PREDICATE:', count_predicate
print 'SUBJECT:', count_subject
print 'Diff:', count_predicate + count_subject, '/', count_all
print '%', float(count_subject + count_predicate) / count_all | [
"alcyone@yandex-team.ru"
] | alcyone@yandex-team.ru |
dda213c37af2f9c9c79342b1e51e552411080ec5 | 49ab501632b0a8336058406e7daa3afce6be6e93 | /python_server/run_keras_server.py | 14a25f5c8f258346bcedf3cf308c98eb4e1fbf53 | [] | no_license | CharlesFauman/meme_server | 3ab73e9788b9fea26f6ea270563381515d4b0d47 | 75b0d6fc041c1e2b04e260e9eecbff160225a0f6 | refs/heads/master | 2020-03-25T08:58:32.780593 | 2018-08-05T19:24:58 | 2018-08-05T19:24:58 | 143,640,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,029 | py | # USAGE
# Start the server:
# python run_keras_server.py
# Submit a request via cURL:
# curl -X POST -F image=@jemma.png 'http://localhost:5000/predict'
# Submit a request via Python:
# python simple_request.py
# import the necessary packages
import numpy as np
from threading import Thread
import flask
import redis
import uuid
import time
import json
import sys
import io
# initialize constants used for server queuing
PROCESSING_QUEUE = "processing_queue"   # Redis list used as the work queue
BATCH_SIZE = 32                         # max inputs classified per model call
SERVER_SLEEP = 0.25                     # worker poll interval, seconds
CLIENT_SLEEP = 0.25                     # request poll interval, seconds
# initialize our Flask application, Redis server, and Keras model
app = flask.Flask(__name__)
db = redis.StrictRedis(host="localhost", port=6379, db=0)
# NOTE(review): flushdb() wipes the whole Redis db 0 at import time — fine
# for a dedicated instance, destructive if the database is shared.
db.flushdb()
print("* Loading model...")
import meme_model as model
print("* Model loaded")
def classify_process():
    """Background worker loop.

    Polls the Redis processing queue, runs the model on batches of up to
    BATCH_SIZE inputs, and publishes each serialized result under its
    request id so the web handler can pick it up.
    """
    while True:
        # Peek at up to BATCH_SIZE queued items; they stay on the list
        # until trimmed off below.
        pending = db.lrange(PROCESSING_QUEUE, 0, BATCH_SIZE - 1)
        request_ids = []
        batch = None
        for raw in pending:
            item = json.loads(raw)
            decoded = model.preprocess_deserialize(item["input"])
            # Stack decoded inputs row-wise into a single batch array.
            batch = decoded if batch is None else np.vstack([batch, decoded])
            request_ids.append(item["id"])
        if request_ids:
            print("* Batch size: {}".format(batch.shape))
            predictions = model.postprocess_serialize(model.process(batch))
            # Publish each result where the request handler is polling.
            for request_id, result in zip(request_ids, predictions):
                db.set(request_id, json.dumps(result))
            # Drop the processed items from the front of the queue.
            db.ltrim(PROCESSING_QUEUE, len(request_ids), -1)
        time.sleep(SERVER_SLEEP)
@app.route("/predict", methods=["POST"])
def predict():
    """Handle POST /predict.

    Reads the input from either form field 'input' or an uploaded file
    named 'input', pushes it onto the Redis processing queue under a fresh
    UUID, then polls Redis until the background worker has stored the
    result for that UUID.  Returns JSON: {"success": bool, "predictions": ...}.
    """
    # initialize the data dictionary that will be returned from the
    # view
    data = {"success": False}
    print("predicting!")
    # ensure an input was properly uploaded to our endpoint
    if flask.request.method == "POST":
        print("was post!")
        input_form = None
        input_files = None
        if(flask.request.form.get("input")):
            input_form = flask.request.form.get("input")
        if(flask.request.files.get("input")):
            input_files = flask.request.files.get("input").read()
        if input_form or input_files:
            input_ = model.preprocess_serialize(input_form, input_files)
            # generate an ID for the classification then add the
            # classification ID + input to the queue
            k = str(uuid.uuid4())
            d = {"id": k, "input": input_}
            db.rpush(PROCESSING_QUEUE, json.dumps(d))
            # keep looping until our model server returns the output
            # predictions
            # NOTE(review): this poll has no timeout — if the worker thread
            # dies, the HTTP request blocks forever.  Consider a deadline.
            while True:
                # attempt to grab the output predictions
                output = db.get(k)
                # check to see if our model has classified the input
                if output is not None:
                    # add the output predictions to our data
                    # dictionary so we can return it to the client
                    data["predictions"] = json.loads(output)
                    # delete the result from the database and break
                    # from the polling loop
                    db.delete(k)
                    break
                # sleep for a small amount to give the model a chance
                # to classify the input
                time.sleep(CLIENT_SLEEP)
            # indicate that the request was a success
            data["success"] = True
    # return the data dictionary as a JSON response
    return flask.jsonify(data)
# if this is the main thread of execution first load the model and
# then start the server
if __name__ == "__main__":
    # load the function used to classify input images in a *separate*
    # thread than the one used for main classification
    print("* Starting model service...")
    # Daemon thread: it dies with the process instead of blocking exit.
    t = Thread(target=classify_process, args=())
    t.daemon = True
    t.start()
    # start the web server
    # NOTE(review): the worker runs only in this process — serving the app
    # through a multi-process WSGI server would start one worker per process.
    print("* Starting web service...")
    app.run()
"you@example.com"
] | you@example.com |
911e4111113de48d2536e42295d9b53a9386730c | b42a5ad89005fe6b1b2ce9d96453b8f222c68f0d | /users/migrations/0005_user_login_method.py | d0b6be2edfe418eefde7d5d8c5faed5c733fc62b | [] | no_license | doongjun/airbnb-clone | 0562b56eb82e2b5174307132f69137b681d88913 | 6bef617767157a674e7ab07d2890ded91461c3ea | refs/heads/master | 2023-06-24T16:27:51.511412 | 2021-07-25T01:58:39 | 2021-07-25T01:58:39 | 310,051,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | # Generated by Django 2.2.5 on 2020-11-18 14:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add User.login_method: records how the account authenticates
    (plain email signup vs. GitHub / Kakao OAuth); defaults to 'email'.

    Auto-generated by Django — avoid hand-editing the operations below.
    """

    dependencies = [
        ('users', '0004_auto_20201118_2203'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='login_method',
            # choices are (stored_value, human_readable_label) pairs
            field=models.CharField(choices=[('email', 'Email'), ('github', 'Github'), ('kakao', 'Kakao')], default='email', max_length=50),
        ),
    ]
| [
"66319788+doongjun@users.noreply.github.com"
] | 66319788+doongjun@users.noreply.github.com |
8f9536c2451f1c553693aed0b4015a05647789bf | 4b95aeb2533f0a582cea2fb26d6177e94aabb21f | /2020/src/lobby_layout.py | 3bba605d05757a8dc9695996a0304392f18ef81b | [] | no_license | MarcoBurgos/advent_of_code | 0d9984e0fa47f68e52ef0f5cdf7681e23767bd16 | 81ac54bfe200cc348efbe860bd95aae4270f03b7 | refs/heads/main | 2023-02-09T14:40:38.204271 | 2020-12-26T00:09:36 | 2020-12-26T00:09:36 | 317,739,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,687 | py | import sys
from utils import read_and_load_input
# Unit moves on the hex grid, expressed in a scaled 2-D coordinate system:
# east/west shift x by +/-4; the four diagonals shift x by +/-2 and y by
# +/-3 (so e == ne + se, matching hex geometry).
VECTORS = {
    'w' : (-4, 0),
    'e' : ( 4, 0),
    'nw': (-2, -3),
    'ne': ( 2, -3),
    'sw': (-2, 3),
    'se': ( 2, 3),
}
def parse(line):
    """Split a tile path like 'esenee' into its direction tokens
    ('e'/'w' are one character, the diagonals two)."""
    steps = []
    pos = 0
    while pos < len(line):
        width = 1 if line[pos] in ('e', 'w') else 2
        steps.append(line[pos:pos + width])
        pos += width
    return steps
def walk(path):
    """Follow a parsed direction list from the origin; return the end tile."""
    x = y = 0
    for direction in path:
        dx, dy = VECTORS[direction]
        x, y = x + dx, y + dy
    return x, y
def lobby_layout_1():
    """Flip each tile named by the module-level ``tiles`` paths.

    Returns the set of tiles left black, i.e. those flipped an odd
    number of times.
    """
    black = set()
    for path in tiles:
        # Toggle membership: present -> removed, absent -> added.
        black.symmetric_difference_update({walk(path)})
    return black
def neighbors(tile):
    """Yield the six hex neighbours of *tile*."""
    x, y = tile
    for dx, dy in VECTORS.values():
        yield (x + dx, y + dy)
def lobby_layout_2(blackTiles):
    """Run 100 days of the tile automaton; return the final black count.

    Daily rule: a black tile stays black with 1 or 2 black neighbours;
    a white tile turns black with exactly 2 black neighbours.
    """
    for _ in range(100):
        # Only black tiles and their neighbours can be black tomorrow.
        candidates = set(blackTiles)
        for tile in blackTiles:
            candidates.update(neighbors(tile))
        next_black = set()
        for tile in candidates:
            black_count = sum(1 for n in neighbors(tile) if n in blackTiles)
            if tile in blackTiles:
                keep = black_count in (1, 2)
            else:
                keep = black_count == 2
            if keep:
                next_black.add(tile)
        blackTiles = next_black
    return len(blackTiles)
if __name__ == '__main__':
    # Advent of Code 2020, day 24: part 1 flips tiles per the input paths,
    # part 2 evolves the resulting layout for 100 days.
    input_data = read_and_load_input("Day24")
    # ``tiles`` is read as a module-level global by lobby_layout_1().
    tiles = [parse(line.rstrip()) for line in input_data]
    blackTiles = lobby_layout_1()
    print(f"Solution 1: {len(blackTiles)}")
    print(f"Solution 2: {lobby_layout_2(blackTiles)}")
| [
"marko_burgos@gmail.com"
] | marko_burgos@gmail.com |
1f14cb5ba4813adfb8e5a0d46741ee3ea9bc30af | c8e15e95026b7b1429e8c2201aafea477280a2c0 | /inspections/settings.py | 3641b379645240ec024af646feb03e542c7d41a4 | [] | no_license | imclab/ifcinspection | 8957d03bc5fd61d7f6054939cd277d0583f27d14 | 866a111082771206d901a2983567e2d3eb12a7c9 | refs/heads/master | 2021-01-15T09:56:33.974093 | 2014-04-08T02:39:49 | 2014-04-08T02:39:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,723 | py | # Django settings for inspections project.
import os
import getpass
# NOTE(review): DEBUG is enabled — it must be False before deploying to
# production (it leaks stack traces and settings to end users).
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Site administrators: receive error e-mails when DEBUG is False.
ADMINS = (
    ('rlau', 'rlau@mit.edu'),
)
MANAGERS = ADMINS
# Database selection by OS user: any account other than 'Ryan' is assumed to
# be the production host (credentials come from ~/.my.cnf); the developer's
# machine uses a local root-access MySQL database.
if getpass.getuser() != 'Ryan': # for production
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'NAME': 'rlau+inspections',
            'OPTIONS': {
                'read_default_file' : os.path.expanduser('~/.my.cnf'),
            },
            'PORT': '', # Set to empty string for default. Not used with sqlite3.
        }
    }
else:
    DATABASES = { # for development
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'NAME': 'inspections',
            'USER': 'root',
            'PORT': '', # Set to empty string for default. Not used with sqlite3.
        }
    }
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.4/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
'/User/Ryan/Desktop/PSK/JudComm/ifcinspection/inspectionapp/static',
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): this key is committed to version control — it should be
# rotated and loaded from an environment variable in production.
SECRET_KEY = '9ot=9=b=#u_&owizt^&k3+_l##*4#ue65o4kg@pkow8c9$(4vq'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'inspections.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'inspections.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Admin site and its documentation app.  Each app is listed exactly once:
    # the original tuple repeated 'django.contrib.admin' and
    # 'django.contrib.admindocs', which newer Django versions reject with
    # "Application labels aren't unique".
    'django.contrib.admin',
    'django.contrib.admindocs',
    'inspectionapp',
    'south',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
"rlau@mit.edu"
] | rlau@mit.edu |
f081eba8ccd0e874b73f18e6582caf92e3dc704f | 3c1c4cabf2875536af67358fd3a0c877501c0e67 | /BookStore/BookStore/urls.py | 4a0c8a4727b326176093a9e536063cb5117e130e | [] | no_license | z850672062/book | dcf2dfc6724df9943c548dddf806337fe2071414 | 816d5f721151a01a0ef331e88c19201bb19da7fc | refs/heads/master | 2023-05-08T02:12:36.216094 | 2021-05-27T09:06:45 | 2021-05-27T09:06:45 | 371,312,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | """BookStore URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# URL routes: only the Django admin interface is currently exposed.
urlpatterns = [
    path('admin/', admin.site.urls),
]
| [
"850672062@qq.com"
] | 850672062@qq.com |
c5fdd82c49629d8740496be1867f94403decf673 | 512d704dfcaccfd1a3e3409592e5b22dba1c5be1 | /week4-numbers/random_task_7.py | df616b4c393e68b3fad0b75a511be974d946624e | [] | no_license | elemanjan/python_01 | 93e95f5c657b4e67a266fa3f43de27c6a74a40ce | ae88af2ceb1a56d549ffe62f58e04818427b9ca5 | refs/heads/master | 2023-03-04T09:56:08.001954 | 2021-02-15T02:50:16 | 2021-02-15T02:50:16 | 317,219,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | x = int(input('Enter number for X: '))
y = int(input('Enter number for Y: '))
print(abs(x-y) / (x + y))
| [
"mr.eleman@gmail.com"
] | mr.eleman@gmail.com |
6932ac9a7c4fdfd992a60e36299250e4c76f3fcc | 8aff2fc26b00654a0c08a3cbf158c70e7ac97fee | /flask_monitor_agent/utils.py | 97595b791d6a4a62cd7a6ee1fbbf0547db96245e | [] | no_license | zrongh90/zdj | e82b360529792aace62a448c6c24cbfe567384fb | b553854aec3b2e9e3abc8cb08cc146410a1905f3 | refs/heads/master | 2022-12-14T14:59:44.445899 | 2018-12-28T13:03:45 | 2018-12-28T13:03:45 | 131,481,766 | 0 | 0 | null | 2022-12-08T02:54:03 | 2018-04-29T09:30:30 | Python | UTF-8 | Python | false | false | 1,235 | py | # encoding: utf-8
# 定义agent使用的所有方法,例如如何获取hostname,IP等
import socket
import psutil
def get_hostname():
    """Return this machine's hostname (as configured, e.g. in /etc/hostname)."""
    return socket.gethostname()
def get_ip_address():
    """Resolve this host's IP address from its hostname.

    Resolution goes through the normal resolver, so /etc/hosts entries for
    the hostname take effect.
    """
    return socket.gethostbyname(get_hostname())
def get_cpu_percent():
    """Return the system-wide CPU utilisation percentage via psutil."""
    return psutil.cpu_percent()
def get_mem_percent():
    """Return the percentage of physical memory in use via psutil."""
    return psutil.virtual_memory().percent
def get_cpu_core_num():
    """Return the number of logical CPUs via psutil."""
    return psutil.cpu_count()
def get_memory():
    """Return total physical memory in MB (psutil reports bytes)."""
    total_bytes = psutil.virtual_memory().total
    return total_bytes / 1024 / 1024
"397864223@qq.com"
] | 397864223@qq.com |
0778305f911ec263c0f9a423f6c0f6c251927391 | b4eac531c95793a78dd6132fedef089d1d0d509d | /python/obli99/obli99_weirdAlgorithm.py | 2d9bc628613fc9e8c1e3f4064a6d42b9d29e21ee | [] | no_license | LoopGlitch26/CSES-Problems | 2cceddd6e1072c099298c164427a562cee4792bb | da41aafb5f73b41a1455caba2dd4f2104fe45a04 | refs/heads/master | 2023-01-08T21:44:36.113975 | 2020-11-15T19:52:38 | 2020-11-15T19:52:38 | 303,933,770 | 1 | 0 | null | 2020-11-15T19:52:39 | 2020-10-14T07:17:27 | Java | UTF-8 | Python | false | false | 496 | py | # Question: Consider an algorithm that takes as input a positive integer n. If n is even, the algorithm divides it by two, and if n is odd, the algorithm multiplies it by three and adds one.
# Link: https://cses.fi/problemset/task/1068/
# author: jyotij
# profile: https://cses.fi/user/32333
# solution: https://cses.fi/problemset/result/1133081/
# Weird algorithm (Collatz, CSES 1068): starting from n, halve even values
# and map odd values to 3n+1, printing every value until n reaches 1.
n = int(input())  # original had redundant double parentheses
print(n, end=" ")
while n != 1:
    if n % 2 == 0:
        n //= 2
    else:
        # n is odd here by elimination; the original's `elif n % 2 != 0`
        # re-tested a condition that is always true at this point.
        n = n * 3 + 1
    print(n, end=" ")
"jyotijayadeep.jena2000@gmail.com"
] | jyotijayadeep.jena2000@gmail.com |
def check_up(collection):
    """Return True if every element of *collection* is truthy.

    Vacuously True for an empty collection (matching the original loop,
    which fell through to `return True`).  The hand-rolled
    `if bool(e): pass / else: return False` loop is exactly the built-in
    all().
    """
    return all(collection)
| [
"magdalena.barbara.wasilewska@gmail.com"
] | magdalena.barbara.wasilewska@gmail.com |
1f8ff73847238507b2bd80765ea56aaf3274d017 | 7f40eb2d4be21397266a9e0df06ed47b59d94f6e | /feature_extractor.py | 92ad310b7288904657f7d9b66fa431e592aa9cad | [] | no_license | yy9669/2017_visual_concepts-1 | 467da74204cd88ed6b04a4ffeb2019b6263fd073 | 76ec7fba2e78c0cae51a619f1dbe6ef6c153a1a3 | refs/heads/master | 2022-01-04T23:04:28.095720 | 2019-05-15T12:55:07 | 2019-05-15T12:55:07 | 113,170,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,764 | py | from utils import *
from tensorflow.python.client import timeline
from datetime import datetime
import network as vgg
from global_variables import *
from scipy.cluster import hierarchy
from scipy.spatial.distance import pdist
from sklearn.metrics.pairwise import cosine_distances
import matplotlib.pyplot as plt
import random
import json
from scipy.optimize import linear_sum_assignment
class FeatureExtractor:
    """Wraps a restored VGG-16 TensorFlow graph and extracts the activations
    of one chosen end-point layer for images loaded from disk.

    NOTE(review): the checkpoint directory is hard-coded and batch_size is
    fixed at 1 — confirm before reuse outside the original environment.
    """
    def __init__(self, which_layer='pool4', which_snapshot=200000, from_scratch=False):
        """Build the VGG-16 graph on CPU and restore its weights.

        which_layer: end-point name (e.g. 'pool4'); a leading digit selects a
            numbered 'vgg_16_<n>/...' scope variant instead.
        which_snapshot: 0 restores the base 'fine_tuned' checkpoint (or runs
            the variable initializer when from_scratch is True); any other
            value restores 'fine_tuned-<which_snapshot>'.
        """
        # params
        self.batch_size = 1
        self.scale_size = vgg.vgg_16.default_image_size
        # Runtime params
        checkpoints_dir = '/data2/xuyangf/OcclusionProject/NaiveVersion/checkpoint'
        tf.logging.set_verbosity(tf.logging.INFO)
        # Create the model, use the default arg scope to configure the batch norm parameters.
        with tf.device('/cpu:0'):
            self.input_images = tf.placeholder(tf.float32, [self.batch_size, self.scale_size, self.scale_size, 3])
            with tf.variable_scope('vgg_16', reuse=False):
                with slim.arg_scope(vgg.vgg_arg_scope()):
                    _, vgg_end_points = vgg.vgg_16(self.input_images,num_classes=100, is_training=False,dropout_keep_prob=1)
        # A leading digit in which_layer selects a numbered 'vgg_16_<n>' scope;
        # otherwise the layer is looked up in the standard 'vgg_16' scope.
        if which_layer[0]>'0' and which_layer[0]<='9':
            self.features = vgg_end_points['vgg_16_'+which_layer[0]+'/' + which_layer[1:]]
        else:
            self.features = vgg_end_points['vgg_16/' + which_layer] # TODO
        # Create restorer and saver
        restorer = get_init_restorer()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        init_op = tf.global_variables_initializer()
        # Run the session:
        self.sess = tf.Session(config=config)
        print(str(datetime.now()) + ': Start Init')
        if which_snapshot == 0:  # Start from a pre-trained vgg ckpt
            if from_scratch:
                self.sess.run(init_op)
            else:
                restorer.restore(self.sess, os.path.join(checkpoints_dir, 'fine_tuned'))
        else:  # Start from the last time
            restorer.restore(self.sess, os.path.join(checkpoints_dir, 'fine_tuned-' + str(which_snapshot)))
        print(str(datetime.now()) + ': Finish Init')
        # (A commented-out snippet visualising the first-layer conv filters
        # previously lived here; removed for clarity.)
    def extract_from_paths(self, paths):
        """Load every image in *paths*, run the network batch-by-batch, and
        return (features, images, blanks), each truncated to len(paths) rows.

        NOTE(review): assumes process_image(img, path, augment=0) returns
        (image, blank_x, blank_y) with the image already sized to
        scale_size x scale_size — confirm against the project's utils module.
        """
        feature_list = []
        image_list = []
        blank_list=[]
        # -(-a // b) is ceiling division: one pass per (possibly short) batch.
        for i in range(-(-len(paths) // self.batch_size)):
            batch_images = np.ndarray([self.batch_size, self.scale_size, self.scale_size, 3])
            batch_blank=np.ndarray([self.batch_size,2])
            for j in range(self.batch_size):
                # read paths
                if i * self.batch_size + j >= len(paths):
                    break
                img = cv2.imread(paths[i * self.batch_size + j], cv2.IMREAD_UNCHANGED)
                batch_images[j],batch_blank[j][0],batch_blank[j][1] = process_image(img, paths[i * self.batch_size + j], augment=0)
            out_features = self.extract_from_batch_images(batch_images)
            feature_list.append(out_features)
            image_list.append(batch_images)
            blank_list.append(batch_blank)
        features = np.concatenate(feature_list)
        images = np.concatenate(image_list)
        blanks= np.concatenate(blank_list)
        # The final batch may be padded; the slices discard the padding rows.
        return features[:len(paths), :], images[:len(paths), :], blanks[:len(paths), :]
    def extract_from_batch_images(self, batch_images):
        """Run one forward pass; return the chosen layer's activations."""
        feed_dict = {self.input_images: batch_images}
        out_features = self.sess.run(self.features, feed_dict=feed_dict)
        return out_features
| [
"noreply@github.com"
] | yy9669.noreply@github.com |
e95a7916c34df020176c7b88a4bbeb80de9ce5d0 | 0720ff5b76b03e63fd83ae8d7a90c241faee5875 | /MODEL/hparams.py | 259792ef91b4844e3f442ede0898dfa34b8ad275 | [] | no_license | 2g-XzenG/MSAM | d8d8e1ff117885e011b7e8a2934d2c84744b1129 | e1a7ba59073288ea6310aabfcd04639891950c0c | refs/heads/master | 2023-07-10T03:56:24.696992 | 2019-10-02T03:53:23 | 2019-10-02T03:53:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,745 | py | import argparse
class Hparams:
    """Command-line hyper-parameter definitions.

    The parser is built once at class-definition time; callers use
    ``Hparams.parser`` (shared by all references to this class).
    """
    parser = argparse.ArgumentParser()
    # Preprocessing / vocabulary sizes.
    parser.add_argument('--vocab_size', default=13234, type=int)
    parser.add_argument('--grouped_vocab_size', default=272, type=int)
    parser.add_argument('--max_v', default=12, type=int)
    parser.add_argument('--max_c', default=100, type=int)
    # Training data locations.
    parser.add_argument('--VOCAB', default='../PROCESS/VOCAB', help="VOCAB")
    parser.add_argument('--INPUT_DATA', default='../PROCESS/INPUT_DATA', help="INPUT_DATA")
    parser.add_argument('--TARGET_DATA', default='../PROCESS/TARGET_DATA', help="TARGET_DATA")
    parser.add_argument('--LOGDIR', default="LOG/1", help="log directory")
    # Training scheme / model dimensions.
    parser.add_argument('--cost_pred_weight', default='../TOOLS/cost_pred_weight', help="cost_pred_weight")
    parser.add_argument('--train_batch_size', default=128, type=int)
    parser.add_argument('--test_batch_size', default=128, type=int)
    parser.add_argument('--ff_dim', default=128, type=int)
    parser.add_argument('--code_dim', default=128, type=int)
    parser.add_argument('--visit_dim', default=128, type=int)
    # NOTE(review): the original comment claimed this is "same as
    # grouped_vocab_size", but --grouped_vocab_size defaults to 272 while
    # this defaults to 280 — confirm which value is intended.
    parser.add_argument('--patient_dim', default=280, type=int) # same as grouped_vocab_size
    parser.add_argument('--dropout_rate', default=0.5, type=float)
    parser.add_argument('--num_blocks', default=1, type=int)
    parser.add_argument('--num_heads', default=1, type=int)
    # Loss-term weights.
    parser.add_argument('--L2_alpha', default=0.001, type=float)
    parser.add_argument('--ce_alpha', default=1, type=float)
    parser.add_argument('--mse_alpha', default=0.0000001, type=float)
    parser.add_argument('--num_epochs', default=50, type=int)
    parser.add_argument('--eval_every', default=10, type=int)
| [
"xxz005@resfvfxp51khv2d.columbuschildrens.net"
] | xxz005@resfvfxp51khv2d.columbuschildrens.net |
3390cbe0d676d93a296bed147503ef9adea42904 | 4a39ecf62fe6ce8416f7d6af5d035703f3b4c49e | /DoubleGraphCF/preprocess/also_view.py | f108f9ce2ee5c1b9aade612dcd96bc99a6ee31eb | [] | no_license | mindis/DeepRecomm | c7a462c0162e3adb4bb45b2ebf9dc80c54b2165c | f2cccb37aa41167fb11a066781bc82d1b0b7bf22 | refs/heads/master | 2020-07-08T23:29:08.587715 | 2019-08-07T14:12:25 | 2019-08-07T14:12:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,877 | py | import random
# Dataset name and I/O locations: raw splits live under <org_path>/<dataset>/,
# processed rating/negative files are written under <data_path>.
dataset = 'amazon-book'
data_path = 'Data/'
org_path = 'Data/origins/'
def proc_line(line, fdst):
    """Write one user's positive interactions to *fdst* as "user\titem\t1\t0".

    *line* is a space-separated "user item item ..." record.  Returns the
    largest item id seen on the line.

    Fixes over the original: the empty/None guard now runs BEFORE
    line.split() (which raised AttributeError on None), and an empty line
    returns 0 instead of None — the caller compares the return value with
    `max_itemid <`, which is a TypeError against None on Python 3.
    """
    max_itemid = 0
    if not line:
        return max_itemid
    cols = line.split(' ')
    userid = cols[0]
    for itemid in cols[1:]:
        if userid == '' or itemid == '':
            continue
        item = int(itemid)  # parse once instead of twice
        if item > max_itemid:
            max_itemid = item
        fdst.write(userid + '\t' + itemid + '\t1\t0\n')
    return max_itemid
def proc_negative(line, fneg, max_itemid):
    """For each positive (user, item) pair on *line*, write the pair followed
    by 99 randomly sampled negative item ids (ids the user has no positive
    for, drawn uniformly from 0..max_itemid; repeats allowed).
    """
    if line is None or line == '':
        return
    fields = line.split(' ')
    user = fields[0]
    positives = [int(s) for s in fields[1:] if s != '']
    for item in positives:
        fneg.write('(' + user + ',' + str(item) + ')')
        written = 0
        while written < 99:
            candidate = random.randint(0, max_itemid)
            if candidate not in positives:
                fneg.write('\t' + str(candidate))
                written += 1
        fneg.write('\n')
def proc(mode='train'):
    """Convert <org_path><dataset>/<mode>.txt into a tab-separated rating file.

    Each source line is "user item item ...".  For mode='test' a companion
    '.test.negative' file is also produced, with 99 random negatives per
    user line drawn up to the largest item id seen among the positives.

    NOTE(review): the second-pass handles (fneg and the re-opened fsrc) are
    never closed — output relies on interpreter-exit flushing; confirm.
    """
    fsrc = open(org_path + dataset + '/' + mode + '.txt')
    fdst = open(data_path + dataset + '.' + mode + '.rating', 'w')
    max_itemid = 0
    sline = fsrc.readline().replace('\n', '')
    while sline is not None and sline != '':
        tmax = proc_line(sline, fdst)
        if max_itemid < tmax:
            max_itemid = tmax
        sline = fsrc.readline().replace('\n', '')
    fsrc.close()
    fdst.close()
    if mode == 'test':
        # Second pass over the same split to sample negatives per line.
        fneg = open(data_path + dataset + '.test.negative', 'w')
        fsrc = open(org_path + dataset + '/' + mode + '.txt')
        sline = fsrc.readline().replace('\n', '')
        while sline is not None and sline != '':
            proc_negative(sline, fneg, max_itemid)
            sline = fsrc.readline().replace('\n', '')
# Generate the training ratings file (the test split follows on the next line).
proc()
proc('test') | [
"826074356@qq.com"
] | 826074356@qq.com |
9485d33384747f19fcf2ec827745ea2996e7d926 | 19dcea1e9075905ccc4e46d241f2357294e955e6 | /agent/bin/comnsense-worker | 4e8c05554dbcdc05a5fadb0aa36aa72b713b278c | [] | no_license | martinthenext/comnsense | 67647b7ad3b26e928c1eda074807d0243b78fd83 | d5601954e9d2f0a66d553cfc1567e16adcfe97ab | refs/heads/master | 2021-01-16T23:09:47.840135 | 2015-09-09T23:06:28 | 2015-09-09T23:06:28 | 36,244,627 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | #!/usr/bin/env python2
import argparse
import os
import sys

from zmq.eventloop import ioloop

try:
    import comnsense_agent.worker
except ImportError:
    # Running from a source checkout: make the package importable, then retry.
    # (The original used sys.path/os.path here without ever importing sys/os,
    # so this fallback raised NameError instead of fixing the path.)
    sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
    import comnsense_agent.worker
def parse_args(args=None):
    """Build the worker's CLI parser and parse *args* (defaults to sys.argv)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--connection", type=str, required=True,
                        help="agent connection string")
    parser.add_argument("-i", "--ident", type=str, required=True,
                        help="worker identity")
    parser.add_argument("-l", "--log-level", type=str, default="DEBUG",
                        help="logging level")
    return parser.parse_args(args)
def main(args):
    """Run a Worker on a fresh IOLoop until it exits or is interrupted.

    args: namespace from parse_args (provides .ident and .connection).
    """
    loop = ioloop.IOLoop()
    loop.make_current()
    try:
        worker = comnsense_agent.worker.Worker(
            args.ident, args.connection, loop)
        worker.start()
    except (SystemExit, KeyboardInterrupt):
        # Normal shutdown paths — exit quietly.
        pass
# Script entry point: parse CLI options, then run the worker event loop.
if __name__ == '__main__':
    main(parse_args())
| [
"goldshtein.kirill@gmail.com"
] | goldshtein.kirill@gmail.com | |
2ee21a58c8741efe953603dbf9748bb0cd228fd8 | 4610ee6741a83b0993168a38a9016a7623cd3c68 | /utils/save_errors.py | afdbd3fdb454f035c492a9af64bd9ffc6186adb4 | [] | no_license | dpolob/ff | 00baa0139c051c368578c1b0f2551fcbb556ecaf | 4afebd06f306df3e1d697e3ac02d63d45f403fbd | refs/heads/master | 2022-11-06T10:52:33.817184 | 2020-06-23T15:44:01 | 2020-06-23T15:44:01 | 274,448,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | import datetime
import time
from utils import read_parameters, save_parameters
def save_errors(msg):
    """Record *msg* in the persisted parameters dict under 'error',
    keyed by the current Unix timestamp (whole seconds, as a string).

    NOTE(review): mktime of a naive local datetime — keys are local-time
    based timestamps; confirm that is intended.
    """
    ts_current = time.mktime(datetime.datetime.now().timetuple())
    parameters = read_parameters.read_parameters()
    # str(float).split('.')[0] keeps only the integer seconds part.
    parameters['error'][str(ts_current).split('.')[0]] = msg
save_parameters.save_parameters(parameters)
| [
"ivan@encore-lab.com"
] | ivan@encore-lab.com |
efdc7c5f3a37d9a54dc3008c32d132f0d89f1e19 | 95a8883583543d7469b033d1c7d1589153c79686 | /pydia/DIA_GPU.py | 570fcaf32dc38b96f7e698e47114e5b42f50513f | [
"MIT"
] | permissive | srodney/pyDIA | a08868eb72c9626404886c5c106bb9c8724779c7 | 070551a2a5d5cdfe6d74766fa947873e3b50ea7f | refs/heads/master | 2020-04-16T14:29:08.838785 | 2019-04-15T19:39:48 | 2019-04-15T19:39:48 | 165,668,471 | 2 | 1 | MIT | 2019-01-14T13:42:37 | 2019-01-14T13:42:37 | null | UTF-8 | Python | false | false | 33,305 | py | import cuda_interface_functions as CI
# pyDIA
#
# This software implements the difference-imaging algorithm of Bramich et al. (2010)
# with mixed-resolution delta basis functions. It uses an NVIDIA GPU to do the heavy
# processing.
#
# Subroutines deconvolve3_rows, deconvolve3_columns, resolve_coeffs_2d and
# interpolate_2d are taken from the Gwiddion software for scanning probe
# microscopy (http://gwyddion.net/), which is distributed under the GNU General
# Public License.
#
# All remaining code is Copyright (C) 2014, 2015 Michael Albrow
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import time
import fnmatch
import itertools
from multiprocessing import Pool
import numpy as np
import data_structures as DS
import io_functions as IO
import image_functions as IM
import photometry_functions as PH
import c_interface_functions as CIF
def difference_image(ref, target, params, stamp_positions=None, psf_image=None,
                     star_positions=None, star_group_boundaries=None,
                     detector_mean_positions_x=None,
                     detector_mean_positions_y=None, star_sky=None):
    """Difference *target* against *ref* with the Bramich (2010) kernel fit.

    Iteratively solves for the convolution-kernel coefficients on the GPU,
    builds the model image, and optionally performs difference photometry.
    Returns a DS.EmptyBase() result with attributes model, diff, norm,
    variance, mask, and flux/dflux (flux is None when photometry is skipped;
    model/diff/flux are None when the kernel solve fails).

    NOTE: the mangled Python-2 ``print`` statements (bare ``print`` followed
    by a no-op tuple expression) are restored here as print() calls; no
    numerical behaviour is changed.
    """
    from scipy.linalg import lu_solve, lu_factor, LinAlgError
    start = time.time()
    print('difference_image', ref.name, target.name)
    #
    # Kernel radius grows with the quadrature seeing difference between
    # target and reference, clipped to the configured [min, max] range.
    #
    kernelRadius = min(params.kernel_maximum_radius,
                       max(params.kernel_minimum_radius,
                           np.sqrt(np.abs(target.fw ** 2 - ref.fw ** 2)) *
                           params.fwhm_mult))
    #
    # Define the kernel basis functions
    #
    print('Defining kernel pixels', time.time() - start)
    if params.use_fft_kernel_pixels:
        kernelIndex, extendedBasis = IM.define_kernel_pixels_fft(
            ref, target, kernelRadius + 2, INNER_RADIUS=20,
            threshold=params.fft_kernel_threshold)
    else:
        kernelIndex, extendedBasis = IM.define_kernel_pixels(kernelRadius)
    #
    # Only pixels good in both the target and the reference are usable.
    #
    smask = target.mask * ref.mask
    bmask = np.ones(smask.shape, dtype=bool)
    g = DS.EmptyBase()
    for iteration in range(params.iterations):
        print('Computing matrix', time.time() - start)
        tmask = bmask * smask
        #
        # Build the normal-equations matrix H and vector V on the GPU.
        #
        H, V, texref = CI.compute_matrix_and_vector_cuda(
            ref.image, ref.blur, target.image, target.inv_variance, tmask,
            kernelIndex, extendedBasis, kernelRadius, params,
            stamp_positions=stamp_positions)
        #
        # Solve for the kernel coefficients via LU decomposition.
        #
        print('Solving matrix equation', time.time() - start)
        try:
            lu, piv = lu_factor(H)
            c = lu_solve((lu, piv), V).astype(np.float32).copy()
        except (LinAlgError, ValueError):
            # Singular / ill-conditioned system: give up on this image.
            print('LU decomposition failed')
            g.model = None
            g.flux = None
            g.diff = None
            print('H')
            print(H)
            sys.stdout.flush()
            return g
        #
        # Compute the model and difference images.
        #
        print('Computing model', time.time() - start)
        g.model = CI.compute_model_cuda(ref.image.shape, texref, c,
                                        kernelIndex, extendedBasis, params)
        difference = (target.image - g.model)
        g.norm = difference * np.sqrt(target.inv_variance)
        #
        # Recompute the noise model from the fitted image; pixels outside the
        # combined mask get (effectively) zero inverse variance.
        #
        target.inv_variance = 1.0 / (g.model / params.gain + (
            params.readnoise / params.gain) ** 2) + (1 - smask)
        mp = np.where(tmask == 0)
        if len(mp[0]) > 0:
            target.inv_variance[mp] = 1.e-12
        #
        # After a few iterations, start rejecting pixels that disagree with
        # the model.
        #
        if iteration > 2:
            bmask = IM.kappa_clip(smask, g.norm,
                                  params.pixel_rejection_threshold)
        print('Iteration', iteration, 'completed', time.time() - start)
    #
    # Free the target pixel array as soon as possible to save memory.
    #
    del target.image
    #
    # Save the kernel coefficients for later use by the photometry stage.
    #
    if params.do_photometry and psf_image:
        kf = params.loc_output + os.path.sep + 'k_' + os.path.basename(
            target.name)
        IO.write_kernel_table(kf, kernelIndex, extendedBasis, c, params)
    g.norm = difference * np.sqrt(target.inv_variance)
    g.variance = 1.0 / target.inv_variance
    g.mask = tmask
    #
    # Optional difference photometry.
    #
    g.flux = None
    if params.do_photometry and psf_image:
        print('star_positions', star_positions.shape)
        print('star_group_boundaries', star_group_boundaries)
        if ref.name == target.name:
            # Photometer the (sky-subtracted) reference frame itself.
            sky_image, _ = IO.read_fits_file(
                params.loc_output + os.path.sep + 'temp.sub2.fits')
            phot_target = ref.image - sky_image
            g.flux, g.dflux = CIF.photom_all_stars_simultaneous(
                phot_target, target.inv_variance, star_positions, psf_image,
                c, kernelIndex, extendedBasis, kernelRadius, params,
                star_group_boundaries, detector_mean_positions_x,
                detector_mean_positions_y)
        else:
            phot_target = difference
            g.flux, g.dflux = CI.photom_all_stars(
                phot_target, target.inv_variance, star_positions, psf_image,
                c, kernelIndex, extendedBasis, kernelRadius, params,
                star_group_boundaries, detector_mean_positions_x,
                detector_mean_positions_y)
        print('Photometry completed', time.time() - start)
    #
    # Apply the photometric scale factor to the difference image.  This is
    # not done before photometry because the PSF is convolved with the
    # kernel, which already carries the scale factor.
    #
    g.diff = IM.apply_photometric_scale(difference, c, params.pdeg)
    sys.stdout.flush()
    return g
def process_reference_image(f, args):
    """Difference one candidate reference frame against the best-seeing frame.

    *args* is a (best_seeing_ref, params, stamp_positions) tuple so the
    function can be dispatched through multiprocessing map helpers.
    """
    best_ref, params, stamps = args
    outcome = difference_image(f, best_ref, params, stamp_positions=stamps)
    # Release the large per-frame arrays once the difference is computed.
    for attr in ('image', 'mask', 'inv_variance'):
        delattr(f, attr)
    return outcome
def process_reference_image_helper(args):
    # Adapter for Pool.map: unpack the (frame, shared_args) pair produced by
    # zipping the reference list with a repeated argument tuple.
    return process_reference_image(*args)
def make_reference(files, params, reference_image='ref.fits'):
    """Select good frames, difference them against the best-seeing frame, and
    median-combine the resulting model images into a reference image.

    Writes <loc_output>/<reference_image> and its mask, plus a per-frame
    'mr_' model image for each accepted frame.  Returns the stamp positions
    (None when params.use_stamps is off).

    Fixes: the mangled Python-2 ``print`` statements are restored as print()
    calls; ``itertools.izip`` (removed in Python 3) is replaced by the
    builtin ``zip``; dead locals (seeing, sky, ref, sum1, sum2) are dropped.
    NOTE(review): as in the original, if no frame passes the selection cuts
    then ref_sky / best_seeing_ref are unbound and a NameError follows.
    """
    ref_seeing = 1000
    #
    # Have we specified the files to make the reference with?
    #
    if params.ref_include_file:
        ref_list = []
        for line in open(params.ref_include_file, 'r'):
            for f in files:
                if f.name == line.split()[0]:
                    ref_list.append(f)
                    print(f.name, f.fw, f.signal)
                    if f.fw < ref_seeing:
                        ref_sky = f.sky
                        ref_seeing = f.fw
                        best_seeing_ref = f
    else:
        #
        # Otherwise choose the best images automatically.
        #
        reference_exclude = []
        if params.ref_exclude_file:
            for line in open(params.ref_exclude_file, 'r'):
                reference_exclude.append(line.split()[0])
        sig = []
        for f in files:
            sig.append(f.signal)
        sig = np.asarray(sig)
        sigcut = np.mean(sig) - 2.0 * np.std(sig)
        print('signal: mean, std, cut = ', np.mean(sig), np.std(sig), sigcut)
        print('Searching for best-seeing image')
        for f in files:
            print(f.name, f.fw, f.sky, f.signal)
            if (f.fw < ref_seeing) and (
                    f.fw > params.reference_min_seeing) and (
                    f.roundness < params.reference_max_roundness) and (
                    f.signal > sigcut) and not (f.name in reference_exclude):
                ref_sky = f.sky
                ref_seeing = f.fw
                best_seeing_ref = f
        ref_list = []
        # Relax the seeing cut by 2% per pass until enough frames qualify.
        while len(ref_list) < params.min_ref_images:
            ref_list = []
            print('Reference FWHM = ', ref_seeing)
            print('Cutoff FWHM for reference = ',
                  params.reference_seeing_factor * ref_seeing)
            print('Combining for reference:')
            for f in files:
                if (f.fw < params.reference_seeing_factor * ref_seeing) and (
                        f.roundness < params.reference_max_roundness) and (
                        f.sky < params.reference_sky_factor * ref_sky) and (
                        f.fw > params.reference_min_seeing) and (
                        f.signal > sigcut) and not (
                        f.name in reference_exclude):
                    ref_list.append(f)
                    print(f.name, f.fw, f.sky, f.signal)
            params.reference_seeing_factor *= 1.02
            sig = []
            for f in ref_list:
                sig.append(f.signal)
            sig = np.asarray(sig)
            sigcut = np.mean(sig) - 2 * np.std(sig)
            print('signal: mean, std, cut = ', np.mean(sig), np.std(sig),
                  sigcut)
            ref_seeing = 1000
            ref_roundness = 2.0
            for f in ref_list:
                if (f.fw < ref_seeing) and (f.signal > sigcut):
                    ref_sky = f.sky
                    ref_seeing = f.fw
                    ref_roundness = f.roundness
                    best_seeing_ref = f
    #
    # Record which ref image has the worst seeing (currently informational).
    #
    worst_seeing = 0.0
    for f in ref_list:
        if f.fw > worst_seeing:
            worst_seeing = f.fw
            worst_seeing_ref = f
    # Optionally log the accepted frames to a file.
    if params.ref_image_list:
        with open(params.loc_output + os.path.sep + params.ref_image_list,
                  'w') as fid:
            for f in ref_list:
                fid.write(f.name + ' ' + str(f.fw) + ' ' + str(f.sky) + ' ' +
                          str(f.signal) + '\n')
    #
    # Locations of the brightest stars for stamps, if requested.
    #
    stamp_positions = None
    if params.use_stamps:
        stars = PH.choose_stamps(best_seeing_ref, params)
        stamp_positions = stars[:, 0:2]
    #
    # Difference each candidate frame against the best-seeing frame.
    #
    good_ref_list = []
    for f in ref_list:
        f.blur = IM.boxcar_blur(f.image)
        good_ref_list.append(f)
        print('difference_image:', f.name, best_seeing_ref.name)
    if not (params.use_GPU) and (params.n_parallel > 1):
        #
        # CPU path: fan the per-frame work out over a process pool.
        # (itertools.izip was Python-2 only; the builtin zip is equivalent.)
        #
        pool = Pool(params.n_parallel)
        results = pool.map(process_reference_image_helper,
                           zip(ref_list, itertools.repeat(
                               (best_seeing_ref, params, stamp_positions))))
        for i, f in enumerate(ref_list):
            f.result = results[i]
    else:
        for f in ref_list:
            f.result = process_reference_image(f, (
                best_seeing_ref, params, stamp_positions))
    #
    # Remove frames whose kernel fit failed (no difference image produced).
    #
    rlist = [g for g in good_ref_list]
    for g in rlist:
        if not (isinstance(g.result.diff, np.ndarray)):
            print('removing', g.name)
            good_ref_list.remove(g)
    print('good reference list:')
    for g in good_ref_list:
        print(g.name)
    #
    # Iteratively drop frames whose difference-image scatter is an outlier
    # (2.5-sigma clip, up to 5 passes, keeping at least 4 frames).
    #
    print('kappa-clipping reference list')
    for iterations in range(5):
        if len(good_ref_list) < 4:
            break
        sd = np.zeros(len(good_ref_list))
        for i, g in enumerate(good_ref_list):
            print(g.name, g.result.diff)
            sd[i] = np.std(g.result.diff)
        sds = sd.std()
        sdm = sd.mean()
        rlist = [g for g in good_ref_list]
        for g in rlist:
            if np.std(g.result.diff) > (sdm + 2.5 * sds):
                print('removing', g.name)
                good_ref_list.remove(g)
    #
    # Median-combine the surviving model images.
    #
    g = good_ref_list[0]
    gstack = np.zeros(
        [len(good_ref_list), g.result.model.shape[0],
         g.result.model.shape[1]])
    mask = np.ones_like(g.result.model)
    print('final reference list')
    for i, g in enumerate(good_ref_list):
        if isinstance(g.result.model, np.ndarray):
            print(g.name, np.std(g.result.diff), np.median(g.result.model))
            IO.write_image(g.result.model,
                           params.loc_output + os.path.sep + 'mr_' + g.name)
            gstack[i, :, :] = g.result.model
            mask *= g.mask
    rr = np.median(gstack, axis=0)
    IO.write_image(rr, params.loc_output + os.path.sep + reference_image)
    IO.write_image(mask,
                   params.loc_output + os.path.sep + 'mask_' + reference_image)
    # Drop the per-frame results so the frame objects stay lightweight.
    for f in ref_list:
        f.result = None
    return stamp_positions
def process_image(f, args):
ref, params, stamp_positions, star_positions, star_group_boundaries, star_unsort_index, detector_mean_positions_x, detector_mean_positions_y = args
dtarget = params.loc_output + os.path.sep + 'd_' + f.name
if not (os.path.exists(dtarget)):
#
# Compute difference image
#
result = difference_image(ref, f, params,
stamp_positions=stamp_positions,
psf_image=params.loc_output + os.path.sep + 'psf.fits',
star_positions=star_positions,
star_group_boundaries=star_group_boundaries,
detector_mean_positions_x=detector_mean_positions_x,
detector_mean_positions_y=detector_mean_positions_y)
del f.image
del f.mask
del f.inv_variance
#
# Save photometry to a file
#
if isinstance(result.flux, np.ndarray):
if not (params.use_GPU):
print
'ungrouping fluxes'
result.flux = result.flux[star_unsort_index].copy()
result.dflux = result.dflux[star_unsort_index].copy()
np.savetxt(params.loc_output + os.path.sep + f.name + '.flux',
np.vstack((result.flux, result.dflux)).T)
f.flux = result.flux.copy()
f.dflux = result.dflux.copy()
#
# Save output images to files
#
if isinstance(result.diff, np.ndarray):
IO.write_image(result.diff,
params.loc_output + os.path.sep + 'd_' + f.name)
IO.write_image(result.model,
params.loc_output + os.path.sep + 'm_' + f.name)
IO.write_image(result.norm,
params.loc_output + os.path.sep + 'n_' + f.name)
IO.write_image(result.mask,
params.loc_output + os.path.sep + 'z_' + f.name)
return 0
def process_image_helper(args):
return process_image(*args)
def imsub_all_fits(params, reference='ref.fits'):
#
# Create the output directory if it doesn't exist
#
if not (os.path.exists(params.loc_output)):
os.mkdir(params.loc_output)
#
# The degree of spatial shape changes has to be at least as
# high as the degree of spatial photometric scale
#
if (params.sdeg < params.pdeg):
print
'Increasing params.sdeg to ', params.pdeg
params.sdeg = params.pdeg
#
# Print out the parameters for this run.
#
print
'Parameters:'
for par in dir(params):
print
par, getattr(params, par)
print
#
# Determine our list of images
#
all_files = os.listdir(params.loc_data)
all_files.sort()
files = []
for f in all_files:
if fnmatch.fnmatch(f, params.name_pattern):
g = DS.Observation(params.loc_data + os.path.sep + f, params)
del g.data
del g.mask
if g.fw > 0.0:
files.append(g)
print
g.name
if len(files) < 3:
print
'Only', len(files), 'files found matching', params.name_pattern
print
'Exiting'
sys.exit(0)
#
# Have we specified a registration template?
#
if params.registration_image:
reg = DS.Observation(params.registration_image, params)
else:
reg = DS.EmptyBase()
reg.fw = 999.0
for f in files:
if (f.fw < reg.fw) and (f.fw > 1.2):
reg = f
print
'Registration image:', reg.name
#
# Register images
#
for f in files:
if f == reg:
f.image = f.data
rf = params.loc_output + os.path.sep + 'r_' + f.name
IO.write_image(f.image, rf)
else:
f.register(reg, params)
# delete image arrays to save memory
del f.image
del f.mask
del f.inv_variance
del reg.data
del reg.image
del reg.mask
del reg.inv_variance
#
# Write image names and dates to a file
#
if params.image_list_file:
try:
with open(params.loc_output + os.path.sep + params.image_list_file,
'w') as fid:
for f in files:
date = None
if params.datekey:
date = IO.get_date(
params.loc_data + os.path.sep + f.name,
key=params.datekey) - 2450000
if date:
fid.write(f.name + ' %10.5f\n' % date)
else:
fid.write(f.name)
except:
raise
#
# Make the photometric reference image if we don't have it.
# Find stamp positions if required.
#
if not (os.path.exists(params.loc_output + os.path.sep + reference)):
print
'Reg = ', reg.name
stamp_positions = make_reference(files, params,
reference_image=reference)
ref = DS.Observation(params.loc_output + os.path.sep + reference,
params)
mask, _ = IO.read_fits_file(
params.loc_output + os.path.sep + 'mask_' + reference)
ref.mask = mask
ref.register(reg, params)
else:
ref = DS.Observation(params.loc_output + os.path.sep + reference,
params)
if os.path.exists(
params.loc_output + os.path.sep + 'mask_' + reference):
mask, _ = IO.read_fits_file(
params.loc_output + os.path.sep + 'mask_' + reference)
else:
mask = np.ones_like(ref.data)
ref.mask = mask
ref.register(reg, params)
stamp_positions = None
if params.use_stamps:
stamp_file = params.loc_output + os.path.sep + 'stamp_positions'
if os.path.exists(stamp_file):
stamp_positions = np.genfromtxt(stamp_file)
else:
stars = PF.choose_stamps(ref, params)
stamp_positions = stars[:, 0:2]
np.savetxt(stamp_file, stamp_positions)
pm = params.pixel_max
params.pixel_max *= 0.9
ref.mask *= IM.compute_saturated_pixel_mask(ref.image, 4, params)
params.pixel_max = pm
ref.blur = IM.boxcar_blur(ref.image)
if params.mask_cluster:
ref.mask *= IM.mask_cluster(ref.image, ref.mask, params)
#
# Detect stars and compute the PSF if we are doing photometry
#
star_positions = None
sky = 0.0
if params.do_photometry:
star_file = params.loc_output + os.path.sep + 'star_positions'
psf_file = params.loc_output + os.path.sep + 'psf.fits'
if not (os.path.exists(psf_file)) or not (os.path.exists(star_file)):
stars = PH.compute_psf_image(params, ref, psf_image=psf_file)
star_positions = stars[:, 0:2]
star_sky = stars[:, 4]
if os.path.exists(star_file):
star_positions = np.genfromtxt(star_file)
star_sky = star_positions[:, 0] * 0.0;
else:
np.savetxt(star_file, star_positions)
print
'sky =', sky
#
# If we have pre-determined star positions
#
# if params.star_file:
# stars = np.genfromtxt(params.star_file)
# star_positions = stars[:,1:3]
# if params.star_reference_image:
# star_ref, h = IO.read_fits_file(params.star_reference_image)
# dy, dx = IM.positional_shift(ref.image,star_ref)
# print 'position shift =',dx,dy
# star_positions[:,0] += dx
# star_positions[:,1] += dy
# np.savetxt(star_file,star_positions)
#
# If we are using a CPU, group the stars by location
#
print
'Group_check'
print
'params.do_photometry', params.do_photometry
print
'params.use_GPU', params.use_GPU
if params.do_photometry:
star_group_boundaries = None
detector_mean_positions_x = None
detector_mean_positions_y = None
star_unsort_index = None
star_sort_index, star_group_boundaries, detector_mean_positions_x, detector_mean_positions_y = PH.group_stars_ccd(
params, star_positions,
params.loc_output + os.path.sep + reference)
star_positions = star_positions[star_sort_index]
star_sky = star_sky[star_sort_index]
star_unsort_index = np.argsort(star_sort_index)
#
# Do photometry of the reference image
#
if params.do_photometry:
ref_flux_file = params.loc_output + os.path.sep + 'ref.flux'
if not (os.path.exists(ref_flux_file)):
result = difference_image(ref, ref, params,
stamp_positions=stamp_positions,
psf_image=psf_file,
star_positions=star_positions,
star_group_boundaries=star_group_boundaries,
detector_mean_positions_x=detector_mean_positions_x,
detector_mean_positions_y=detector_mean_positions_y,
star_sky=star_sky)
if isinstance(result.flux, np.ndarray):
print
'ungrouping fluxes'
result.flux = result.flux[star_unsort_index].copy()
result.dflux = result.dflux[star_unsort_index].copy()
np.savetxt(ref_flux_file,
np.vstack((result.flux, result.dflux)).T)
#
# Process images
#
if params.make_difference_images:
if not (params.use_GPU) and (params.n_parallel > 1):
pool = Pool(params.n_parallel)
pool.map(process_image_helper, itertools.izip(files,
itertools.repeat((
ref, params,
stamp_positions,
star_positions,
star_group_boundaries,
star_unsort_index,
detector_mean_positions_x,
detector_mean_positions_y))))
else:
for f in files:
process_image(f, (ref, params, stamp_positions, star_positions,
star_group_boundaries, star_unsort_index,
detector_mean_positions_x,
detector_mean_positions_y))
return files
def do_photometry(params, extname='newflux', star_file='star_positions',
psf_file='psf.fits', star_positions=None,
reference_image='ref.fits'):
#
# Determine our list of files
#
all_files = os.listdir(params.loc_data)
all_files.sort()
files = []
for f in all_files:
if fnmatch.fnmatch(f, params.name_pattern):
g = DS.Observation(params.loc_data + os.path.sep + f, params)
if g.fw > 0.0:
files.append(g)
ref = DS.Observation(params.loc_output + os.path.sep + reference_image,
params)
ref.register(ref, params)
#
# Detect stars and compute the PSF if necessary
#
if params.do_photometry:
psf_file = params.loc_output + os.path.sep + psf_file
if os.path.exists(params.star_file):
star_pos = np.genfromtxt(params.star_file)[:, 1:3]
if not (os.path.exists(psf_file)):
stars = PH.compute_psf_image(params, ref, psf_image=psf_file)
else:
if not (os.path.exists(star_file)):
stars = PH.compute_psf_image(params, ref, psf_image=psf_file)
star_pos = stars[:, 0:2]
np.savetxt(star_file, star_pos)
else:
star_pos = np.genfromtxt(star_file)
if not (os.path.exists(psf_file)):
stars = PH.compute_psf_image(params, ref,
psf_image=psf_file)
#
# Have we been passed an array of star positions?
#
if star_positions == None:
star_positions = star_pos
#
# If we are using a CPU, group the stars by location
#
star_group_boundaries = None
detector_mean_positions_x = None
detector_mean_positions_y = None
if not (params.use_GPU):
star_sort_index, star_group_boundaries, detector_mean_positions_x, detector_mean_positions_y = PH.group_stars_ccd(
params, star_positions,
params.loc_output + os.path.sep + reference_image)
star_positions = star_positions[star_sort_index]
star_unsort_index = np.argsort(star_sort_index)
#
# Process the reference image
#
print
'Processing', reference_image
ref = DS.Observation(params.loc_output + os.path.sep + reference_image,
params)
# reg = Observation(params.loc_data+os.path.sep+
# params.registration_image,params)
ref.register(ref, params)
smask = IM.compute_saturated_pixel_mask(ref.image, 6, params)
ref.inv_variance += 1 - smask
ktable = params.loc_output + os.path.sep + 'k_' + os.path.basename(
reference_image)
kernelIndex, extendedBasis, c, params = IO.read_kernel_table(ktable,
params)
kernelRadius = np.max(kernelIndex[:, 0]) + 1
if np.sum(extendedBasis) > 0:
kernelRadius += 1
print
'kernelIndex', kernelIndex
print
'extendedBasis', extendedBasis
print
'coeffs', c
print
'kernelRadius', kernelRadius
phot_target = ref.image
ref.flux, ref.dflux = PH.photom_all_stars(phot_target, ref.inv_variance,
star_positions, psf_file, c,
kernelIndex, extendedBasis,
kernelRadius, params,
star_group_boundaries,
detector_mean_positions_x,
detector_mean_positions_y,
sky=sky)
if isinstance(ref.flux, np.ndarray):
if not (params.use_GPU):
print
'ungrouping fluxes'
ref.flux = ref.flux[star_unsort_index].copy()
ref.dflux = ref.dflux[star_unsort_index].copy()
np.savetxt(
params.loc_output + os.path.sep + reference_image + '.' + extname,
np.vstack((ref.flux, ref.dflux)).T)
#
# Process difference images
#
for f in files:
if not (os.path.exists(
params.loc_output + os.path.sep + f.name + '.' + extname)):
print
'Processing', f.name
target = f.name
dtarget = params.loc_output + os.path.sep + 'd_' + os.path.basename(
target)
ntarget = params.loc_output + os.path.sep + 'n_' + os.path.basename(
target)
ztarget = params.loc_output + os.path.sep + 'z_' + os.path.basename(
target)
ktable = params.loc_output + os.path.sep + 'k_' + os.path.basename(
target)
if os.path.exists(dtarget) and os.path.exists(
ntarget) and os.path.exists(ktable):
norm, h = IO.read_fits_file(ntarget)
diff, h = IO.read_fits_file(dtarget)
mask, h = IO.read_fits_file(ztarget)
inv_var = (norm / diff) ** 2 + (1 - mask)
kernelIndex, extendedBasis, c, params = IO.read_kernel_table(
ktable, params)
kernelRadius = np.max(kernelIndex[:, 0]) + 1
if np.sum(extendedBasis) > 0:
kernelRadius += 1
print
'kernelIndex', kernelIndex
print
'extendedBasis', extendedBasis
print
'coeffs', c
print
'kernelRadius', kernelRadius
diff = IM.undo_photometric_scale(diff, c, params.pdeg)
flux, dflux = PH.photom_all_stars(diff, inv_var,
star_positions, psf_file, c,
kernelIndex, extendedBasis,
kernelRadius, params,
star_group_boundaries,
detector_mean_positions_x,
detector_mean_positions_y)
if isinstance(flux, np.ndarray):
if not (params.use_GPU):
print
'ungrouping fluxes'
flux = flux[star_unsort_index].copy()
dflux = dflux[star_unsort_index].copy()
np.savetxt(
params.loc_output + os.path.sep + f.name + '.' + extname,
np.vstack((flux, dflux)).T)
| [
"steve.rodney@gmail.com"
] | steve.rodney@gmail.com |
7ce9f25d9e4a88e41687b206e6a0bd9b74daa432 | d89a482aaf3001bbc4515f39af9ba474e1ae6062 | /trex/trex_output.py | f0d835f8b948280acec5897964ce1cb142978ed3 | [] | no_license | hongtao510/u_tool | 2925e3694aba81714cf83018c3f8520a7b503228 | 98c962cfb1f53c4971fb2b9ae22c882c0fae6497 | refs/heads/master | 2021-01-10T20:40:24.793531 | 2014-03-14T22:57:37 | 2014-03-14T22:57:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,924 | py | # -*- coding: utf-8 -*-
# TREX
import os
os.environ['DJANGO_SETTINGS_MODULE']='settings'
#from trex import trex_input
import webapp2 as webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
import numpy as np
import cgi
import cgitb
cgitb.enable()
from trex import trex_model
from uber import uber_lib
class TRexOutputPage(webapp.RequestHandler):
def post(self):
form = cgi.FieldStorage()
chem_name = form.getvalue('chemical_name')
use = form.getvalue('Use')
formu_name = form.getvalue('Formulated_product_name')
a_i = form.getvalue('percent_ai')
a_i = float(a_i)/100
Application_type = form.getvalue('Application_type')
p_i = form.getvalue('percent_incorporated')
p_i = float(p_i)/100
a_r = form.getvalue('application_rate')
a_r = float(a_r)
a_r_l = form.getvalue('application_rate_l')
a_r_l=float(a_r_l)
seed_treatment_formulation_name = form.getvalue('seed_treatment_formulation_name')
den = form.getvalue('density_of_product')
den = float(den)
m_s_r_p = form.getvalue('maximum_seedling_rate_per_use')
m_s_r_p = float(m_s_r_p)
a_r_p = form.getvalue('application_rate_per_use')
a_r_p = float(a_r_p)
r_s = form.getvalue('row_sp')
r_s=float(r_s)
b_w = form.getvalue('bandwidth') #convert to ft
b_w = float(b_w)/12
n_a = form.getvalue('number_of_applications')
a_t = form.getvalue('Application_target')
if a_t=='Short grass':
para=240 #coefficient used to estimate initial conc.
elif a_t=='Tall grass':
para=110
elif a_t=='Broad-leafed plants/small insects':
para=135
elif a_t=='Fruits/pods/seeds/large insects':
para=15
i_a = form.getvalue('interval_between_applications')
h_l = form.getvalue('Foliar_dissipation_half_life')
ld50_bird = form.getvalue('avian_ld50')
lc50_bird = form.getvalue('avian_lc50')
NOAEC_bird = form.getvalue('avian_NOAEC')
NOAEC_bird = float(NOAEC_bird)
NOAEL_bird = form.getvalue('avian_NOAEL')
NOAEL_bird = float(NOAEL_bird)
# bird_type = form.getvalue('Bird_type')
aw_bird = form.getvalue('body_weight_of_the_assessed_bird')
aw_bird = float(aw_bird)
tw_bird = form.getvalue('body_weight_of_the_tested_bird')
tw_bird = float(tw_bird)
x = form.getvalue('mineau_scaling_factor')
ld50_mamm = form.getvalue('mammalian_ld50')
lc50_mamm = form.getvalue('mammalian_lc50')
lc50_mamm=float(lc50_mamm)
NOAEC_mamm = form.getvalue('mammalian_NOAEC')
NOAEC_mamm = float(NOAEC_mamm)
NOAEL_mamm = form.getvalue('mammalian_NOAEL')
# mammal_type = form.getvalue('Mammal_type')
# if mammal_type =='Herbivores and insectivores':
# mf_w_mamm=0.8 #coefficient used to estimate initial conc.
# elif mammal_type=='Granivores':
# mf_w_mamm=0.1
# if bird_type =='Herbivores and insectivores':
# mf_w_bird=0.8 #coefficient used to estimate initial conc.
# elif bird_type=='Granivores':
# mf_w_bird=0.1
aw_mamm = form.getvalue('body_weight_of_the_assessed_mammal')
aw_mamm = float(aw_mamm)
tw_mamm = form.getvalue('body_weight_of_the_tested_mammal')
tw_mamm = float(tw_mamm)
#mf_w_mamm = form.getvalue('mass_fraction_of_water_in_the_mammal_food')
#mf_w_bird = form.getvalue('mass_fraction_of_water_in_the_bird_food')
text_file = open('trex/trex_description.txt','r')
x1 = text_file.read()
templatepath = os.path.dirname(__file__) + '/../templates/'
ChkCookie = self.request.cookies.get("ubercookie")
html = uber_lib.SkinChk(ChkCookie, "TREX Output")
html = html + template.render(templatepath + '02uberintroblock_wmodellinks.html', {'model':'trex','page':'output'})
html = html + template.render (templatepath + '03ubertext_links_left.html', {})
html = html + template.render(templatepath + '04uberoutput_start.html', {
'model':'trex',
'model_attributes':'T-Rex Output'})
html = html + """<table width="600" border="1" class="out_1">
<tr>
<th scope="col">Inputs</div></th>
<th scope="col">Value</div></th>
<th scope="col">Inputs</div></th>
<th scope="col">Value</div></th>
</tr>
<tr>
<td>Chemical name</td>
<td>%s</td>
<td>Use</td>
<td>%s</td>
</tr>
<tr>
<td>Formulated procuct name</td>
<td>%s</td>
<td>Percentage active ingredient</td>
<td>%s%%</td>
</tr>
<tr>
<td>Application type</td>
<td>%s</td>
<td>Percentage incorporated</td>
<td>%s%%</td>
</tr>
<tr>
<td>Application rate (lbs a.i./A)</td>
<td>%s</td>
<td>Liquid application rate (fl oz/A)</td>
<td>%s</td>
</tr>
<tr>
<td>Seed treatment formulation name</td>
<td>%s</td>
<td>Density of product (lbs/gal)</td>
<td>%s</td>
</tr>
<tr>
<td>Maximum seeding rate per use (lbs/A)</td>
<td>%s</td>
<td>Application rate per use (fl oz/cwt)</td>
<td>%s</td>
</tr>
<tr>
<td>Row spacing (inch)</td>
<td>%s</td>
<td>Bandwidth (inch)</td>
<td>%s</td>
</tr>
<tr>
<td>Number of applications</td>
<td>%s</td>
<td>Application target</td>
<td>%s</td>
</tr>
<tr>
<td>Interval between applications (days)</td>
<td>%s</td>
<td>Foliar dissipation half-life (days)</td>
<td>%s</td>
</tr>
<tr>
<td>Avian LD50 (mg/kg-bw)</td>
<td>%s</td>
<td>Avian LC50 (mg/kg-diet)</td>
<td>%s</td>
</tr>
<tr>
<td>Avian NOAEC (mg/kg-diet)</td>
<td>%s</td>
<td>Avian NOAEL (mg/kg-bw)</td>
<td>%s</td>
</tr>
<tr>
<td>Body weight of assessed bird (g)</td>
<td>%s</td>
<td>Body weight of tested bird (g)</td>
<td>%s</td>
</tr>
<tr>
<td>Mineau scaling factor</td>
<td>%s</td>
<td>Mammalian LD50 (mg/kg-bw)</td>
<td>%s</td>
</tr>
<tr>
<td>Mammalian LC50 (mg/kg-diet)</td>
<td>%s</td>
<td>Mammalian NOAEC (mg/kg-diet)</td>
<td>%s</td>
</tr>
<tr>
<td>Mammalian NOAEL (mg/kg-bw)</td>
<td>%s</td>
<td>Body weight of assessed mammal (g)</td>
<td>%s</td>
</tr>
<tr>
<td>Body weight of tested mammal (g)</td>
<td>%s</td>
<td> </td>
<td> </td>
</tr>
</table>
<p> </p>
"""%(chem_name, use, formu_name, 100*a_i, Application_type, 100*p_i, a_r, a_r_l, seed_treatment_formulation_name, den, m_s_r_p, a_r_p,
r_s, b_w, n_a, a_t, i_a, h_l, ld50_bird, lc50_bird, NOAEC_bird, NOAEL_bird, aw_bird, tw_bird, x, ld50_mamm,
lc50_mamm, NOAEC_mamm, NOAEL_mamm, aw_mamm, tw_mamm)
html = html + """<table width="600" border="1" class="out_2">
<tr>
<th scope="col">Outputs</div></th>
<th scope="col">Value</div></th>
</tr>
<tr>
<td>Dietary-based EECs for %s</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Avian dose-based acute EECs for %s (Herbivores and insectivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Avian dose-based acute EECs (Granivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Avian dose-based acute RQs for %s (Herbivores and insectivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Avian dose-based acute RQs (Granivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Avian diet-based acute RQs for %s (Herbivores and insectivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Avian diet-based chronic RQs for %s (Herbivores and insectivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian dose-based acute EECs for %s (Herbivores and insectivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian dose-based acute EECs (Granivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian dose-based acute RQs for %s (Herbivores and insectivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian dose-based acute RQs (Granivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian dose-based chronic RQs for %s (Herbivores and insectivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian dose-based chronic RQs (Granivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian diet-based acute RQs for %s (Herbivores and insectivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian diet-based chronic RQs for %s (Herbivores and insectivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Avian LD50<sup>-2</sup> for row/band/in-furrow granular application</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Avian LD50<sup>-2</sup> for row/band/in-furrow liquid application</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Avian LD50<sup>-2</sup> for broadcast granular application</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Avian LD50<sup>-2</sup> for broadcast liquid application</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian LD50<sup>-2</sup> for row/band/in-furrow granular application</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian LD50<sup>-2</sup> for row/band/in-furrow liquid application</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian LD50<sup>-2</sup> for broadcast granular application</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian LD50<sup>-2</sup> for broadcast liquid application</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Seed treatment avian acute RQs (method 1)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Seed treatment avian acute RQs (method 2)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Seed treatment avian chronic RQs</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Seed treatment mammalian acute RQs (method 1)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Seed treatment mammalian acute RQs (method 2)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Seed treatment mammalian chronic RQs</td>
<td>%0.2E</td>
</tr>
</table>""" %(a_t, trex_model.EEC_diet(trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l), a_t, trex_model.EEC_dose_bird(trex_model.EEC_diet, aw_bird, trex_model.fi_bird, 0.8, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l),
trex_model.EEC_dose_bird_g(trex_model.EEC_diet, aw_bird, trex_model.fi_bird, 0.1, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l), a_t,
trex_model.ARQ_dose_bird(trex_model.EEC_dose_bird, trex_model.EEC_diet, aw_bird, trex_model.fi_bird, trex_model.at_bird, ld50_bird, tw_bird, x, 0.8, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l),
trex_model.ARQ_dose_bird_g(trex_model.EEC_dose_bird, trex_model.EEC_diet, aw_bird, trex_model.fi_bird, trex_model.at_bird, ld50_bird, tw_bird, x, 0.1, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l),
a_t, trex_model.ARQ_diet_bird(trex_model.EEC_diet, lc50_bird, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l), a_t, trex_model.CRQ_diet_bird(trex_model.EEC_diet, NOAEC_bird, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l),
a_t, trex_model.EEC_dose_mamm(trex_model.EEC_diet, aw_mamm, trex_model.fi_mamm, 0.8, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l), trex_model.EEC_dose_mamm_g(trex_model.EEC_diet, aw_mamm, trex_model.fi_mamm, 0.1, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l),
a_t, trex_model.ARQ_dose_mamm(trex_model.EEC_dose_mamm, trex_model.at_mamm, aw_mamm, ld50_mamm, tw_mamm, 0.8, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l),
trex_model.ARQ_dose_mamm_g(trex_model.EEC_dose_mamm, trex_model.at_mamm, aw_mamm, ld50_mamm, tw_mamm, 0.1, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l),
a_t, trex_model.CRQ_dose_mamm(trex_model.EEC_diet, trex_model.EEC_dose_mamm, trex_model.ANOAEL_mamm, NOAEL_mamm, aw_mamm, tw_mamm, 0.8, n_a, i_a, a_r, a_i, para, h_l),
trex_model.CRQ_dose_mamm_g(trex_model.EEC_diet, trex_model.EEC_dose_mamm, trex_model.ANOAEL_mamm, NOAEL_mamm, aw_mamm, tw_mamm, 0.1, n_a, i_a, a_r, a_i, para, h_l),
a_t, trex_model.ARQ_diet_mamm(trex_model.EEC_diet, lc50_mamm, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l),
a_t, trex_model.CRQ_diet_mamm(trex_model.EEC_diet, NOAEC_mamm, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l),
trex_model.LD50_rg_bird(Application_type, a_r, a_i, p_i, r_s, b_w, aw_bird, trex_model.at_bird, ld50_bird, tw_bird, x), trex_model.LD50_rl_bird(Application_type, a_r_l, a_i, p_i, b_w, aw_bird, trex_model.at_bird, ld50_bird, tw_bird, x),
trex_model.LD50_bg_bird(Application_type, a_r, a_i, p_i, b_w, aw_bird, trex_model.at_bird, ld50_bird, tw_bird,x),trex_model.LD50_bl_bird(Application_type, a_r_l, a_i, p_i, b_w, aw_bird, trex_model.at_bird, ld50_bird, tw_bird,x),
trex_model.LD50_rg_mamm(Application_type, a_r, a_i, p_i, r_s, b_w, aw_mamm, trex_model.at_mamm, ld50_mamm, tw_mamm), trex_model.LD50_rl_mamm(Application_type, a_r_l, a_i, p_i, b_w, aw_mamm, trex_model.at_mamm, ld50_mamm, tw_mamm),
trex_model.LD50_bg_mamm(Application_type, a_r, a_i, p_i, b_w, aw_mamm, trex_model.at_mamm, ld50_mamm, tw_mamm),trex_model.LD50_bl_mamm(Application_type, a_r_l, a_i, p_i, b_w, aw_mamm, trex_model.at_mamm, ld50_mamm, tw_mamm),
trex_model.sa_bird_1(a_r_p, a_i, den, trex_model.at_bird,trex_model.fi_bird, ld50_bird, aw_bird, tw_bird, x),trex_model.sa_bird_2(a_r_p, a_i, den, m_s_r_p, trex_model.at_bird, ld50_bird, aw_bird, tw_bird, x),
trex_model.sc_bird(a_r_p, a_i, den, NOAEC_bird),trex_model.sa_mamm_1(a_r_p, a_i, den, trex_model.at_mamm, trex_model.fi_mamm, ld50_mamm, aw_mamm, tw_mamm),
trex_model.sa_mamm_2(a_r_p, a_i, den, m_s_r_p, trex_model.at_mamm, ld50_mamm, aw_mamm, tw_mamm),trex_model.sc_mamm(a_r_p, a_i, den, NOAEC_mamm))
html = html + template.render(templatepath + 'export.html', {})
html = html + template.render(templatepath + '04uberoutput_end.html', {'sub_title': ''})
html = html + template.render(templatepath + '06uberfooter.html', {'links': ''})
self.response.out.write(html)
app = webapp.WSGIApplication([('/.*', TRexOutputPage)], debug=True)
def main():
run_wsgi_app(app)
if __name__ == '__main__':
main()
| [
"hongtao510@gmail.com"
] | hongtao510@gmail.com |
04c0a9aa06b8567653908c8159d470bb3be89b2d | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_200/5468.py | 063e5c8d196c9bfcca7a5d638432897002ca1793 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | dirt=[]
k=1
t=input()
for j in range(t):
n=input();w=n
while(w):
c=0;g=n%10
n=w;q=(n)%10;m=-2
while(n):
d=n%10
if c>=1:
if q<d:
break
q=d;n/=10;
c+=1;g=d
if n==0:
dirt.append(w)
break
w=w-1
for i in dirt:
print "Case #{0}: {1}".format(k,i)
k+=1
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
a5ddd507e15815aaad86ceaaa47e2a295133f13d | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/devices/v20160203/list_iot_hub_resource_keys.py | 42ce719ca651ad316e0363197087b52eff4ffe47 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,383 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'ListIotHubResourceKeysResult',
'AwaitableListIotHubResourceKeysResult',
'list_iot_hub_resource_keys',
'list_iot_hub_resource_keys_output',
]
@pulumi.output_type
class ListIotHubResourceKeysResult:
"""
The list of shared access policies with a next link.
"""
def __init__(__self__, next_link=None, value=None):
if next_link and not isinstance(next_link, str):
raise TypeError("Expected argument 'next_link' to be a str")
pulumi.set(__self__, "next_link", next_link)
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="nextLink")
def next_link(self) -> str:
"""
The next link.
"""
return pulumi.get(self, "next_link")
@property
@pulumi.getter
def value(self) -> Optional[Sequence['outputs.SharedAccessSignatureAuthorizationRuleResponse']]:
"""
The list of shared access policies.
"""
return pulumi.get(self, "value")
class AwaitableListIotHubResourceKeysResult(ListIotHubResourceKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListIotHubResourceKeysResult(
next_link=self.next_link,
value=self.value)
def list_iot_hub_resource_keys(resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListIotHubResourceKeysResult:
"""
The list of shared access policies with a next link.
:param str resource_group_name: The name of the resource group that contains the IoT hub.
:param str resource_name: The name of the IoT hub.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:devices/v20160203:listIotHubResourceKeys', __args__, opts=opts, typ=ListIotHubResourceKeysResult).value
return AwaitableListIotHubResourceKeysResult(
next_link=__ret__.next_link,
value=__ret__.value)
@_utilities.lift_output_func(list_iot_hub_resource_keys)
def list_iot_hub_resource_keys_output(resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListIotHubResourceKeysResult]:
"""
The list of shared access policies with a next link.
:param str resource_group_name: The name of the resource group that contains the IoT hub.
:param str resource_name: The name of the IoT hub.
"""
...
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
e7f28841c3cab7b1b034f5d0de68744734459162 | f8c35a47c7199aed0747e91e5c36ec97e7543de1 | /custom/icds_reports/management/commands/generate_migration_tables.py | 868e4fe59a2acf305643ee8eed39d62f19f82f18 | [] | no_license | dr-aryone/commcare-hq | 13a3f2a39382e3f6fe1f19d6c08bb61b808c146d | 3e7e09247fc8d1246ccfc77c1fff8603c9f65228 | refs/heads/master | 2020-05-27T14:29:48.923458 | 2019-05-26T00:01:33 | 2019-05-26T00:01:33 | 188,650,727 | 2 | 1 | null | 2019-05-26T07:03:18 | 2019-05-26T07:03:18 | null | UTF-8 | Python | false | false | 6,056 | py | from __future__ import absolute_import, print_function
from __future__ import unicode_literals
import logging
import re
import sqlite3
from django.core.management import CommandError
from django.core.management.base import BaseCommand
from sqlalchemy import inspect as sqlinspect
from corehq.apps.userreports.models import StaticDataSourceConfiguration
from corehq.apps.userreports.util import get_indicator_adapter, UCR_TABLE_PREFIX
from corehq.sql_db.connections import connection_manager
from custom.icds_reports.const import DASHBOARD_DOMAIN
from custom.icds_reports.management.commands.create_citus_child_tables import keep_child_tables, plain_tables, \
drop_child_tables, get_parent_child_mapping
from custom.icds_reports.models import AggregateSQLProfile
logger = logging.getLogger(__name__)
IGNORE_TABLES = {
'django_migrations',
AggregateSQLProfile._meta.db_table,
'ucr_table_name_mapping',
}
CREATE_TABLE = """
CREATE TABLE IF NOT EXISTS tables (
id integer PRIMARY KEY,
source_table text NOT NULL,
date text,
target_table text,
migrated integer
); """
def get_all_tables(connection):
res = connection.execute("select tablename from pg_tables where schemaname = 'public'")
return {row.tablename for row in res}
class Command(BaseCommand):
    """Build a sqlite "manifest" database listing every source table to be
    migrated (source table, partition date, optional target parent table),
    for a later dump/load into CitusDB.
    """

    def add_arguments(self, parser):
        # Path of the sqlite file to create.
        parser.add_argument('output_database')
        parser.add_argument(
            '--source-engine-id', default='icds-ucr',
            help='Django alias for source database'
        )

    def handle(self, output_database, source_engine_id, **options):
        """Entry point: compute the parent/child partition mappings, then
        populate the sqlite manifest."""
        with connection_manager.get_engine(source_engine_id).begin() as conn:
            self.parent_child_mapping = get_parent_child_mapping(conn)
            # Inverted index: child table -> its parent.
            self.child_parent_mapping = {
                child: parent
                for parent, children in self.parent_child_mapping.items()
                for child in children
            }
        self.table_count = 0
        self.db = sqlite3.connect(output_database)
        try:
            self.setup_sqlite_db()
            self.generate_dump_script(source_engine_id)
            self.stdout.write("\n{} tables processed\n".format(self.table_count))
        finally:
            self.db.close()

    def setup_sqlite_db(self):
        """Create the ``tables`` table and refuse to reuse a non-empty DB."""
        with self.db:
            self.db.execute(CREATE_TABLE)
        res = self.db.execute('select count(*) from tables')
        if res.fetchone()[0] > 0:
            raise CommandError('Database already has records. Delete it and re-run command.')

    def insert_row(self, row):
        """Insert one (source_table, date, target_table) row and bump the count."""
        self.table_count += 1
        with self.db:
            self.db.execute('INSERT INTO tables(source_table, date, target_table) values (?,?,?)', row)

    def generate_dump_script(self, source_engine_id):
        """Walk every table family in the source DB, record it in the
        manifest, and warn about any table left unaccounted for."""
        self.seen_tables = set()
        source_engine = connection_manager.get_engine(source_engine_id)
        # direct dump and load from parent + child tables
        with source_engine.begin() as source_conn:
            insp = sqlinspect(source_conn)
            for table in keep_child_tables + plain_tables:
                for line in self.get_table_date_target(insp, table):
                    self.insert_row(line)
            # direct dump and load from parent
            # dump from all child tables into parent table
            for table in drop_child_tables:
                for line in self.get_table_date_target(insp, table, all_in_parent=True):
                    self.insert_row(line)
            for datasource in StaticDataSourceConfiguration.by_domain(DASHBOARD_DOMAIN):
                if source_engine_id == datasource.engine_id or source_engine_id in datasource.mirrored_engine_ids:
                    adapter = get_indicator_adapter(datasource)
                    table_name = adapter.get_table().name
                    # direct dump and load from parent
                    # dump from all child tables into parent table
                    # - if table is distrubuted, citus will distribute the data
                    # - if table is partitioned the triggers on the parent will distribute the data
                    for line in self.get_table_date_target(insp, table_name, all_in_parent=True):
                        self.insert_row(line)
            all_tables = get_all_tables(source_conn)
        # Anything not visited above is reported (UCR tables from other
        # domains are deliberately excluded from the warning).
        remaining_tables = all_tables - self.seen_tables - IGNORE_TABLES
        icds_ucr_prefix = '{}{}_'.format(UCR_TABLE_PREFIX, DASHBOARD_DOMAIN)

        def keep_table(table):
            root_table = self.child_parent_mapping.get(table, table)
            return not root_table.startswith(UCR_TABLE_PREFIX) or root_table.startswith(icds_ucr_prefix)

        remaining_tables = list(filter(keep_table, remaining_tables))
        if remaining_tables:
            self.stderr.write("Some tables not seen:")
            for t in remaining_tables:
                parent = self.child_parent_mapping.get(t)
                if parent:
                    self.stderr.write("\t{} (parent: {})".format(t, parent))
                else:
                    self.stderr.write("\t{}".format(t))

    def get_table_date_target(self, sql_insepctor, table, all_in_parent=False):
        """Yield manifest rows for ``table`` and each of its partition
        children: (table, date, parent-or-None). When ``all_in_parent`` is
        True the children are targeted at the parent table."""
        yield table, None, None
        self.seen_tables.add(table)
        for child in self.parent_child_mapping[table]:
            self.seen_tables.add(child)
            yield child, get_table_date(sql_insepctor, child), table if all_in_parent else None
def get_table_date(sql_insepctor, table):
    """Extract a ``YYYY-MM-DD`` date from a table's name, falling back to
    its check constraints when the name carries no date.

    Returns the date string, or ``None`` when neither source contains one.
    """
    def _extract(text):
        # The greedy leading ``.*`` means the *last* date in the string
        # wins, matching the original behaviour.
        found = re.match(r'.*(\d{4}-\d{2}-\d{2}).*', text)
        return found.groups()[0] if found else None

    date = _extract(table)
    if date:
        return date
    matching = [
        constraint for constraint in sql_insepctor.get_check_constraints(table)
        if constraint['name'].startswith(table)
    ]
    if matching:
        date = _extract(matching[0]['sqltext'])
    return date
| [
"skelly@dimagi.com"
] | skelly@dimagi.com |
19aab88df7aec32b7971ae1f9f4d9863c192e9e8 | 965fe92b03b37d2e6fa700281c4ef383fb104ada | /sciencebeam_trainer_delft/sequence_labelling/debug.py | 53a1befadf92215d50210611fddba0ded0508508 | [
"MIT"
] | permissive | elifesciences/sciencebeam-trainer-delft | 1591bebb7f5b9ed178329f4e9e02a9d893ab228d | 2413fe7f0801869208741e4ab6c4096db8d53b5e | refs/heads/develop | 2022-05-20T21:55:13.210432 | 2022-03-28T17:32:31 | 2022-03-28T17:32:31 | 192,557,708 | 5 | 1 | MIT | 2022-03-28T17:33:14 | 2019-06-18T14:34:50 | Python | UTF-8 | Python | false | false | 3,133 | py | import os
import logging
import time
from contextlib import contextmanager
from pathlib import Path
from typing import IO, Iterator, Optional
import numpy as np
from sciencebeam_trainer_delft.sequence_labelling.tag_formatter import (
TagOutputFormats,
format_tag_result
)
LOGGER = logging.getLogger(__name__)
SCIENCEBEAM_DELFT_TAGGING_DEBUG_OUT = "SCIENCEBEAM_DELFT_TAGGING_DEBUG_OUT"
@contextmanager
def exclusive_prefixed_file(prefix: str, suffix: str = '') -> Iterator[IO]:
    """Context manager yielding a newly created UTF-8 text file named
    ``<prefix>-<index><suffix>`` where ``index`` is the smallest value in
    1..9999 whose name does not collide with an existing file.

    Raises FileExistsError when all 9999 candidate names are taken.
    """
    index = 1
    while index < 10000:
        candidate = '%s-%d%s' % (prefix, index, suffix)
        try:
            # mode='x' fails atomically if the file already exists,
            # which guarantees exclusive creation.
            with open(candidate, mode='x', encoding='utf-8') as fileobj:
                yield fileobj
            return
        except FileExistsError:
            index += 1
    raise FileExistsError('could not create any prefixed file: %s, suffix: %s' % (prefix, suffix))
class TagDebugReporter:
    """Writes tagging results to disk in several formats (.txt, .json,
    .xml and, when features are present, .data) for offline debugging."""

    def __init__(self, output_directory: str):
        # Directory receiving all debug dump files.
        self.output_directory = output_directory

    def get_base_output_name(self, model_name: str) -> str:
        """Return a timestamped path prefix for this model's dump files."""
        return os.path.join(self.output_directory, 'sciencebeam-delft-%s-%s' % (
            round(time.time()),
            model_name
        ))

    def report_tag_results(
            self,
            texts: np.array,
            features: np.array,
            annotations,
            model_name: str):
        """Dump ``annotations`` (with their texts/features) as a set of
        files sharing one exclusively-reserved numbered prefix."""
        base_filename_prefix = self.get_base_output_name(model_name=model_name)
        # Reserving the .json name exclusively also fixes the shared
        # "-<n>" prefix used by every other output format below.
        with exclusive_prefixed_file(base_filename_prefix, '.json') as json_fp:
            output_file = json_fp.name
            filename_prefix = os.path.splitext(output_file)[0]
            LOGGER.info('tagger, output_file: %s', output_file)
            format_tag_result_kwargs = dict(
                tag_result=annotations,
                texts=texts,
                features=features,
                model_name=model_name
            )
            formatted_text = format_tag_result(
                output_format=TagOutputFormats.TEXT,
                **format_tag_result_kwargs
            )
            Path(filename_prefix + '.txt').write_text(formatted_text, encoding='utf-8')
            formatted_json = format_tag_result(
                output_format=TagOutputFormats.JSON,
                **format_tag_result_kwargs
            )
            json_fp.write(formatted_json)
            formatted_xml = format_tag_result(
                output_format=TagOutputFormats.XML,
                **format_tag_result_kwargs
            )
            Path(filename_prefix + '.xml').write_text(formatted_xml, encoding='utf-8')
            if features is not None:
                formatted_data = format_tag_result(
                    output_format=TagOutputFormats.DATA,
                    **format_tag_result_kwargs
                )
                Path(filename_prefix + '.data').write_text(formatted_data, encoding='utf-8')
def get_tag_debug_reporter_if_enabled() -> Optional[TagDebugReporter]:
    """Return a TagDebugReporter when the debug-output environment
    variable names a directory, otherwise ``None`` (debugging disabled)."""
    target_dir = os.environ.get(SCIENCEBEAM_DELFT_TAGGING_DEBUG_OUT)
    return TagDebugReporter(target_dir) if target_dir else None
| [
"noreply@github.com"
] | elifesciences.noreply@github.com |
9d824bbf957c67b29d81de029cb2487e5846c09b | 93984a6e0c8d0fb46ac571ef196a2c83637f3228 | /DesionTree/DT_tree/1_DT_Classification.py | f41b32d2b354475e38fc26f5c1ff00f7e7209bee | [] | no_license | zhangj123/Artificial | ecdf3f3c0adfad978cd1a4375fe27fedba52349e | a2ff7c8cc1856d6b6a0c9b6943a15398b255f82a | refs/heads/master | 2020-04-04T03:51:44.698813 | 2018-11-09T05:28:21 | 2018-11-09T05:28:21 | 155,728,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,202 | py | # DecisionTreeClassifier is a class capable of performing multi-class classification on a dataset.
# scikit-learn's decision trees are built on an optimised CART algorithm
# and support both classification (DecisionTreeClassifier) and regression
# (DecisionTreeRegressor).  The two classes take almost identical
# parameters, though their meanings are not entirely the same.
# As with other classifiers, DecisionTreeClassifier takes as input two arrays: an array X,
# of size [n_samples, n_features] holding the training samples,
# and an array Y of integer values, size [n_samples], holding the class labels for the training samples:
# criterion : string, optional (default="gini")
# The function to measure the quality of a split. Supported criteria are
# "gini" for the Gini impurity and "entropy" for the information gain.
from sklearn import tree

# Three training samples with four features each.
X = [[1,1,1,1], [2,2,2,2], [2,2,2,0]]
y = [0, 1, 2]  # flower class labels, one per sample
clf = tree.DecisionTreeClassifier(criterion = "entropy")
clf = clf.fit(X, y)
# After being fitted, the model can then be used to predict the class of samples:
print(clf.predict([[2., 2., 3., 4.]]))
| [
"noreply@github.com"
] | zhangj123.noreply@github.com |
d4ee6961649aca8865294008a94b35181bbe50bc | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/CodeJamData/14/03/5.py | 44713545f4a1f6d56fb33b4f7f95aaa4764dea56 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 1,620 | py | def trans(a):
return map(lambda x: ''.join(list(x)), zip(*a))
def can(r, c, m):
    """Decide whether an r x c Minesweeper board with m mines admits a
    one-click win (Code Jam 2014, "Minesweeper Master").

    Returns True iff a winning layout exists.
    """
    rows, cols = (c, r) if r > c else (r, c)  # normalise so rows <= cols
    safe = rows * cols - m                    # cells that must stay mine-free
    if rows == 1 or safe == 1:
        # A single row always works; a lone safe cell is just the click.
        return True
    if rows == 2:
        # Two rows: safe cells must form vertical pairs, at least a 2x2 block.
        return safe >= 4 and safe % 2 == 0
    # Three or more rows: every safe count works except these few.
    return safe not in (2, 3, 5, 7)
def solve(r, c, m):
    # Print a winning Minesweeper layout ('c' marks the click cell), or
    # "Impossible" when can() says no one-click win exists.  Python 2 code.
    if not can(r, c, m):
        print 'Impossible'
        return
    swapped = False
    # Normalise to r <= c; remember to transpose the answer back at the end.
    if r > c: r, c, swapped = c, r, True
    ans, safe = [['.'] * c for _ in xrange(r)], r * c - m
    if r == 1:
        # Single row: mines fill the tail after the safe prefix.
        for i in xrange(safe, c):
            ans[0][i] = '*'
    elif r == 2:
        # Two rows: mines occupy full columns after safe // 2 safe columns.
        for i in xrange(safe // 2, c):
            ans[0][i] = ans[1][i] = '*'
    elif m <= (r - 2) * (c - 2):
        # Few mines: pack them column by column into the bottom-right
        # (r-2) x (c-2) interior, away from the top-left click.
        for i in xrange(m):
            ans[r - i % (r - 2) - 1][c - i // (r - 2) - 1] = '*'
    else:
        # Many mines: start from an all-mine board and carve safe cells out.
        ans = [['*'] * c for _ in xrange(r)]
        if safe <= 6:
            # Small safe area: vertical pairs down the first two columns.
            for i in xrange(safe // 2):
                ans[i][0] = ans[i][1] = '.'
        else:
            # Carve 8 cells of a 3x3 top-left core first...
            for i in xrange(8):
                ans[i % 3][i // 3] = '.'
            safe -= 8
            if safe % 2 == 1:
                # ...complete the 3x3 block when parity requires it...
                ans[2][2] = '.'
                safe -= 1
            # ...then extend in pairs down the first two columns...
            a = min(r - 3, safe // 2)
            for i in xrange(a):
                ans[3 + i][0] = ans[3 + i][1] = '.'
            safe -= 2 * a
            # ...and finally along the first two rows.
            for i in xrange(safe // 2):
                ans[0][3 + i] = ans[1][3 + i] = '.'
    ans[0][0] = 'c'
    if swapped: ans = trans(ans)
    for row in ans: print ''.join(row)
# Code Jam driver loop: read the case count, then (r, c, m) per case.
T = input()
for i in xrange(T):
    [r, c, m] = map(int, raw_input().split())
    print 'Case #%d:' % (i + 1)
    solve(r, c, m)
| [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
a4ba41d97249df95e6fde99000f8a0edaa8c65e4 | d124b7aae63da8fea664abb48c85ddea6ae49c3b | /lab3/1.py | 7b581bc18915d54ab1dff0abafffe6c4f66d7a56 | [] | no_license | 4rthb/dataSorting | 64093c5e09b88f624dff8fe01809a0f28c96b037 | c45e87947e6ac2dc0e74a67ed43ffeb98c08dea1 | refs/heads/master | 2023-04-19T22:21:55.641400 | 2021-05-13T10:15:39 | 2021-05-13T10:15:39 | 338,474,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,195 | py | import lsb
from collections import Counter

# Input texts and the four derived outputs (sorted copies + word stats).
fname = ["frankestein_clean.txt", "war_and_peace_clean.txt"]
outname = ["frankestein_clean_ordenado.txt", "war_and_peace_clean_ordenado.txt",
           "frankestein_clean_stats.txt", "war_and_peace_clean_stats.txt"]
entrada = [open(fname[0], "r"), open(fname[1], "r")]
saida = [open(outname[0], "w"), open(outname[1], "w"),
         open(outname[2], "w"), open(outname[3], "w")]

f1 = entrada[0].read().split(" ")
f2 = entrada[1].read().split(" ")

# Counter counts every word in a single O(n) pass.  The original code
# called list.count() once per unique word, which is O(n^2) on texts of
# this size.  Counter also iterates in first-occurrence order, so the
# stats files keep the exact same line order as before.
ocur = [Counter(f1), Counter(f2)]
print("First file stats analyzed!")
print("Second file stats analyzed!")

for word, count in ocur[0].items():
    saida[2].write(f"{word}: {count}\n")
for word, count in ocur[1].items():
    saida[3].write(f"{word}: {count}\n")
print("Stats files completed!")

# Sort both word lists with the LSD radix sort from the lsb module.
f1 = lsb.radix_sort(f1)
f2 = lsb.radix_sort(f2)
print("Files now sorted!")

for word in f1:
    saida[0].write("%s " % word)
for word in f2:
    saida[1].write("%s " % word)
print("Sorted files completed!")

for file in entrada:
    file.close()
for file in saida:
    file.close()
print("Process completed!")
| [
"b_arthp@outlook.com"
] | b_arthp@outlook.com |
aa8f11eec599dd5fa75448927adae7a953afee5f | ad2753a7af19eeb96048705b1e5df30b944d2c49 | /hr_system_app/models/company.py | 0e1f5dc0df0509dbbf160ade600b38773b5f20c0 | [] | no_license | cynthianyeint/hr_sys_backend | bcc4ee417504a9c9c2538e96142671f537fef0c9 | c347efcfd6497ef02505b497012bf25a582b45cd | refs/heads/master | 2020-03-22T09:01:31.242229 | 2018-07-06T08:32:47 | 2018-07-06T08:32:47 | 139,808,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | __author__ = 'Cynthia'
from django.db import models
from django.contrib.auth.models import User
from django.core.validators import RegexValidator
class Company(models.Model):
    """Company profile attached to a Django auth ``User``.

    The linked user supplies the account e-mail and join date; the model
    itself stores the display name and an optional phone number.
    """
    company_name = models.CharField(max_length=200, null=False, blank=True, default="")
    user = models.ForeignKey(User, on_delete=models.CASCADE, default="")
    phone_regex = RegexValidator(regex=r'^\+?1?\d{6,15}$', message="Phone number must be entered in the format: '+999999'. Up to 15 digits allowed.")
    phone_number = models.CharField(max_length=15, validators=[phone_regex], blank=True, null=True)

    def __unicode__(self):
        # Bug fix: the model has no ``name`` field, so the original
        # ``return self.name`` raised AttributeError whenever the object
        # was rendered; use the actual ``company_name`` field.
        return self.company_name

    @property
    def get_email(self):
        """E-mail of the owning auth user."""
        return self.user.email

    @property
    def get_date_joined(self):
        """Bound ``date()`` method of the user's join timestamp.

        NOTE(review): this returns the method object, not its result;
        presumably templates call it as ``{{ company.get_date_joined }}``
        which invokes callables -- confirm before changing.
        """
        return self.user.date_joined.date
| [
"nyeintnyeintkhin.92@gmail.com"
] | nyeintnyeintkhin.92@gmail.com |
b6176db9cf906b94b069180306ba7dc935c84b19 | 4061f9f2a7dc2acde4c4c630fbe10ac8f5913f5d | /user/views.py | 6059ac2806bf2a9c4bcdc72bc67893bae2b34d3b | [] | no_license | eibrahimarisoy/tasty_dishes | 8b9db3129c4d670f71a9e64025b25f51646c9e36 | ddfa3286bca06e153fbbd1e1a0d914c9f31d008e | refs/heads/master | 2022-12-04T00:45:55.607207 | 2020-04-03T09:42:31 | 2020-04-03T09:42:31 | 252,424,641 | 0 | 0 | null | 2022-11-22T05:27:25 | 2020-04-02T10:29:54 | JavaScript | UTF-8 | Python | false | false | 4,511 | py | from django.contrib import messages
from django.contrib.auth import authenticate, login, logout, update_session_auth_hash
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404, redirect, render
from user.forms import RegisterForm, LoginForm, UserUpdateForm
from recipe.models import Recipe
STATUS = "published"
def user_register(request):
    """Register a new user from RegisterForm data, log them in, and
    redirect to the index page; re-render the form on invalid input."""
    context = dict()
    form = RegisterForm(request.POST or None)
    if form.is_valid():
        # get new user information from form
        # NOTE(review): calling form.clean_<field>() directly re-runs the
        # field cleaners instead of reading form.cleaned_data; assumes
        # RegisterForm's clean_* methods are safe to invoke twice -- confirm.
        username = form.clean_username()
        first_name = form.clean_first_name()
        last_name = form.clean_last_name()
        email = form.clean_email()
        password = form.clean_password()
        # create new user and set_password and set active
        new_user = User(username=username, last_name=last_name,
                        first_name=first_name, email=email)
        new_user.set_password(password)
        new_user.is_active = True
        new_user.save()
        # login new user
        login(request, new_user)
        messages.success(request, "You have successfully registered.")
        return redirect("index")
    context["register_form"] = form
    return render(request, "user/register.html", context)
def user_login(request):
    """Authenticate by e-mail + password and start a session."""
    context = dict()
    form = LoginForm(request.POST or None)
    context["form"] = form
    if form.is_valid():
        email = form.cleaned_data.get("email")
        password = form.cleaned_data.get("password")
        # if username is not exists throw and error to user
        # (users log in with e-mail, so resolve it to a username first)
        try:
            username = User.objects.get(email=email).username
        except User.DoesNotExist:
            messages.info(request, "Username is wrong.")
            return render(request, "user/login.html", context)
        # check username and password are correct
        user = authenticate(request, username=username, password=password)
        if user is None:
            messages.info(request, "Username or password is wrong")
            return render(request, "user/login.html", context)
        else:
            messages.success(request, "You have successfully logged in.")
            # start new session for user
            login(request, user)
            return redirect("index")
    return render(request, "user/login.html", context)
@login_required()
def user_logout(request):
    """End the current session and return to the index page."""
    logout(request)
    messages.success(request, "You have successfully logged out.")
    return redirect("index")
@login_required()
def user_like_recipe_list(request):
    """List every recipe the current user has liked."""
    # to send user's favorite recipes to template
    context = dict()
    user = request.user
    recipes = Recipe.objects.filter(likes=user)
    context['recipes'] = recipes
    return render(request, "user/like_recipe_list.html", context)
@login_required()
def user_recipe_list(request):
    """List the current user's own recipes with "published" status."""
    # to show the user their own recipes
    context = dict()
    user = request.user
    recipes = Recipe.objects.filter(
        owner=user,
        status=STATUS,  # only recipes already published
    )
    context['recipes'] = recipes
    return render(request, "user/recipe_list.html", context)
@login_required()
def user_profile(request):
    """Show the logged-in user's profile page."""
    context = dict()
    user = get_object_or_404(User, pk=request.user.pk)
    context['user'] = user
    return render(request, "user/profile.html", context)
@login_required()
def update_user_profile(request):
    """Let the logged-in user edit their profile via UserUpdateForm."""
    context = dict()
    form = UserUpdateForm(request.POST or None, instance=request.user)
    context['form'] = form
    if request.method == "POST":
        if form.is_valid():
            form.save()
            messages.success(request, "Your profile updated successfully.")
            return redirect("user_profile")
    return render(request, "user/update_profile.html", context)
@login_required()
def change_password(request):
    """Change the logged-in user's password without ending the session."""
    context = dict()
    if request.method == 'POST':
        form = PasswordChangeForm(request.user, request.POST)
        if form.is_valid():
            user = form.save()
            # Keep the user logged in after the password change.
            update_session_auth_hash(request, user)
            messages.success(request, 'Your password has been successfully changed!')
            return redirect('user_profile')
        else:
            messages.error(request, 'You have logged in incorrectly!')
    else:
        form = PasswordChangeForm(request.user)
    context['form'] = form
    return render(request, 'user/change_password.html', context)
| [
"eibrahimarisoy@gmail.com"
] | eibrahimarisoy@gmail.com |
c108ca3cc8d2372d77d37263b1c355ac499377de | 295c8ae0aba57aca04c90bfbd4fcbfb83d80507c | /my_radar_syn/radar_multiprocess_pf_value.py | 7f8907d6f914f723e643f043b1ce9bb313352bb2 | [] | no_license | SYZhou-11/Lab_210 | a43ac68a6f0162d02e8f63639ed2f0e72dedc8f0 | 5a84180ac6a1c1025958713e9886e3b70d457300 | refs/heads/master | 2020-08-23T21:06:02.786667 | 2019-11-01T12:57:27 | 2019-11-01T12:57:27 | 216,706,422 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,911 | py | # 双雷达单目标跟踪,单目标找多个最大值
from __future__ import division, print_function
from pymoduleconnector import ModuleConnector
import winsound
import numpy as np
import time, os
import sympy
from sympy.abc import x, y
from multiprocessing import Process, Manager, Pool, Queue
import matplotlib.pyplot as plt
import multiprocessing
test_seconds = 12  # total recording duration of one run, in seconds
fps = 10  # radar frame rate (frames per second)
nframes = test_seconds * fps  # total number of frames captured per run
def reset(device_name):
    """Issue a module reset to the XeThru radar on ``device_name`` and
    wait a second for it to reboot."""
    mc = ModuleConnector(device_name)
    xep = mc.get_xep()
    xep.module_reset()
    mc.close()
    time.sleep(1)
def clear_buffer(xep):
    """Clears the frame buffer"""
    # Drain any queued float-data messages so the next read is fresh.
    while xep.peek_message_data_float():
        xep.read_message_data_float()
def xep_setup(device_name, baseband):
    """Reset the radar on ``device_name``, put it in manual (XEP) mode and
    configure DAC range, integration and detection range.

    ``baseband`` selects downconverted (complex baseband) output.
    Returns the configured xep handle with its ModuleConnector attached
    as ``xep.mc`` so callers can close the connection later.
    """
    reset(device_name)
    mc = ModuleConnector(device_name)
    # Assume an X4M300/X4M200 module and try to enter XEP mode
    app = mc.get_x4m300()
    # Stop running application and set module in manual mode.
    try:
        app.set_sensor_mode(0x13, 0)  # Make sure no profile is running.
    except RuntimeError:
        # Profile not running, OK
        pass
    try:
        app.set_sensor_mode(0x12, 0)  # Manual mode.
    except RuntimeError:
        # Sensor already stopped, OK
        pass
    xep = mc.get_xep()
    xep.mc = mc  # keep the connector reachable for clean shutdown
    # Set DAC range
    xep.x4driver_set_dac_min(949)
    xep.x4driver_set_dac_max(1100)
    # Set integration
    xep.x4driver_set_iterations(16)
    xep.x4driver_set_pulses_per_step(10)
    xep.x4driver_set_downconversion(int(baseband))
    # Set detection range
    xep.x4driver_set_frame_area(0, 9.0)
    return xep
def estimate_dis(temp, flag):
    """Smooth the last three distance samples in ``temp``.

    A sample that differs from its neighbour by at least ``flag`` is
    treated as an outlier: it is excluded from the average and patched
    in-place inside ``temp``.  Returns the rounded averaged distance.
    """
    first, middle, last = temp[-3], temp[-2], temp[-1]
    jump_in = abs(middle - first) >= flag   # middle disagrees with first
    jump_out = abs(last - middle) >= flag   # last disagrees with middle
    if jump_in and not jump_out:
        # middle and last agree -> first is the outlier
        distance = int(round((middle + last) / 2))
        temp[-3] = middle
    elif not jump_in and jump_out:
        # first and middle agree -> last is the outlier
        distance = int(round((first + middle) / 2))
        temp[-1] = middle
    elif not jump_in and not jump_out:
        # all three agree: plain average
        distance = int(round((first + middle + last) / 3))
    else:
        # both jumps present -> middle is the outlier
        distance = int(round((first + last) / 2))
        temp[-2] = distance
    return distance
def top_n_arg(last_arg, current_arr1, n):
    """Among the indices of the ``n`` largest values of ``current_arr1``,
    return the one closest to ``last_arg``.

    Used to keep the tracked range bin continuous between frames instead
    of jumping to a far-away peak.
    """
    # Indices of the n largest values, in descending order of value.
    candidates = np.argsort(-current_arr1)[:n]
    offsets = candidates - last_arg
    closest = int(np.argmin(np.abs(offsets)))
    return offsets[closest] + last_arg
def record_radar1(device_name, i, distance1, flag1):
    """Worker process for radar 1: capture ``nframes`` baseband frames
    from the XeThru module on ``device_name``, track the strongest moving
    reflector frame-to-frame and publish its range.

    ``i`` is the radar label used in prints/filenames.  ``distance1`` and
    ``flag1`` are shared multiprocessing.Value objects (range in metres,
    latest frame index) consumed by the plotting process.
    NOTE(review): this function duplicates record_radar2 except for the
    shared-value names -- candidate for consolidation.
    """
    #global xep_rad1
    xep_rad1 = xep_setup(device_name, baseband=True)
    xep_rad1.x4driver_set_fps(fps)
    time.sleep(max(2. / fps, 5e-2))
    if xep_rad1.peek_message_data_float() == 0:
        # No data arrived: streaming at this FPS failed; stop and bail out.
        print("FPS %d fails" % fps)
        xep_rad1.x4driver_set_fps(0)
        raise Exception("False")
    winsound.Beep(1000, 500)  # audible cue that recording starts
    frame = xep_rad1.read_message_data_float().get_data()
    #global lframe, frames_diff_rad1, frames_rad1, dis_max_rad1, dis_record_rad1, icounter_rad1, temp_icounter_rad1
    # Real and imaginary parts arrive concatenated, so fast-time length
    # is half the raw message length.
    lframe = int(len(frame) / 2)
    frames_rad1 = np.zeros((nframes, lframe), dtype=np.complex64)
    frames_diff_rad1 = np.zeros((nframes - 1, lframe), dtype=np.complex64)
    #dis_max_rad1 = list()
    #dis_record_rad1 = list()
    clear_buffer(xep_rad1)
    last_index, current_index = 0, 0
    for icounter_rad1 in range(nframes):
        frame = xep_rad1.read_message_data_float().get_data()
        frames_rad1[icounter_rad1] = np.array(frame[:lframe]) + 1j * np.array(frame[lframe:])
        if icounter_rad1 > 0:
            # Frame-to-frame difference suppresses static clutter; its
            # magnitude peak marks the moving target's range bin.
            frames_diff_rad1[icounter_rad1 - 1] = frames_rad1[icounter_rad1] - frames_rad1[icounter_rad1 - 1]
            if icounter_rad1 == 1:
                current_index = np.argmax(np.abs(frames_diff_rad1[icounter_rad1 - 1]))
            else:
                last_index = current_index
                # Among the 5 strongest bins pick the one closest to the
                # previous estimate, to keep the track continuous.
                current_index = top_n_arg(last_index, np.abs(frames_diff_rad1[icounter_rad1 - 1]), 5)
                """
                if abs(temp - last_index) <= 10:
                    last_index = temp
                """
                #last_index = temp
                #dis_max_rad1.append(last_index)
            # 0.0514 m is the range-bin spacing; publish to the plotter.
            # NOTE(review): indentation reconstructed from a mangled dump;
            # assumed to run once per frame after the first -- confirm.
            real_dis = 0.0514 * last_index
            print('radar_' + i + ': ', real_dis)
            distance1.value = real_dis
            flag1.value = icounter_rad1
            #time.sleep(0.5)
    xep_rad1.x4driver_set_fps(0)
    clear_buffer(xep_rad1)
    file_str = 'walk_radar_' + i + '_{}'
    np.save(file_str.format(time.localtime()), frames_rad1)
    xep_rad1.mc.close()
def record_radar2(device_name, i, distance2, flag2):
    """Worker process for radar 2 -- byte-for-byte the same logic as
    record_radar1, writing to ``distance2``/``flag2`` instead (the local
    variables keep their ``rad1`` names from the copy-paste).

    NOTE(review): duplicated code; a single parameterised worker would
    serve both radars.
    """
    #global xep_rad1
    xep_rad1 = xep_setup(device_name, baseband=True)
    xep_rad1.x4driver_set_fps(fps)
    time.sleep(max(2. / fps, 5e-2))
    if xep_rad1.peek_message_data_float() == 0:
        # No data arrived: streaming at this FPS failed; stop and bail out.
        print("FPS %d fails" % fps)
        xep_rad1.x4driver_set_fps(0)
        raise Exception("False")
    winsound.Beep(1000, 500)  # audible cue that recording starts
    frame = xep_rad1.read_message_data_float().get_data()
    #global lframe, frames_diff_rad1, frames_rad1, dis_max_rad1, dis_record_rad1, icounter_rad1, temp_icounter_rad1
    # Real and imaginary parts arrive concatenated, so fast-time length
    # is half the raw message length.
    lframe = int(len(frame) / 2)
    frames_rad1 = np.zeros((nframes, lframe), dtype=np.complex64)
    frames_diff_rad1 = np.zeros((nframes - 1, lframe), dtype=np.complex64)
    #dis_max_rad1 = list()
    #dis_record_rad1 = list()
    clear_buffer(xep_rad1)
    last_index, current_index = 0, 0
    for icounter_rad1 in range(nframes):
        frame = xep_rad1.read_message_data_float().get_data()
        frames_rad1[icounter_rad1] = np.array(frame[:lframe]) + 1j * np.array(frame[lframe:])
        if icounter_rad1 > 0:
            # Frame-to-frame difference suppresses static clutter; its
            # magnitude peak marks the moving target's range bin.
            frames_diff_rad1[icounter_rad1 - 1] = frames_rad1[icounter_rad1] - frames_rad1[icounter_rad1 - 1]
            if icounter_rad1 == 1:
                current_index = np.argmax(np.abs(frames_diff_rad1[icounter_rad1 - 1]))
            else:
                last_index = current_index
                # Among the 5 strongest bins pick the one closest to the
                # previous estimate, to keep the track continuous.
                current_index = top_n_arg(last_index, np.abs(frames_diff_rad1[icounter_rad1 - 1]), 5)
                """
                if abs(temp - last_index) <= 10:
                    last_index = temp
                """
                #last_index = temp
                #dis_max_rad1.append(last_index)
            # 0.0514 m is the range-bin spacing; publish to the plotter.
            # NOTE(review): indentation reconstructed from a mangled dump;
            # assumed to run once per frame after the first -- confirm.
            real_dis = 0.0514 * last_index
            print('radar_' + i + ': ', real_dis)
            distance2.value = real_dis
            flag2.value = icounter_rad1
            #time.sleep(0.5)
    xep_rad1.x4driver_set_fps(0)
    clear_buffer(xep_rad1)
    file_str = 'walk_radar_' + i + '_{}'
    np.save(file_str.format(time.localtime()), frames_rad1)
    xep_rad1.mc.close()
def get_point(distance1, distance2, flag1, flag2):
    """Consumer process: trilaterate the target position from the two
    radar ranges and live-plot the track.

    distance1/distance2 are shared floats written by the radar workers;
    flag1/flag2 carry the latest frame index and signal termination once
    either reaches ``nframes - 1``.
    """
    x_list = list()
    y_list = list()
    plt.ion()  # interactive mode so the plot refreshes inside the loop
    plt.figure(1)
    while True:
        if distance1.value != 0 and distance2.value != 0:
            dis1 = distance1.value
            dis2 = distance2.value
            # Intersect two circles centred at (-0.8, 0) and (0.8, 0) --
            # the radar positions, 1.6 m apart -- with radii dis1 / dis2.
            aa = sympy.solve([(x + 0.8) ** 2 + y ** 2 - dis1 ** 2, (x - 0.8) ** 2 + y ** 2 - dis2 ** 2], [x, y])
            # if q1.value != 0 and q2.value != 0:
            # aa = sympy.solve([x ** 2 + y ** 2 - q1.value ** 2, (x - 1.6) ** 2 + y ** 2 - q2.value ** 2], [x, y])
            # Keep one intersection; abs() folds the solution into y >= 0.
            result = [round(aa[0][0], 2), round(abs(aa[0][1]), 2)]
            print("当前距离为: r1 = " + str(dis1) + "m , r2 = " + str(dis2) + "m")
            # print("当前坐标为: x = " + str(result[0]) + "m , y = " + str(result[1]) + "m")
            x_list.append(result[0])
            y_list.append(result[1])
            plt.clf()
            plt.axis([-3, 3, 0, 6])
            ax = plt.gca()
            ax.xaxis.set_ticks_position('top')
            ax.yaxis.set_ticks_position('left')
            ax.invert_yaxis()  # y grows downward to match the room layout
            ax.spines['left'].set_position(('data', 0))
            plt.plot(x_list, y_list, 'o', markersize=7, color='blue', alpha=0.5)
            plt.pause(0.0001)
        if flag1.value == nframes - 1 or flag2.value == nframes - 1:
            # Either radar finished its run: freeze the final plot and exit.
            plt.ioff()
            plt.show()
            break
if __name__ == "__main__":
    #radar_q1 = Queue()
    #radar_q2 = Queue()
    # Shared memory between the two radar workers and the plotter: flags
    # carry the latest frame index, distances the latest range in metres.
    flag1 = multiprocessing.Value("i", 0)  # integer, initialised to 0
    flag2 = multiprocessing.Value("i", 0)
    distance1 = multiprocessing.Value("f", 0)  # float, initialised to 0
    distance2 = multiprocessing.Value("f", 0)
    pg_rad1 = Process(target=record_radar1, args=('COM3', '1', distance1, flag1))
    pg_rad2 = Process(target=record_radar2, args=('COM4', '2', distance2, flag2))
    p_out = Process(target=get_point, args=(distance1, distance2, flag1, flag2))
    time.sleep(5)  # give the operator time to get into position
    pg_rad1.start()
    #time.sleep(1)
    pg_rad2.start()
    p_out.start()
    pg_rad1.join()
    pg_rad2.join()
    #p_out.terminate()
    p_out.join()
| [
"syzhou1995@zju.edu.cn"
] | syzhou1995@zju.edu.cn |
961b6ba6d21cb01e798f3e3af4767d604c8e5b22 | 8fca3c04dbaf0fab3cc07b151d5101d001ea2a4b | /webpage.py | 68c6714571c68d8c047f87116cc5c525d01253f7 | [] | no_license | YuXinZhi/PythonLearning | 0df946fe8777608579ef986b8e1bcf39350bbf34 | a446df618eebe3bec6460da37e172be24b7c62fa | refs/heads/master | 2021-01-13T06:16:30.450677 | 2017-06-24T19:15:20 | 2017-06-24T19:15:20 | 95,117,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35 | py | '''
beautifulsoup
Python-goose
''' | [
"whoyumengmeng@gmail.com"
] | whoyumengmeng@gmail.com |
be2de358f55905585b69f0671ad93625d31c6afe | c19295c1a25f18bec599e2ba0d2211587eea3978 | /django_youtube_analyzer/urls.py | 1fadbc98fb2c6df38c259923743091a498f34baf | [] | no_license | motsuka1/django_youtube_analyzer | 144774b0179f0346fe905acc7af4afc03d4c4f21 | 49170a7c5ad1a3928f88a9b89961fa5e5e627a71 | refs/heads/main | 2023-01-30T20:41:45.149058 | 2020-12-10T15:05:40 | 2020-12-10T15:05:40 | 320,163,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py | """django_youtube_analyzer URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# Root URL configuration: only the Django admin site is routed.
urlpatterns = [
    path('admin/', admin.site.urls),
]
| [
"motsuka1996@gmail.com"
] | motsuka1996@gmail.com |
fa3f5466ad8bcab2dadb823615e08fc9e14db94a | c0795000de54a26956efe1a87afba507bb328b81 | /docs/conf.py | ccd96e7307cb1b7e20bed096c7eb0dfae85de6c9 | [
"MIT"
] | permissive | steinitzu/beets | ff6c24d9e072b3d86f889e2b9af66a6ca2374d09 | 1fbbe6154698ce50f1a7e8d32af9a6376e2c7ede | refs/heads/master | 2021-01-16T20:26:07.732280 | 2013-02-28T18:43:02 | 2013-02-28T18:43:02 | 7,949,551 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 972 | py | AUTHOR = u'Adrian Sampson'
# Sphinx configuration for the beets documentation build.
# -- General configuration -----------------------------------------------------
extensions = []
#templates_path = ['_templates']
exclude_patterns = ['_build']
source_suffix = '.rst'
master_doc = 'index'
project = u'beets'
copyright = u'2012, Adrian Sampson'
version = '1.1'
release = '1.1b3'
pygments_style = 'sphinx'
# -- Options for HTML output ---------------------------------------------------
html_theme = 'default'
#html_static_path = ['_static']
htmlhelp_basename = 'beetsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_documents = [
    ('index', 'beets.tex', u'beets Documentation',
     AUTHOR, 'manual'),
]
# -- Options for manual page output --------------------------------------------
# (source, name, description, authors, manual section)
man_pages = [
    ('reference/cli', 'beet', u'music tagger and library organizer',
     [AUTHOR], 1),
    ('reference/config', 'beetsconfig', u'beets configuration file',
     [AUTHOR], 5),
]
| [
"adrian@radbox.org"
] | adrian@radbox.org |
cfcddb4feb4741231274227ddbdf9f5c30039ae3 | 225152fd5771de163473a06307a02c1b05f45301 | /commands.py | bf60aac1cb1205d99a62057946308503e9e777a4 | [] | no_license | elmWilh/sunshinedsbot | 874bf86a834d06def8eb82c8f37698f4bee1ba92 | f786c9009a963066ceae3b4f464428a1f19c6129 | refs/heads/main | 2023-04-16T04:09:15.829552 | 2021-04-17T16:03:41 | 2021-04-17T16:03:41 | 359,215,595 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,775 | py | """
"ладно" считатель - Простой в использовании бот Discord, который считает, сколько раз каждый пользователь сказал ладно
Написано в 2019 by NinjaSnail1080 (Дискорд: @NinjaSnail1080#8581), улучшено и переведено Perchun_Pak
"""
from discord.ext import commands
import discord
import collections
import datetime
import time
import pprint
import sys
from random import randint
import asyncpg
def find_color(ctx):
    """Return the bot's embed colour: its role colour in the current
    guild, or Discord "greyple" when that colour is the default or the
    context is a DM channel (translated from the original Russian)."""
    try:
        if ctx.guild.me.color == discord.Color.default():
            color = discord.Color.greyple()
        else:
            color = ctx.guild.me.color
    except AttributeError:  # * If it's a DM channel (ctx.guild is None)
        color = discord.Color.greyple()
    return color
class Commands(commands.Cog):
"""Команды для ладно считателя"""
def __init__(self, bot):
self.bot = bot
    @commands.command()
    async def help(self, ctx):
        """Это команда помощи!"""
        # Docstrings double as the user-facing (Russian) help text shown
        # by this command, so they are kept verbatim.
        # Alphabetical list of all non-hidden commands; one embed field
        # per command, its value being the command's docstring.
        cmds = sorted([c for c in self.bot.commands if not c.hidden], key=lambda c: c.name)
        embed = discord.Embed(
            title="Команда помощи",
            description="Я считаю каждый раз когда кто то говорит " + '"ладно"' + ". Я "
            "довольно простой бот в использовании. Мой префикс это @упоминание, имеется ввиду что Вам нужно "
            f"вставлять {self.bot.user.mention} перед каждой командой."
            "\n\nВот короткий список моих команд:",
            color=find_color(ctx))
        embed.set_footer(
            text="Примечание: Нет, я не считаю слова перед тем как присоединился на сервер")
        for c in cmds:
            embed.add_field(name=c.name, value=c.help, inline=False)
        await ctx.send(embed=embed)
    @commands.command(aliases=["info"])
    async def about(self, ctx):
        """Немного базовой информации про меня"""
        # Builds one embed with the bot's application info, counts and
        # runtime/library details (field labels are user-facing Russian).
        embed = discord.Embed(
            title=str(self.bot.user), description=self.bot.app_info.description +
            f"\n\n**ID**: {self.bot.app_info.id}", color=find_color(ctx))
        embed.set_thumbnail(url=self.bot.app_info.icon_url)
        embed.add_field(name="Владелц", value=self.bot.app_info.owner)
        embed.add_field(name="Количество серверов", value=len(self.bot.guilds))
        embed.add_field(name="Количество пользователей", value=len(self.bot.users))
        embed.add_field(
            name="Язык програмирования",
            value=f"Python {sys.version_info[0]}.{sys.version_info[1]}.{sys.version_info[2]}")
        embed.add_field(
            name="Библиотека", value="[discord.py](https://github.com/Rapptz/discord.py)")
        embed.add_field(
            name="Лицензия",
            value="[CC0 1.0 Universal](https://creativecommons.org/publicdomain/zero/1.0/)")
        embed.add_field(
            name="Открытый код", value="https://github.com/PerchunPak/sunshinedsbot", inline=False)
        embed.set_footer(
            text="Примечание: Оригинальный автор не Perchun_Pak#9236, а NinjaSnail1080#8581")
        await ctx.send(embed=embed)
    @commands.command()
    async def count(self, ctx, user: discord.User = None):
        """Узнайте, сколько раз пользователь сказал "ладно"
        Формат: `count <@пинг пользователя>`
        Если вы не указываете пинг, я укажу **вашу** статистику
        """
        # Default to the command author when no user is mentioned.
        if user is None:
            user = ctx.author
        if user == self.bot.user:
            # Easter egg: the bot reports a joke count for itself.
            return await ctx.send(
                """@MATUKITE has said the N-word **1,070,855 times**,
__1,070,801 of which had a hard-R__
They've said the N-word __23,737 times__ since they were last investigated
""")
        if user.bot:
            return await ctx.send(
                "Я не считаю " + '"ладно-слова"' + ", сказанные ботами. Представляете, насколько это было бы странно?")
        try:
            count = self.bot.lwords[user.id]
        except:
            # NOTE(review): bare except -- presumably guarding a KeyError
            # when the user has no entry yet; narrow it when refactoring.
            return await ctx.send(f"{user.mention} еще ни разу не говорил " + '"ладно"' + ". Странный чел")
        if count["total"]:
            # Report the total, plus the delta since the last check when
            # a "last_time" snapshot exists.
            msg = (f"{user.mention} сказал ладно **{count['total']:,} "
                   f"раз{'а' if count['total'] == 2 or count['total'] == 3 or count['total'] == 4 else 'а'}**")
            if "last_time" in count:
                since_last = count["total"] - count["last_time"]
                if since_last:
                    msg += (f".\n\nТак же {user.mention} сказал ладно __{since_last:,} "
                            f"раз{'а' if since_last == 2 or since_last == 3 or since_last == 4 else 'а'}__ "
                            "с прошлой проверки")
            await ctx.send(msg)
            # Snapshot the total so the next call reports a fresh delta.
            self.bot.lwords[user.id]["last_time"] = self.bot.lwords[user.id]["total"]
        else:
            await ctx.send(f"{user.mention} еще ни разу не говорил " + '"ладно"' + ". Странный чел")
@count.error
async def count_error(self, ctx, exc):
if isinstance(exc, commands.BadArgument):
return await ctx.send(exc)
@commands.command()
async def invite(self, ctx):
"""Скидывает ссылку чтобы Вы могли пригласить бота на свой сервер"""
await ctx.send("Это моя пригласительная ссылка чтобы Вы могли считать " + '"ладно"' + " тоже:\n"
f"https://discordapp.com/oauth2/authorize?client_id={self.bot.app_info.id}"
"&scope=bot&permissions=8")
@commands.command()
async def stats(self, ctx):
"""Показывает мою статистику"""
await ctx.channel.trigger_typing()
uptime = datetime.datetime.utcnow() - self.bot.started_at
# * This code was copied from my other bot, MAT
y = int(uptime.total_seconds()) // 31557600 # * Number of seconds in 356.25 days
mo = int(uptime.total_seconds()) // 2592000 % 12 # * Number of seconds in 30 days
d = int(uptime.total_seconds()) // 86400 % 30 # * Number of seconds in 1 day
h = int(uptime.total_seconds()) // 3600 % 24 # * Number of seconds in 1 hour
mi = int(uptime.total_seconds()) // 60 % 60 # * etc.
se = int(uptime.total_seconds()) % 60
frmtd_uptime = []
if y != 0:
frmtd_uptime.append(f"{y}г")
if mo != 0:
frmtd_uptime.append(f"{mo}мес")
if d != 0:
frmtd_uptime.append(f"{d}дн")
if h != 0:
frmtd_uptime.append(f"{h}ч")
if mi != 0:
frmtd_uptime.append(f"{mi}м")
if se != 0:
frmtd_uptime.append(f"{se}с")
allUsers = f"{len(self.bot.lwords):,}"
for _ in range(1): randomInt = randint(0, 100)
embed = discord.Embed(
description=f"ID: {self.bot.user.id}",
timestamp=datetime.datetime.utcnow(),
color=find_color(ctx))
embed.add_field(name="Количество серверов", value=f"{len(self.bot.guilds):,} серверов")
embed.add_field(name="Количество пользовотелей", value=f"{len(self.bot.users):,} уникальных пользователей")
embed.add_field(
name="Количество каналов",
value=f"{len(list(self.bot.get_all_channels()) + self.bot.private_channels):,} "
"каналов")
embed.add_field(
name="Использование памяти",
value=f"{round(self.bot.process.memory_info().rss / 1000000, 2)} МБ")
embed.add_field(name="Пинг", value=f"{round(self.bot.latency * 1000, 2)}мс")
embed.add_field(name="Аптайм", value=" ".join(frmtd_uptime) + " после прошлого рестарта")
embed.add_field(
name="Количество пользователей кто произнес " + '"ладно"',
value=str(int(allUsers) - 1),
inline=False)
embed.add_field(
name="Всего слов насчитано",
value=f"{self.bot.lwords[0]['total']:,} ",
inline=False)
embed.set_author(name="Статистика", icon_url=self.bot.user.avatar_url)
embed.set_footer(text="Эти статистические данные верны на: " + str(randomInt) + "%")
await ctx.send(embed=embed)
@commands.command(aliases=["leaderboard", "high"])
@commands.guild_only()
async def top(self, ctx, param: str = None):
"""Показывает таблицу лидеров по произношению слова "ладно" на этом сервере. Используйте `top global` чтобы посмотреть таблицу лидеров всех серверов
Примечание: Если пользователь сказал "ладно" на другом сервере, на котором я тоже есть, они будут приняты во внимание.
"""
await ctx.channel.trigger_typing()
def create_leaderboard():
leaderboard = {}
if param == "global":
for u, n in self.bot.lwords.items():
if self.bot.get_user(u):
leaderboard.update({self.bot.get_user(u): n["total"]})
leaderboard = dict(collections.Counter(leaderboard).most_common(10))
else:
for m in ctx.guild.members:
if m.id in self.bot.lwords and not m.bot:
if self.bot.lwords[m.id]["total"]:
leaderboard.update({m: self.bot.lwords[m.id]["total"]})
leaderboard = dict(collections.Counter(leaderboard).most_common(10))
return leaderboard
leaderboard = await self.bot.loop.run_in_executor(None, create_leaderboard)
if not len(leaderboard):
return await ctx.send("На этом сервере еще никто не сказал " + '"ладно"')
description = "\n"
counter = 1
for m, c in leaderboard.items():
description += (f"**{counter}.** {m if param == 'global' else m.mention} - __{c:,} "
f"раз{'а' if c == 2 or c == 3 or c == 4 else ''}__\n")
counter += 1
description = description.replace("**1.**", ":first_place:").replace("**2.**", ":second_place:").replace(
"**3.**", ":third_place:")
embed = discord.Embed(description=description, color=find_color(ctx),
timestamp=datetime.datetime.utcnow())
if param == "global":
embed.set_author(
name=f"Топ за все время")
else:
embed.set_author(
name=f"Топ сервера {ctx.guild.name}", icon_url=ctx.guild.icon_url)
for _ in range(1): randomInt = randint(0, 100)
embed.set_footer(
text="Эти списки верны на: " + str(randomInt) + "%", icon_url=self.bot.user.avatar_url)
await ctx.send(embed=embed)
@top.error
async def top_error(self, ctx, exc):
if isinstance(exc, commands.NoPrivateMessage):
return await ctx.send(exc)
@commands.command(hidden=True)
@commands.is_owner()
async def edit(self, ctx, user_id: int, total: int, last_time: int = None):
"""Отредактируйте запись пользователя в ДБ или добавьте новую"""
totalBefore = self.bot.lwords[user_id]['total']
if total < totalBefore:
self.bot.lwords[0]["total"] -= (totalBefore - (self.bot.lwords[user_id]['total']))
if last_time:
self.bot.lwords[user_id] = {"id": user_id, "total": total, "last_time": last_time}
else:
self.bot.lwords[user_id] = {"id": user_id, "total": total}
elif total > totalBefore:
self.bot.lwords[0]["total"] += (int(self.bot.lwords[user_id]['total']) - totalBefore)
if last_time:
self.bot.lwords[user_id] = {"id": user_id, "total": total, "last_time": last_time}
else:
self.bot.lwords[user_id] = {"id": user_id, "total": total}
else:
await ctx.send("Неизвестная ошибка")
await ctx.send("Готово")
@commands.command(hidden=True)
@commands.is_owner()
async def pop(self, ctx, user_id: int):
"""Удалите пользователя с ДБ"""
self.bot.lwords[0]["total"] -= int(self.bot.lwords[user_id]['total'])
try:
self.bot.lwords.pop(user_id)
await ctx.send("Готово")
except KeyError as e:
await ctx.send(f"Ошибка: ```{e}```")
@commands.command(hidden=True)
@commands.is_owner()
async def execute(self, ctx, *, query):
"""Выполнить запрос в базе данных"""
try:
with ctx.channel.typing():
async with self.bot.pool.acquire() as conn:
result = await conn.execute(query)
await ctx.send(f"Запрос выполнен:```{result}```")
except Exception as e:
await ctx.send(f"Ошибка:```{e}```")
@commands.command(hidden=True)
@commands.is_owner()
async def fetch(self, ctx, *, query):
"""Выполнить поиск в базе данных"""
try:
with ctx.channel.typing():
async with self.bot.pool.acquire() as conn:
result = await conn.fetch(query)
fmtd_result = pprint.pformat([dict(i) for i in result])
await ctx.send(f"Поиск выполнен:```{fmtd_result}```")
except Exception as e:
await ctx.send(f"Ошибка:```{e}```")
@commands.command(aliases=["resetstatus"], hidden=True)
@commands.is_owner()
async def restartstatus(self, ctx):
await self.bot.change_presence(status=discord.Status.online, activity=discord.Activity(
name=f'кто сколько раз сказал "ладно"', type=discord.ActivityType.competing))
await ctx.send("Статус был сброшен")
@commands.command(hidden=True)
@commands.is_owner()
async def setstatus(self, ctx, status):
"""Изменить статус бота"""
if status.startswith("on"):
await self.bot.change_presence(status=discord.Status.online)
elif status.startswith("id"):
await self.bot.change_presence(status=discord.Status.idle)
elif status.startswith("d"):
await self.bot.change_presence(status=discord.Status.dnd)
elif status.startswith("off") or status.startswith("in"):
await self.bot.change_presence(status=discord.Status.invisible)
else:
await ctx.send("Недействительный статус")
await ctx.send("Поставить новый статус")
@commands.command(hidden=True)
@commands.is_owner()
async def updatedb(self, ctx):
temp = await ctx.send("Обновление вручную... Это может занять несколько минут... Подождите...")
with ctx.channel.typing():
start = time.perf_counter()
async with self.bot.pool.acquire() as conn:
await conn.execute("""
INSERT INTO lwords
(id)
VALUES {}
ON CONFLICT
DO NOTHING
;""".format(", ".join([f"({u})" for u in self.bot.lwords])))
for data in self.bot.lwords.copy().values():
await conn.execute("""
UPDATE lwords
SET total = {}
WHERE id = {}
;""".format(data["total"], data["id"]))
delta = time.perf_counter() - start
mi = int(delta) // 60
sec = int(delta) % 60
ms = round(delta * 1000 % 1000)
await temp.delete()
await ctx.send(f"Завершено обновление базы данных ({mi}м {sec}с {ms}мс)")
def setup(bot):
    """Entry point used by discord.py's extension loader."""
    cog = Commands(bot)
    bot.add_cog(cog)
| [
"noreply@github.com"
] | elmWilh.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.