blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1b3579c057d59d68b0e4e4a606d18f7d37dcd74d | ef5b50ab02759efb94d1536f93ec1b51543c2169 | /Model/Net.py | 9f7b873f48c68a89da4f85269ff2e1e8f081f5ae | [] | no_license | swJiang/CCN-CTN | 759c0b5b7e7a425d3062bf1fc4d05e063319c1a6 | fc022a966b4412908915836116df942fc3ebaa35 | refs/heads/master | 2020-04-08T07:48:42.430869 | 2018-11-27T07:43:19 | 2018-11-27T07:43:19 | 159,152,617 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | import tensorflow as tf
from Model.FC import fc_layer
from Model.NetWork import network
from Model.accuracy import compute_acc
from Model.loss import compute_loss
class Model(object):
    """Siamese wrapper: feeds the real and imaginary parts of a complex
    input through one shared sub-network and concatenates the outputs."""
    def __init__(self):
        # The building blocks are stored as plain functions on the
        # instance (not bound methods), which is why build_net passes
        # ``self`` explicitly when it calls them.
        self.FC = fc_layer
        self.network = network
        self.compute_acc = compute_acc
        self.compute_loss = compute_loss
    def build_net(self, inputs):
        """Run the twin branches over complex ``inputs`` with shared
        weights (the variable scope is reused for the second branch)."""
        with tf.variable_scope("siamese") as scope:
            o1 = self.network(self, tf.real(inputs))
            scope.reuse_variables()
            o2 = self.network(self, tf.imag(inputs))
        return tf.concat([o1,o2],axis=-1)
    def acc(self, logits, labels):
        # Round logits to hard predictions before scoring accuracy.
        logits = tf.round(logits)
        return self.compute_acc(logits, labels)
    def loss(self, logits, labels):
        loss = self.compute_loss(labels=labels,logits=logits)
        return loss
"jsw950515@gmail.com"
] | jsw950515@gmail.com |
e31dea602a2885d6f6b29d64376f9e3e2a16e75e | 57391fbdde43c3d2e8628613d9003c65ff8abf9d | /Exercicios/ex050.py | c2deb6d939dc7f9a9690a0cbb5d9e7af53d18167 | [] | no_license | JoaolSoares/CursoEmVideo_python | 082a6aff52414cdcc7ee94d76c3af0ac2cb2aaf5 | aa9d6553ca890a6d9369e60504290193d1c0fb54 | refs/heads/main | 2023-07-15T07:39:57.299061 | 2021-08-26T20:04:22 | 2021-08-26T20:04:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | soma = 0
# Read six integers from the user and sum only the even ones.
for c in range(1, 7):
    n1 = int(input('Diga um {}º numero: ' .format(c)))
    if n1 % 2 == 0:
        soma += n1  # even value: add to the running total
print('A soma de todos os numeros pares é de: \033[1;34m{}\033[m'. format(soma))
| [
"joaolucassoaresk@outlook.com"
] | joaolucassoaresk@outlook.com |
7579bfc272c5d6e5cbc01b35f5cbab6443cfb503 | bcc043d5686f2ed7d2e23ae442612f252a811857 | /tests/parsing/test_translator.py | 2fed15b5ab9aa033ef6cc833fc3234a8c92f1126 | [
"MIT"
] | permissive | eryktr/xmarie-vm | e5674d8c1cc08650af34925f51f59e389f598f20 | 736b7fa0ad04322b37938027c63859aee888fbbb | refs/heads/master | 2023-03-04T01:18:56.313422 | 2021-02-10T23:38:09 | 2021-02-10T23:38:09 | 282,985,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py | import pytest
from xmarievm.parsing.ast_types import JnS, Jump, Halt, LoadX, LoadY, Push, Pop, Clear, ShiftR, ShiftL, HEX, DEC
import xmarievm.parsing.translator as translator
def instr(x):
    """Parse a hexadecimal machine-word string into its integer value."""
    value = int(x, 16)
    return value
# Each case pairs a list of AST instruction objects with the expected
# machine words (5 hex digits per word; HEX values use 20-bit two's
# complement, hence 0xFFFFF -> -1).
@pytest.mark.parametrize('instructions, result', [
    (
        [JnS(0x10), Jump(0x20), Halt()],
        [instr('00010'), instr('09020'), instr('07000')],
    ),
    (
        [LoadX(), LoadY(), Push(), Pop(), Clear()],
        [instr('15000'), instr('16000'), instr('0F000'), instr('10000'), instr('0A000')],
    ),
    (
        [ShiftR(0x90), ShiftL(0xA1)],
        [instr('12090'), instr('110A1')],
    ),
    (
        [HEX(0x00020), HEX(0xFFFFF), HEX(0xFFFFE)],
        [instr('00020'), -1, -2],
    ),
    (
        [DEC(1024), DEC(0), DEC(2048)],
        [1024, 0, 2048],
    )
])
def test_translate_instructions(instructions, result):
    """translate() maps instruction objects to their integer encodings."""
    assert translator.translate(instructions, labels={}) == result
| [
"eryk.trzeciakiewicz@gmail.com"
] | eryk.trzeciakiewicz@gmail.com |
e455c8af6a4cbc82b1d597f07de8d84ccb35de0d | 614db3d71137766beea7fbda39693ff8fa625d9a | /source_code/collect_git.py | 2444a2a517e03bb4699dab255eed45b3b6e555f8 | [] | no_license | DmSide/DmSide-ai_code_analysis_tools | c8c0043d8ac418d732043f0c0d2844762aa08eb8 | 327ede21d651aada1711d49cf3a55dea98bfc6c9 | refs/heads/master | 2021-06-26T04:10:50.305101 | 2019-12-26T08:40:35 | 2019-12-26T08:40:35 | 230,227,174 | 0 | 0 | null | 2021-06-10T22:26:27 | 2019-12-26T08:36:44 | Python | UTF-8 | Python | false | false | 6,052 | py | import sys
from os import listdir
from os.path import join, abspath
from threading import Thread, active_count
import pandas as pd
from git import exc, Repo, RemoteProgress
# import asyncio
config = dict()
accounts = dict()
repo_ptr = {}
def init():
    """Populate the module-level ``config`` and ``accounts`` dicts.

    Reads two plain-text files from ``../data``: ``config`` holds
    ``key = value`` lines; ``accounts`` holds ``name user password``
    lines stored as ``name -> "user:password@"`` for URL building.
    Plain text is used so the script can be reconfigured with any text
    editor when run as a terminal tool.

    Warning: the ad-hoc splitting is fragile; a regular expression (or
    configparser) would be more robust.
    """
    with open(join(abspath('../data'), 'config')) as config_file:
        # TODO temporary, while config ctructure and requirement for it doesnt exist,
        # then it is preferable to use regular expression
        for config_line in config_file:
            config_line = config_line.split(' = ')
            config.update({config_line[0]: config_line[1][:-1]})
    with open(join(abspath('../data'), 'accounts')) as __accounts:
        # TODO temporary, while config ctructure and requirement for it doesnt exist
        for account in __accounts:
            account = account.split(' ')
            accounts.update({account[0]: f'{account[1]}:{account[2]}@'})
def thread(function):
    """Decorator that runs ``function`` in a freshly started thread.

    The wrapped call starts the thread immediately and returns the
    ``Thread`` object so callers may ``join()`` it if they wish (the
    original returned ``None``; returning the thread is backward
    compatible because existing callers ignore the result).
    """
    def threaded_function(*args, **kwargs):
        _thread = Thread(target=function, args=args, kwargs=kwargs)
        _thread.start()
        return _thread
    return threaded_function
# async def load_repo(link=None, username=None, password=None, account=None): # asinchron implementation
class ProgressPrinter(RemoteProgress):
    """
    GitPython progress callback that echoes clone progress to stdout.
    """
    def update(self, op_code, cur_count, max_count=None, message=''):
        # ``max_count or 100.0`` guards against a missing/zero total.
        print(op_code, cur_count, max_count, cur_count / (max_count or 100.0), message or "NO MESSAGE")
def get_new_name(repo_name: str, config=config) -> str:
    """Return a free folder name under ``config['save_path']``.

    If ``repo_name`` already exists, append the first unused ``(i)``
    suffix (``repo``, ``repo(1)``, ``repo(2)``, ...).
    """
    if repo_name in listdir(config['save_path']):
        i = 1
        # Probe suffixes until one is free among same-prefixed folders.
        while f'{repo_name}({i})' in [folder for folder in listdir(config['save_path']) if
                                      folder.startswith(repo_name)]:
            i += 1
        return f'{repo_name}({i})'
    return repo_name
@thread
def load_repo(link=None, username=None, password=None, account=None):
    '''
    Clone a repository in a background thread and register it in the
    module-level ``repo_ptr`` dict (name -> git.Repo).

    :param link: clone URL of the repository
    :param username: user name for HTTP basic authentication
    :param password: password for HTTP basic authentication
    :param account: key into the ``accounts`` dict ("user:password@")
    :return: None (runs in a thread; results land in ``repo_ptr``)
    '''
    if link:
        try:
            repo_name = link.split('/')[-1][:-4]
            repo_name = get_new_name(repo_name)
            path = join(config['save_path'], repo_name)
            # Build the clone URL: explicit username/password wins, then
            # a stored account, otherwise the link is used untouched.
            # (Bug fix: the original computed the account URL but then
            # always overwrote it with ``link`` whenever username or
            # password was missing.)
            if username is not None and password is not None:
                url = f'https://{username}:{password}@{link.split("://")[-1]}'
            elif account is not None:
                url = f'https://{accounts[account]}{link.split("://")[-1]}'
            else:
                url = link
            Repo.clone_from(url, path)  # , progress=ProgressPrinter)
        except exc.GitCommandError as error:
            print(f'Git command error: {error}\n')
            # print(f'repository link: {url}')
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            import traceback
            trace = traceback.format_exc()
            print("Exception occured:\n{}\n\n".format(trace))
        else:
            repo_ptr.update({repo_name: Repo(path)})
if __name__ == "__main__":
    init()
    # parsing arguments from terminal
    for argument in sys.argv[1:]:
        # TODO add some specific key arguments for adding accounts, paths , change options and etc.
        data = pd.read_csv(argument)
        # Load *.csv and read rows from table
        data = data.drop_duplicates()
        data = data.dropna(subset=['link'])
        # Partition rows into batches so that two repositories with the
        # same name never run concurrently (their clone target folders
        # would collide).
        threads = [[]]
        separate_threads = [[]]
        for source in data.to_dict('records'):
            for num, thread_queue in enumerate(separate_threads, 0):
                repository_name = source['link'].split('/')[-1]
                if repository_name not in thread_queue:
                    separate_threads[num].append(repository_name)
                    threads[num].append(source)
                    break
                else:
                    if len(separate_threads) - 1 == num:
                        separate_threads.append([repository_name])
                        threads.append([source])
                        break
        for queue in threads:
            for load_thread in queue:
                load_repo(**{key: load_thread[key] for key in load_thread if not pd.isnull(load_thread[key])})
            else:
                # Wait until all threads completed
                # NOTE(review): busy-wait on active_count(); joining the
                # Thread objects would avoid spinning the CPU.
                while active_count() != 1:
                    # TODO progress indication
                    pass
                else:
                    pass
# pass
for i in repo_ptr:
print(i, repo_ptr[i]) # pointers to repositories !!!!!!!!!!!!
# repo = git.Repo.clone_from(self._small_repo_url(), os.path.join(rw_dir, 'repo'), branch='master')
# cloned_repo = repo.clone(os.path.join(rw_dir, 'to/this/path'))
# origin = bare_repo.create_remote('origin', url=cloned_repo.working_tree_dir)
# assert origin.exists()
####################################################################################################################
# asinchron implementation !!!
# loop = asyncio.get_event_loop()
# loop.run_until_complete(asyncio.wait([load_repo(**{key: source[key] for key in source if not pd.isnull(source[key])}) for source in data.to_dict('records')]))
####################################################################################################################
else:
# this case for load function to another module, may include some initialisations and ect
pass
| [
"redhat@zuzex.lan"
] | redhat@zuzex.lan |
25fe704f5be77484a077c570572772385b9cdd39 | 27c27208a167f089bb8ce4027dedb3fcc72e8e8a | /ProjectEuler/Solutions/Problems 50-100/Q075.py | b1510ace6b6e74c60b66d6a3e138b7926017acc7 | [] | no_license | stankiewiczm/contests | fd4347e7b84c8c7ec41ba9746723036d86e2373c | 85ed40f91bd3eef16e02e8fd45fe1c9b2df2887e | refs/heads/master | 2021-05-10T16:46:41.993515 | 2018-02-16T09:04:15 | 2018-02-16T09:04:15 | 118,587,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | from numpy import *
# m^2+n^2, 2mn, m^2-n^2
# LIM is the perimeter limit (Project Euler 75); Count[p] tallies how
# many distinct integer right triangles have perimeter p.
LIM = 1500000; Count = zeros(LIM+1, int);
def GCD(a,b):
    """Return the greatest common divisor of a and b (Euclid's algorithm).

    Uses the modulo operator instead of the original ``a-(a/b)*b``,
    which silently depended on Python 2 integer division and breaks
    (floats, possible non-termination) under Python 3.
    """
    while (b > 0):
        a, b = b, a % b
    return a
# Enumerate primitive Pythagorean triples via Euclid's formula
# (m > n >= 1, m - n odd, gcd(m, n) = 1); the perimeter is
# p = (m^2 + n^2) + 2mn + (m^2 - n^2) = 2m^2 + 2mn, and every multiple
# k*p of a primitive perimeter contributes one triangle.
m = 1; M2 = 1;
while (2*M2 < LIM):
    n = m%2+1;  # start with opposite parity so m - n is odd
    while (2*M2+2*m*n < LIM) and (n < m):
        if GCD(m,n) == 1:
            p = 2*M2+2*m*n;
            for k in range(1, LIM/p+1):
                Count[p*k] += 1;
        n += 2;
    m += 1;
    M2 = m*m;
# Answer: perimeters achievable by exactly one right triangle.
# NOTE(review): Python 2 syntax (print statement, integer '/').
print sum(Count==1)
| [
"mstankiewicz@gmail.com"
] | mstankiewicz@gmail.com |
a6095634bed30033519406856e61a0c38e3efe78 | be5d1ababc8dee59d3ea7687d5feee791ea78821 | /.vEnv/lib/python3.5/keyword.py | 8ff824fb913bb4a45d41291215d8dcb855a930e8 | [] | no_license | duggalr2/Personal-RSS | 980083337b52d2b8a74257766d3f1f1f546cac44 | d4cc0d04fdb19ce957f74cfbc583662f0dc3e727 | refs/heads/master | 2021-08-24T07:11:09.036689 | 2017-12-08T15:17:34 | 2017-12-08T15:17:34 | 100,282,827 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | /Users/Rahul/anaconda/lib/python3.5/keyword.py | [
"ibrahul24@gmail.com"
] | ibrahul24@gmail.com |
b55c0ef5ce10ba776e3dca028d482d7d8158c7e0 | 34bed0900b134ee0a09f495941fae4336e2f81b6 | /Project/urls.py | c4e45c225e49c3aaea8bd74ae5a5e2c579a99715 | [] | no_license | nehilgajare/clash-rc-backend-task | 4871c278b2ddd145511313399e716933083f0e13 | b7e6fe3badf8049b05c6e7b0989e21639db523ad | refs/heads/main | 2023-08-27T17:49:46.457341 | 2021-10-20T13:52:18 | 2021-10-20T13:52:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | """Project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from users import views as user_views
# Route table: Django admin plus everything else delegated to the
# ``users`` app's URLconf.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',include('users.urls'))
]
| [
"gajarenehil@gmail.com"
] | gajarenehil@gmail.com |
2db93df279e2e7651e7f462a9d558dc444be41b7 | b42b8f2bfadd25c51cbb12054bc6df42943b7536 | /venv/Scripts/easy_install-3.7-script.py | d0dad9ea927377d5b3c3ccd6ddf55aeec430b305 | [] | no_license | sharikgrg/week4.Gazorpazorp | 4b785f281334a6060d6edc8a195a58c072fb5a75 | 0f168a0df81703a8950e375081cafd2e766595fb | refs/heads/master | 2020-08-03T22:41:43.373137 | 2019-09-30T15:35:29 | 2019-09-30T15:35:29 | 211,907,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | #!"C:\Users\Sharik Gurung\OneDrive - Sparta Global Limited\PYTHON\gazorpazorp-space-station\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip any "-script.py(w)"/".exe" suffix so argv[0] matches the
    # console-script name setuptools expects, then hand off to the
    # easy_install entry point; its return value becomes the exit code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
    )
| [
"SGurung@spartaglobal.com"
] | SGurung@spartaglobal.com |
bdb35b007e3b1bdcd94d107d8aa8bdabb7c0a1d6 | 9db396a773d59f2c696c6427f1a1fbc29f97d768 | /app.py | a155aa55c9d6286b2a7e2d0b3523fe8fb27b2956 | [] | no_license | brianndungunjenga/courseFinder | 36dc00336942ba54f39c0b4045d35b5fcecd7053 | 035c6808a0c26f106470463e061092915c7feeb5 | refs/heads/master | 2023-05-11T07:47:55.603768 | 2019-11-03T21:41:02 | 2019-11-03T21:41:02 | 218,930,337 | 0 | 0 | null | 2023-05-01T20:36:05 | 2019-11-01T06:55:45 | CSS | UTF-8 | Python | false | false | 206 | py | from flask import Flask
from routes.search import search_blueprint
app = Flask(__name__)
app.register_blueprint(search_blueprint)
if __name__ == "__main__":
app.run("0.0.0.0", port=8005, threaded=True) | [
"ndungunjenga96@gmail.com"
] | ndungunjenga96@gmail.com |
bd2aabef58c79c7e5886177fcb5bde3623a59323 | 41220db74084e57899b00973871e2b5548214b2c | /user/migrations/0007_auto_20210824_1757.py | 869d1959534efd11e42a1a0762b5982634aa01c2 | [] | no_license | deepakdpyqaz/fortuneshelfServer | 343836ae207bdae2274c88ec0f438613d10521f3 | 3f817c69dd6bf12fb91fae789ee2ba45fc07558f | refs/heads/master | 2023-08-11T00:37:41.133152 | 2021-10-01T15:58:05 | 2021-10-01T15:58:05 | 396,842,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,727 | py | # Generated by Django 3.2.6 on 2021-08-24 12:27
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: adds address/location fields and an
    OTP-generation timestamp to the ``UserUnverified`` model."""
    dependencies = [
        ('user', '0006_alter_user_email'),
    ]
    operations = [
        migrations.AddField(
            model_name='userunverified',
            name='address',
            field=models.TextField(default=''),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='userunverified',
            name='city',
            field=models.CharField(default='', max_length=30),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='userunverified',
            name='country',
            field=models.CharField(default='', max_length=30),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='userunverified',
            name='district',
            field=models.CharField(default='', max_length=30),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='userunverified',
            name='otpGenTime',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='userunverified',
            name='pincode',
            field=models.CharField(default='', max_length=8),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='userunverified',
            name='state',
            field=models.CharField(default='', max_length=30),
            preserve_default=False,
        ),
    ]
| [
"deepakdpyqaz@gmail.com"
] | deepakdpyqaz@gmail.com |
b9bb247908627e956a13a1a60512620a650ad601 | a87292a555197a165e4518aad95c3d8999d734d4 | /2018/NeverLAN-CTF(2018)/Scripting/basic-math/sum.py | 02f7f1c015de2ee5cdac8873e55f14b5b37aee6a | [] | no_license | munsiwoo/ctf-write-ups | dc717b6e42f94e2568db0ffe606ade56df250ae0 | eea658aa64209e2a9b1d11ed54df792ed87d8088 | refs/heads/master | 2021-07-09T06:31:45.242620 | 2020-06-07T09:57:59 | 2020-06-07T09:57:59 | 100,913,158 | 17 | 2 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | num = '''6255385361218216
6157005081529331
8094787234940670
1979194212824551
3930726164428768
5191869878056791
7528262998799463
5345470866315424
1647835474241505
3432404873925893'''
# Sum the newline-separated integers directly.  The original built a
# "+"-joined expression string and ran it through eval(), which is both
# needless and unsafe should ``num`` ever come from untrusted input.
print(sum(int(line) for line in num.splitlines()))
"noreply@github.com"
] | noreply@github.com |
105b66682da75be919d969965dcd0c11bb4617ce | 80d50ea48e10674b1b7d3f583a1c4b7d0b01200f | /src/datadog_api_client/v1/model/monitor_search_response_counts.py | 94a81e62a22817294ef78c86f9fecc7290984a77 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] | permissive | DataDog/datadog-api-client-python | 3e01fa630278ad0b5c7005f08b7f61d07aa87345 | 392de360e7de659ee25e4a6753706820ca7c6a92 | refs/heads/master | 2023-09-01T20:32:37.718187 | 2023-09-01T14:42:04 | 2023-09-01T14:42:04 | 193,793,657 | 82 | 36 | Apache-2.0 | 2023-09-14T18:22:39 | 2019-06-25T22:52:04 | Python | UTF-8 | Python | false | false | 2,063 | py | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import annotations
from typing import Union, TYPE_CHECKING
from datadog_api_client.model_utils import (
ModelNormal,
cached_property,
unset,
UnsetType,
)
if TYPE_CHECKING:
from datadog_api_client.v1.model.monitor_search_count import MonitorSearchCount
class MonitorSearchResponseCounts(ModelNormal):
    """Generated model: facet counts (muted/status/tag/type) returned
    with a monitor search response."""
    @cached_property
    def openapi_types(_):
        # Imported lazily to avoid circular imports between generated
        # model modules; cached so the import runs at most once.
        from datadog_api_client.v1.model.monitor_search_count import MonitorSearchCount
        return {
            "muted": (MonitorSearchCount,),
            "status": (MonitorSearchCount,),
            "tag": (MonitorSearchCount,),
            "type": (MonitorSearchCount,),
        }
    attribute_map = {
        "muted": "muted",
        "status": "status",
        "tag": "tag",
        "type": "type",
    }
    def __init__(
        self_,
        muted: Union[MonitorSearchCount, UnsetType] = unset,
        status: Union[MonitorSearchCount, UnsetType] = unset,
        tag: Union[MonitorSearchCount, UnsetType] = unset,
        type: Union[MonitorSearchCount, UnsetType] = unset,
        **kwargs,
    ):
        """
        The counts of monitors per different criteria.

        :param muted: Search facets.
        :type muted: MonitorSearchCount, optional

        :param status: Search facets.
        :type status: MonitorSearchCount, optional

        :param tag: Search facets.
        :type tag: MonitorSearchCount, optional

        :param type: Search facets.
        :type type: MonitorSearchCount, optional
        """
        if muted is not unset:
            kwargs["muted"] = muted
        if status is not unset:
            kwargs["status"] = status
        if tag is not unset:
            kwargs["tag"] = tag
        if type is not unset:
            kwargs["type"] = type
        super().__init__(kwargs)
| [
"noreply@github.com"
] | noreply@github.com |
4a816d2e8c8cb66e983d9cadc56ca837a5f9609d | 890004f5df54ee3b7e04e7f5c66d4babd7cbf592 | /Vertice.py | 7cdd61bf6caf83452ed04b8612e6ffb579d6e717 | [] | no_license | Kyouma54/GRAPH-ALGORITHMS | d48b4bc23b0c2848d9153afbd3321667f7b09541 | d4be156167bb61b64c4169dcb158c71ec935dcae | refs/heads/master | 2023-03-07T21:30:06.060901 | 2023-03-02T15:52:54 | 2023-03-02T15:52:54 | 189,263,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | class Vertice:
    def __init__(self, nome):
        # ``nome``: identifying label of this graph vertex.
        self.nome = nome
| [
"ffdonkatsu@gmail.com"
] | ffdonkatsu@gmail.com |
fb57b4796a7193e566bc5cf6dc614ef2baca239e | 5e815b44e9dfe18664a4ad98971da946403bf733 | /script_vnd.py | 9521c205164e58ec7c5f21683ca223085ba7d59b | [] | no_license | RafaelaMartins/Heuristic-Abduction-vns-vnd | 6e7c5c9b7ff331ba1dd11e7dbf9b73bd87d60075 | 9d2355d4b4523f0a5ae60a625f9950f34aeb3c6a | refs/heads/main | 2023-05-14T04:59:00.572105 | 2021-06-04T00:18:26 | 2021-06-04T00:18:26 | 373,672,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,817 | py | from vnd import VND
from graph import Graph
import time
import numpy
ITER = 101
#file_name = 'myciel3.col'
#file_name = 'fpsol2.i.1.col'
#file_name = 'fpsol2.i.2.col'
#file_name = 'fpsol2.i.3.col'
#file_name = 'inithx.i.1.col'
#file_name = 'inithx.i.2.col'
#file_name = 'inithx.i.3.col'
#file_name = 'le450_5a.col'
#file_name = 'le450_15b.col'
#file_name = 'le450_25d.col'
#file_name = 'miles250.col'
#file_name = 'miles1500.col'
#file_name = 'mulsol.i.1.col'
#file_name = 'myciel3.col'
#file_name = 'queen5_5.col'
file_name = 'qg.order60.col'
result = []
# Start timing
start = time.time()
# Run VND ITER times and collect the results
path = 'data/'+file_name
graph = Graph(path)
optimal_color = graph.optimal_color
density = (graph.num_edges/(graph.num_vertex*(graph.num_vertex-1)))*100
for i in range(ITER):
    print(i)
    result.append(VND(path))
# Build the report contents
content = 'file = {}\nOptimal Color = {}\nDensity = '.format(file_name, optimal_color)+\
    '{0:.2f} %'.format(density)+'\n\n'
content += 'ITER,FIND_COLOR,TIME\n\n' # MIN\tMAX\tMÉDIA\tMEDIANA\tVARIÂNCIA\tDESVIO\tQ1\tQ3\n\n
# NOTE(review): this loop starts at 1 and so never reports result[0],
# although the statistics below are computed over all ITER runs.
for i in range(1,ITER):
    content += '{},{},'.format(i, result[i].color) + \
        '{0:.4f}\n'.format(result[i].total_time)
colors = []
for i in range(ITER):
    colors.append(result[i].color)
content += '\nINST: {}'.format(file_name)
content += '\nMIN COLOR: {}'.format(min(colors))
content += '\nMEAN: {}'.format(numpy.mean(colors))
content += '\nMAX COLOR: {}'.format(max(colors))
content += '\nSD: {}'.format(numpy.std(colors))
content += '\nVAR: {}'.format(numpy.var(colors))
with open('data/vnd/'+file_name+"_result", "w") as file:
    file.write(content)
# Stop timing
end = time.time()
# Compute the elapsed time
t = end - start
print("Total time execution:",t)
| [
"rafaelamartinsv@gmail.com"
] | rafaelamartinsv@gmail.com |
347d0ea9561448fc30d4a289a796fa6453ad8a76 | 08120ee05b086d11ac46a21473f3b9f573ae169f | /gcloud/google-cloud-sdk/.install/.backup/lib/surface/projects/add_iam_policy_binding.py | c25bf659de28f724ec44d284cf9b7e902abe6009 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | harrystaley/TAMUSA_CSCI4349_Week9_Honeypot | 52f7d5b38af8612b7b0c02b48d0a41d707e0b623 | bd3eb7dfdcddfb267976e3abe4c6c8fe71e1772c | refs/heads/master | 2022-11-25T09:27:23.079258 | 2018-11-19T06:04:07 | 2018-11-19T06:04:07 | 157,814,799 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,025 | py | # -*- coding: utf-8 -*- #
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to add IAM policy binding for a resource."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.cloudresourcemanager import projects_api
from googlecloudsdk.api_lib.util import http_retry
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.iam import iam_util
from googlecloudsdk.command_lib.projects import flags
from googlecloudsdk.command_lib.projects import util as command_lib_util
from googlecloudsdk.command_lib.resource_manager import completers
import six.moves.http_client
@base.ReleaseTracks(base.ReleaseTrack.GA)
class AddIamPolicyBinding(base.Command):
"""Add IAM policy binding for a project.
Adds a policy binding to the IAM policy of a project,
given a project ID and the binding.
"""
detailed_help = iam_util.GetDetailedHelpForAddIamPolicyBinding(
'project', 'example-project-id-1')
@staticmethod
def Args(parser):
flags.GetProjectFlag('add IAM policy binding to').AddToParser(parser)
iam_util.AddArgsForAddIamPolicyBinding(
parser,
role_completer=completers.ProjectsIamRolesCompleter)
@http_retry.RetryOnHttpStatus(six.moves.http_client.CONFLICT)
def Run(self, args):
project_ref = command_lib_util.ParseProject(args.id)
return projects_api.AddIamPolicyBinding(project_ref, args.member, args.role)
| [
"staleyh@gmail.com"
] | staleyh@gmail.com |
b7343dbf8c01a59994b844eb9eae7bef044c513f | edf4ef62ce66df6850b66d6733669406dffd907f | /manager/comp_wifi/__init__.py | 02eb26f31e77f7e365fdb24dfa30225744f532cb | [] | no_license | erizhang/fqrouter | 786ae3ac481797f2b598860e535cf6f1f4ab728d | 8fda07262f29f00afe8800ce4aabab2133ce6d7a | refs/heads/master | 2020-12-25T05:07:52.284882 | 2013-07-14T15:32:54 | 2013-07-14T15:32:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,512 | py | import httplib
from wifi import WIFI_INTERFACE
from wifi import get_working_hotspot_iface
from wifi import setup_lo_alias
from wifi import start_hotspot
from wifi import stop_hotspot
from wifi import setup_networking
from wifi import enable_wifi_p2p_service
from wifi import get_ip_and_mac
from utils import config
def start():
    """Initialise the wifi-repeater component and return its routes.

    Each entry is a (method, path, handler) tuple consumed by the
    surrounding manager's dispatcher.
    """
    setup_lo_alias()
    return [
        ('POST', 'wifi-repeater/start', handle_start),
        ('POST', 'wifi-repeater/stop', handle_stop),
        ('POST', 'wifi-repeater/reset', handle_reset),
        ('GET', 'wifi-repeater/is-started', handle_is_started),
    ]
def stop():
    """No teardown is required for this component."""
    pass
def is_alive():
    """Health check: the working hotspot interface (truthy) or falsy."""
    return get_working_hotspot_iface()
def handle_start(environ, start_response):
    """WSGI handler: start the hotspot with the configured SSID/password.

    Responds OK with the status message on success, BAD_GATEWAY on
    failure.
    """
    cfg = config.read()
    ssid = str(cfg['wifi_hotspot_ssid'])
    password = str(cfg['wifi_hotspot_password'])
    success, message = start_hotspot(ssid, password)
    # NOTE(review): the numeric httplib constant is passed as the
    # status; presumably the dispatcher formats the status line.
    status = httplib.OK if success else httplib.BAD_GATEWAY
    start_response(status, [('Content-Type', 'text/plain')])
    yield message
def handle_stop(environ, start_response):
    """WSGI handler: stop the hotspot and return its status message."""
    start_response(httplib.OK, [('Content-Type', 'text/plain')])
    yield stop_hotspot()
def handle_reset(environ, start_response):
    """WSGI handler: re-enable wifi p2p and stop any running hotspot."""
    start_response(httplib.OK, [('Content-Type', 'text/plain')])
    enable_wifi_p2p_service()
    stop_hotspot()
    return []
def handle_is_started(environ, start_response):
    """WSGI handler: report 'TRUE' if a hotspot interface is active."""
    start_response(httplib.OK, [('Content-Type', 'text/plain')])
    yield 'TRUE' if get_working_hotspot_iface() else 'FALSE'
| [
"fqrouter@gmail.com"
] | fqrouter@gmail.com |
c850b7ff62e072b79250924149b4b8d33658b86a | 1276051db6315e12459bd96f1af76ca9f03cb2b4 | /pyslet/blockstore.py | b74c8f5b1edc2ec316f6b28ceb8996475d0c8d75 | [
"BSD-3-Clause"
] | permissive | rcplay/pyslet | 587a08a3225322e44e9cdea22a9f752008ca5eff | 152b2f2a3368ecd35ce985aef1f100f46dc4ae6d | refs/heads/master | 2021-01-18T15:56:32.635146 | 2016-02-25T08:32:43 | 2016-02-25T08:32:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,255 | py | #! /usr/bin/env python
import hashlib
import os
import threading
import time
import random
import logging
import string
import io
from pyslet.vfs import OSFilePath as FilePath
from pyslet.iso8601 import TimePoint
import pyslet.http.params as params
import pyslet.odata2.csdl as edm
import pyslet.odata2.core as core
MAX_BLOCK_SIZE = 65536
"""The default maximum block size for block stores: 64K"""
def _magic():
"""Calculate a magic string used to identify an object."""
try:
magic = os.urandom(4)
except NotImplementedError:
logging.warn("weak magic: urandom not available, "
"falling back to random.randint")
magic = []
for i in xrange(4):
magic.append(unichr(random.randint(0, 255)))
magic = string.join(magic, '')
return magic.encode('hex')
class BlockSize(Exception):
    """Raised when an attempt is made to store a block exceeding the
    maximum block size for the block store (see
    :py:attr:`BlockStore.max_block_size`)."""
    pass
class BlockMissing(Exception):
    """Raised when an attempt is made to retrieve (or read) a block
    using a key that is not present in the store."""
    pass
class LockError(Exception):
    """Raised when a timeout occurs while acquiring a lock (see
    :py:meth:`LockingBlockStore.lock`)."""
    pass
class BlockStore(object):
    """Abstract base class for block storage back-ends.

    max_block_size
        Largest block (in bytes) the store will accept.  Defaults to
        :py:attr:`MAX_BLOCK_SIZE`.

    hash_class
        Hash constructor used to derive block keys.  Defaults to
        hashlib.sha256."""
    def __init__(
            self,
            max_block_size=MAX_BLOCK_SIZE,
            hash_class=hashlib.sha256):
        self.max_block_size = max_block_size
        self.hash_class = hash_class
    def key(self, data):
        """Return the lowercase hex digest used as *data*'s key."""
        # normalise bytearray input exactly as before
        if isinstance(data, bytearray):
            data = str(data)
        digest = self.hash_class(data).hexdigest()
        return digest.lower()
    def store(self, data):
        """Store a block of data, returning its hash key.

        Subclasses must override; the base class only enforces the
        maximum block size."""
        if len(data) > self.max_block_size:
            raise BlockSize
        raise NotImplementedError
    def retrieve(self, key):
        """Return the block referenced by *key*.

        key
            A hex string previously returned by :py:meth:`store`.
        The base implementation always raises
        :py:class:`BlockMissing`."""
        raise BlockMissing(key)
    def delete(self, key):
        """Delete the block referenced by *key*.

        key
            A hex string previously returned by :py:meth:`store`."""
        raise NotImplementedError
class FileBlockStore(BlockStore):
    """Class for storing blocks of data in the file system.
    Additional keyword arguments:
    dpath
        A :py:class:`FilePath` instance pointing to a directory in which
        to store the data blocks. If this argument is omitted then a
        temporary directory is created using the builtin mkdtemp.
    Each block is saved as a single file but the hash key is decomposed
    into 3 components to reduce the number of files in a single
    directory. For example, if the hash key is 'ABCDEF123' then the
    file would be stored at the path: 'AB/CD/EF123'"""
    def __init__(self, dpath=None, **kwargs):
        super(FileBlockStore, self).__init__(**kwargs)
        if dpath is None:
            # create a temporary directory
            self.dpath = FilePath.mkdtemp('.d', 'pyslet_blockstore-')
        else:
            self.dpath = dpath
        # tmp/ holds in-progress writes which are then moved into place
        self.tmpdir = self.dpath.join('tmp')
        if not self.tmpdir.exists():
            try:
                self.tmpdir.mkdir()
            except OSError:
                # catch race condition where someone already created it
                pass
        # per-instance random tag keeping temp file names unique across
        # processes sharing the same store directory
        self.magic = _magic()
    def store(self, data):
        # calculate the key
        key = self.key(data)
        parent = self.dpath.join(key[0:2], key[2:4])
        path = parent.join(key[4:])
        if path.exists():
            return key
        elif len(data) > self.max_block_size:
            raise BlockSize
        else:
            # write to a uniquely named temp file, then move it into
            # place so readers never see a partial block
            tmp_path = self.tmpdir.join(
                "%s_%i_%s" %
                (self.magic, threading.current_thread().ident, key[
                    0:32]))
            with tmp_path.open(mode="wb") as f:
                f.write(data)
            if not parent.exists():
                try:
                    parent.makedirs()
                except OSError:
                    # possible race condition, ignore for now
                    pass
            tmp_path.move(path)
            return key
    def retrieve(self, key):
        path = self.dpath.join(key[0:2], key[2:4], key[4:])
        if path.exists():
            with path.open('rb') as f:
                data = f.read()
            return data
        else:
            # NOTE(review): raised without the key argument, unlike the
            # base class
            raise BlockMissing
    def delete(self, key):
        path = self.dpath.join(key[0:2], key[2:4], key[4:])
        if path.exists():
            try:
                path.remove()
            except OSError:
                # catch race condition where path is gone already
                pass
class EDMBlockStore(BlockStore):
    """Class for storing blocks of data in an EDM-backed data service.
    Additional keyword arguments:
    entity_set
        A :py:class:`pyslet.odata2.csdl.EntitySet` instance
    Each block is saved as a single entity using the hash as the key.
    The entity must have a string key property named *hash* large enough
    to hold the hex strings generated by the selected hashing module.
    It must also have a Binary *data* property capable of holding
    max_block_size bytes."""
    def __init__(self, entity_set, **kwargs):
        super(EDMBlockStore, self).__init__(**kwargs)
        self.entity_set = entity_set
    def store(self, data):
        key = self.key(data)
        with self.entity_set.OpenCollection() as blocks:
            if key in blocks:
                return key
            elif len(data) > self.max_block_size:
                raise BlockSize
            try:
                block = blocks.new_entity()
                block['hash'].set_from_value(key)
                block['data'].set_from_value(data)
                blocks.insert_entity(block)
            except edm.ConstraintError:
                # race condition, duplicate key
                pass
            return key
    def retrieve(self, key):
        with self.entity_set.OpenCollection() as blocks:
            try:
                block = blocks[key]
                return block['data'].value
            except KeyError:
                # NOTE(review): raised without the key argument, unlike
                # the base class
                raise BlockMissing
    def delete(self, key):
        with self.entity_set.OpenCollection() as blocks:
            try:
                del blocks[key]
            except KeyError:
                # already gone: deletion is idempotent
                pass
class LockStoreContext(object):

    """Context manager returned by :py:meth:`LockStore.lock`.

    Releases the lock on *hash_key* when the with-block exits,
    whether or not an exception was raised."""

    def __init__(self, ls, hash_key):
        self.ls = ls
        self.hash_key = hash_key

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # release unconditionally; the LockStore handles stale locks
        self.ls.unlock(self.hash_key)
class LockStore(object):

    """Class for storing simple locks

    entity_set
        A :py:class:`pyslet.odata2.csdl.EntitySet` instance for the
        locks.

    lock_timeout
        The maximum number of seconds that a lock is considered valid
        for.  If a lock is older than this time it will be reused
        automatically.  This value is a long-stop cut off which allows a
        system to recover automatically from bugs causing stale locks.
        Defaults to 180s (3 minutes)

    This object is designed for use in conjunction with the basic block
    store to provide locking.  The locks are managed using an EDM entity
    set.

    The entity must have a string key property named *hash* large enough
    to hold the hex strings generated by the block store - the hash
    values are not checked and can be any ASCII string so the LockStore
    class could be reused for other purposes if required.

    The entity must also have a string field named *owner* capable of
    holding an ASCII string up to 32 characters in length and a datetime
    field named *created* for storing the UTC timestamp when each lock
    is created.  The created property is used for optimistic concurrency
    control during updates and must be identified as having fixed
    concurrency mode in the entity type's definition."""

    def __init__(self, entity_set, lock_timeout=180):
        self.entity_set = entity_set
        self.lock_timeout = lock_timeout
        # per-process token combined with thread id to form the owner string
        self.magic = _magic()

    def lock(self, hash_key, timeout=60):
        """Acquires the lock on hash_key or raises LockError

        The return value is a context manager object that will
        automatically release the lock on hash_key when it exits.

        locks are not nestable, they can only be acquired once.  If the
        lock cannot be acquired a back-off strategy is implemented using
        random waits up to a total maximum of *timeout* seconds.  If the
        lock still cannot be obtained :py:class:`LockError` is raised."""
        owner = "%s_%i" % (self.magic, threading.current_thread().ident)
        with self.entity_set.OpenCollection() as locks:
            tnow = time.time()
            tstop = tnow + timeout
            twait = 0
            while tnow < tstop:
                time.sleep(twait)
                # attempt to insert the lock; a duplicate key means
                # someone else holds it
                lock = locks.new_entity()
                lock['hash'].set_from_value(hash_key)
                lock['owner'].set_from_value(owner)
                lock['created'].set_from_value(TimePoint.from_now_utc())
                try:
                    locks.insert_entity(lock)
                    return LockStoreContext(self, hash_key)
                except edm.ConstraintError:
                    pass
                try:
                    lock = locks[hash_key]
                except KeyError:
                    # someone deleted the lock, go straight round again
                    twait = 0
                    tnow = time.time()
                    continue
                # has this lock expired?
                locktime = lock['created'].value.with_zone(zdirection=0)
                if locktime.get_unixtime() + self.lock_timeout < tnow:
                    # use optimistic locking
                    lock['owner'].set_from_value(owner)
                    try:
                        locks.update_entity(lock)
                        logging.warn("LockingBlockStore removed stale lock "
                                     "on %s", hash_key)
                        return LockStoreContext(self, hash_key)
                    except KeyError:
                        # lock vanished during the update, retry immediately
                        twait = 0
                        tnow = time.time()
                        continue
                    except edm.ConstraintError:
                        # someone else updated it first; fall through to wait
                        pass
                # random back-off before retrying
                twait = random.randint(0, timeout // 5)
                tnow = time.time()
            logging.warn("LockingBlockStore: timeout locking %s", hash_key)
            raise LockError

    def unlock(self, hash_key):
        """Releases the lock on *hash_key*

        Typically called by the context manager object returned by
        :py:meth:`lock` rather than called directly.

        Stale locks are handled automatically but three possible warning
        conditions may be logged.  All stale locks indicate that the
        process holding the lock was unexpectedly slow (or clients with
        poorly synchronised clocks) so these warnings suggest the need
        for increasing the lock_timeout.

        stale lock reused
            The lock was not released as it has been acquired by another
            owner.  Could indicate significant contention on this
            hash_key.

        stale lock detected
            The lock was no longer present and has since been acquired
            and released by another owner.  Indicates a slow process
            holding locks.

        stale lock race
            The lock timed out and was reused while we were removing it.
            Unlikely but indicates both significant contention and a
            slow process holding the lock."""
        owner = "%s_%i" % (self.magic, threading.current_thread().ident)
        with self.entity_set.OpenCollection() as locks:
            try:
                lock = locks[hash_key]
                if lock['owner'].value == owner:
                    # this is our lock - delete it
                    # potential race condition here if we timeout between
                    # loading and deleting the entity so we check how
                    # close it is and buy more time if necessary
                    locktime = lock['created'].value.with_zone(zdirection=0)
                    if (locktime.get_unixtime() + self.lock_timeout <
                            time.time() + 1):
                        # less than 1 second left, buy more time
                        # triggers update of 'created' property using
                        # optimistic locking ensuring we still own
                        locks.update_entity(lock)
                    del locks[hash_key]
                else:
                    # we're not the owner
                    logging.warn("LockingBlockStore: stale lock reused "
                                 "on busy hash %s", hash_key)
            except KeyError:
                # someone deleted the lock already - timeout?
                logging.warn("LockingBlockStore: stale lock detected "
                             "on hash %s", hash_key)
                pass
            except edm.ConstraintError:
                logging.warn("LockingBlockStore: stale lock race "
                             "on busy hash %s", hash_key)
class StreamStore(object):

    """Class for storing stream objects

    Streams are split in to blocks that are stored in the associated
    BlockStore.  Timed locks are used to minimise the risk of conflicts
    during store and delete operations on each block but all other
    operations are done without locks.  As a result, it is possible to
    delete or modify a stream while another client is using it.

    The intended use case for this store is to read and write entire
    streams - not for editing.  The stream identifiers are simply
    numbers so if you want to modify the stream associated with a
    resource in your application upload a new stream, switch the
    references in your application and then delete the old one.

    bs
        A :py:class:`BlockStore`: used to store the actual data.  The
        use of a block store to persist the data in the stream ensures
        that duplicate streams have only a small impact on storage
        requirements as the block references are all that is duplicated.
        Larger block sizes reduce this overhead and speed up access at
        the expense of keeping a larger portion of the stream in memory
        during streaming operations.  The block size is set when the
        block store is created.

    ls
        A :py:class:`LockStore`: used to lock blocks during write and
        delete operations.

    entity_set
        An :py:class:`~pyslet.odata2.csdl.EntitySet` to hold the Stream
        entities.

    The entity set must have the following properties:

    streamID
        An automatically generated integer stream identifier that is
        also the key

    mimetype
        An ASCII string to hold the stream's mime type (at least 64
        characters).

    created
        An Edm.DateTime property to hold the creation date.

    modified
        An Edm.DateTime property to hold the last modified date.

    size
        An Edm.Int64 to hold the stream's size

    md5
        An Edm.Binary field of fixed length 16 bytes to hold the
        MD5 checksum of the stream.

    Blocks
        A 1..Many navigation property to a related entity set with the
        following properties...

    blockID
        An automatically generated integer block identifier that is
        also the key

    num
        A block sequence integer

    hash
        The hash key of the block in the block store"""

    def __init__(self, bs, ls, entity_set):
        self.bs = bs
        self.ls = ls
        self.stream_set = entity_set
        # entity set holding the per-stream block references
        self.block_set = entity_set.NavigationTarget('Blocks')

    def new_stream(self,
                   mimetype=params.MediaType('application', 'octet-stream'),
                   created=None):
        """Creates a new stream in the store.

        mimetype
            A :py:class:`~pyslet.http.params.MediaType` object

        Returns a stream entity which is an
        :py:class:`~pyslet.odata2.csdl.Entity` instance.

        The stream is identified by the stream entity's key which you
        can store elsewhere as a reference and pass to
        :py:meth:`get_stream` to retrieve the stream again later."""
        # NOTE(review): the default argument is a shared MediaType
        # instance - presumably immutable, verify before changing it
        with self.stream_set.OpenCollection() as streams:
            stream = streams.new_entity()
            if not isinstance(mimetype, params.MediaType):
                mimetype = params.MediaType.from_str(mimetype)
            stream['mimetype'].set_from_value(str(mimetype))
            now = TimePoint.from_now_utc()
            stream['size'].set_from_value(0)
            if created is None:
                stream['created'].set_from_value(now)
                stream['modified'].set_from_value(now)
            else:
                # normalise the supplied timestamp to UTC
                created = created.shift_zone(0)
                stream['created'].set_from_value(created)
                stream['modified'].set_from_value(created)
            # md5 of the empty stream
            stream['md5'].set_from_value(hashlib.md5().digest())
            streams.insert_entity(stream)
            return stream

    def get_stream(self, stream_id):
        """Returns the stream with identifier *stream_id*.

        Returns the stream entity as an
        :py:class:`~pyslet.odata2.csdl.Entity` instance."""
        with self.stream_set.OpenCollection() as streams:
            stream = streams[stream_id]
        return stream

    def open_stream(self, stream, mode="r"):
        """Returns a file-like object for a stream.

        Returns an object derived from io.RawIOBase.

        stream
            A stream entity

        mode
            Files are always opened in binary mode.  The characters "r",
            "w" and "+" and "a" are honoured.

        Warning: read and write methods of the resulting objects do not
        always return all requested bytes.  In particular, read or write
        operations never cross block boundaries in a single call."""
        if stream is None:
            raise ValueError
        return BlockStream(self, stream, mode)

    def delete_stream(self, stream):
        """Deletes a stream from the store.

        Any data blocks that are orphaned by this deletion are
        removed."""
        with self.stream_set.OpenCollection() as streams:
            self.delete_blocks(stream)
            del streams[stream.key()]
            stream.exists = False

    def store_block(self, stream, block_num, data):
        """Store *data* as block number *block_num* of *stream*.

        Returns the new block entity."""
        hash_key = self.bs.key(data)
        # record the block reference first
        with stream['Blocks'].OpenCollection() as blocks:
            block = blocks.new_entity()
            block['num'].set_from_value(block_num)
            block['hash'].set_from_value(hash_key)
            blocks.insert_entity(block)
        # now ensure that the data is stored
        with self.ls.lock(hash_key):
            self.bs.store(data)
        return block

    def update_block(self, block, data):
        """Replace the data of *block* with *data*.

        If the old block data is no longer referenced by any stream it
        is removed from the block store."""
        hash_key = block['hash'].value
        new_hash = self.bs.key(data)
        if new_hash == hash_key:
            # content unchanged, nothing to do
            return
        filter = core.BinaryExpression(core.Operator.eq)
        filter.AddOperand(core.PropertyExpression('hash'))
        hash_value = edm.EDMValue.NewSimpleValue(edm.SimpleType.String)
        filter.AddOperand(core.LiteralExpression(hash_value))
        # filter is: hash eq <hash_value>
        with self.block_set.OpenCollection() as base_coll:
            # lock both the old and new hash while we swap the reference
            with self.ls.lock(hash_key):
                with self.ls.lock(new_hash):
                    self.bs.store(data)
                    block['hash'].set_from_value(new_hash)
                    base_coll.update_entity(block)
                    # is the old hash key used anywhere?
                    hash_value.set_from_value(hash_key)
                    base_coll.set_filter(filter)
                    if len(base_coll) == 0:
                        # remove orphan block from block store
                        self.bs.delete(hash_key)

    def retrieve_blocklist(self, stream):
        """Generator yielding the stream's block entities in order."""
        with stream['Blocks'].OpenCollection() as blocks:
            blocks.set_orderby(
                core.CommonExpression.OrderByFromString("num asc"))
            for block in blocks.itervalues():
                yield block

    def retrieve_block(self, block):
        """Return the data bytes for *block* from the block store."""
        return self.bs.retrieve(block['hash'].value)

    def delete_blocks(self, stream, from_num=0):
        """Delete *stream*'s blocks with number >= *from_num*.

        Block data orphaned by the deletion is removed from the
        underlying block store."""
        blocks = list(self.retrieve_blocklist(stream))
        filter = core.BinaryExpression(core.Operator.eq)
        filter.AddOperand(core.PropertyExpression('hash'))
        hash_value = edm.EDMValue.NewSimpleValue(edm.SimpleType.String)
        filter.AddOperand(core.LiteralExpression(hash_value))
        # filter is: hash eq <hash_value>
        with self.block_set.OpenCollection() as base_coll:
            for block in blocks:
                if from_num and block['num'].value < from_num:
                    continue
                hash_key = block['hash'].value
                with self.ls.lock(hash_key):
                    del base_coll[block.key()]
                    # is this hash key used anywhere?
                    hash_value.set_from_value(hash_key)
                    base_coll.set_filter(filter)
                    if len(base_coll) == 0:
                        # remove orphan block from block store
                        self.bs.delete(hash_key)
class BlockStream(io.RawIOBase):

    """Provides a file-like interface to stored streams

    Based on the new style io.RawIOBase these streams are always in
    binary mode.  They are seekable but lack efficiency if random access
    is used across block boundaries.  The main design criteria is to
    ensure that no more than one block is kept in memory at any one
    time."""

    def __init__(self, ss, stream, mode="r"):
        self.ss = ss
        self.stream = stream
        # readability/writability flags derived from the mode string
        self.r = "r" in mode or "+" in mode
        self.w = "w" in mode or "+" in mode
        self.size = stream['size'].value
        self.block_size = self.ss.bs.max_block_size
        # current block buffer and bookkeeping:
        self._bdata = None      # data of the current block (or None)
        self._bnum = 0          # current block number
        self._bpos = 0          # position within the current block
        self._btop = 0          # number of valid bytes in current block
        self._bdirty = False    # current block has unflushed changes
        self._md5 = None        # running md5, valid only for sequential writes
        if "a" in mode:
            # append: start at the end with the existing block list
            self.seek(self.size)
            self.blocks = list(self.ss.retrieve_blocklist(self.stream))
        else:
            self.seek(0)
            if "w" in mode:
                # truncate: discard existing blocks and restart the md5
                self.ss.delete_blocks(self.stream)
                self.blocks = []
                self._md5 = hashlib.md5()
                self._md5num = 0
            else:
                self.blocks = list(self.ss.retrieve_blocklist(self.stream))

    def close(self):
        super(BlockStream, self).close()
        self.blocks = None
        self.r = self.w = False

    def readable(self):
        return self.r

    def writable(self):
        return self.w

    def seekable(self):
        return True

    def seek(self, offset, whence=io.SEEK_SET):
        if whence == io.SEEK_SET:
            self.pos = offset
        elif whence == io.SEEK_CUR:
            self.pos += offset
        elif whence == io.SEEK_END:
            self.pos = self.size + offset
        else:
            raise IOError("bad value for whence in seek")
        new_bnum = self.pos // self.block_size
        if new_bnum != self._bnum:
            # crossing a block boundary: flush and drop the buffer
            self.flush()
            self._bdata = None
            self._bnum = new_bnum
        self._bpos = self.pos % self.block_size
        self._set_btop()

    def _set_btop(self):
        # recompute the number of valid bytes in the current block
        if self.size // self.block_size == self._bnum:
            # we're pointing to the last block
            self._btop = self.size % self.block_size
        else:
            self._btop = self.block_size

    def flush(self):
        if self._bdirty:
            # the current block is dirty, write it out
            data = self._bdata[:self._btop]
            if data:
                block = self.blocks[self._bnum]
                if block.exists:
                    self.ss.update_block(block, str(data))
                else:
                    self.blocks[self._bnum] = self.ss.store_block(
                        self.stream, self._bnum, data)
                # the md5 stays valid only while blocks are flushed in
                # strict sequence; otherwise discard it
                if self._md5 is not None and self._bnum == self._md5num:
                    self._md5.update(str(data))
                    self._md5num += 1
                else:
                    self._md5 = None
            if self.size != self.stream['size'].value:
                self.stream['size'].set_from_value(self.size)
            now = TimePoint.from_now_utc()
            self.stream['modified'].set_from_value(now)
            if self._md5 is not None:
                self.stream['md5'].set_from_value(self._md5.digest())
            else:
                self.stream['md5'].set_null()
            self.stream.commit()
            self._bdirty = False

    def tell(self):
        return self.pos

    def readinto(self, b):
        """Read up to len(b) bytes into *b*; never crosses a block
        boundary in a single call."""
        if not self.r:
            raise IOError("stream not open for reading")
        nbytes = self._btop - self._bpos
        if nbytes <= 0:
            # we must be at the file size limit
            return 0
        if self._bdata is None:
            # load the data
            if self.w:
                # create a full size block in case we also write
                self._bdata = bytearray(self.block_size)
                data = self.ss.retrieve_block(self.blocks[self._bnum])
                self._bdata[:len(data)] = data
            else:
                self._bdata = self.ss.retrieve_block(self.blocks[self._bnum])
        if nbytes > len(b):
            nbytes = len(b)
        b[:nbytes] = self._bdata[self._bpos:self._bpos + nbytes]
        self.seek(nbytes, io.SEEK_CUR)
        return nbytes

    def write(self, b):
        """Write up to len(b) bytes from *b*; never crosses a block
        boundary in a single call.  Returns the number written."""
        if not self.w:
            raise IOError("stream not open for writing")
        # we can always write something in the block, nbytes > 0
        nbytes = self.block_size - self._bpos
        if self._bdata is None:
            if self._btop <= 0:
                # writing past the current end of the stream:
                # add a new empty blocks first
                last_block = len(self.blocks)
                while last_block < self._bnum:
                    self.blocks.append(self.ss.store_block(
                        self.stream, last_block,
                        bytearray(self.block_size)))
                    last_block += 1
                self.size = last_block * self.block_size
                # force the new size to be written
                self._bdata = bytearray(self.block_size)
                self._bdirty = True
                self.flush()
                # finally add the last block, but don't store it yet
                with self.stream['Blocks'].OpenCollection() as blist:
                    new_block = blist.new_entity()
                    new_block['num'].set_from_value(self._bnum)
                self.blocks.append(new_block)
                self.size = self.pos
                self._set_btop()
                if self._bpos:
                    # implicit zero padding before pos must be flushed
                    self._bdirty = True
            else:
                # read-modify-write of an existing block
                self._bdata = bytearray(self.block_size)
                data = self.ss.retrieve_block(self.blocks[self._bnum])
                self._bdata[:len(data)] = data
        if nbytes > len(b):
            nbytes = len(b)
        self._bdata[self._bpos:self._bpos + nbytes] = b[:nbytes]
        self._bdirty = True
        if self.pos + nbytes > self.size:
            self.size = self.pos + nbytes
            self._set_btop()
        self.seek(nbytes, io.SEEK_CUR)
        return nbytes
| [
"steve.w.lay@gmail.com"
] | steve.w.lay@gmail.com |
59aeb4698e5be1a9660b979dcf41c2e3880deca6 | 14bb0b5d7478d3a8740cbc15cc7870fcd1fa8207 | /tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/common_v1.py | c8dcb7ba231cf3f57f8b8a5dd3782e2a124fbac7 | [
"Apache-2.0"
] | permissive | terigrossheim/tensorflow | 2be34891c99e0fcf88cf8418632f24676f1620a7 | ed9d45f096097c77664815c361c75e73af4f32d4 | refs/heads/master | 2022-11-06T12:08:10.099807 | 2020-06-29T12:10:56 | 2020-06-29T12:35:24 | 275,867,898 | 1 | 0 | Apache-2.0 | 2020-06-29T16:21:41 | 2020-06-29T16:21:39 | null | UTF-8 | Python | false | false | 4,320 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Serves as a common "main" function for all the SavedModel tests.
There is a fair amount of setup needed to initialize tensorflow and get it
into a proper TF2 execution mode. This hides that boilerplate.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
from tensorflow.python import pywrap_mlir # pylint: disable=g-direct-tensorflow-import
# Use /tmp to make debugging the tests easier (see README.md)
flags.DEFINE_string('save_model_path', '', 'Path to save the model to.')
FLAGS = flags.FLAGS
def set_tf_options():
    """Configure TF1.x global options needed by the SavedModel V1 importer."""
    # Default TF1.x uses reference variables that are not supported by SavedModel
    # v1 Importer. To use SavedModel V1 Importer, resource variables should be
    # enabled.
    tf.enable_resource_variables()
    tf.compat.v1.disable_eager_execution()
# This function needs to take a "create_module_fn", as opposed to just the
# module itself, because the creation of the module has to be delayed until
# after absl and tensorflow have run various initialization steps.
def do_test(signature_def_map,
            init_op=None,
            canonicalize=False,
            show_debug_info=False):
    """Runs test.

    1. Performs absl and tf "main"-like initialization that must run before almost
       anything else.
    2. Converts signature_def_map to SavedModel V1
    3. Converts SavedModel V1 to MLIR
    4. Prints the textual MLIR to stdout (it is expected that the caller will have
       FileCheck checks in its file to check this output).

    This is only for use by the MLIR SavedModel importer tests.

    Args:
      signature_def_map: A map from string key to signature_def. The key will be
        used as function name in the resulting MLIR.
      init_op: The initializer op for the saved model. If set, it will generate a
        initializer graph in the resulting MLIR.
      canonicalize: If true, canonicalizer will be run on the resulting MLIR.
      show_debug_info: If true, shows debug locations in the resulting MLIR.
    """
    # Make LOG(ERROR) in C++ code show up on the console.
    # All `Status` passed around in the C++ API seem to eventually go into
    # `LOG(ERROR)`, so this makes them print out by default.
    logging.set_stderrthreshold('error')

    def app_main(argv):
        """Function passed to absl.app.run."""
        if len(argv) > 1:
            raise app.UsageError('Too many command-line arguments.')
        if FLAGS.save_model_path:
            save_model_path = FLAGS.save_model_path
        else:
            save_model_path = tempfile.mktemp(suffix='.saved_model')
        # Build and save the SavedModel V1 from the supplied signatures.
        sess = tf.Session()
        sess.run(tf.initializers.global_variables())
        builder = tf.saved_model.builder.SavedModelBuilder(save_model_path)
        builder.add_meta_graph_and_variables(
            sess, [tf.saved_model.tag_constants.SERVING],
            signature_def_map,
            main_op=init_op,
            strip_default_attrs=True)
        builder.save()
        logging.info('Saved model to: %s', save_model_path)
        # TODO(b/153507667): Set the following boolean flag once the hoisting
        # variables logic from SavedModel importer is removed.
        lift_variables = False
        # Convert the saved model to MLIR and print it for FileCheck.
        mlir = pywrap_mlir.experimental_convert_saved_model_v1_to_mlir(
            save_model_path, ','.join([tf.saved_model.tag_constants.SERVING]),
            lift_variables, show_debug_info)
        if canonicalize:
            mlir = pywrap_mlir.experimental_run_pass_pipeline(
                mlir, 'canonicalize', show_debug_info)
        print(mlir)

    app.run(app_main)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
ee7611e405952a6d724354ab56524138152af431 | f8d3f814067415485bb439d7fe92dc2bbe22a048 | /solem/pcv_book/graphcut.py | 242ac3449953f8cca3ec94fabb66d20ceecfa821 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | gmonkman/python | 2f9ab8f159c01f6235c86cb0cd52062cd3fdedd3 | 9123aa6baf538b662143b9098d963d55165e8409 | refs/heads/master | 2023-04-09T15:53:29.746676 | 2022-11-26T20:35:21 | 2022-11-26T20:35:21 | 60,254,898 | 0 | 2 | null | 2023-03-24T22:58:39 | 2016-06-02T10:25:27 | Python | UTF-8 | Python | false | false | 3,413 | py | from pylab import *
from numpy import *
from pygraph.classes.digraph import digraph
from pygraph.algorithms.minmax import maximum_flow
import bayes
"""
Graph Cut image segmentation using max-flow/min-cut.
"""
def build_bayes_graph(im, labels, sigma=1e2, kappa=1):
    """ Build a graph from 4-neighborhood of pixels.
        Foreground and background is determined from
        labels (1 for foreground, -1 for background, 0 otherwise)
        and is modeled with naive Bayes classifiers.

        im: RGB image array of shape (m, n, 3).
        sigma: softness of the edge weights (larger = weaker contrast term).
        kappa: overall strength of the neighbor (smoothness) term."""

    m, n = im.shape[:2]

    # RGB vector version (one pixel per row)
    vim = im.reshape((-1, 3))

    # RGB for foreground and background
    foreground = im[labels == 1].reshape((-1, 3))
    background = im[labels == -1].reshape((-1, 3))
    train_data = [foreground, background]

    # train naive Bayes classifier
    bc = bayes.BayesClassifier()
    bc.train(train_data)

    # get probabilities for all pixels (the predicted labels are unused)
    bc_lables, prob = bc.classify(vim)
    prob_fg = prob[0]
    prob_bg = prob[1]

    # create graph with m*n+2 nodes
    gr = digraph()
    gr.add_nodes(range(m * n + 2))

    source = m * n  # second to last is source
    sink = m * n + 1  # last node is sink

    # normalize (mutates vim in place; 1e-9 guards against zero pixels)
    for i in range(vim.shape[0]):
        vim[i] = vim[i] / (linalg.norm(vim[i]) + 1e-9)

    # go through all nodes and add edges
    for i in range(m * n):
        # add edge from source, weighted by foreground probability
        gr.add_edge((source, i), wt=(prob_fg[i] / (prob_fg[i] + prob_bg[i])))

        # add edge to sink, weighted by background probability
        gr.add_edge((i, sink), wt=(prob_bg[i] / (prob_fg[i] + prob_bg[i])))

        # add edges to neighbors; weight falls off with color difference
        if i % n != 0:  # left exists
            edge_wt = kappa * exp(-1.0 * sum((vim[i] - vim[i - 1])**2) / sigma)
            gr.add_edge((i, i - 1), wt=edge_wt)
        if (i + 1) % n != 0:  # right exists
            edge_wt = kappa * exp(-1.0 * sum((vim[i] - vim[i + 1])**2) / sigma)
            gr.add_edge((i, i + 1), wt=edge_wt)
        if i // n != 0:  # up exists
            edge_wt = kappa * exp(-1.0 * sum((vim[i] - vim[i - n])**2) / sigma)
            gr.add_edge((i, i - n), wt=edge_wt)
        if i // n != m - 1:  # down exists
            edge_wt = kappa * exp(-1.0 * sum((vim[i] - vim[i + n])**2) / sigma)
            gr.add_edge((i, i + n), wt=edge_wt)

    return gr
def cut_graph(gr, imsize):
    """ Solve max flow of graph gr and return binary
        labels of the resulting segmentation.

        gr: graph built by build_bayes_graph (pixel nodes 0..m*n-1
            plus the source and sink nodes).
        imsize: (m, n) image dimensions."""

    m, n = imsize
    source = m * n  # second to last is source
    sink = m * n + 1  # last is sink

    # cut the graph
    flows, cuts = maximum_flow(gr, source, sink)

    # convert graph to image with labels, skipping the source and sink
    # nodes explicitly.  (The old code sliced cuts.items()[:-2], but dict
    # ordering is arbitrary so that was not guaranteed to drop the
    # source/sink entries - and it fails outright on Python 3.)
    res = zeros(m * n)
    for pos, label in cuts.items():
        if pos == source or pos == sink:
            continue
        res[pos] = label

    return res.reshape((m, n))
def save_as_pdf(gr, filename, show_weights=False):
    """ Render graph *gr* to a PDF file via graphviz (fdp layout).

        show_weights: include edge weights in the dot output."""
    # imported lazily so the module works without graphviz installed
    from pygraph.readwrite.dot import write
    import gv
    dot = write(gr, weighted=show_weights)
    gvv = gv.readstring(dot)
    gv.layout(gvv, 'fdp')
    gv.render(gvv, 'pdf', filename)
def show_labeling(im, labels):
    """ Show image with foreground and background areas.
        labels = 1 for foreground, -1 for background, 0 otherwise."""
    imshow(im)
    # outline the boundaries between labeled regions
    contour(labels, [-0.5, 0.5])
    # translucent overlays: blue = background, red = foreground
    contourf(labels, [-1, -0.5], colors='b', alpha=0.25)
    contourf(labels, [0.5, 1], colors='r', alpha=0.25)
    # axis('off')
    xticks([])
    yticks([])
| [
"gmonkman@mistymountains.biz"
] | gmonkman@mistymountains.biz |
4c1f1f9e5f52b7d2f34a4bae7e5d0cc6a5be067b | 621cac8557fd74fe05483d680118a07b36982a8d | /churnchall/ensemble.py | 93ca45af23e136a076895135272fde9cf0b7a8da | [
"MIT"
] | permissive | hulkis/churnchall | f99733bfc1af9ecde3c3349e87ec9170dcb356d8 | 93d0e819248e734d8f8b49f6c51b950fd8e0a748 | refs/heads/master | 2020-03-28T23:42:35.168482 | 2018-10-28T16:54:42 | 2018-10-28T16:54:42 | 149,304,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,813 | py | import catboost as cgb
import lightgbm as lgb
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.linear_model import Ridge
from sklearn.metrics import make_scorer
from sklearn.model_selection import KFold, cross_val_score
from wax_toolbox import Timer
from churnchall.boosters import CatBoostCookie, LgbCookie, XgbCookie, compute_auc_lift
from churnchall.datahandler import DataHandleCookie
from churnchall.constants import RESULT_DIR
"""
https://github.com/vecxoz/vecstack
--> What is blending? How is it related to stacking?
Basically it is the same thing. Both approaches use predictions as features.
Often this terms are used interchangeably.
The difference is how we generate features (predictions) for the next level:
stacking: perform cross-validation procedure and predict each part of train set (OOF)
blending: predict fixed holdout set
"""
def auc_lift_score_func(y, y_pred):
    """Score function: AUC lift of *y_pred* against *y* for the
    negative class (target=0)."""
    return compute_auc_lift(y_true=y, y_pred=y_pred, target=0)
# sklearn-compatible scorer so the lift metric works with cross_val_score
auc_lift_scorer = make_scorer(auc_lift_score_func, greater_is_better=True)

# Default level-2 (stacker) model combining the out-of-fold predictions.
DEFAULT_STACKER = Ridge()

# Default level-1 models: three LightGBM boosters with different seeds
# (the third uses DART boosting) to add diversity to the ensemble.
DEFAULT_BASE_MODELS = (
    {
        'model': LgbCookie(random_state=1),
        'exec_params': {
            'num_boost_round': 10000,
            'early_stopping_rounds': 200,
        }
    },
    {
        'model': LgbCookie(random_state=2),
        'exec_params': {
            'num_boost_round': 10000,
            'early_stopping_rounds': 200,
        }
    },
    {
        'model': LgbCookie(random_state=3),
        'params_override': {
            'boosting_type': 'dart'
        },
        'exec_params': {
            'num_boost_round': 10000,
            'early_stopping_rounds': 200,
        }
    },
)
class Ensemble():

    """Two-level stacking ensemble.

    Out-of-fold predictions from each base model become the features
    of a second-level ``stacker`` model.

    n_splits
        number of cross-validation folds used to produce the
        out-of-fold predictions.
    stacker
        the level-2 estimator (scikit-learn compatible).
    base_models
        iterable of dicts with keys ``model`` (required) and,
        optionally, ``params_override`` and ``exec_params``.
    """

    def __init__(self,
                 n_splits=5,
                 stacker=DEFAULT_STACKER,
                 base_models=DEFAULT_BASE_MODELS):
        self.n_splits = n_splits
        self.stacker = stacker
        self.base_models = base_models

    def fit_predict(self, X, y, T):
        """Fit base models out-of-fold, fit the stacker on their
        predictions and return stacked predictions for ``T``.

        X, y: training features/target (pandas objects).
        T: test features to predict.
        """
        folds = list(
            KFold(n_splits=self.n_splits, shuffle=True,
                  random_state=2016).split(X, y))

        S_train = np.zeros((X.shape[0], len(self.base_models)))
        S_test = np.zeros((T.shape[0], len(self.base_models)))
        for i, clf in enumerate(self.base_models):
            S_test_i = np.zeros((T.shape[0], self.n_splits))

            # Read the configuration WITHOUT mutating it.  The previous
            # implementation used dict.pop(), which destructively emptied
            # the shared DEFAULT_BASE_MODELS dicts and broke any second
            # call to fit_predict (e.g. validate() then generate_submit()).
            model = clf['model']
            params_override = clf.get('params_override', {})
            exec_params = clf.get('exec_params', {})
            model.params_best_fit = {**model.params_best_fit, **params_override}

            for j, (train_idx, test_idx) in enumerate(folds):
                X_train = X.iloc[train_idx]
                y_train = y.iloc[train_idx]
                X_holdout = X.iloc[test_idx]
                y_holdout = y.iloc[test_idx]

                with Timer("Fit_Predict Model {} fold {}".format(clf, j)):
                    y_pred = model.fit_predict(X_train, y_train, X_holdout,
                                               y_holdout, **exec_params)

                # out-of-fold predictions feed the level-2 training matrix
                S_train[test_idx, i] = y_pred.values.ravel()
                S_test_i[:, j] = model.booster.predict(T)[:]
            # average per-fold test predictions for this base model
            S_test[:, i] = S_test_i.mean(axis=1)

        results = cross_val_score(
            self.stacker,
            S_train,
            y.values.ravel(),
            cv=5,
            scoring=auc_lift_scorer)
        print("Stacker score: %.5f (%.5f)" % (results.mean(), results.std()))

        self.stacker.fit(S_train, y)
        res = self.stacker.predict(S_test)[:]
        return res

    def validate(self, drop_lowimp_features=False):
        """Train on the training split, score AUC lift on the
        validation split and print the result."""
        datahandle = DataHandleCookie(
            debug=False, drop_lowimp_features=drop_lowimp_features)
        dtrain, dtest = datahandle.get_train_valid_set()
        X_train, y_train = dtrain
        X_test, y_test = dtest
        y_pred = self.fit_predict(X_train, y_train, X_test)
        score = auc_lift_score_func(y_test, y_pred)
        print('Obtained AUC Lift of {}'.format(score))

    def generate_submit(self, drop_lowimp_features=False):
        """Train on the full training set, predict the test set and
        write a timestamped submission CSV (values clipped to [0, 1])."""
        datahandle = DataHandleCookie(
            debug=False, drop_lowimp_features=drop_lowimp_features)
        dtrain = datahandle.get_train_set()
        dtest = datahandle.get_test_set()
        X_train, y_train = dtrain
        X_test = dtest

        with Timer('Fit & Predict for all stacks'):
            y_pred = self.fit_predict(X_train, y_train, X_test)

        df = pd.DataFrame(y_pred, columns=['value'])
        # clip predictions into the valid probability range
        df.loc[df['value'] > 1.] = 1.
        df.loc[df['value'] < 0.] = 0.
        now = pd.Timestamp.now(tz='CET').strftime("%d-%Hh-%Mm")
        df.to_csv(
            RESULT_DIR / "submit_{}.csv".format(now),
            index=False,
            header=False)
| [
"jd5584@engie.com"
] | jd5584@engie.com |
2752310a13c918b9241f5106394d9e7008825b06 | 83c7d4f246dea32f9c449ee3b498d476a22914fd | /Odoo/personnel_detail/hr_termination.py | b8c13d3575f85360c1173d0dab87138e750c9f34 | [] | no_license | linkcheng/windyblog | 726280775169aaed04f2763070481149ffb299a1 | ad5ed4d74226d42ac45de6c2be66a573576274f0 | refs/heads/master | 2020-05-21T23:54:50.209818 | 2018-07-04T07:55:18 | 2018-07-04T07:55:18 | 63,590,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,411 | py | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from openerp import models, api, _, fields
from openerp.models import BaseModel
from openerp.exceptions import UserError
from operator import itemgetter
class TerminationController(models.AbstractModel):
    """Abstract mixin shared by termination records.

    Provides the mobile-app JSON detail view, approve/reject handling
    and push notification plumbing."""
    _name = 'termination.controller'
    _inherit = ['termination.model', 'ir.needaction_mixin', 'personnel.detail']
    _description = "Termination Controller"

    @api.multi
    def push_remind(self, title, message, user_ids, action, title_args=(), message_args=()):
        """Send a (translated) push notification about each record to
        *user_ids*, grouping recipients by their language so each group
        receives the message in its own language."""
        for record in self:
            msg = {
                'record_id': record.id,
                'model_name': record._name,
                'type': 'personnel_detail',
                'action': action,
            }
            lang_user_ids_dict = self.env['res.users'].get_res_partner_lang(user_ids)
            for lang, user_id_list in lang_user_ids_dict.items():
                title_with_lang = self.env['ir.translation'].format_the_string_with_lang(title, title_args, lang)
                message_with_lang = self.env['ir.translation'].format_the_string_with_lang(message, message_args, lang)
                msg.update({
                    'title': title_with_lang,
                    'message': message_with_lang,
                })
                self.env['app.action'].jpush_send(msg, user_ids=user_id_list, push_type='double')
        return True

    @api.multi
    def app_view_detail(self):
        """Return the JSON structure rendered by the mobile app's
        detail screen for a termination record."""
        res = dict()
        head = super(TerminationController, self).app_view_detail()
        res.update(head)
        for record in self:
            # build this record's json content dict
            data = {
                "approver_ids": [user.id for user in record.approver_ids],
                "content": [
                    {
                        "type": "date",
                        "can_edit": True,
                        "title": self.get_translation('Termination Date'),
                        "value": record.leave_time,
                        "key": "leave_time",
                    },
                    {
                        "type": "text",
                        "can_edit": False,
                        "title": self.get_translation('Remarks'),
                        "value": record.leave_reason or '',
                        "key": "leave_reason",
                    },
                ],
            }
            if 'approve_state' in res and 'can_cancel' in res['approve_state']:
                # the backend does not implement withdrawal yet, so
                # cancelling is disabled for now
                res['approve_state']['can_cancel'] = False
            res.update(data)
        return res

    @api.multi
    def app_do_reject(self):
        """Apply any field edits sent by the app, then delegate the
        rejection to the parent implementation."""
        if 'description' in self.env.context:
            vals = self.env.context['description']
            for k, v in vals.items():
                # a date field cannot be cleared with '', use False instead
                if v == '':
                    vals[k] = False
            r = self.write(vals)
        res = super(TerminationController, self).app_do_reject()
        return res

    @api.multi
    def app_do_approved(self):
        """Apply any field edits sent by the app, then delegate the
        approval to the parent implementation."""
        if 'description' in self.env.context:
            vals = self.env.context['description']
            for k, v in vals.items():
                # a date field cannot be cleared with '', use False instead
                if v == '':
                    vals[k] = False
            r = self.write(vals)
        res = super(TerminationController, self).app_do_approved()
        return res
class HrTermination(models.Model):
    """Termination (off-boarding) personnel event, surfaced on the workbench."""
    _name = 'hr.termination'
    _description = "HR Termination"
    _inherit = ['termination.controller', 'work.bench']
    _order = "create_date desc"
    # Template for the workbench search card: values are either constants or
    # lambdas evaluated against a single record.
    _workbench_search = {
        'remark': lambda self: self.get_translation(self._description, 'ir.model,name') or '',
        'title': lambda self: self.employee_name,
        'description': '',
        'record_id': lambda self: self.id,
        'date': lambda self: self.leave_time,
        'type': 'personnel_detail',
        'color': '#3fa2a3',
        'state': lambda self:
        self.env['ir.translation'].selection_field_key_translations_map('approve.instance', 'state', lang=self._context['lang'])[
            getattr(self, 'state')]
    }
    def _search_app_action(self, search_place):
        """Deep link opened when this record is tapped in a workbench search."""
        return 'eHR://personalEvent?id=%d&event_type=termination' % self.id
    name = fields.Char(string='ID', readonly=True, states={'draft': [('readonly', False)]}, required=True,
                       default=lambda self: self.env['ir.sequence'].next_by_code(self._name))
    # Related to the approval flow; used by domains in the views.
    approver_ids = fields.Many2many('res.users', 'hr_termination_approver_rel', 'termination_id', 'user_id', string='Approver')
    approved_ids = fields.Many2many('res.users', 'hr_termination_approved_rel', 'termination_id', 'user_id', string='Approved by')
    @api.multi
    def build_approve_remind(self, msg, approver_ids):
        """Build and send the push notification asking approvers to act.

        :param msg: untranslated message template
        :param approver_ids: a user id or a list/tuple of user ids
        """
        if not isinstance(approver_ids, (list, tuple)):
            approver_ids = [approver_ids]
        title = _('Personnel Event Approval')
        action_push = 'eHR://personalEvent?id=%d&event_type=termination' % self.id
        # Message arguments: the creator's display name (verbatim) plus the
        # model description (translated per recipient language).
        msg_args_list = [
            {
                'src_or_value': self.create_uid.partner_id.name,
                'need_translate': False,
            },
            {
                'src_or_value': self._description,
                'translate_name': 'ir.model,name',
                'translate_type': 'model',
                'need_translate': True,
            }
        ]
        # Send the push notification.
        self.push_remind(title, msg, approver_ids, action_push, message_args=msg_args_list)
    @api.multi
    def build_create_remind(self, msg, push_uid):
        """Build and send the push notification back to the record's creator.

        :param msg: untranslated message template
        :param push_uid: a user id or a list/tuple of user ids
        """
        if not isinstance(push_uid, (list, tuple)):
            push_uid = [push_uid]
        title = _('Personnel Event Approval')
        action_push = 'eHR://personalEvent?id=%d&event_type=termination' % self.id
        msg_args_list = [
            {
                'src_or_value': self._description,
                'translate_name': 'ir.model,name',
                'translate_type': 'model',
                'need_translate': True,
            }
        ]
        # Send the push notification.
        self.push_remind(title, msg, push_uid, action_push, message_args=msg_args_list)
| [
"zhenglong1992@126.com"
] | zhenglong1992@126.com |
3f6d20b2b0368bc1fce9ed4428930b1693f2765e | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4134/codes/1723_2505.py | 8e06f5ecd7a38c37363f35f111d10476601ae390 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | from math import*
# Taylor-series approximation of sin(angulo) truncated to k terms.
# NOTE(review): eval() on raw user input executes arbitrary code; kept only to
# preserve behaviour — float(input(...)) would be safer.
ang = eval(input("angulo:"))
k = int(input("numero de termos:"))
soma = 0
# Same terms as the original while-loop over i = 0 .. k-1.
for i in range(k):
    soma += (-1) ** i * ang ** (2 * i + 1) / factorial(2 * i + 1)
print(round(soma, 10)) | [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
0add22caa678b2f5099feef3cbcd7c33dd0cde8d | 857c1fa3da77f6949ab66ca0abcb7ff0d38a324e | /calendrier/app/modulos/contas/forms.py | ba1062ced2883cda125ed8290c028b7929a2fbbe | [] | no_license | leandropaixao/Python-Flask-WTForms_SalaoDeBeleza_Agenda-Caixa | 803ee57b78783812c0db8aa630feca62c0adab36 | 9a730e44a9563e06dc0069d97a8491f028767aec | refs/heads/master | 2023-01-30T22:09:13.254185 | 2020-12-06T23:11:50 | 2020-12-06T23:11:50 | 312,880,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms.validators import DataRequired
class ContasForm(FlaskForm):
    """Form for creating/editing an account ("conta"): one required description field."""
    # Required free-text description; the placeholder is injected via render_kw.
    descricao = StringField('Descrição da conta', validators=[DataRequired(message="Campo obrigatório")], render_kw={"placeholder": "Descrição da conta"})
| [
"leandro.tec.inf@gmail.com"
] | leandro.tec.inf@gmail.com |
20eeee6a20125183e69b74373fb882f41cf4c4ab | 157d2961f8196d56640594c8a094c41521b6ae2d | /Untitled1.py | 2b65f0c6a99952359cdb2792d0c28b1e0a539d5c | [] | no_license | anaidpm/IBM-Data-Visualisation-Labs | e0a2279a942c04b91b490f46b48bddae9c40425b | 886ca05b113267eda7cfa2f6429b6e7983e968eb | refs/heads/master | 2020-03-27T22:11:47.380461 | 2018-09-17T13:35:53 | 2018-09-17T13:35:53 | 147,212,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,011 | py |
# coding: utf-8
# Jupyter-exported script: choropleth map of crime counts per San Francisco
# police district, built with pandas and folium.
# In[3]:
import numpy as np # useful for many scientific computing in Python
import pandas as pd # primary data structure library
# Load the raw incident data and rename the district column so it matches the
# naming used for the per-area aggregation below.
df = pd.read_csv('https://cocl.us/sanfran_crime_dataset')
df.rename(columns={'PdDistrict':'Neighbourhood'}, inplace=True)
print ('Data read into a pandas dataframe!')
df.head()
# In[14]:
# Count incidents per neighbourhood; keep a single column renamed to 'Count'.
df_areas=df.groupby('Neighbourhood',axis=0).count()
df_areas=df_areas[['IncidntNum']]
df_areas.rename(columns={'IncidntNum':'Count'}, inplace=True)
df_areas
# In[17]:
# Install folium in the notebook kernel (IPython shell escape).
get_ipython().system('conda install -c conda-forge folium=0.5.0 --yes')
import folium
# San Francisco latitude and longitude values
latitude = 37.77
longitude = -122.42
# create map and display it
sanfran_map = folium.Map(location=[latitude, longitude], zoom_start=12)
# display the map of San Francisco
sanfran_map
# In[26]:
# download the San Francisco districts geojson file
get_ipython().system('wget --quiet https://cocl.us/sanfran_geojson -O sanfran.json')
print('GeoJSON file downloaded!')
# Promote the index back to a 'Neighbourhood' column for the choropleth join.
df_areas=df_areas.reset_index()
df_areas
# In[32]:
san_geo = r'sanfran.json'
# bin edges: 6 linearly spaced values from the minimum to the maximum crime count
threshold_scale = np.linspace(df_areas['Count'].min(),
                              df_areas['Count'].max(),
                              6, dtype=int)
threshold_scale = threshold_scale.tolist() # change the numpy array to a list
threshold_scale[-1] = threshold_scale[-1] + 1 # make sure that the last bin edge exceeds the maximum count
# let Folium determine the scale.
sanfran_map = folium.Map(location=[latitude, longitude], zoom_start=12)
sanfran_map.choropleth(
    geo_data=san_geo,
    data=df_areas,
    columns=['Neighbourhood', 'Count'],
    key_on='feature.properties.DISTRICT',
    threshold_scale=threshold_scale,
    fill_color='YlOrRd',
    fill_opacity=0.7,
    line_opacity=0.2,
    legend_name='Crime in San Francisco',
    reset=True
)
sanfran_map
| [
"noreply@github.com"
] | noreply@github.com |
14693149d31686fd7ed16988d92202f76cc51287 | 20018da049ba9aedd7daad316a59cd7f00577cc2 | /entero.py | ab4e9473d6def49dd07a98f9fd1a72308e656daa | [] | no_license | danielacorvalan79/trabajo_grupal_2 | 0d09cc2d872eded01ccc8e1bb58967469532bb6b | 5aff3d56643c0845101b94cd8a21181481d69646 | refs/heads/main | 2023-01-07T01:10:45.505800 | 2020-11-05T21:48:41 | 2020-11-05T21:48:41 | 310,395,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | import random
import numpy as np
# Point 5: build 100 random integers having between 1 and 20 digits, then
# report their sum and mean and finally mean-centre the list.
caracteres = '1234567890'
lista = []
for _ in range(100):
    # Same random-call order as before: one randint, then `largo` choices.
    largo = random.randint(1, 20)
    digitos = ''.join(random.choice(caracteres) for _ in range(largo))
    lista.append(int(digitos))
print(lista)
# Average, computed by hand and cross-checked against numpy.
suma = sum(lista)
print("suma: ", suma)
print("numero de elementos: ", len(lista))
promedio = suma / len(lista)
print("promedio: ", promedio)
print("promedio con numpy: ", np.mean(lista))
# Subtract the mean from every element (mean-centre the data).
lista = [valor - promedio for valor in lista]
print("Lista actualizada: \n",lista) | [
"pedrocid@example.com"
] | pedrocid@example.com |
8068df745ef00e5492674054c69115a2d6c59513 | 7db3827b9a8ed9d34607bb5af104930fa6cfd8b6 | /accounts/urls.py | 04fdd47ddece9e8d7320eb2d52e279fb9fa40f30 | [] | no_license | shivani1666/carzone-gitproject | 2a66ca868cfb0c819e24241f90bba39ba53cc59f | 92dc251cf28bb79624d5d5c974af2268d7e931fb | refs/heads/master | 2023-05-21T14:49:47.696949 | 2021-06-12T10:20:45 | 2021-06-12T10:20:45 | 362,923,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | from django.urls import path
from . import views
# Routes for the accounts app; every view lives in accounts/views.py.
urlpatterns = [
    path('login', views.login, name='login'),          # sign-in form / handler
    path('register', views.register, name='register'), # account creation
    path('logout', views.logout, name='logout'),       # session teardown
    path('dashboard', views.dashboard, name='dashboard'),  # post-login landing page
]
| [
"shivani16cs@gmail.com"
] | shivani16cs@gmail.com |
afaa62f64cd35fb3b529c054cc3170eb0d51759a | b946fbbd8b720f135a53d1989e319b50a21fc82a | /codetables/models.py | 745ddd94006849bb29fc6979f88a43176a2eba85 | [] | no_license | Ravinderbaid/codetable | 64bf1c209897ee74c7c2b993b6b6594a34f79ef2 | c1162c51ffd613cb5b90210b4368786255f357d7 | refs/heads/master | 2021-01-02T08:39:26.193759 | 2016-03-13T03:51:13 | 2016-03-13T03:51:13 | 41,509,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Files_saveds(models.Model):
    """A saved code-table file entry, owned by a Django auth user.

    NOTE(review): the `modified_date` field declared just below this block
    uses auto_now_add=True, which stamps the row only at creation; auto_now
    would be needed for it to actually track modifications — confirm intent.
    """
    title = models.CharField(max_length = 100)
    # Owner of the entry; shown as "by" in the admin.
    author = models.ForeignKey(User, verbose_name="by")
    created_date = models.DateTimeField(auto_now_add=True, editable=False)
modified_date = models.DateTimeField(auto_now_add=True) | [
"ravinderbaid@gmail.com"
] | ravinderbaid@gmail.com |
4adf64dcfff43a8c6a74d42b19225f1953debe63 | 9e928640a0d92edf69c5b3b5b05aec256635ebf6 | /0old/tests/url_test/main.py | d7d184d19f7627081ef9dd0413f23c6a9f55b084 | [] | no_license | joiplay/rapt | 9db0f1cc824041a8f385e89d77fbbaeed745e87b | cf8ee5c1aabcb6d1dc08b5a2c75c7723afe96c31 | refs/heads/master | 2020-09-29T14:40:48.187559 | 2019-12-14T10:22:06 | 2019-12-14T10:22:06 | 227,056,098 | 5 | 1 | null | 2019-12-10T07:31:25 | 2019-12-10T07:31:24 | null | UTF-8 | Python | false | false | 1,990 | py | import pygame
import webbrowser
# Import the android module. If we can't import it, set it to None - this
# lets us test it, and check to see if we want android-specific behavior.
try:
import android
import android.mixer
except ImportError:
android = None
# Event constant.
TIMEREVENT = pygame.USEREVENT
# The FPS the game runs at.
FPS = 30
# Color constants.
RED = (255, 0, 0, 255)
GREEN = (0, 255, 0, 255)
def main():
    """Demo event loop (Python 2 / pygame): the screen is red, turns green
    while touched; a touch also vibrates (on Android) and opens a URL.
    """
    pygame.init()
    if android:
        # Android back-end init: hook events and start the looping click sound.
        android.init()
        android.mixer.music.load("click.wav")
        android.mixer.music.play(-1)
    # Set the screen size.
    screen = pygame.display.set_mode((480, 800))
    # Map the back button to the escape key.
    if android:
        android.map_key(android.KEYCODE_BACK, pygame.K_ESCAPE)
    # Use a timer to control FPS.
    # Python 2 integer division: 1000 / 30 == 33 ms between TIMEREVENTs.
    pygame.time.set_timer(TIMEREVENT, 1000 / FPS)
    # The color of the screen.
    color = RED
    while True:
        ev = pygame.event.wait()
        # Android-specific:
        if android:
            # Block here while the app is paused (home button, call, ...).
            if android.check_pause():
                android.wait_for_resume()
        # Draw the screen based on the timer.
        if ev.type == TIMEREVENT:
            screen.fill(color)
            pygame.display.flip()
            # NOTE(review): called unconditionally — on desktop `android` is
            # None, so this line would raise AttributeError; confirm this
            # branch is Android-only in practice.
            android.mixer.periodic()
        # When the touchscreen is pressed, change the color to green.
        elif ev.type == pygame.MOUSEBUTTONDOWN:
            color = GREEN
            if android:
                android.vibrate(.25)
            print "Open URL Version 2"
            webbrowser.open("http://www.renpy.org/")
        # When it's released, change the color to RED.
        elif ev.type == pygame.MOUSEBUTTONUP:
            color = RED
        # When the user hits back, ESCAPE is sent. Handle it and end
        # the game.
        elif ev.type == pygame.KEYDOWN and ev.key == pygame.K_ESCAPE:
            break
# This isn't run on Android.
if __name__ == "__main__":
main()
| [
"tom@rothamel.us"
] | tom@rothamel.us |
c30392e2bb7b8ca47fa86eecc06d3ba2ebbf67c5 | b6472217400cfce4d12e50a06cd5cfc9e4deee1f | /sites/top/api/rest/WlbItemDeleteRequest.py | 90181cc7828d9d8c9ed09c35a46a07e62a9e7a08 | [] | no_license | topwinner/topwinner | 2d76cab853b481a4963826b6253f3fb0e578a51b | 83c996b898cf5cfe6c862c9adb76a3d6a581f164 | refs/heads/master | 2021-01-22T22:50:09.653079 | 2012-08-26T19:11:16 | 2012-08-26T19:11:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | '''
Created by auto_sdk on 2012-08-26 16:43:44
'''
from top.api.base import RestApi
class WlbItemDeleteRequest(RestApi):
    """Auto-generated TOP SDK wrapper for the taobao.wlb.item.delete API."""
    def __init__(self,domain,port):
        RestApi.__init__(self,domain, port)
        # Request parameters; the caller fills these in before sending.
        self.item_id = None
        self.user_nick = None
    def getapiname(self):
        """Return the remote API method name this request maps to."""
        return 'taobao.wlb.item.delete'
| [
"timo.jiang@qq.com"
] | timo.jiang@qq.com |
a6233eaa6757dee3394ea65eed815e8023fd32b7 | 9befe2880bb67a2fa076214d81b15165a3a43101 | /main.py | 7a70aca875fa2603f7d428995fe3c0361740bc20 | [
"MIT"
] | permissive | Aarrtteemm123/Balls | f459cf56da5d86dcc684d95d30074813721b6609 | ded92c3a1a903daac5851749eee90e5fb42b44e5 | refs/heads/master | 2021-02-16T13:43:24.724495 | 2020-12-18T13:42:42 | 2020-12-18T13:42:42 | 245,011,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | import pygame
from App import Application
if __name__ == '__main__':
    # Initialise pygame before any Application resources are created,
    # then hand control to the app's main loop.
    pygame.init()
    app = Application()
    app.start()
| [
"“artem@gmail.com”"
] | “artem@gmail.com” |
a33002ee62b9f1e34ed9eabcd27de694c1e05a29 | 00f1f01f218fddc30a4194e999f0b48c45c47012 | /elements/resources/migrations/0001_initial.py | fb4b807194a3f2905b8e8ba7d9f27baedea4299e | [] | no_license | mikpanko/grakon | 495659317c5933a95650b3f9000aab73e7335a13 | 6c64432c366a6ad44fb7227f22498335bd193f37 | refs/heads/master | 2020-12-26T00:19:52.799388 | 2013-07-28T02:33:19 | 2013-07-28T02:33:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,184 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Rename the EntityResource table from the `elements` app to `resources`."""
        db.rename_table('elements_entityresource', 'resources_entityresource')
        if not db.dry_run:
            # For permissions to work properly after migrating
            # NOTE(review): ContentType.model values are stored lowercased and
            # app_labels normally contain no dot — confirm this filter/update
            # ('EntityResource' / 'elements.resources') ever matches any rows.
            orm['contenttypes.contenttype'].objects.filter(app_label='elements', model='EntityResource').update(app_label='elements.resources')
    def backwards(self, orm):
        """Reverse: move the EntityResource table back under the `elements` app."""
        db.rename_table('resources_entityresource', 'elements_entityresource')
        if not db.dry_run:
            # For permissions to work properly after migrating
            # NOTE(review): same concern as forwards() — the dotted app_label
            # and CamelCase model name make this content-type fixup dubious.
            orm['contenttypes.contenttype'].objects.filter(app_label='elements.resources', model='EntityResource').update(app_label='elements')
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'resources.entityresource': {
'Meta': {'unique_together': "(('content_type', 'entity_id', 'resource'),)", 'object_name': 'EntityResource'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resource': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'})
}
}
complete_apps = ['resources'] | [
"sergkop@gmail.com"
] | sergkop@gmail.com |
1dcaa9207f2ccf6e23d755d436896b1aef624ac1 | a170461845f5b240daf2090810b4be706191f837 | /pyqt/DemoFullCode-PythonQt/chap12QtChart/Demo12_2ChartConfig/myDialogPen.py | 4cdbf17519ebcd8973fd4577bfea498efc83ca6b | [] | no_license | longhuarst/QTDemo | ec3873f85434c61cd2a8af7e568570d62c2e6da8 | 34f87f4b2337a140122b7c38937ab4fcf5f10575 | refs/heads/master | 2022-04-25T10:59:54.434587 | 2020-04-26T16:55:29 | 2020-04-26T16:55:29 | 259,048,398 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,026 | py | # -*- coding: utf-8 -*-
import sys
from PyQt5.QtWidgets import QApplication, QDialog,QColorDialog
from PyQt5.QtCore import pyqtSlot,Qt
##from PyQt5.QtWidgets import
from PyQt5.QtGui import QPen, QPalette,QColor
from ui_QWDialogPen import Ui_QWDialogPen
class QmyDialogPen(QDialog):
    """Dialog for interactively editing a QPen (line style, width and colour)."""
    def __init__(self, parent=None):
        super().__init__(parent)
        self.ui=Ui_QWDialogPen()
        self.ui.setupUi(self)   # build the Designer-generated UI
        self.__pen=QPen()
        ## populate the "pen style" combo box; the item data is the enum value
        self.ui.comboPenStyle.clear()
        self.ui.comboPenStyle.addItem("NoPen",0)
        self.ui.comboPenStyle.addItem("SolidLine",1)
        self.ui.comboPenStyle.addItem("DashLine",2)
        self.ui.comboPenStyle.addItem("DotLine",3)
        self.ui.comboPenStyle.addItem("DashDotLine",4)
        self.ui.comboPenStyle.addItem("DashDotDotLine",5)
        self.ui.comboPenStyle.addItem("CustomDashLine",6)
        self.ui.comboPenStyle.setCurrentIndex(1)
    ##================= custom interface functions ====================
    def setPen(self,pen):    ## load the dialog widgets from an existing pen
        self.__pen=pen
        self.ui.spinWidth.setValue(pen.width())  # line width
        i=int(pen.style())   # convert the style enum to an int combo index
        self.ui.comboPenStyle.setCurrentIndex(i)
        color=pen.color()  # QColor
        ## self.ui.btnColor.setAutoFillBackground(True)
        qss="background-color: rgb(%d, %d, %d)"%(
            color.red(),color.green(),color.blue())
        self.ui.btnColor.setStyleSheet(qss)  # show the pen colour as the button background
    def getPen(self):   ## build and return the configured pen
        index=self.ui.comboPenStyle.currentIndex()
        self.__pen.setStyle(Qt.PenStyle(index))  # line style
        self.__pen.setWidth(self.ui.spinWidth.value())  # line width
        color=self.ui.btnColor.palette().color(QPalette.Button)
        self.__pen.setColor(color)  # colour, read back from the button palette
        return self.__pen
    @staticmethod   ## static helper, usable without creating an instance
    def staticGetPen(iniPen):
        # Must not take self and must not shadow the instance method getPen().
        # Shows the dialog modally, initialised from iniPen; returns (pen, ok).
        Dlg=QmyDialogPen()  # create a dialog instance
        Dlg.setPen(iniPen)  # initialise it from the given QPen
        pen=iniPen
        ok=False
        ret=Dlg.exec()  # modal execution
        if ret==QDialog.Accepted:
            pen=Dlg.getPen()  # read the configured pen back
            ok=True
        return pen ,ok  # the configured QPen and an "accepted" flag
    ## ========== slots auto-connected by connectSlotsByName() ============
    @pyqtSlot()   ## pick a colour
    def on_btnColor_clicked(self):
        color=QColorDialog.getColor()
        if color.isValid():  # preview the choice via the button's style sheet
            qss="background-color: rgb(%d, %d, %d);"%(
                color.red(),color.green(),color.blue())
            self.ui.btnColor.setStyleSheet(qss)
## ============窗体测试程序 ================================
if __name__ == "__main__":
app = QApplication(sys.argv)
iniPen=QPen(Qt.blue)
pen=QmyDialogPen.staticGetPen(iniPen) #测试类函数调用
sys.exit(app.exec_())
| [
"841105197@qq.com"
] | 841105197@qq.com |
27fe1e9da8e79b42ad8492c48fc4391361361bbf | d57534daa527217d0ed14ef89cae55620b4c8fbb | /env_bluetooth-API/bin/wheel | c3c288012325a59442a043ab8702c98bfe43899e | [
"Apache-2.0"
] | permissive | ryltar/bluetooth-API | 2c5c368b7926f3c3089270efb8e78e57021abc9a | 3c97618b4b9ef2824db2d2f4c5e1be43c7ae662e | refs/heads/master | 2021-10-23T16:12:02.386771 | 2019-03-18T16:33:49 | 2019-03-18T16:33:49 | 109,590,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | #!/home/bluetoothapi/bluetooth-API/env_bluetooth-API/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
    # pip-generated console-script shim: strip a trailing "-script.py(w)" or
    # ".exe" from argv[0] so wheel sees its canonical program name, then exit
    # with wheel's return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"mart1.guillaume@free.fr"
] | mart1.guillaume@free.fr | |
25565a19fc1880f481db4da8ee61c08e14747dfd | 506ad7a2be6eb1b6ad858edbf00673d7b0d670b1 | /player.py | b46aa73d7e42ff2beb4fd0acdbdb3b6ea7d03851 | [] | no_license | Tenelen/BlockBro | c3d17c656b804de2b31819b52dde337825f51e1c | 392b083cfb939089d29b61ff4fdcb32b86dce6a0 | refs/heads/master | 2021-01-24T03:12:06.270056 | 2018-02-25T21:57:22 | 2018-02-25T21:57:22 | 122,880,594 | 0 | 0 | null | 2018-02-25T21:57:22 | 2018-02-25T21:48:37 | Python | UTF-8 | Python | false | false | 2,743 | py | import pygame
from time import sleep
class Player():
    """Player avatar for the platformer: a solid-colour rectangle with
    left/right movement, a teleport-style jump and simple gravity.

    Position is tracked in self.centerx / self.bottom and copied onto
    self.rect once per frame by check_position().
    """
    def __init__(self, settings, screen, platform):
        # Tunables copied from the settings object.
        self.player_speed = settings.player_speed
        self.velocity = settings.velocity  # current downward speed (px/frame)
        self.settings = settings
        self.screen = screen
        self.player_width = settings.player_width
        self.player_height = settings.player_height
        self.player_color = settings.player_color
        self.direction = 1  # horizontal facing used by jump()
        # Movement flags toggled by the input handler.
        self.move_left = False
        self.move_right = False
        self.jumping = False
        self.onGround = False
        #self.jumptime = 0
        # The player is drawn as a plain filled surface.
        self.image = pygame.Surface([self.player_width, self.player_height])
        self.image.fill(self.player_color)
        self.rect = self.image.get_rect()
        self.centerx = float(self.rect.centerx)
        self.bottom = int(self.rect.bottom)
        # NOTE(review): collision() passes this straight to Rect.colliderect,
        # so it assumes a pygame.Rect — confirm what callers pass.
        self.platform = platform
        #self.platform1 = platform.rect_plat1
        #self.platform2 = platform.rect_plat2
    def place_player(self):
        """Reset the player to the fixed spawn point."""
        self.centerx = 200
        self.bottom = 100
    def blitme(self):
        """Draw the player at its current rect position."""
        self.screen.blit(self.image, self.rect)
    def check_player(self, settings, screen):
        """Mark the player grounded when it touches the bottom of the screen."""
        # `screen` is unused; kept for interface compatibility.
        if self.rect.bottom >= settings.screen_height:
            self.onGround = True
    def collision(self, settings, screen, platform):
        """Recompute onGround from platform overlap or screen-floor contact."""
        # The `settings`/`screen`/`platform` params are unused here; the
        # platform stored at construction time is tested instead.
        if pygame.Rect.colliderect(self.rect, self.platform):
        #or pygame.Rect.colliderect(self.rect, self.platform2):
            self.onGround = True
            self.jumping = False
        elif self.bottom >= self.settings.screen_height:
            self.onGround = True
        else:
            self.onGround = False
    def jump(self, settings):
        """Teleport-style jump: rise 150px and nudge forward when grounded."""
        if self.onGround and self.jumping == False:
            # NOTE(review): jumping is set True and back to False within this
            # one call, so gravity()'s `elif self.jumping` branch never sees
            # True — confirm whether that branch is dead or intended.
            self.jumping = True
            self.bottom -= 150
            self.centerx += (self.direction * 5)
            self.jumping = False
        else:
            self.velocity = self.settings.velocity
    def gravity(self, settings):
        """Set the vertical speed: zero while supported, falling otherwise."""
        if self.onGround == True:
            self.velocity = 0
        elif self.jumping:
            self.velocity = 0
        else:
            self.velocity = self.settings.velocity
    def check_position(self, settings):
        """Apply horizontal input and vertical velocity, then sync the rect."""
        if self.move_right == True and self.rect.right <= self.settings.screen_width - 1:
            self.centerx += 3
        elif self.move_left == True and self.rect.left > 0:
            self.centerx -= 3
        self.bottom += self.velocity
        self.rect.centerx = self.centerx
        self.rect.bottom = self.bottom
    def update(self, settings, screen, platform):
        """Per-frame tick: ground check, collision, gravity, then movement."""
        self.check_player(settings, screen)
        self.collision(settings, screen, platform)
        self.gravity(settings)
        self.check_position(settings)
| [
"tfptenelen@gmail.com"
] | tfptenelen@gmail.com |
87ce11d808d55a15ec8198dedc0d8ea2514b4e48 | 27bb9d333e3609d791f0ded928090721b98ddf52 | /app/db_models.py | a343c6d3d77b1548ab810fa23f8a90f9dc5b10e2 | [
"MIT"
] | permissive | carverdo/homer | 64434bf3f9f784fd6a63ce549e3dcc5caedb371c | 93169838d15c557406b37cc7f56f9631abb0fd6c | refs/heads/master | 2021-01-10T13:30:31.867435 | 2016-04-12T16:10:25 | 2016-04-12T16:10:25 | 54,338,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,934 | py | """
We import our empty db and write our model changes to it.
NOTE: scheduler creates an additional table not captured in this model.
"""
__author__ = 'donal'
__project__ = 'ribcage'
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from sqlalchemy import Column, Integer, String, Boolean, DateTime, Float, ForeignKey
from sqlalchemy.orm import relationship
from flask.ext.login import UserMixin
from flask import current_app
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from config_vars import MAX_COL_WIDTHS, ADMIN_USER, INITIALLY_ACTIVE
from . import db, login_manager
from . import lg
# ==============================
# DATABASE STRUCTURE
# ==============================
class CRUDMixin(object):
    """Mixin adding common create/get/update/save/delete helpers to models."""
    __table_args__ = {'extend_existing': True}
    id = Column(db.Integer, primary_key=True)

    @classmethod
    def create(cls, commit=True, **kwargs):
        """Instantiate from keyword arguments and (by default) persist.

        A bit too bespoke (handles only new signups): the form's password
        confirmation field is discarded since it is not a model column.
        The previous try/except was dead code — pop() with a default
        never raises.
        """
        kwargs.pop('password2', None)
        instance = cls(**kwargs)
        return instance.save(commit=commit)

    @classmethod
    def get(cls, id):
        """Return the record with the given primary key, or None."""
        return cls.query.get(id)

    # Also proxy Flask-SqlAlchemy's get_or_404 for symmetry
    @classmethod
    def get_or_404(cls, id):
        return cls.query.get_or_404(id)

    def update(self, commit=True, **kwargs):
        """Set the given attributes; commit (by default) and return self."""
        for attr, value in kwargs.iteritems():
            setattr(self, attr, value)
        return commit and self.save() or self

    def save(self, commit=True):
        """Add to the session and optionally commit; returns self."""
        db.session.add(self)
        if commit:
            db.session.commit()
        return self

    def delete(self, commit=True):
        """Mark for deletion and optionally commit."""
        db.session.delete(self)
        return commit and db.session.commit()
class Member(UserMixin, CRUDMixin, db.Model):
    """Application user: identity, credentials and login bookkeeping."""
    __tablename__ = 'member'
    id = Column(Integer, primary_key=True)
    firstname = Column(String(MAX_COL_WIDTHS), nullable=False)
    surname = Column(String(MAX_COL_WIDTHS), nullable=False)
    email = Column(String(MAX_COL_WIDTHS), nullable=False, unique=True)
    pwdhash = Column(String, nullable=False)    # PBKDF2 hash; raw password never stored
    adminr = Column(Boolean)                    # administrator flag
    active = Column(Boolean)                    # toggled to disable an account
    confirmed = Column(Boolean, default=False)  # e-mail address confirmed?
    first_log = Column(DateTime(), default=datetime.utcnow)
    last_log = Column(DateTime(), default=datetime.utcnow)
    logins = Column(Integer)                    # number of logins so far
    ips = relationship('Visit', backref='member',
                       cascade="all, delete-orphan", passive_deletes=True)

    def __init__(self, firstname, surname, email, password,
                 adminr=ADMIN_USER, active=INITIALLY_ACTIVE,
                 confirmed=False,
                 first_log=None, last_log=None, logins=1):
        # Bug fix: the old defaults were `datetime.utcnow()`, evaluated once
        # at import time, so every Member created in one process shared the
        # same timestamp. None now means "stamp at creation time".
        self.firstname = firstname.title()
        self.surname = surname.title()
        self.email = email.lower()
        self.set_password(password)
        self.adminr = adminr
        self.active = active
        self.confirmed = confirmed
        self.first_log = first_log if first_log is not None else datetime.utcnow()
        self.last_log = last_log if last_log is not None else datetime.utcnow()
        self.logins = logins

    def set_password(self, password):
        """Store a salted PBKDF2-SHA512 hash of the given password."""
        self.pwdhash = generate_password_hash(
            password, method='pbkdf2:sha512:10000')

    def check_password(self, password):
        """Return True when `password` matches the stored hash."""
        return check_password_hash(self.pwdhash, password)

    # ===========================
    # Next 4 all for flask-login
    # UserMixin would handle ordinarily, but in case we modify
    def is_authenticated(self):
        """True: if exist, they are authenticated"""
        return True

    def is_active(self):
        """Extra protection: we can determine/toggle"""
        return self.active

    def is_anonymous(self):
        """False: not allowed"""
        return False

    def get_id(self):
        return unicode(self.id)

    # ===========================
    def ping(self, increment=True):
        """Record activity: refresh last_log and optionally bump the counter."""
        self.last_log = datetime.utcnow()
        if increment:
            self.logins += 1
        # Bug fix: was `self.save(self)`, which passed the record itself as
        # the (truthy) `commit` flag and only worked by accident.
        self.save()

    # ===========================
    # ACTIVATION
    def generate_confirm_token(self, expiry=3600):  # seconds
        """Return a signed, time-limited token identifying this member."""
        s = Serializer(current_app.config['SECRET_KEY'], expiry)
        return s.dumps({'confirm': self.id})

    def confirm_token(self, token):
        """Validate an activation token; mark the member confirmed on success."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
            lg.logger.info('try data: {}'.format(data))
        except Exception:  # bad signature or expired token (was a bare except)
            lg.logger.info('failed try')
            return False
        if data.get('confirm') != self.id:
            lg.logger.info('failed confirm: {} {}'.format(data.get('confirm'), self.id))
            return False
        self.confirmed = True
        self.save()  # bug fix: was save(self) — record passed as commit flag
        return True

    def __repr__(self):
        return '<{0} {1}>'.format(self.surname, self.email)
class Visit(CRUDMixin, db.Model):
    """One visit/login event: requester IP, browser and geo-lookup data."""
    __tablename__ = 'visit'
    id = Column(Integer, primary_key=True)
    ip_address = Column(String)
    browser = Column(String)   # user-agent string
    city = Column(String)
    zip_code = Column(String)
    latitude = Column(Float)
    longitude = Column(Float)
    date = Column(DateTime(), default=datetime.utcnow)
    # Owning member; rows are removed when the member is deleted.
    member_id = Column(Integer, ForeignKey('member.id', ondelete='CASCADE'))
    def __init__(self, **kwargs):
        # Accept arbitrary column values as keyword arguments.
        for key, value in kwargs.items():
            setattr(self, key, value)
    def __repr__(self):
        return '<IP {} in {} on {}>'\
            .format(self.ip_address, self.city, self.date)
# flask-login needs this definition
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload the Member stored in the session by id."""
    return Member.query.get(int(user_id))
if __name__ == '__main__':
    # Ad-hoc smoke test (Python 2): build a member and exercise the
    # password hashing round-trip; expects False then True.
    mem = Member('pat', 'brok', 'PB', 'fish', 0)
    print mem
    print mem.pwdhash
    print mem.check_password('Fish'), mem.check_password('fish')
| [
"donal.carville@gmail.com"
] | donal.carville@gmail.com |
ffc8456c74ad528d18d3eb7750c29ab605bde020 | 977352af2be278d01ab02c91423738cc79af174a | /python-data-structures-master/3. Pointer Structures/testLinkedList.py | 28a099f4bf14612a5d75293d1858517a769307db | [] | no_license | MattRijk/data-structures | 4c69d8798fdb3f8967e7e3b3f7da986aec4590ea | 8ffa1de094339f33ed79ee62ecc83e997ebdb992 | refs/heads/master | 2021-01-17T19:59:59.941557 | 2016-11-28T19:00:48 | 2016-11-28T19:00:48 | 60,529,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,914 | py | """
Test cases for LinkedList data structure
Author: George Heineman
"""
import unittest
from linkedList import LinkedList, LinkedNode
import random
class TestLinkedList(unittest.TestCase):
def setUp(self):
self.link = LinkedList()
def tearDown(self):
self.link = None
def test_basic(self):
"""Basic test."""
self.assertEqual(0, len(self.link))
self.link.prepend(99)
self.assertEqual(1, len(self.link))
self.link.prepend(30)
self.assertEqual(30, self.link.pop())
self.assertEqual(99, self.link.pop())
def test_stack(self):
"""Basic test."""
self.assertEqual(0, len(self.link))
self.link.prepend(99)
self.assertEqual(1, len(self.link))
self.link.prepend(30)
self.assertFalse(self.link.remove(19))
self.assertTrue(self.link.remove(30))
self.assertEqual(1, len(self.link))
self.assertFalse(self.link.remove(30))
self.assertEqual(1, len(self.link))
self.assertTrue(self.link.remove(99))
self.assertEqual(0, len(self.link))
def test_iterators(self):
"""Iterators"""
self.link.prepend(99)
self.assertEqual(1, len(self.link))
self.link.prepend(30)
self.assertTrue(30 in self.link)
self.assertFalse(15 in self.link)
def test_infinite(self):
"""Test infinite check."""
node0 = LinkedNode(0)
node1 = LinkedNode(10)
node2 = LinkedNode(20)
node3 = LinkedNode(30)
node1.next = node2
node2.next = node3
self.assertFalse(node1.checkInfinite())
node3.next = node2 # WARNING CYCLE
self.assertTrue(node1.checkInfinite())
node0.next = node1
self.assertTrue(node0.checkInfinite())
if __name__ == '__main__':
unittest.main()
| [
"edgesidemedia@gmail.com"
] | edgesidemedia@gmail.com |
449a4e9073d7775f05349340826f0d6e53ce9997 | 19da1a56f137a08772c347cf974be54e9c23c053 | /lib/adafruit_motor/servo.py | 0c46abd369009f496e2dd3f194a68ec1901f43f5 | [] | no_license | mk53202/mk53202-timeclock-pyportal | d94f45a9d186190a4bc6130077baa6743a816ef3 | 230a858d429f8197c00cab3e67dcfd3b295ffbe0 | refs/heads/master | 2021-02-04T05:38:25.533292 | 2020-02-27T22:45:56 | 2020-02-27T22:45:56 | 243,626,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,061 | py | # The MIT License (MIT)
#
# Copyright (c) 2017 Scott Shawcroft for Adafruit Industries LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_motor.servo`
====================================================
Servos are motor based actuators that incorporate a feedback loop into the design. These feedback
loops enable pulse width modulated control to determine position or rotational speed.
* Author(s): Scott Shawcroft
"""
__version__ = "2.0.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_Motor.git"
# We disable the too few public methods check because this is a private base class for the two types
# of servos.
class _BaseServo: # pylint: disable-msg=too-few-public-methods
    """Shared base class that handles pulse output based on a value between 0 and 1.0

    :param ~pulseio.PWMOut pwm_out: PWM output object.
    :param int min_pulse: The minimum pulse length of the servo in microseconds.
    :param int max_pulse: The maximum pulse length of the servo in microseconds."""
    def __init__(self, pwm_out, *, min_pulse=750, max_pulse=2250):
        self._pwm_out = pwm_out
        self.set_pulse_width_range(min_pulse, max_pulse)
    def set_pulse_width_range(self, min_pulse=750, max_pulse=2250):
        """Change min and max pulse widths."""
        # Duty cycles are 16-bit (0..0xffff): convert each pulse width in
        # microseconds into the equivalent duty at the PWM carrier frequency.
        self._min_duty = int((min_pulse * self._pwm_out.frequency) / 1000000 * 0xffff)
        max_duty = (max_pulse * self._pwm_out.frequency) / 1000000 * 0xffff
        self._duty_range = int(max_duty - self._min_duty)
    @property
    def fraction(self):
        """Pulse width expressed as fraction between 0.0 (`min_pulse`) and 1.0 (`max_pulse`).
        For conventional servos, corresponds to the servo position as a fraction
        of the actuation range. Is None when servo is disabled (pulsewidth of 0ms).
        """
        if self._pwm_out.duty_cycle == 0: # Special case for disabled servos
            return None
        return (self._pwm_out.duty_cycle - self._min_duty) / self._duty_range
    @fraction.setter
    def fraction(self, value):
        if value is None:
            self._pwm_out.duty_cycle = 0 # disable the motor
            return
        if not 0.0 <= value <= 1.0:
            raise ValueError("Must be 0.0 to 1.0")
        # Map [0.0, 1.0] linearly onto [min_duty, min_duty + duty_range].
        duty_cycle = self._min_duty + int(value * self._duty_range)
        self._pwm_out.duty_cycle = duty_cycle
class Servo(_BaseServo):
    """Control the position of a servo.
    :param ~pulseio.PWMOut pwm_out: PWM output object.
    :param int actuation_range: The physical range of motion of the servo in degrees, \
        for the given ``min_pulse`` and ``max_pulse`` values.
    :param int min_pulse: The minimum pulse width of the servo in microseconds.
    :param int max_pulse: The maximum pulse width of the servo in microseconds.
    ``actuation_range`` is an exposed property and can be changed at any time:
    .. code-block:: python
      servo = Servo(pwm)
      servo.actuation_range = 135
    The specified pulse width range of a servo has historically been 1000-2000us,
    for a 90 degree range of motion. But nearly all modern servos have a 170-180
    degree range, and the pulse widths can go well out of the range to achieve this
    extended motion. The default values here of ``750`` and ``2250`` typically give
    135 degrees of motion. You can set ``actuation_range`` to correspond to the
    actual range of motion you observe with your given ``min_pulse`` and ``max_pulse``
    values.
    .. warning:: You can extend the pulse width above and below these limits to
        get a wider range of movement. But if you go too low or too high,
        the servo mechanism may hit the end stops, buzz, and draw extra current as it stalls.
        Test carefully to find the safe minimum and maximum.
    """
    def __init__(self, pwm_out, *, actuation_range=180, min_pulse=750, max_pulse=2250):
        super().__init__(pwm_out, min_pulse=min_pulse, max_pulse=max_pulse)
        self.actuation_range = actuation_range
        """The physical range of motion of the servo in degrees."""
        # NOTE(review): the base class already stores pwm_out as self._pwm_out;
        # this second reference appears unused within this class — confirm.
        self._pwm = pwm_out
    @property
    def angle(self):
        """The servo angle in degrees. Must be in the range ``0`` to ``actuation_range``.
        Is None when servo is disabled."""
        if self.fraction is None: # special case for disabled servos
            return None
        return self.actuation_range * self.fraction
    @angle.setter
    def angle(self, new_angle):
        if new_angle is None: # disable the servo by sending 0 signal
            self.fraction = None
            return
        if new_angle < 0 or new_angle > self.actuation_range:
            raise ValueError("Angle out of range")
        # degrees -> pulse-width fraction handled by the base class
        self.fraction = new_angle / self.actuation_range
class ContinuousServo(_BaseServo):
    """Control a continuous rotation servo.
    :param int min_pulse: The minimum pulse width of the servo in microseconds.
    :param int max_pulse: The maximum pulse width of the servo in microseconds."""
    @property
    def throttle(self):
        """How much power is being delivered to the motor. Values range from ``-1.0`` (full
        throttle reverse) to ``1.0`` (full throttle forwards.) ``0`` will stop the motor from
        spinning."""
        # map the base-class pulse fraction [0, 1] onto [-1, 1]
        return self.fraction * 2 - 1
    @throttle.setter
    def throttle(self, value):
        # Bug fix: the None check must come *before* the range comparison.
        # Previously ``value > 1.0`` ran first, so ``throttle = None`` raised
        # TypeError on Python 3 and the intended ValueError was unreachable.
        if value is None:
            raise ValueError("Continuous servos cannot spin freely")
        if value > 1.0 or value < -1.0:
            raise ValueError("Throttle must be between -1.0 and 1.0")
        # map [-1, 1] back onto the base-class pulse fraction [0, 1]
        self.fraction = (value + 1) / 2
    def __enter__(self):
        return self
    def __exit__(self, exception_type, exception_value, traceback):
        # stop the motor when leaving a ``with`` block
        self.throttle = 0
    def deinit(self):
        """Stop using the servo."""
        self.throttle = 0
| [
"mkoster@stack41.com"
] | mkoster@stack41.com |
6dc332de3de9579b40a1198a92309319dbf350e9 | 07524b6bc412128785db7ef799fb99966e0d3295 | /polls/urls.py | 83af977bddeb5c55a8b0f22564ba285c594e3daf | [] | no_license | anton-khodak/gtm-app | 988770228e19c96c15b14d5aa1f099a861acf9fb | afdd0d2b566138245fd5344f33418c6aad0b856b | refs/heads/master | 2021-01-10T13:49:52.505672 | 2017-07-29T08:24:12 | 2017-07-29T08:24:12 | 51,945,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from polls.views import *
urlpatterns = [
    # url(r'^api/polls/$', UsersPollList.as_view()),
    # JSON API endpoints for polls and answers
    url(r'^api/polls/all/$', UsersPollFullList.as_view()),
    url(r'^api/polls/passed/$', UserPollPassedView.as_view()),
    url(r'^api/answers/$', UserAnswerList.as_view()),
    # list views
    url(r'^text-polls/$', TextPollsList.as_view()),
    url(r'^polls/$', PollsList.as_view()),
    # poll detail views: optional poll_element (word) and question_id (number)
    # path segments are all routed to the same PollView
    url(r'^polls/(?P<poll_id>[0-9]+)/$', PollView.as_view()),
    url(r'^polls/(?P<poll_id>[0-9]+)/(?P<poll_element>(\w)+)/$', PollView.as_view()),
    url(r'^polls/(?P<poll_id>[0-9]+)/(?P<poll_element>(\w)+)/(?P<question_id>[0-9]+)/$', PollView.as_view()),
    # same URL structure for text polls
    url(r'^text-polls/(?P<poll_id>[0-9]+)/$', PollView.as_view()),
    url(r'^text-polls/(?P<poll_id>[0-9]+)/(?P<poll_element>(\w)+)/$', PollView.as_view()),
    url(r'^text-polls/(?P<poll_id>[0-9]+)/(?P<poll_element>(\w)+)/(?P<question_id>[0-9]+)/$', PollView.as_view()),
    # url(r'^text-polls/(?P<question_id>[0-9]+)//$', TextPollView.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns) | [
"anton.khodak@ukr.net"
] | anton.khodak@ukr.net |
cb875e2e9f522227d18d0f4e5b51d3ab04e1b40f | 067b8f7180d15375a593163b44952b82544914f5 | /python/第七章 用户输入和while循环/pets.py | b446e1efde477fa67c648afe5fba7e4bf9b77dc9 | [] | no_license | WenRich666/learn-note | fb0bfdfcddba78ccb6d35837ed2c59421907b70e | d4a344396380cefd9391baede824acabc916e507 | refs/heads/master | 2020-04-13T05:36:36.505613 | 2019-01-21T08:37:20 | 2019-01-21T08:37:20 | 162,996,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | pets = ["dog","cat","dog","goldfish","cat","rabbit","cat"]
print(pets)
# list.remove() deletes only the *first* matching item, so loop until
# no "cat" remains in the list
while "cat" in pets:
    pets.remove("cat")
print(pets)
"940031354@qq.com"
] | 940031354@qq.com |
c24968234482ab1ffb8a859e5600f442fb2e4fff | 2fb75382cb8bb94ed8da382dc5843766ead6def2 | /python/xraydb/xraydb.py | 975f87f7bf9e39948a982fe26446153e5d8eb0da | [
"LicenseRef-scancode-public-domain",
"BSD-3-Clause"
] | permissive | mlevant/XrayDB | 16c677121364e163ce9c91a38c4880c0c3b6aa19 | cf2b405a60cefae961db220aca62f3fb7375b544 | refs/heads/master | 2021-05-18T01:46:21.303699 | 2019-10-09T18:38:10 | 2019-10-09T18:38:10 | 251,052,175 | 0 | 0 | NOASSERTION | 2020-03-29T14:29:43 | 2020-03-29T14:29:43 | null | UTF-8 | Python | false | false | 25,199 | py | #!/usr/bin/env python
"""
SQLAlchemy wrapping of x-ray database for data from
Elam et al, Chantler et al, Waasmaier and Kirfel
Main Class for full Database: xrayDB
"""
import os
import json
from collections import namedtuple
import numpy as np
from scipy.interpolate import UnivariateSpline
from sqlalchemy import MetaData, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import SingletonThreadPool
from .utils import elam_spline, as_ndarray
# Lightweight record types returned by the public XrayDB API:
# XrayEdge: absorption-edge energy (eV), fluorescence yield, and jump ratio
XrayEdge = namedtuple('XrayEdge', ('energy', 'fyield', 'jump_ratio'))
# XrayLine: emission energy (eV), relative intensity, and the levels involved
XrayLine = namedtuple('XrayLine', ('energy', 'intensity', 'initial_level',
                                   'final_level'))
# ElementData: atomic number, symbol, molar mass, and density of an element
ElementData = namedtuple('ElementData', ('Z', 'symbol', 'mass', 'density'))
# version of this Python wrapper; the sqlite file carries its own Version table
__version__ = '1.4'
def make_engine(dbname):
    "create engine for sqlite connection"
    # use a single-threaded connection pool, appropriate for a read-mostly
    # sqlite database file
    db_url = 'sqlite:///%s' % dbname
    return create_engine(db_url, poolclass=SingletonThreadPool)
def isxrayDB(dbname):
    """whether a file is a valid XrayDB database
    Args:
        dbname (string): name of XrayDB file
    Returns:
        bool: is file a valid XrayDB
    Notes:
        1. must be a sqlite db file, with tables named 'elements',
        'photoabsorption', 'scattering', 'xray_levels', 'Coster_Kronig',
        'Chantler', 'Waasmaier', and 'KeskiRahkonen_Krause'
    """
    _tables = ('Chantler', 'Waasmaier', 'Coster_Kronig',
               'KeskiRahkonen_Krause', 'xray_levels',
               'elements', 'photoabsorption', 'scattering')
    result = False
    try:
        engine = make_engine(dbname)
        meta = MetaData(engine)
        meta.reflect()
        # valid only if every expected table is present
        result = all(t in meta.tables for t in _tables)
    except Exception:
        # Any failure to open/reflect (missing file, not sqlite, bad schema)
        # just means "not an XrayDB".  Catch Exception, not a bare ``except:``,
        # so KeyboardInterrupt/SystemExit are not swallowed.
        pass
    return result
class XrayDB():
"""
Database of Atomic and X-ray Data
This XrayDB object gives methods to access the Atomic and
X-ray data in th SQLite3 database xraydb.sqlite.
Much of the data in this database comes from the compilation
of Elam, Ravel, and Sieber, with additional data from Chantler,
and other sources. See the documention and bibliography for
a complete listing.
"""
    def __init__(self, dbname='xraydb.sqlite', read_only=True):
        "connect to an existing database"
        # if not found as given, fall back to the copy shipped next to this module
        if not os.path.exists(dbname):
            parent, _ = os.path.split(__file__)
            dbname = os.path.join(parent, dbname)
            if not os.path.exists(dbname):
                raise IOError("Database '%s' not found!" % dbname)
        if not isxrayDB(dbname):
            raise ValueError("'%s' is not a valid X-ray Database file!" % dbname)
        self.dbname = dbname
        self.engine = make_engine(dbname)
        self.conn = self.engine.connect()
        kwargs = {}
        if read_only:
            kwargs = {'autoflush': True, 'autocommit': False}
            def readonly_flush(*args, **kwargs):
                return
            self.session = sessionmaker(bind=self.engine, **kwargs)()
            # replace flush with a no-op so accidental writes are discarded
            self.session.flush = readonly_flush
        else:
            self.session = sessionmaker(bind=self.engine, **kwargs)()
        self.metadata = MetaData(self.engine)
        self.metadata.reflect()
        self.tables = self.metadata.tables
        # cache the known element symbols, used for validation in _elem_data()
        elems = self.tables['elements'].select().execute()
        self.atomic_symbols = [e.element for e in elems.fetchall()]
    def close(self):
        "flush and close the database session"
        self.session.flush()
        self.session.close()
    def query(self, *args, **kws):
        "generic query, passed through to the underlying SQLAlchemy session"
        return self.session.query(*args, **kws)
    def get_version(self, long=False, with_history=False):
        """
        return sqlite3 database and python library version numbers
        Parameters:
            long (bool): show timestamp and notes of latest version [False]
            with_history (bool): show complete version history [False]
        Returns:
            string: version information
        """
        out = []
        rows = self.tables['Version'].select().execute().fetchall()
        # NOTE(review): assumes Version rows are stored oldest-first, so the
        # latest entry is rows[-1] — confirm against the database schema.
        if not with_history:
            rows = rows[-1:]
        if long or with_history:
            for row in rows:
                out.append("XrayDB Version: %s [%s] '%s'" % (row.tag,
                                                             row.date,
                                                             row.notes))
            out.append("Python Version: %s" % __version__)
            out = "\n".join(out)
        else:
            out = "XrayDB Version: %s, Python Version: %s" % (rows[0].tag,
                                                              __version__)
        return out
def f0_ions(self, element=None):
"""
return list of ion names supported for the .f0() function.
Parameters:
element (string, int, pr None): atomic number, symbol, or ionic symbol
of scattering element.
Returns:
list: if element is None, all 211 ions are returned.
if element is not None, the ions for that element are returned
Example:
>>> xdb = XrayDB()
>>> xdb.f0_ions('Fe')
['Fe', 'Fe2+', 'Fe3+']
Notes:
Z values from 1 to 98 (and symbols 'H' to 'Cf') are supported.
References:
Waasmaier and Kirfel
"""
wtab = self.tables['Waasmaier']
rows = self.query(wtab)
if element is not None:
elem = self.symbol(element)
rows = rows.filter(wtab.c.element == elem)
return [str(r.ion) for r in rows.all()]
    def f0(self, ion, q):
        """
        return f0(q) -- elastic X-ray scattering factor from Waasmaier and Kirfel
        Parameters:
            ion (string, int, or None): atomic number, symbol or ionic symbol
                of scattering element.
            q (float, list, ndarray): value(s) of q for scattering factors
        Returns:
            ndarray: elastic scattering factors
        Example:
            >>> xdb = XrayDB()
            >>> xdb.f0('Fe', range(10))
            array([ 25.994603  ,   6.55945765,   3.21048827,   1.65112769,
                     1.21133507,   1.0035555 ,   0.81012185,   0.61900285,
                     0.43883403,   0.27673021])
        Notes:
            q = sin(theta) / lambda, where theta = incident angle,
            and lambda = X-ray wavelength
            Z values from 1 to 98 (and symbols 'H' to 'Cf') are supported.
            The list of ionic symbols can be read with the function .f0_ions()
        References:
            Waasmaier and Kirfel
        """
        wtab = self.tables['Waasmaier']
        if isinstance(ion, int):
            # integer input: take the first table entry for that atomic number
            row = self.query(wtab).filter(wtab.c.atomic_number == ion).all()[0]
        elif ion not in self.f0_ions():
            raise ValueError('No ion {:s} from Waasmaier table'.format(repr(ion)))
        else:
            row = self.query(wtab).filter(wtab.c.ion == ion.title()).all()[0]
        q = as_ndarray(q)
        # f0(q) = offset + sum_i scale_i * exp(-exponent_i * q^2)
        f0 = row.offset
        for s, e in zip(json.loads(row.scale), json.loads(row.exponents)):
            f0 += s * np.exp(-e*q*q)
        return f0
    def _from_chantler(self, element, energy, column='f1', smoothing=0):
        """
        return energy-dependent data from Chantler table
        Parameters:
            element (string or int): atomic number or symbol.
            energy (float or ndarray): energies (in eV)
            column (string): one of f1, f2, mu_photo, mu_incoh, mu_total
            smoothing (int): smoothing factor for the f1 spline
        Notes:
            this function is meant for internal use.
        """
        ctab = self.tables['Chantler']
        elem = self.symbol(element)
        row = self.query(ctab).filter(ctab.c.element == elem).one()
        energy = as_ndarray(energy)
        emin, emax = min(energy), max(energy)
        # keep a few tabulated points beyond the requested range so the
        # interpolation is well supported at the edges
        te = np.array(json.loads(row.energy))
        nemin = max(0, -3 + max(np.where(te <= emin)[0]))
        nemax = min(len(te), 3 + max(np.where(te <= emax)[0]))
        te = te[nemin:nemax+1]
        if column == 'mu':
            column = 'mu_total'
        ty = np.array(json.loads(getattr(row, column)))[nemin:nemax+1]
        if column == 'f1':
            # f1 can change sign, so interpolate on linear values with a spline
            out = UnivariateSpline(te, ty, s=smoothing)(energy)
        else:
            # f2 and the mu columns are positive: interpolate in log-log space
            out = np.exp(np.interp(np.log(energy),
                                   np.log(te),
                                   np.log(ty)))
        # scalar input gives a scalar result
        if isinstance(out, np.ndarray) and len(out) == 1:
            out = out[0]
        return out
    def chantler_energies(self, element, emin=0, emax=1.e9):
        """
        return array of energies (in eV) at which data is
        tabulated in the Chantler tables for a particular element.
        Parameters:
            element (string or int): atomic number or symbol
            emin (float): minimum energy (in eV) [0]
            emax (float): maximum energy (in eV) [1.e9]
        Returns:
            ndarray: energies
        References:
            Chantler
        Notes:
            returns 2 energies below emin and above emax to better
            enable interpolation
        """
        ctab = self.tables['Chantler']
        elem = self.symbol(element)
        row = self.query(ctab).filter(ctab.c.element == elem).one()
        te = np.array(json.loads(row.energy))
        # widen the selection by a couple of tabulated points on each side
        if emin <= min(te):
            nemin = 0
        else:
            nemin = max(0, -1 + max(np.where(te <= emin)[0]))
        if emax > max(te):
            nemax = len(te)
        else:
            nemax = min(len(te), 2 + max(np.where(te <= emax)[0]))
        return te[nemin:nemax+1]
    def f1_chantler(self, element, energy, **kws):
        """
        returns f1 -- real part of anomalous X-ray scattering factor
        for selected input energy (or energies) in eV.
        Parameters:
            element (string or int): atomic number or symbol
            energy (float or ndarray): energies (in eV).
        Returns:
            ndarray: real part of anomalous scattering factor
        References:
            Chantler
        """
        return self._from_chantler(element, energy, column='f1', **kws)
    def f2_chantler(self, element, energy, **kws):
        """
        returns f2 -- imaginary part of anomalous X-ray scattering factor
        for selected input energy (or energies) in eV.
        Parameters:
            element (string or int): atomic number or symbol
            energy (float or ndarray): energies (in eV).
        Returns:
            ndarray: imaginary part of anomalous scattering factor
        References:
            Chantler
        """
        return self._from_chantler(element, energy, column='f2', **kws)
    def mu_chantler(self, element, energy, incoh=False, photo=False):
        """
        returns X-ray mass attenuation coefficient, mu/rho in cm^2/gr
        for selected input energy (or energies) in eV.
        default is to return total attenuation coefficient.
        Parameters:
            element (string or int): atomic number or symbol
            energy (float or ndarray): energies (in eV).
            photo (bool): return only the photo-electric contribution [False]
            incoh (bool): return only the incoherent contribution [False]
        Returns:
            ndarray: mass attenuation coefficient in cm^2/gr
        References:
            Chantler
        """
        # photo takes precedence over incoh if both flags are set
        col = 'mu_total'
        if photo:
            col = 'mu_photo'
        elif incoh:
            col = 'mu_incoh'
        return self._from_chantler(element, energy, column=col)
    def _elem_data(self, element):
        "return data from elements table: internal use"
        etab = self.tables['elements']
        row = self.query(etab)
        if isinstance(element, int):
            row = row.filter(etab.c.atomic_number == element).one()
        else:
            # symbols are stored title-cased; validate against the cached list
            elem = element.title()
            if not elem in self.atomic_symbols:
                raise ValueError("unknown element '%s'" % repr(elem))
            row = row.filter(etab.c.element == elem).one()
        return ElementData(int(row.atomic_number),
                           row.element.title(),
                           row.molar_mass, row.density)
    def atomic_number(self, element):
        """
        return element's atomic number
        Parameters:
            element (string or int): atomic number or symbol
        Returns:
            integer: atomic number
        """
        return self._elem_data(element).Z
    def symbol(self, element):
        """
        return element symbol
        Parameters:
            element (string or int): atomic number or symbol
        Returns:
            string: element symbol
        """
        return self._elem_data(element).symbol
    def molar_mass(self, element):
        """
        return molar mass of element
        Parameters:
            element (string or int): atomic number or symbol
        Returns:
            float: molar mass of element in amu
        """
        return self._elem_data(element).mass
    def density(self, element):
        """
        return density of pure element
        Parameters:
            element (string or int): atomic number or symbol
        Returns:
            float: density of element in gr/cm^3
        """
        return self._elem_data(element).density
    def xray_edges(self, element):
        """
        returns dictionary of X-ray absorption edge energy (in eV),
        fluorescence yield, and jump ratio for an element.
        Parameters:
            element (string or int): atomic number or symbol
        Returns:
            dictionary: keys of edge (iupac symbol), and values of
                        XrayEdge namedtuple of (energy, fyield, edge_jump))
        References:
            Elam, Ravel, and Sieber.
        """
        elem = self.symbol(element)
        ltab = self.tables['xray_levels']
        out = {}
        for r in self.query(ltab).filter(ltab.c.element == elem).all():
            out[str(r.iupac_symbol)] = XrayEdge(r.absorption_edge,
                                                r.fluorescence_yield,
                                                r.jump_ratio)
        return out
    def xray_edge(self, element, edge):
        """
        returns XrayEdge for an element and edge
        Parameters:
            element (string or int): atomic number or symbol
            edge (string): X-ray edge
        Returns:
            XrayEdge: namedtuple of (energy, fyield, edge_jump)),
            or None if the element has no such edge
        Example:
            >>> xdb = XrayDB()
            >>> xdb.xray_edge('Co', 'K')
            XrayEdge(edge=7709.0, fyield=0.381903, jump_ratio=7.796)
        References:
            Elam, Ravel, and Sieber.
        """
        return self.xray_edges(element).get(edge.title(), None)
    def xray_lines(self, element, initial_level=None, excitation_energy=None):
        """
        returns dictionary of X-ray emission lines of an element, with
        Parameters:
            initial_level (string or list/tuple of string): initial level(s) to
                limit output.
            excitation_energy (float): energy of excitation, limit output those
                excited by X-rays of this energy (in eV).
        Returns:
            dictionary: keys of lines (Siegbahn symbol), values of Xray Lines
        Notes:
            if both excitation_energy and initial_level are given, excitation_level
            will limit output
        Example:
            >>> xdb = XrayDB()
            >>> for key, val in xdb.xray_lines('Ga', 'K').items():
            >>>      print(key, val)
            'Ka3', XrayLine(energy=9068.0, intensity=0.000326203,
                            initial_level=u'K', final_level=u'L1')
            'Ka2', XrayLine(energy=9223.8, intensity=0.294438,
                            initial_level=u'K', final_level=u'L2')
            'Ka1', XrayLine(energy=9250.6, intensity=0.57501,
                            initial_level=u'K', final_level=u'L3')
            'Kb3', XrayLine(energy=10263.5, intensity=0.0441511,
                            initial_level=u'K', final_level=u'M2')
            'Kb1', XrayLine(energy=10267.0, intensity=0.0852337,
                            initial_level=u'K', final_level=u'M3')
            'Kb5', XrayLine(energy=10348.3, intensity=0.000841354,
                            initial_level=u'K', final_level=u'M4,5')
        References:
            Elam, Ravel, and Sieber.
        """
        elem = self.symbol(element)
        ttab = self.tables['xray_transitions']
        row = self.query(ttab).filter(ttab.c.element == elem)
        if excitation_energy is not None:
            # keep only levels whose absorption edge lies below the
            # excitation energy (this overrides any given initial_level)
            initial_level = []
            for ilevel, dat in self.xray_edges(elem).items():
                if dat[0] < excitation_energy:
                    initial_level.append(ilevel.title())
        if initial_level is not None:
            if isinstance(initial_level, (list, tuple)):
                row = row.filter(ttab.c.initial_level.in_(initial_level))
            else:
                row = row.filter(ttab.c.initial_level == initial_level.title())
        out = {}
        for r in row.all():
            out[str(r.siegbahn_symbol)] = XrayLine(r.emission_energy, r.intensity,
                                                   r.initial_level, r.final_level)
        return out
    def xray_line_strengths(self, element, excitation_energy=None):
        """
        return the absolute line strength in cm^2/gr for all available lines
        Parameters:
            element (string or int): Atomic symbol or number for element
            excitation_energy (float): incident energy, in eV
        Returns:
            dictionary: elemental line with fluorescence cross section in cm2/gr.
        References:
            Elam, Ravel, and Sieber.
        """
        out = {}
        lines = self.xray_lines(element, excitation_energy=excitation_energy)
        for label, eline in lines.items():
            edge = self.xray_edge(element, eline.initial_level)
            if edge is None and ',' in eline.initial_level:
                # combined levels like 'M4,5' fall back to the first sub-level
                ilevel, _ = eline.initial_level.split(',')
                edge = self.xray_edge(element, ilevel)
            if edge is not None:
                # line strength = (jump in photo cross section across the edge)
                #                 * emission intensity * fluorescence yield
                mu = self.mu_elam(element, [edge.energy*(0.999),
                                            edge.energy*(1.001)], kind='photo')
                out[label] = (mu[1]-mu[0]) * eline.intensity * edge.fyield
        return out
    def ck_probability(self, element, initial, final, total=True):
        """
        return Coster-Kronig transition probability for an element and
        initial/final levels
        Parameters:
            element (string or int): Atomic symbol or number for element
            initial (string): initial level
            final (string): final level
            total (bool): whether to return total or partial probability
        Returns:
            float: transition probability (0.0 if no entry is found)
        Example:
            >>> xdb = XrayDB()
            >>> xdb.ck_probability('Cu', 'L1', 'L3', total=True)
            0.681
        References:
            Elam, Ravel, and Sieber.
        """
        elem = self.symbol(element)
        ctab = self.tables['Coster_Kronig']
        row = self.query(ctab).filter(ctab.c.element == elem)
        row = row.filter(ctab.c.initial_level == initial.title())
        row = row.filter(ctab.c.final_level == final.title()).all()
        out = 0.0
        if len(row) > 0:
            row = row[0]
            out = row.transition_probability
            if total:
                out = row.total_transition_probability
        return out
    def corehole_width(self, element, edge=None, use_keski=False):
        """
        returns core hole width for an element and edge
        Parameters:
            element (string, integer): atomic number or symbol for element
            edge (string or None): edge for hole, return all if None
            use_keski (bool) : force use of KeskiRahkonen and Krause table for all data.
        Returns:
            float: corehole width in eV (or a list of (edge, width) pairs
            when edge is None or multiple rows match)
        Notes:
            Uses Krause and Oliver where data is available (K, L lines Z > 10)
            Uses Keski-Rahkonen and Krause otherwise
        References:
            Krause and Oliver, 1979
            Keski-Rahkonen and Krause, 1974
        """
        # prefer the newer corelevel_widths table; fall back to the
        # Keski-Rahkonen/Krause table for old database files or on request
        version_qy = self.tables['Version'].select().order_by('date')
        version_id = version_qy.execute().fetchall()[-1].id
        ctab = self.tables['corelevel_widths']
        if version_id < 4 or use_keski:
            ctab = self.tables['KeskiRahkonen_Krause']
        rows = self.query(ctab).filter(ctab.c.element == self.symbol(element))
        if edge is not None:
            rows = rows.filter(ctab.c.edge == edge.title())
        result = rows.all()
        if len(result) == 1:
            result = result[0].width
        else:
            result = [(r.edge, r.width) for r in result]
        return result
    def cross_section_elam(self, element, energies, kind='photo'):
        """
        returns Elam Cross Section values for an element and energies
        Parameters:
            element (string or int): atomic number or symbol for element
            energies (float or ndarray): energies (in eV) to calculate cross-sections
            kind (string): one of 'photo', 'coh', and 'incoh' for photo-absorption,
                coherent scattering, and incoherent scattering cross sections,
                respectively. Default is 'photo'.
        Returns:
            ndarray of scattering data
        References:
            Elam, Ravel, and Sieber.
        """
        elem = self.symbol(element)
        kind = kind.lower()
        if kind not in ('coh', 'incoh', 'photo'):
            raise ValueError('unknown cross section kind=%s' % kind)
        stab = self.tables['scattering']
        if kind == 'photo':
            stab = self. tables['photoabsorption']
        row = self.query(stab).filter(stab.c.element == elem).all()[0]
        # tables hold log(energy) and log(value) plus spline coefficients
        tab_lne = np.array(json.loads(row.log_energy))
        if kind.startswith('coh'):
            tab_val = np.array(json.loads(row.log_coherent_scatter))
            tab_spl = np.array(json.loads(row.log_coherent_scatter_spline))
        elif kind.startswith('incoh'):
            tab_val = np.array(json.loads(row.log_incoherent_scatter))
            tab_spl = np.array(json.loads(row.log_incoherent_scatter_spline))
        else:
            tab_val = np.array(json.loads(row.log_photoabsorption))
            tab_spl = np.array(json.loads(row.log_photoabsorption_spline))
        # clamp requested energies to just above the table minimum so the
        # spline evaluation is never asked to extrapolate below the data
        en = 1.0*as_ndarray(energies)
        emin_tab = 10*int(0.102*np.exp(tab_lne[0]))
        en[np.where(en < emin_tab)] = emin_tab
        out = np.exp(elam_spline(tab_lne, tab_val, tab_spl, np.log(en)))
        # scalar input gives a scalar result
        if len(out) == 1:
            return out[0]
        return out
def mu_elam(self, element, energies, kind='total'):
"""
returns attenuation cross section for an element at energies (in eV)
Parameters:
element (string or int): atomic number or symbol for element
energies (float or ndarray): energies (in eV) to calculate cross-sections
kind (string): one of 'photo' or 'total' for photo-electric or
total attenuation, respectively. Default is 'total'.
Returns:
ndarray of scattering values in units of cm^2/gr
References:
Elam, Ravel, and Sieber.
"""
calc = self.cross_section_elam
kind = kind.lower()
if kind.startswith('tot'):
xsec = calc(element, energies, kind='photo')
xsec += calc(element, energies, kind='coh')
xsec += calc(element, energies, kind='incoh')
elif kind.startswith('photo'):
xsec = calc(element, energies, kind='photo')
elif kind.lower().startswith('coh'):
xsec = calc(element, energies, kind='coh')
elif kind.lower().startswith('incoh'):
xsec = calc(element, energies, kind='incoh')
else:
raise ValueError('unknown cross section kind=%s' % kind)
return xsec
    def coherent_cross_section_elam(self, element, energies):
        """returns coherent scattering cross section for an element
        at energies (in eV)
        returns values in units of cm^2 / gr
        arguments
        ---------
        element:  atomic number, atomic symbol for element
        energies: energies in eV to calculate cross-sections
        Data from Elam, Ravel, and Sieber.
        """
        return self.cross_section_elam(element, energies, kind='coh')
    def incoherent_cross_section_elam(self, element, energies):
        """returns incoherent scattering cross section for an element
        at energies (in eV)
        returns values in units of cm^2 / gr
        arguments
        ---------
        element:  atomic number, atomic symbol for element
        energies: energies in eV to calculate cross-sections
        Data from Elam, Ravel, and Sieber.
        """
        return self.cross_section_elam(element, energies, kind='incoh')
| [
"newville@cars.uchicago.edu"
] | newville@cars.uchicago.edu |
839102efc0192ae4e85bf57901922beb79119de4 | 6579ba67f84674521af0771fd0b79247c13542c9 | /fibonacci1.py | 147200d3bd8b1c4b81b39236eb50696b61f2e71d | [] | no_license | juandiego26/python-lab | f3f8c248119e3c0b8e4297daffda56f3367bd579 | 1bf17c5ef738ae3fde84d655a758c02770a2d50e | refs/heads/master | 2020-05-19T18:58:56.011276 | 2019-05-13T16:05:52 | 2019-05-13T16:05:52 | 185,167,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,412 | py | def fibonacci(max):
#max = max
print(max)
print('')
a, b = 0, 1
while a < max:
yield a
print(a)#el valor de retorno
a, b = b, a+b
fib1 = fibonacci(20)
fib_nums = [num for num in fib1]
print(fib_nums)  # show the generated list
print('*'.center(50,'*'))
double_fib_nums = [num * 2 for num in fib1]  # will NOT work (fib1 values not shown): fib1 was exhausted above
print(double_fib_nums)  # shows an empty list because fib1 has no values left
double_fib_nums = [num * 2 for num in fibonacci(30)]  # DOES work: fibonacci(x) is called again, each value multiplied by 2
print(double_fib_nums)  # shows the freshly generated values
print('')
print('Ejemplos extra'.center(50,'-'))
## extra examples
# define a list
lista = [4, 7, 0, 3]
# get an iterator using iter()
mi_iterador = iter(lista)  # create the iterator object
## iterate through it using next()
# prints 4
print(next(mi_iterador))  # shows the next element of the list
# prints 7
print(next(mi_iterador))  # shows the next element of the list
## next(obj) is same as obj.__next__()
# prints 0
print(mi_iterador.__next__())  # shows the next element of the list
# prints 3
print(mi_iterador.__next__())  # shows the next element of the list
## This will raise error, no items left
next(mi_iterador)  # raises StopIteration because no elements remain
"silgajuandiego@gmail.com"
] | silgajuandiego@gmail.com |
1a53c172d91fd40f1f2114534a29a26f0ffc39a0 | 23ba87e51028f2bb32d95cbb3849a4181b8c455a | /docs/conf.py | bfedc049bf7dbd475e45e1a789dc9e47ed9e7481 | [] | no_license | mdhor/2021sp-final-project-mdhor | 16070a3da7eed90a61cde153b38a2f3002714d1e | bb836a788ff7dd0149714525516436b5b1f8a442 | refs/heads/master | 2023-04-23T07:21:59.280737 | 2021-05-10T13:12:39 | 2021-05-10T13:17:30 | 361,442,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,532 | py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import django
from sphinx.ext.apidoc import main
# make the project root importable so autodoc/apidoc can find the packages
sys.path.insert(0, os.path.abspath(".."))
# -- Django setup ------------------------------------------------------------
# Django must be configured before autodoc can import any app modules.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
django.setup()
# -- Project information -----------------------------------------------------
project = "Final Project, Mattias Hornum, CSCI E-29"
copyright = "2021, Mattias Hornum"
author = "Mattias Hornum"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.napoleon"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Run sphinx-apidoc at build time (-e: one page per module, -f: force
# overwrite, -o .: write .rst stubs here) for the package at "..", excluding
# the listed paths (tests, migrations, config, etc.).
main(
    [
        "-e",
        "-f",
        "-o",
        ".",
        "..",
        "../final_project/contrib*",
        "../final_project/conftest*",
        "../final_project/users*",
        "../final_project/tests*",
        "../final_project/utils*",
        "../prisjakt/migrations*",
        "../conftest*",
        "../manage*",
        "../config*",
        "../modules*",
    ]
)
| [
"mattiashornum@gmail.com"
] | mattiashornum@gmail.com |
f0b62c69b7243042d00f0401f6e56e363a4ac84f | d9da2513667876ed15a7423b830e35ebb345879f | /vis/jac_vis.py | 324b098270e7422b520f91146b85090f757598b7 | [] | no_license | UltimateJupiter/Epoch_Double_Descent | 0bcb710476cb42c9a3c119891f74491d29f9fd35 | 418f8a4b8d95cdb1c377863d66018c2308c68783 | refs/heads/master | 2023-04-15T23:00:57.697152 | 2021-04-23T06:36:16 | 2021-04-23T06:36:16 | 357,182,164 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.use("Agg")
import numpy as np
def plot_jac_svd(D, splitted_norms, slices, layer_names, pic_name):
    """Plot per-layer Jacobian singular-value norms, raw and normalized.

    Saves a two-panel scatter figure to ``./figs/<pic_name>.jpg``: the left
    panel shows raw norms against the singular values, the right panel shows
    the same norms divided by sqrt(layer size).

    Parameters:
        D: singular values (x-axis values, shared by all layers)
        splitted_norms: per-layer arrays of norms, parallel to layer_names
        slices: per-layer (start, end) index pairs; end - start gives the
            layer size used for normalization in the right panel
        layer_names: legend labels, parallel to splitted_norms
        pic_name: figure title, also used as the output file name
    """
    plt.figure(figsize=(10, 5))
    # left panel: raw norms
    plt.subplot(121)
    for i, norms in enumerate(splitted_norms):
        plt.scatter(D, norms, label=layer_names[i])
    plt.xlabel(r"$\sigma_i$")
    plt.title(r"$||\cdot||$")
    plt.legend()
    # right panel: norms normalized by sqrt of the layer's size
    plt.subplot(122)
    for i, norms in enumerate(splitted_norms):
        s, e = slices[i]
        plt.scatter(D, norms / np.sqrt(e - s), label=layer_names[i])
    plt.xlabel(r"$\sigma_i$")
    plt.title(r"$||\cdot||\ normalized$")
    plt.legend()
    plt.suptitle(pic_name)
    plt.savefig("./figs/{}.jpg".format(pic_name))
    # Close the figure so repeated calls do not accumulate open figures
    # (matplotlib warns and leaks memory after ~20 open figures under Agg).
    plt.close()
| [
"xingyu.zhu@duke.edu"
] | xingyu.zhu@duke.edu |
2af6992830f3f1ef19f0236bfb1b4c45c43943bf | e1c4c9b40287f711bb2e27b7d4062263d38389c8 | /docs/conf.py | be67ebfd82633fa231c53daa4a2c709c6fceb810 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | vladvasiliu/django-aws-utils | 962c0cf31417dfcab94527c62ed2939d1947567e | 18d73c4f9802b4073d3ed244f462e13b8e03f951 | refs/heads/master | 2023-02-05T18:16:09.955565 | 2020-12-22T18:20:44 | 2020-12-22T18:20:44 | 323,386,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,347 | py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import os
import sys
from pkg_resources import get_distribution
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath("../"))
sys.path.insert(0, os.path.abspath("../django_aws_utils"))
# -- Project information -----------------------------------------------------
project = "Django AWS Utils"
copyright = "2020, Vlad Vasiliu"
author = "Vlad Vasiliu"
# Full version string is read from the installed package metadata, so the
# package must be installed (e.g. `pip install -e .`) before building docs.
release = get_distribution("django-aws-utils").version
# for example take major/minor
version = ".".join(release.split(".")[:2])
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["releases", "sphinx_rtd_theme", "autoapi.extension"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
trim_footnote_reference_space = True
master_doc = "index"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
pygments_style = "sphinx"
# sphinx-autoapi: generate API docs directly from the package sources.
autoapi_type = "python"
autoapi_dirs = ["../django_aws_utils"]
| [
"vladvasiliun@yahoo.fr"
] | vladvasiliun@yahoo.fr |
d80b2863e81ce3a7c7d437c7ded6c8d02fa5ba45 | 181eb5c111a39e236b1130c048ce05aa55c8bdbd | /imagemanipulation1.py | e5351518253d9296dc4f51c4f3ed51d2aa82c2cf | [] | no_license | mbaseman1918/testrep | 5edd0992a92bc31913865960f3d999d1e9bcff39 | 90fe5cfb2d06f2eda58c2c1a2269cdc0dc153ddc | refs/heads/master | 2020-03-21T16:55:24.602950 | 2018-08-22T17:42:51 | 2018-08-22T17:42:51 | 138,802,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | from PIL import Image
# Convert the image to grayscale in place by writing the luma value back into
# all three channels (the image mode itself is not changed).
img = Image.open("basemanphoto.jpg")
width, height = img.size
pixels = img.load()  # pixel-access object: pixels[x, y] -> channel tuple
for i in range(width):
    for j in range(height):
        r, g, b = pixels[i,j]
        # ITU-R BT.601 luma weights for RGB -> grayscale.
        Y = 0.299*r + 0.587*g + 0.114*b
        r = int(Y)
        g = int(Y)
        b = int(Y)
        pixels[i, j] = (r, g, b)
img.show()
# NOTE(review): assumes the file decodes to 3-tuple RGB pixels; an RGBA or
# palette image would break the 3-way unpack -- confirm the input format.
| [
"39381570+mbaseman1918@users.noreply.github.com"
] | 39381570+mbaseman1918@users.noreply.github.com |
1d12443b25111882fb8b6f7c4b3b0faff8eaae87 | 1ff4f977c34a16d79a1f3c05c2e1767a0742fd29 | /apps/myApp/views.py | e9509d65f2b78fb3da37a081f190502c5b8614d2 | [] | no_license | igleciasjoseph/wishListRepo | 1e27cc319aba15c0d4950e34ef9d48df83406b58 | 371cbc8c193ca680cc3540af083ff4aa53c8a74c | refs/heads/master | 2020-05-17T04:57:40.925536 | 2019-04-25T23:04:43 | 2019-04-25T23:04:43 | 183,521,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,103 | py | from django.shortcuts import render, HttpResponse, redirect
from django.contrib import messages
from .models import *
import bcrypt
def index(request):
    """Render the landing page with the login/registration forms."""
    # User.objects.all().delete()
    return render(request, 'myApp/index.html')
def register(request):
    """Create a new user from the registration POST and log them in.

    Validation errors (a dict of field -> message, per the usage below) are
    flashed and the user is bounced back to the landing page.
    """
    errors = User.objects.reg_validator(request.POST)
    if len(errors) > 0:
        for key, value in errors.items():
            messages.error(request, value)
        return redirect('/')
    else:
        # Store only the bcrypt hash, never the raw password.
        hash = bcrypt.hashpw(
            request.POST['password'].encode(), bcrypt.gensalt()).decode()
        user = User.objects.create(
            name=request.POST['name'], username=request.POST['username'], datehired = request.POST['datehired'], password=hash)
        # Session id doubles as the "logged in" marker checked by dashboard().
        request.session['id'] = user.id
        return redirect('/dashboard')
def login(request):
    """Authenticate an existing user and start their session."""
    errors = User.objects.login_validator(request.POST)
    if len(errors) > 0:
        for key, value in errors.items():
            messages.error(request, value)
        return redirect('/')
    else:
        # login_validator is presumed to have checked the password already --
        # this lookup only fetches the row. TODO confirm in the manager code.
        user = User.objects.get(username=request.POST['username'])
        request.session['id'] = user.id
        return redirect('/dashboard')
def dashboard(request):
    """Show the user's wish list plus all items they have not joined."""
    if 'id' not in request.session:
        messages.error(request, 'Please log in to enter')
        return redirect('/')
    else:
        user = User.objects.get(id=request.session['id'])
        allitems = List.objects.all()
        myitems = user.lists.all()
        # Items created/joined by others that this user can still add.
        otheritems = allitems.difference(myitems)
        context = {
            'user': user,
            'myitems': myitems,
            'otheritems': otheritems,
        }
        return render(request, 'myApp/dashboard.html', context)
def additem(request):
    """Render the add-item form page."""
    return render(request, 'myApp/additem.html')
def create(request):
    """Create a new wish-list item owned by the current user."""
    user = User.objects.get(id=request.session['id'])
    errors = List.objects.list_validator(request.POST)
    if len(errors) > 0:
        for key, value in errors.items():
            messages.error(request, value)
        return redirect('/additem')
    else:
        item = List.objects.create(item=request.POST['item'], creator=user)
        # The creator is also automatically a joiner of their own item.
        user.lists.add(item)
        return redirect('/dashboard')
def add(request, items_id):
    """Join an existing item: link it to the current user's list."""
    user = User.objects.get(id=request.session['id'])
    item = List.objects.get(id=items_id)
    user.lists.add(item)
    return redirect('/dashboard')
def displayitem(request, item_id):
    """Show one item with everyone who joined it (excluding the creator)."""
    item = List.objects.get(id=item_id)
    user = User.objects.get(id=item.creator.id)
    joiners = item.users.all().exclude(id=user.id)
    context = {
        'item': item,
        'joiners': joiners,
    }
    return render(request, 'myApp/itempage.html', context)
def delete(request, item_id):
    """Delete an item outright.

    NOTE(review): unlike dashboard(), there is no session or ownership check
    here -- any request can delete any item. Confirm whether that is intended.
    """
    item_to_delete = List.objects.get(id=item_id)
    item_to_delete.delete()
    return redirect('/dashboard')
def remove(request, item_id):
    """Un-join an item: unlink it from the current user's list."""
    item = List.objects.get(id=item_id)
    user = User.objects.get(id=request.session['id'])
    user.lists.remove(item)
    return redirect('/dashboard')
def logout(request):
    """Clear the whole session and return to the landing page."""
    request.session.clear()
    return redirect('/')
| [
"igleciasjoseph@gmail.com"
] | igleciasjoseph@gmail.com |
e716e576229b4be0e8e202d22c9db7f31a4d0675 | 9b0f0345129001616ea456ee856597c6ed06ba7a | /basic/2017/p2.py | 99f46e002a08cce7cba98c32a0f5afeddabbb47e | [] | no_license | Abinash04/Python | 88366200bbdcc2e93a2168469ca891b23fd5e890 | 7a920348de69fbac35df7585f57335547b36d8e6 | refs/heads/master | 2023-07-06T11:10:17.999698 | 2023-07-04T19:54:43 | 2023-07-04T19:54:43 | 95,747,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | #Ask the user for a number. Depending on whether the number is even
#or odd, print out an appropriate message to the user.
try:
num = int(raw_input('Enter a number \n'))
except:
flag = 1
while(flag):
print 'Enter a valid number..'
num = raw_input('Enter a number \n')
if num.isdigit():
break
else:
flag += 1
if int(num) % 2 ==0:
print 'The number', num, ' is an even number'
else:
print 'The number', num, ' is an odd number'
| [
"Abinash.behera04@gmail.com"
] | Abinash.behera04@gmail.com |
8fa6e7131a0816841e1b93a970778de8b6e032d7 | f4999995e75d4a249ef54fcfa5eb2a4a7f384c04 | /Expenses/test_expenses.py | ebb6210da10a41efcfb3d40241299221fe229a01 | [] | no_license | bomendez/bomendez.github.io | 29babe6cd6f91c573bb4685cae956b2a520bc971 | 37c8437631ee6af94964493b3f68c926012985ec | refs/heads/master | 2021-11-17T15:45:40.487929 | 2021-10-04T21:31:06 | 2021-10-04T21:31:06 | 159,450,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 974 | py | from expenses import (get_actual_trip_cost, get_actual_mileage_rate,
get_reimbursement_amount, calculate_mileage)
def test_get_actual_trip_cost():
    """Zero/degenerate trips cost nothing; only real overage is billed."""
    assert(get_actual_trip_cost(0, 0, 0, 0) == 0.0)
    assert(get_actual_trip_cost(0, 1, 1, 1) == 0.0)
    assert(get_actual_trip_cost(10, 5, 100, 2) == 0.0)
    assert(get_actual_trip_cost(1000, 1036, 36, 3.09) == 3.09)
def test_get_actual_mileage_rate():
    """Rate is 0 for zero miles or negative cost, else cost / miles."""
    assert(get_actual_mileage_rate(0, 10) == 0.0)
    assert(get_actual_mileage_rate(100, -10) == 0.0)
    assert(get_actual_mileage_rate(24, 2.99) == 0.1246)
def test_get_reimbursement_amount():
    """Reimbursement scales with miles (10 miles -> 5.75)."""
    assert(get_reimbursement_amount(0) == 0.00)
    assert(get_reimbursement_amount(10) == 5.75)
def test_calculate_mileage():
    """Mileage is the positive odometer delta, never negative."""
    assert(calculate_mileage(0, 0) == 0)
    assert(calculate_mileage(0, 10) == 0)
    assert(calculate_mileage(10, 9) == 0)
    assert(calculate_mileage(100, 0) == 0)
    assert(calculate_mileage(1000, 1010) == 10)
| [
"mendez.bo@northeastern.edu"
] | mendez.bo@northeastern.edu |
61bd1a05d01646eacb38eeb8de6d27a1fd8fed0f | d781fb58b8e0e65b080162a960784f690d28a840 | /post/migrations/0001_initial.py | 8d281a66b0394eaea77de497293f913bde0f32b3 | [] | no_license | songulkarahan/django-blog | e49f1e880ac6eaddbf2856ea438b896f774dc56e | 084d49af43a7cb158ab1003d60a6daf522d2c73a | refs/heads/master | 2022-10-03T19:27:39.936574 | 2020-06-08T03:54:49 | 2020-06-08T03:54:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2020-04-17 17:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Post table."""
    # First migration of this app -- no predecessors.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=120)),
                ('content', models.TextField()),
                # No auto_now/auto_now_add: publishing_date must be set explicitly.
                ('publishing_date', models.DateTimeField()),
            ],
        ),
    ]
| [
"songul.karahan@bil.omu.edu.tr"
] | songul.karahan@bil.omu.edu.tr |
751b7d53431b7c9cfcb6013b00156c22e235fe27 | 8ef46cddc8a99f3ec57215b7e3e4437784d8da4a | /ITP1/ITP1_4_C.py | 8cd75db003d1f7687e43b5e3ba47744e14f2cec7 | [] | no_license | ctf4bba/Aizu-Online-Judge | aecab5118f2c982457288ba4d35c57d9a30f794e | e483a7a66319ce840f776f9caf4cc26400c01785 | refs/heads/master | 2023-01-06T02:15:59.417193 | 2020-10-24T13:49:02 | 2020-10-24T13:49:02 | 296,346,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | while True:
a, op, b = input().split()
if op == '+':
print(int(a) + int(b))
elif op == '-':
print(int(a) - int(b))
elif op == '*':
print(int(a) * int(b))
elif op == '/':
print(int(a) // int(b))
else:
break
| [
"marchosias515@gmail.com"
] | marchosias515@gmail.com |
343b784858067494c7511d5345bccb020965f8a5 | 8448d0697c5040096b36bf20f237b8222a80a4c7 | /generators.py | 1360024a0aa21107f768de998f338c0bc513efde | [] | no_license | sindaakyil/python | db7b67a53f4b8e07977bc190255615c17d436073 | 6d66e6ddf003b63b5a3e267f2f883f67d24bc1d8 | refs/heads/main | 2023-04-01T21:27:29.597711 | 2021-04-06T20:12:04 | 2021-04-06T20:12:04 | 340,478,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | def coube():
result=[]
for i in range(1,5,1):
result.append(i**3)
return result
print(coube())
# Why use generators: when we do not need to keep a computed value in a list
# and only need each element at the moment it is produced (never again later),
# a generator avoids storing the whole sequence.
# Tutorial file: `cube` is deliberately redefined several times to show the
# same generator consumed in different ways.
def cube():
    for i in range(5):
        yield i**3
# Consume manually via the iterator protocol (only the first four values).
generator = cube()
iterator=iter(generator)
print(next(iterator))
print(next(iterator))
print(next(iterator))
print(next(iterator))
def cube():
    for i in range(5):
        yield i**3
# Consume with a for loop over the generator object.
iterator= cube()
for i in iterator:
    print(i)
def cube():
    for i in range(5):
        yield i**3
for i in cube():
print(i) | [
"73520969+sindaakyil@users.noreply.github.com"
] | 73520969+sindaakyil@users.noreply.github.com |
0d160ba199dab7e9a7dc75d135a93b6079e79e19 | 2d3a7258d62795117ffe4be5b9fb97a2e5c0bcd9 | /interpreter/Mparser.py | 186ff3042a776d02cd481e0290be9f9bd39c7f85 | [] | no_license | kosjakub/Interpreter | 511765e69c30b30532a6618ec453ca39f8a99372 | 36c378e00c8d639d5c7e8be851094bd4e8075280 | refs/heads/main | 2023-04-14T09:29:45.447456 | 2021-04-26T16:35:23 | 2021-04-26T16:35:23 | 361,820,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,580 | py | import scanner
import ply.yacc as yacc
import time
import AST as c
import TreePrinter
import re
tokens = scanner.tokens
# yacc operator precedence/associativity table, lowest precedence first.
# "then" is a fictitious token referenced via %prec in p_selection_statement
# to resolve the dangling-else ambiguity.
precedence = (
    ("right", ","),
    ("right", "ADDASSIGN", "SUBASSIGN", "MULASSIGN", "DIVASSIGN", "="),
    ("left", "EQ", "NOTEQ", "GREATEREQ", "LESSEREQ", ">", "<"),
    ('left', '+', '-', 'DOTADD', 'DOTMINUS'),
    ('left', '*', '/', 'DOTMUL', 'DOTDIV'),
    ("right", "'", "then", "ELSE"),
    ("nonassoc", "{", "}")
)
def p_program(p):
    '''program : statement_list '''
    # The program node is simply the flattened list of top-level statements.
    # (Dead commented-out c.Statements wrapper removed.)
    p[0] = p[1]
def p_primary_expression(p):
    '''primary_expression : ID
                    | INTNUM
                    | FLOATNUM
                    | NORMSTRING
                    | '(' expression ')'
                    | '[' ']'
                    | '[' index_expression_list ']' '''
    # Multi-symbol productions: bracketed matrix literals.
    # NOTE(review): the '(' expression ')' production also lands in this
    # branch but matches neither bracket case, leaving p[0] unset (None);
    # it probably should be p[0] = p[2] -- confirm against the test suite.
    if len(p) > 2:
        if p[1] == '[' and p[2] == ']':
            # Empty matrix literal is kept as the sentinel string "[]".
            p[0] = "[]"
        elif p[1] == '[' and p[3] == ']':
            # Wrap a flat element list into a single-row Matrix if the
            # index_expression_list did not already produce Vector rows.
            p[0] = c.Matrix(p[2] if isinstance(p[2][0], c.Vector) else [c.Vector(p[2])], p)
    # Quoted string literal (NORMSTRING token value includes the quotes).
    elif isinstance(p[1], str) and re.compile(r'"([^"\n]|(\\"))*"').match(p[1]):
        p[0] = c.Id(p[1])
    # Bare identifier.
    elif isinstance(p[1], str):
        p[0] = c.Variable(p[1], p)
    # Numeric literal.
    elif isinstance(p[1], (int, float)) :
        p[0] = c.Constant(p[1])
def p_postfix_expression(p):
    '''postfix_expression : primary_expression
                    | array_expression
                    | postfix_expression "'" '''
    if len(p) == 2:
        p[0] = p[1]
    else:
        # Trailing apostrophe is the transpose operator.
        p[0] = c.UnaryExp("'", p[1], p)
def p_index_expression(p):
    '''index_expression : ':'
                    | expression '''
    # Either the ':' slice marker (kept as the raw token) or an expression.
    p[0] = p[1]
def p_index_expression_list(p):
    '''index_expression_list : index_expression
                    | index_expression_list ',' index_expression
                    | index_expression_list ';' index_expression '''
    # Builds either a flat list of index expressions (comma-separated) or a
    # list of Vector rows once a ';' has been seen (matrix-literal style).
    # Commented-out debug prints from the original removed.
    if len(p) == 2:
        p[0] = [p[1]]
    elif p[2] == ",":
        p[0] = p[1]
        if isinstance(p[1][-1], c.Vector):
            # Already in row mode: extend the current (last) row.
            p[1][-1].values.append(p[3])
        else:
            p[0].append(p[3])
    elif p[2] == ";":
        if isinstance(p[1][-1], c.Vector):
            p[0] = p[1]
        else:
            # First ';' encountered: wrap the flat prefix into the first row.
            p[0] = [c.Vector(p[1])]
        # Start a new row containing the element after the ';'.
        p[0].append(c.Vector([p[3]]))
def p_print_index_expression_list(p):
    '''print_index_expression_list : ID
                    | print_index_expression_list ',' ID '''
    # Comma-separated identifier list used by the `print` statement; each ID
    # becomes a Variable node.  (Dead trailing `pass` removed.)
    if len(p) == 2:
        p[0] = [c.Variable(p[1], p)]
    elif p[2] == ",":
        p[0] = p[1]
        p[0].append(c.Variable(p[3], p))
def p_array_expression(p):
    '''array_expression : ID '(' index_expression_list ')'
                    | ID '[' index_expression_list ']'
                    '''
    # Both delimiter styles produce the exact same AST node, and the grammar
    # guarantees matched delimiters, so the original duplicated if/elif on
    # p[2]/p[4] is collapsed into one constructor call.
    indices = p[3] if isinstance(p[3], list) else [p[3]]
    p[0] = c.ArrayExp(c.Variable(p[1], p), indices, p)
def p_unary_expression(p):
    '''unary_expression : postfix_expression
                    | '-' postfix_expression'''
    if len(p) == 2:
        p[0] = p[1]
    else:
        # Unary minus.
        p[0] = c.UnaryExp("-", p[2], p)
def p_multiplicative_expression(p):
    '''multiplicative_expression : unary_expression
                    | multiplicative_expression '*' unary_expression
                    | multiplicative_expression '/' unary_expression
                    | multiplicative_expression DOTMUL unary_expression
                    | multiplicative_expression DOTDIV unary_expression '''
    # Scalar ops build OperationExp; element-wise dot-ops build MatrixExp.
    if len(p) > 2:
        if p[2] == '*':
            p[0] = c.OperationExp(p[1], '*', p[3], p)
        elif p[2] == '/':
            p[0] = c.OperationExp(p[1], '/', p[3], p)
        elif p[2] == ".*":
            p[0] = c.MatrixExp(p[1], '.*', p[3], p)
        elif p[2] == "./":
            p[0] = c.MatrixExp(p[1], './', p[3], p)
    else:
        p[0] = p[1]
def p_additive_expression(p):
    '''additive_expression : multiplicative_expression
                    | additive_expression '+' multiplicative_expression
                    | additive_expression '-' multiplicative_expression
                    | additive_expression DOTADD multiplicative_expression
                    | additive_expression DOTMINUS multiplicative_expression '''
    # Same split as the multiplicative rule: scalar vs element-wise ops.
    if len(p) > 2:
        if p[2] == '+':
            p[0] = c.OperationExp(p[1], '+', p[3], p)
        elif p[2] == '-':
            p[0] = c.OperationExp(p[1], '-', p[3], p)
        elif p[2] == ".+":
            p[0] = c.MatrixExp(p[1], '.+', p[3], p)
        elif p[2] == ".-":
            p[0] = c.MatrixExp(p[1], '.-', p[3], p)
    else:
        p[0] = p[1]
def p_relational_expression(p):
    '''relational_expression : additive_expression
                    | relational_expression '<' additive_expression
                    | relational_expression '>' additive_expression
                    | relational_expression LESSEREQ additive_expression
                    | relational_expression GREATEREQ additive_expression '''
    # Every operator branch in the original built the identical node with the
    # operator text (which is exactly p[2]'s token value), so the per-operator
    # elif chain collapses into a single constructor call.
    if len(p) == 2:
        p[0] = p[1]
    else:
        p[0] = c.RelationalExp(p[1], p[2], p[3], p)
def p_equality_expression(p):
    '''equality_expression : relational_expression
                    | equality_expression EQ relational_expression
                    | equality_expression NOTEQ relational_expression '''
    # Same collapse as p_relational_expression: p[2] is '==' or '!='.
    if len(p) == 2:
        p[0] = p[1]
    else:
        p[0] = c.RelationalExp(p[1], p[2], p[3], p)
def p_special_expression(p):
    '''special_expression : equality_expression
                    | PRINT print_index_expression_list
                    | PRINT NORMSTRING
                    | RETURN expression
                    | CONTINUE
                    | BREAK'''
    # Keyword-led statements are dispatched on the keyword's token value;
    # otherwise the single equality_expression is passed through.
    if p[1] == "print":
        p[0] = c.Print(p[2])
    elif p[1] == "return":
        p[0] = c.Return(p[2])
    elif p[1] == "continue":
        p[0] = c.Continue(p)
    elif p[1] == "break":
        p[0] = c.Break(p)
    elif len(p) == 2:
        p[0] = p[1]
def p_expression(p):
    '''expression : special_expression
                    | expression ':' special_expression '''
    if len(p) == 2:
        p[0] = p[1]
    else:
        # a ':' b builds a range expression.
        p[0] = c.Range(p[1], p[3])
def p_assignment_expression(p):
    '''assignment_expression : postfix_expression '=' expression
                    | postfix_expression ADDASSIGN expression
                    | postfix_expression SUBASSIGN expression
                    | postfix_expression MULASSIGN expression
                    | postfix_expression DIVASSIGN expression'''
    # All five operators built the identical node with the operator text,
    # which is exactly p[2]'s token value ('=', '+=', '-=', '*=', '/='), so
    # the original elif chain collapses into one constructor call.
    p[0] = c.AssignmentExpression(p[1], p[2], p[3])
def p_init_expression(p):
    ''' init_expression : ID "=" additive_expression
                    | ID "=" ZEROS '(' index_expression_list ')'
                    | ID "=" ONES '(' index_expression_list ')'
                    | ID "=" EYE '(' index_expression_list ')' '''
    # Plain initialization, or one of the matrix builders zeros/ones/eye
    # dispatched on the keyword's token value.
    if len(p) == 4:
        p[0] = c.InitExpression(c.Id(p[1]), "=", p[3], p)
    elif p[3] == "zeros":
        p[0] = c.InitExpression(c.Id(p[1]), "=", c.Zeros(p[5] if isinstance(p[5], list) else [p[5]]), p)
    elif p[3] == "ones":
        p[0] = c.InitExpression(c.Id(p[1]), "=", c.Ones(p[5] if isinstance(p[5], list) else [p[5]]), p)
    elif p[3] == "eye":
        p[0] = c.InitExpression(c.Id(p[1]), "=", c.Eye(p[5] if isinstance(p[5], list) else [p[5]]), p)
def p_eostmt(p):
    '''eostmt : ','
                    | ';' '''
    # Both terminators are normalized to a single ';' node.
    p[0] = c.Eostmt(";")
def p_statement(p):
    '''statement : assignment_statement
                    | expression_statement
                    | selection_statement
                    | iteration_statement
                    | init_statement '''
    p[0] = p[1]
def p_statement_list(p):
    '''statement_list : statement
                    | statement statement_list '''
    # Flatten the right-recursive list: the first statement followed by the
    # (possibly already flattened) tail of the list.
    result = [p[1]]
    if len(p) > 2:
        tail = p[2] if isinstance(p[2], list) else [p[2]]
        result.extend(tail)
    p[0] = result
def p_expression_statement(p):
    '''expression_statement : eostmt
                    | expression eostmt'''
    # Both branches of the original assigned p[0] = p[1]; the statement
    # terminator is dropped either way.  (Dead commented-out append removed.)
    p[0] = p[1]
def p_assignment_statement(p):
    '''assignment_statement : assignment_expression eostmt '''
    # The terminator is dropped; the assignment node passes through.
    p[0] = p[1]
def p_init_statement(p):
    '''init_statement : init_expression eostmt '''
    # Same shape as assignment_statement.
    p[0] = p[1]
def p_selection_statement(p):
    '''selection_statement : IF '(' expression ')' statement %prec then
                    | IF '(' expression ')' statement ELSE statement
                    | IF '(' expression ')' '{' statement_list '}' %prec then
                    | IF '(' expression ')' '{' statement_list '}' ELSE '{' statement_list '}'
                    '''
    # Branch bodies are normalized to lists of statements.
    # NOTE(review): the fourth production (braced if/else, 11 symbols) is not
    # handled by any branch below, so its p[0] stays None -- confirm.
    if len(p) == 6:
        # Unbraced single-statement if.
        p[0] = c.IfStatement(p[3], [p[5]])
    elif len(p) == 8 and p[6] == 'else':
        # Unbraced if/else.
        p[0] = c.IfElseStatement(p[3], [p[5]], [p[7]])
    elif len(p) == 8 and p[6] != 'else':
        # Braced if without else; p[6] is the statement_list.
        p[0] = c.IfStatement(p[3], p[6] if isinstance(p[6], list) else [p[6]])
def p_iteration_statement(p):
    '''iteration_statement : WHILE '(' expression ')' '{' statement_list '}'
                    | FOR ID '=' expression '{' statement_list '}'
                    | FOR '(' ID '=' expression ')' '{' statement_list '}' '''
    # Dispatch on the leading keyword, then on whether the for-header is
    # parenthesized (which shifts the symbol positions by two).
    if p[1] == "while":
        p[0] = c.WhileStatement(p[3], p[6] if isinstance(p[6], list) else [p[6]])
    elif p[1] == "for" and p[2] == '(':
        p[0] = c.ForStatement(c.InitExpression(c.Id(p[3]), "=", p[5], p), p[8])
    else:
        p[0] = c.ForStatement(c.InitExpression(c.Id(p[2]), "=", p[4], p), p[6])
# Error rule for syntax errors
def p_error(p):
    """Yacc error handler: report the offending token, or end-of-input.

    The original hard-coded "column 0" into every message, which was
    misleading; computing a real column needs the raw input text, so the
    lexer character offset (lexpos) is reported instead.
    """
    if p:
        print("Syntax error - line {0}, position {1}: LexToken({2}, '{3}')".format(
            p.lineno, p.lexpos, p.type, p.value))
    else:
        print("Unexpected end of input")
parser = yacc.yacc()
| [
"kos96@interia.eu"
] | kos96@interia.eu |
f0202a4f34472c4c3be1f395aaae592e9ea9f454 | 7d9f92fba6af53bd385e0b4173134241c9998fff | /items/admin.py | 418f8f0f830fcebdebd5feddc8bd7ec707691ed5 | [] | no_license | ljarufe/intifil | 856f77c6ece7f444fd331a3eff3c35260201f78f | d478a8a1309d526a2508ca7b559e16de03aaa384 | refs/heads/master | 2021-01-02T09:09:13.613026 | 2013-10-21T17:00:03 | 2013-10-21T17:00:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,138 | py | # -*- coding: utf-8 -*-
from django.contrib import admin
from modeltranslation.admin import TranslationAdmin
from common.admin import BasePermissionAdmin
from items.models import Category, Photo, HomePhoto, Item, Video, SubItem
class CategoryAdmin(BasePermissionAdmin, TranslationAdmin):
    """
    Category model admin: list view shows name and slug.
    """
    list_display = ("name", "slug",)
class PhotoInLine(admin.TabularInline):
    """
    Photo inline model admin, embedded in SubItemAdmin.
    """
    model = Photo
    # `name` is filled programmatically, so it is hidden from the inline form.
    exclude = ("name",)
class VideoInLine(admin.TabularInline):
    """
    Video inline model admin, embedded in SubItemAdmin.
    """
    model = Video
    exclude = ("name",)
class HomePhotoAdmin(admin.ModelAdmin):
    """
    Home photo model admin: lists the related item and the shape label.
    """
    list_display = ("get_item", "get_shape_display",)
class ItemAdmin(TranslationAdmin):
    """
    Item model admin
    """
    list_display = ("name", "category", "order",)
    list_display_links = ("name", "category")
    list_editable = ('order', )
    list_filter = ("category",)
    exclude = ('order',)

    def save_model(self, request, obj, form, change):
        """
        Save the item; a newly created item that appears on the home page
        gets the default order (appended at the end of the list).

        The original only called obj.save() for new items with a home photo,
        so edits to existing items -- and new items without a home photo --
        were silently discarded.  ModelAdmin.save_model must persist the
        object unconditionally.
        """
        if not change and form.cleaned_data["home_photo"]:
            obj.order = Item.get_default_order()
        obj.save()
class SubItemAdmin(TranslationAdmin):
    """
    Subitem model admin: edits photos and videos inline.
    """
    list_display = ("name", "item", "order",)
    list_display_links = ("name", "item")
    list_editable = ('order', )
    list_filter = ("item",)
    inlines = [PhotoInLine, VideoInLine,]
class PhotoVideoAdmin(TranslationAdmin):
    """
    Photo and video model admin (shared by both registrations below).
    """
    list_display = ("name", "subitem", "order",)
    list_display_links = ("name", "subitem")
    list_editable = ('order', )
    list_filter = ("subitem",)
# Register every model with its admin class.
admin.site.register(Category, CategoryAdmin)
admin.site.register(Photo, PhotoVideoAdmin)
admin.site.register(HomePhoto, HomePhotoAdmin)
admin.site.register(Item, ItemAdmin)
admin.site.register(SubItem, SubItemAdmin)
admin.site.register(Video, PhotoVideoAdmin)
"luisjarufe@gmail.com"
] | luisjarufe@gmail.com |
e07ad01c23c45836b064759f00be7e07f68f04e8 | f04a36fdaa415c6a47d3727e783b2dce11e3dd43 | /blog/views.py | 8ae814785c3273121fdfa345ef1043693a0d0a73 | [
"BSD-3-Clause"
] | permissive | hellprise/cook_blog | e9486452cc53a1300fce5ea9ea54dbe5c0408bf0 | d55734af1625256f940e55d267beb38d911bfda4 | refs/heads/main | 2023-06-25T21:43:20.284389 | 2021-07-28T14:36:45 | 2021-07-28T14:36:45 | 390,378,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | from django.shortcuts import render
from django.views.generic import ListView, DetailView
from blog.models import Post
class PostListView(ListView):
    """List posts belonging to the category given by the URL slug."""
    model = Post
    def get_queryset(self):
        # select_related avoids one query per post when templates access
        # post.category.
        return Post.objects.filter(category__slug=self.kwargs.get('slug')).select_related('category')
class PostDetailView(DetailView):
    """Single-post page; the URL kwarg is `post_slug`."""
    model = Post
    context_object_name = 'post'
    slug_url_kwarg = 'post_slug'
def home(request):
    """Render the bare base template as the landing page."""
    return render(request, 'base.html')
| [
"you@example.com"
] | you@example.com |
aaadf802942b3625c3648dfa76e8026553953ccb | cf96c6f9bee731a2ebd97cea0267095158380357 | /webssh/django_webssh/tools/tools.py | 5ac8ae24fbe1e1160ae7adecb54dee321cbdcd3e | [
"Apache-2.0"
] | permissive | zhailibao/django-webssh | 9a2ecffa0bf1bfcef46b77224c78501dc0f3dcbe | ee250f65188d52b42f3ee6cee1f33c224c69474c | refs/heads/master | 2020-04-15T19:53:13.005337 | 2019-01-10T11:37:37 | 2019-01-10T11:37:37 | 164,969,315 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : HuYuan
# @File : tools.py
from django import forms
from django_webssh import models
import time
import random
import hashlib
class ValidationData(forms.ModelForm):
    """ModelForm used purely to validate incoming host data for HostTmp."""
    class Meta:
        model = models.HostTmp
        # `datetime` is set server-side, not submitted by the client.
        exclude = ['datetime']
def unique():
    """Return a pseudo-random 32-character hex token.

    Built from an md5 over a random salt followed by the current timestamp;
    suitable as a non-cryptographic uniqueness tag.
    """
    salt = str(random.random())
    stamp = str(time.time())
    digest = hashlib.md5(salt.encode('utf-8'))
    digest.update(stamp.encode('utf-8'))
    return digest.hexdigest()
def get_key_obj(pkeyobj, pkey_file=None, pkey_obj=None, password=None):
    """Best-effort construction of a private-key object.

    pkeyobj   -- a key class exposing ``from_private_key(fobj, password=...)``
                 (e.g. a paramiko PKey subclass).
    pkey_file -- path to a key file; takes precedence over pkey_obj.
    pkey_obj  -- an already-open file-like object containing the key.
    password  -- optional passphrase forwarded to the parser.

    Returns the parsed key, or None when parsing fails (callers typically try
    several key classes in turn).  On failure with a file-like object it is
    rewound so the next attempt can re-read it.  The original bare ``except``
    clauses also swallowed KeyboardInterrupt/SystemExit; narrowed to
    ``Exception`` while keeping the best-effort None result.
    """
    if pkey_file:
        with open(pkey_file) as fo:
            try:
                return pkeyobj.from_private_key(fo, password=password)
            except Exception:
                return None
    try:
        return pkeyobj.from_private_key(pkey_obj, password=password)
    except Exception:
        pkey_obj.seek(0)
        return None
| [
"hy2803660215@163.com"
] | hy2803660215@163.com |
95a4b7f4ef92f184eefee95bceee085fc44064e8 | ecd2c20608e1f4a1646c87767762bd72db618d65 | /photo_blog/settings.py | a119b668239d31d500b4fa6a3be1f70c0a501c4a | [] | no_license | RianGirard/photo_blog | 129858ee32cbc2ff0521c8219b72b9d83c015726 | e461fa62abe027965b7143cce544d25634d5bf9c | refs/heads/master | 2023-06-20T14:36:38.040663 | 2021-07-21T01:02:13 | 2021-07-21T01:02:13 | 383,640,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,867 | py | """
Django settings for photo_blog project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
from decouple import config
# decouple reads the .env/ini file with this encoding.
config.encoding = 'cp1251'
SECRET_KEY = config('SECRET_KEY')
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
MEDIA_URL = '/media/' # for image upload
MEDIA_ROOT = os.path.join(BASE_DIR, 'media') # ditto
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE: this duplicates the identical SECRET_KEY assignment above -- redundant.
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'blog',
'profiles',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites', # required for django-allauth
'allauth', # ditto
'allauth.account', # ditto
'allauth.socialaccount', # ditto
# 'allauth.socialaccount.providers.github', # ditto
'sorl.thumbnail', # required for sorl.thumbnail
'crispy_forms',
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'photo_blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'photo_blog/templates')], # added this in: os.path.join(BASE_DIR, '[mysite]/templates')
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'photo_blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = ( # added this
os.path.join(BASE_DIR, 'photo_blog/static'),
)
# following are parameters for django-allauth:
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SITE_ID = 1
LOGIN_URL = '/login'
LOGIN_REDIRECT_URL = '/'
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_LOGOUT_ON_GET = True
ACCOUNT_LOGIN_ON_PASSWORD_RESET = True
ACCOUNT_LOGOUT_REDIRECT = '/'
ACCOUNT_PRESERVE_USERNAME_CASING = False
ACCOUNT_SESSION_REMEMBER = True
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = True
ACCOUNT_SIGNUP_REDIRECT_URL = '/'
ACCOUNT_USERNAME_MIN_LENGTH = 2
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend"
)
# EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' # for PROD
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # for DEV | [
"riangirard@gmail.com"
] | riangirard@gmail.com |
a6c79444b3064e23c966de91bf6fbc9553061d4d | 6c507cccc6c13cbd37298be2f70d7c8e92356ea9 | /master_content_server/util_errors.py | 5e9c809124dd2c12c9ebdf53af15a6ec8f489199 | [] | no_license | kuroneko/harem | 309a3d53fc6a518001c9c330e3fae04a41926c5e | c9223e19af03dc1a89416f104760aa983119f396 | refs/heads/master | 2021-05-29T00:39:41.001286 | 2011-05-21T23:04:37 | 2011-05-21T23:04:37 | 105,629,747 | 0 | 0 | null | 2017-10-03T08:30:41 | 2017-10-03T08:30:41 | null | UTF-8 | Python | false | false | 1,686 | py | #!/usr/bin/python
import sys
from util_html import meidokon_html_headers
from util_html import meidokon_html_footers
# Fallbacks used when a language or error code is not found in the tables.
DEFAULT_LANG = 'EN'
DEFAULT_ERR = 'Status: 500 Internal Server Error'
DEFAULT_MSG = 'Something Went Boom, Sorry'
HOMEPAGE_LINK = 'index.py'
# Start the HTTP errors
# Maps language code -> {HTTP status code -> CGI "Status:" header line}.
HTTP_ERRORS = {}
# English, mothafucker, do you speak it?!
HTTP_ERRORS['EN'] = {}
HTTP_ERRORS['EN'][400] = 'Status: 400 Bad Request'
HTTP_ERRORS['EN'][403] = 'Status: 403 Forbidden'
HTTP_ERRORS['EN'][404] = 'Status: 404 Not found'
HTTP_ERRORS['EN'][500] = 'Status: 500 Internal Server Error'
# Maps language code -> {symbolic error key -> human-readable message}.
GEN_ERRORS = {}
GEN_ERRORS['EN'] = {}
GEN_ERRORS['EN']['ERR_NO_SUCH_FILE'] = 'FAILED! There is no such file with that hash'
GEN_ERRORS['EN']['XMLRPC_TAG_LIST_GET'] = 'Failure while retrieving a list of all tags for display'
GEN_ERRORS['EN']['FAIL_WRITING_UPLOAD'] = 'Failure while attempting to write uploaded file to disk'
GEN_ERRORS['EN']['UNRECOGNISED_MODE'] = 'Unknown tagging mode was given'
GEN_ERRORS['EN']['GENERIC'] = 'An error occurred, of an unknown type'
def http_error(error_code, msg=DEFAULT_MSG, lang=DEFAULT_LANG):
    # Emit an HTTP error response and terminate the script (Python 2 CGI).
    if lang not in HTTP_ERRORS.keys():
        lang = DEFAULT_LANG
    err = HTTP_ERRORS[lang].get(error_code, DEFAULT_ERR)
    print err
    print  # blank line -- presumably the CGI header/body separator; confirm
    meidokon_html_headers()
    print msg
    sys.exit(msg)  # non-empty string arg -> exit status 1, msg on stderr
def gen_error(error_code, msg=DEFAULT_MSG, lang=DEFAULT_LANG):
    # Render an inline HTML error box with a home link, then terminate.
    # NOTE(review): unknown error keys fall back to DEFAULT_ERR, which is an
    # HTTP "Status:" line rather than GEN_ERRORS['EN']['GENERIC'] -- confirm.
    if lang not in GEN_ERRORS.keys():
        lang = DEFAULT_LANG
    err = GEN_ERRORS[lang].get(error_code, DEFAULT_ERR)
    print '<div style="border:1px black dotted;">'
    print err, "<br />"
    print "Further data from error site: ", msg, "<br />"
    print """<a href="%s">Go home</a><br />\n""" % HOMEPAGE_LINK
    print '</div>'
    sys.exit(msg)
| [
"barneydesmond@gmail.com"
] | barneydesmond@gmail.com |
ddfa6b18501db445e3bdbdecfb58362d13a6d3f9 | 4dac2dc078d4536146fdf835e2f693b65a7cc224 | /filelayout.py | 45c4b27261325774fa76687b8dd482143c7ec5c1 | [] | no_license | estansifer/website | 6dfe0e5b7e9321579ab8728c43498ba44003c9b3 | 477815e14d3e3738f6b3331d12f36a73c58cc17c | refs/heads/master | 2022-11-07T19:14:12.027964 | 2020-06-29T00:22:22 | 2020-06-29T00:22:22 | 275,692,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,385 | py | import sys
import os.path as op
root_dir = op.join(sys.path[0], '..')
input_dir = op.join(root_dir, 'input')
output_dir = op.join(root_dir, 'www')
working_dir = op.join(root_dir, 'working')
latex_dir = op.join(working_dir, 'latex')
pandoc_html_template = op.join(input_dir, 'template.html')
pandoc_cyoa_template = op.join(input_dir, 'template_cyoa.html')
pandoc_blog_template = op.join(input_dir, 'template_blog.html')
pandoc_blog_compact_template = op.join(input_dir, 'template_blog_index_compact.html')
pandoc_blog_expanded_template = op.join(input_dir, 'template_blog_index_expanded.html')
main_dir = op.join(input_dir, 'main')
cyoa_dir = op.join(input_dir, 'cyoa')
output_auto_generated_dir = op.join(output_dir, 'a')
output_resources_dir = op.join(output_dir, 'r')
output_cyoa_dir = op.join(output_dir, 'cyoa')
output_cyoa_index = op.join(output_cyoa_dir, 'index.html')
output_blog_dir = op.join(output_dir, 'posts')
output_blog_index_compact = op.join(output_blog_dir, 'index.html')
def cyoa_target_path(name):
return op.join(output_cyoa_dir, name)
def blog_target_path(name):
return op.join(output_blog_dir, name)
def blog_index_expanded_path(year):
return op.join(output_blog_dir, 'index_{}'.format(year))
def path_to_auto_resource(name):
return op.join(output_auto_generated_dir, name)
def link_to_auto_resource(name):
return op.join('/a/', name)
| [
"eric.stansifer+git@gmail.com"
] | eric.stansifer+git@gmail.com |
9bc038b8197fe6eb113bc4dec0c269c99a1d01f2 | 7fde3b3050f3ac825acc071d30883024d931e1c9 | /scripts/pre_processing_json.py | 32c727b3081aeb6595be74a70a7dd23505e0c33a | [] | no_license | madzik176/bme-bigdata | 0fd93e1a9d014855b959e9a9ecc303abb9960271 | f704e54edab6d5e2c90c363e9c4fdad7c9d10852 | refs/heads/master | 2021-05-15T15:35:15.648457 | 2017-11-01T17:29:42 | 2017-11-01T17:29:42 | 107,396,758 | 0 | 0 | null | 2017-10-18T11:08:18 | 2017-10-18T11:08:18 | null | UTF-8 | Python | false | false | 837 | py | import pandas as pd
import urllib, json
url = "https://data.seattle.gov/resource/pu5n-trf4.json"
with urllib.request.urlopen(url) as response:
data = response.read()
data_json = json.loads(data.decode())
df_all = pd.DataFrame(data_json)
df = df_all.loc[:,['event_clearance_date',
'event_clearance_group']]
df = df.dropna(how='any')
# Simple global stat
stat1 = pd.value_counts(df['event_clearance_group'].values, sort=True)
print(stat1)
# Simple group by stat
df['event_clearance_date'] = pd.to_datetime(df['event_clearance_date'])
df_SL = df[(df.event_clearance_group == 'SHOPLIFTING')]
df_SL = df_SL.set_index('event_clearance_date')
df_SL.sort_index(inplace=True)
df_SL['year'] = df_SL.index.year
s_SL = df_SL.groupby(['year'])['event_clearance_group'].value_counts()
print(s_SL) | [
"fando.magic@gmail.com"
] | fando.magic@gmail.com |
24adb24e67fa33ec18f9e784d8da59184ae4f620 | 267e8e4fc40b5426f91504fe6fecd1bc3b7451c0 | /ex_2.py | 6bcbdeb8925e3395e5d498274dbc36e6852d5fb8 | [] | no_license | calgaryfx/TPW | e6b3a378a92985424bd6f49afa307f245b1c4128 | 34b2bd4b7f2e7034cf7fbda81fc19b3d0f5506d5 | refs/heads/main | 2023-04-23T02:43:30.528467 | 2021-05-06T14:46:13 | 2021-05-06T14:46:13 | 364,643,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | # Ex.2: Hello (p.3)
user = input("Please input your name: ")
print(f"Hello {user.title()}.")
# Task: Write a program that asks the user to enter his or her name.
# The program should respond with a hello message to the user, using his or her name.
# The Python Workbook practice. Solution not available.
| [
"markrogers@Marks-MacBook-Air.local"
] | markrogers@Marks-MacBook-Air.local |
52a05f56e294b3506f0f956c66a69164680ea4cd | 81b724bbdfdfae1cd0260774fa99256f6ad30c7a | /spring_18/ECE_6750/backup/Hw_6/bcp.py | b2d8f93e5485eb4f679e76361fa3baf145954fe3 | [] | no_license | vkrao100/course_docs | be372900d678300e817ff45e73535899110fcf7a | 64b380ce2083b63b58c0ee74edd6c434a57184d4 | refs/heads/master | 2020-03-30T21:37:42.449310 | 2018-10-04T21:11:48 | 2018-10-04T21:11:48 | 151,635,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | import utility
import numpy
(A,x) = utility.parse("statemin.txt")
print(x)
| [
"vkrao@vkrao-ult.localdomain"
] | vkrao@vkrao-ult.localdomain |
ab6d3bdc20edc01a1bc5726f67e2790c2c0d32a0 | 20d0678dee488d59f46868003b2224c2d7288e8e | /kaggle/biological-response/logloss.py | 56f1b8df6532ddbb9279e675bdef49f1d5663ccb | [] | no_license | trivedipankaj/Machine-Learning | e74758737763b112d569d931a5e12ee1be354707 | 95bb3973f4da22a1627854a79f47193896782444 | refs/heads/master | 2016-09-06T15:29:50.157033 | 2015-04-05T11:02:35 | 2015-04-05T11:02:35 | 30,797,324 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | import scipy as sp
def llfun(act, pred):
epsilon = 1e-15
pred = sp.maximum(epsilon, pred)
pred = sp.minimum(1-epsilon, pred)
ll = sum(act*sp.log(pred) + sp.subtract(1,act)*sp.log(sp.subtract(1,pred)))
ll = ll * -1.0/len(act)
return ll
| [
"pankajk@valyoo.in"
] | pankajk@valyoo.in |
89b5ffc396c40540df2fb3de8ea43fa4e9444552 | 0a74f7afa97a0d31219fdf68b85d0733ef04caf3 | /python/pyspark/errors/error_classes.py | e87d37c63e77b262c0d228f0bff9b092a034d35e | [
"CC0-1.0",
"MIT",
"Python-2.0",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"CDDL-1.1",
"BSD-2-Clause",
"EPL-2.0",
"CDDL-1.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-free-unknown",... | permissive | viirya/spark-1 | 37ad4643b3b31c2a3dfc3d5d55eb2aefa112d5d0 | 37aa62f629e652ed70505620473530cd9611018e | refs/heads/master | 2023-08-31T20:14:55.834184 | 2023-07-11T16:08:21 | 2023-07-11T16:08:21 | 21,467,907 | 1 | 3 | Apache-2.0 | 2023-07-12T01:21:50 | 2014-07-03T15:40:08 | Scala | UTF-8 | Python | false | false | 22,184 | py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
ERROR_CLASSES_JSON = """
{
"APPLICATION_NAME_NOT_SET" : {
"message" : [
"An application name must be set in your configuration."
]
},
"ARGUMENT_REQUIRED": {
"message": [
"Argument `<arg_name>` is required when <condition>."
]
},
"ATTRIBUTE_NOT_CALLABLE" : {
"message" : [
"Attribute `<attr_name>` in provided object `<obj_name>` is not callable."
]
},
"ATTRIBUTE_NOT_SUPPORTED" : {
"message" : [
"Attribute `<attr_name>` is not supported."
]
},
"AXIS_LENGTH_MISMATCH" : {
"message" : [
"Length mismatch: Expected axis has <expected_length> element, new values have <actual_length> elements."
]
},
"BROADCAST_VARIABLE_NOT_LOADED": {
"message": [
"Broadcast variable `<variable>` not loaded."
]
},
"CALL_BEFORE_INITIALIZE": {
"message": [
"Not supported to call `<func_name>` before initialize <object>."
]
},
"CANNOT_ACCEPT_OBJECT_IN_TYPE": {
"message": [
"`<data_type>` can not accept object `<obj_name>` in type `<obj_type>`."
]
},
"CANNOT_ACCESS_TO_DUNDER": {
"message": [
"Dunder(double underscore) attribute is for internal use only."
]
},
"CANNOT_APPLY_IN_FOR_COLUMN": {
"message": [
"Cannot apply 'in' operator against a column: please use 'contains' in a string column or 'array_contains' function for an array column."
]
},
"CANNOT_BE_EMPTY": {
"message": [
"At least one <item> must be specified."
]
},
"CANNOT_BE_NONE": {
"message": [
"Argument `<arg_name>` can not be None."
]
},
"CANNOT_CONVERT_COLUMN_INTO_BOOL": {
"message": [
"Cannot convert column into bool: please use '&' for 'and', '|' for 'or', '~' for 'not' when building DataFrame boolean expressions."
]
},
"CANNOT_CONVERT_TYPE": {
"message": [
"Cannot convert <from_type> into <to_type>."
]
},
"CANNOT_DETERMINE_TYPE": {
"message": [
"Some of types cannot be determined after inferring."
]
},
"CANNOT_GET_BATCH_ID": {
"message": [
"Could not get batch id from <obj_name>."
]
},
"CANNOT_INFER_ARRAY_TYPE": {
"message": [
"Can not infer Array Type from an list with None as the first element."
]
},
"CANNOT_INFER_EMPTY_SCHEMA": {
"message": [
"Can not infer schema from empty dataset."
]
},
"CANNOT_INFER_SCHEMA_FOR_TYPE": {
"message": [
"Can not infer schema for type: `<data_type>`."
]
},
"CANNOT_INFER_TYPE_FOR_FIELD": {
"message": [
"Unable to infer the type of the field `<field_name>`."
]
},
"CANNOT_MERGE_TYPE": {
"message": [
"Can not merge type `<data_type1>` and `<data_type2>`."
]
},
"CANNOT_OPEN_SOCKET": {
"message": [
"Can not open socket: <errors>."
]
},
"CANNOT_PARSE_DATATYPE": {
"message": [
"Unable to parse datatype. <msg>."
]
},
"CANNOT_PROVIDE_METADATA": {
"message": [
"metadata can only be provided for a single column."
]
},
"CANNOT_SET_TOGETHER": {
"message": [
"<arg_list> should not be set together."
]
},
"CANNOT_SPECIFY_RETURN_TYPE_FOR_UDF": {
"message": [
"returnType can not be specified when `<arg_name>` is a user-defined function, but got <return_type>."
]
},
"COLUMN_IN_LIST": {
"message": [
"`<func_name>` does not allow a Column in a list."
]
},
"CONTEXT_ONLY_VALID_ON_DRIVER" : {
"message" : [
"It appears that you are attempting to reference SparkContext from a broadcast variable, action, or transformation. SparkContext can only be used on the driver, not in code that it run on workers. For more information, see SPARK-5063."
]
},
"CONTEXT_UNAVAILABLE_FOR_REMOTE_CLIENT" : {
"message" : [
"Remote client cannot create a SparkContext. Create SparkSession instead."
]
},
"DIFFERENT_ROWS" : {
"message" : [
"<error_msg>"
]
},
"DIFFERENT_SCHEMA" : {
"message" : [
"Schemas do not match:",
"df schema: <df_schema>",
"expected schema: <expected_schema>"
]
},
"DISALLOWED_TYPE_FOR_CONTAINER" : {
"message" : [
"Argument `<arg_name>`(type: <arg_type>) should only contain a type in [<allowed_types>], got <return_type>"
]
},
"DUPLICATED_FIELD_NAME_IN_ARROW_STRUCT" : {
"message" : [
"Duplicated field names in Arrow Struct are not allowed, got <field_names>"
]
},
"EXCEED_RETRY" : {
"message" : [
"Retries exceeded but no exception caught."
]
},
"HIGHER_ORDER_FUNCTION_SHOULD_RETURN_COLUMN" : {
"message" : [
"Function `<func_name>` should return Column, got <return_type>."
]
},
"INCORRECT_CONF_FOR_PROFILE" : {
"message" : [
"`spark.python.profile` or `spark.python.profile.memory` configuration",
" must be set to `true` to enable Python profile."
]
},
"INVALID_BROADCAST_OPERATION": {
"message": [
"Broadcast can only be <operation> in driver."
]
},
"INVALID_CALL_ON_UNRESOLVED_OBJECT": {
"message": [
"Invalid call to `<func_name>` on unresolved object."
]
},
"INVALID_CONNECT_URL" : {
"message" : [
"Invalid URL for Spark Connect: <detail>"
]
},
"INVALID_ITEM_FOR_CONTAINER": {
"message": [
"All items in `<arg_name>` should be in <allowed_types>, got <item_type>."
]
},
"INVALID_NDARRAY_DIMENSION": {
"message": [
"NumPy array input should be of <dimensions> dimensions."
]
},
"INVALID_PANDAS_UDF" : {
"message" : [
"Invalid function: <detail>"
]
},
"INVALID_PANDAS_UDF_TYPE" : {
"message" : [
"`<arg_name>` should be one the values from PandasUDFType, got <arg_type>"
]
},
"INVALID_RETURN_TYPE_FOR_PANDAS_UDF": {
"message": [
"Pandas UDF should return StructType for <eval_type>, got <return_type>."
]
},
"INVALID_TIMEOUT_TIMESTAMP" : {
"message" : [
"Timeout timestamp (<timestamp>) cannot be earlier than the current watermark (<watermark>)."
]
},
"INVALID_TYPE" : {
"message" : [
"Argument `<arg_name>` should not be a <data_type>."
]
},
"INVALID_TYPENAME_CALL" : {
"message" : [
"StructField does not have typeName. Use typeName on its type explicitly instead."
]
},
"INVALID_UDF_EVAL_TYPE" : {
"message" : [
"Eval type for UDF must be <eval_type>."
]
},
"INVALID_WHEN_USAGE": {
"message": [
"when() can only be applied on a Column previously generated by when() function, and cannot be applied once otherwise() is applied."
]
},
"INVALID_WINDOW_BOUND_TYPE" : {
"message" : [
"Invalid window bound type: <window_bound_type>."
]
},
"JAVA_GATEWAY_EXITED" : {
"message" : [
"Java gateway process exited before sending its port number."
]
},
"JVM_ATTRIBUTE_NOT_SUPPORTED" : {
"message" : [
"Attribute `<attr_name>` is not supported in Spark Connect as it depends on the JVM. If you need to use this attribute, do not use Spark Connect when creating your session."
]
},
"KEY_VALUE_PAIR_REQUIRED" : {
"message" : [
"Key-value pair or a list of pairs is required."
]
},
"LENGTH_SHOULD_BE_THE_SAME" : {
"message" : [
"<arg1> and <arg2> should be of the same length, got <arg1_length> and <arg2_length>."
]
},
"MASTER_URL_NOT_SET" : {
"message" : [
"A master URL must be set in your configuration."
]
},
"MISSING_LIBRARY_FOR_PROFILER" : {
"message" : [
"Install the 'memory_profiler' library in the cluster to enable memory profiling."
]
},
"MISSING_VALID_PLAN" : {
"message" : [
"Argument to <operator> does not contain a valid plan."
]
},
"MIXED_TYPE_REPLACEMENT" : {
"message" : [
"Mixed type replacements are not supported."
]
},
"NEGATIVE_VALUE" : {
"message" : [
"Value for `<arg_name>` must be greater than or equal to 0, got '<arg_value>'."
]
},
"NOT_BOOL" : {
"message" : [
"Argument `<arg_name>` should be a bool, got <arg_type>."
]
},
"NOT_BOOL_OR_DICT_OR_FLOAT_OR_INT_OR_LIST_OR_STR_OR_TUPLE" : {
"message" : [
"Argument `<arg_name>` should be a bool, dict, float, int, str or tuple, got <arg_type>."
]
},
"NOT_BOOL_OR_DICT_OR_FLOAT_OR_INT_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a bool, dict, float, int or str, got <arg_type>."
]
},
"NOT_BOOL_OR_FLOAT_OR_INT" : {
"message" : [
"Argument `<arg_name>` should be a bool, float or str, got <arg_type>."
]
},
"NOT_BOOL_OR_FLOAT_OR_INT_OR_LIST_OR_NONE_OR_STR_OR_TUPLE" : {
"message" : [
"Argument `<arg_name>` should be a bool, float, int, list, None, str or tuple, got <arg_type>."
]
},
"NOT_BOOL_OR_FLOAT_OR_INT_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a bool, float, int or str, got <arg_type>."
]
},
"NOT_BOOL_OR_LIST" : {
"message" : [
"Argument `<arg_name>` should be a bool or list, got <arg_type>."
]
},
"NOT_BOOL_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a bool or str, got <arg_type>."
]
},
"NOT_CALLABLE" : {
"message" : [
"Argument `<arg_name>` should be a callable, got <arg_type>."
]
},
"NOT_COLUMN" : {
"message" : [
"Argument `<arg_name>` should be a Column, got <arg_type>."
]
},
"NOT_COLUMN_OR_DATATYPE_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a Column, str or DataType, but got <arg_type>."
]
},
"NOT_COLUMN_OR_FLOAT_OR_INT_OR_LIST_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a column, float, integer, list or string, got <arg_type>."
]
},
"NOT_COLUMN_OR_INT" : {
"message" : [
"Argument `<arg_name>` should be a Column or int, got <arg_type>."
]
},
"NOT_COLUMN_OR_INT_OR_LIST_OR_STR_OR_TUPLE" : {
"message" : [
"Argument `<arg_name>` should be a Column, int, list, str or tuple, got <arg_type>."
]
},
"NOT_COLUMN_OR_INT_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a Column, int or str, got <arg_type>."
]
},
"NOT_COLUMN_OR_LIST_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a Column, list or str, got <arg_type>."
]
},
"NOT_COLUMN_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a Column or str, got <arg_type>."
]
},
"NOT_DATAFRAME" : {
"message" : [
"Argument `<arg_name>` should be a DataFrame, got <arg_type>."
]
},
"NOT_DATATYPE_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a DataType or str, got <arg_type>."
]
},
"NOT_DICT" : {
"message" : [
"Argument `<arg_name>` should be a dict, got <arg_type>."
]
},
"NOT_EXPRESSION" : {
"message" : [
"Argument `<arg_name>` should be a Expression, got <arg_type>."
]
},
"NOT_FLOAT_OR_INT" : {
"message" : [
"Argument `<arg_name>` should be a float or int, got <arg_type>."
]
},
"NOT_FLOAT_OR_INT_OR_LIST_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a float, int, list or str, got <arg_type>."
]
},
"NOT_IMPLEMENTED" : {
"message" : [
"<feature> is not implemented."
]
},
"NOT_INSTANCE_OF" : {
"message" : [
"<value> is not an instance of type <data_type>."
]
},
"NOT_INT" : {
"message" : [
"Argument `<arg_name>` should be an int, got <arg_type>."
]
},
"NOT_INT_OR_SLICE_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be an int, slice or str, got <arg_type>."
]
},
"NOT_IN_BARRIER_STAGE" : {
"message" : [
"It is not in a barrier stage."
]
},
"NOT_ITERABLE" : {
"message" : [
"<objectName> is not iterable."
]
},
"NOT_LIST" : {
"message" : [
"Argument `<arg_name>` should be a list, got <arg_type>."
]
},
"NOT_LIST_OF_COLUMN" : {
"message" : [
"Argument `<arg_name>` should be a list[Column]."
]
},
"NOT_LIST_OF_COLUMN_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a list[Column]."
]
},
"NOT_LIST_OF_FLOAT_OR_INT" : {
"message" : [
"Argument `<arg_name>` should be a list[float, int], got <arg_type>."
]
},
"NOT_LIST_OF_STR" : {
"message" : [
"Argument `<arg_name>` should be a list[str], got <arg_type>."
]
},
"NOT_LIST_OR_NONE_OR_STRUCT" : {
"message" : [
"Argument `<arg_name>` should be a list, None or StructType, got <arg_type>."
]
},
"NOT_LIST_OR_STR_OR_TUPLE" : {
"message" : [
"Argument `<arg_name>` should be a list, str or tuple, got <arg_type>."
]
},
"NOT_LIST_OR_TUPLE" : {
"message" : [
"Argument `<arg_name>` should be a list or tuple, got <arg_type>."
]
},
"NOT_NUMERIC_COLUMNS" : {
"message" : [
"Numeric aggregation function can only be applied on numeric columns, got <invalid_columns>."
]
},
"NOT_OBSERVATION_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a Observation or str, got <arg_type>."
]
},
"NOT_SAME_TYPE" : {
"message" : [
"Argument `<arg_name1>` and `<arg_name2>` should be the same type, got <arg_type1> and <arg_type2>."
]
},
"NOT_STR" : {
"message" : [
"Argument `<arg_name>` should be a str, got <arg_type>."
]
},
"NOT_STR_OR_LIST_OF_RDD" : {
"message" : [
"Argument `<arg_name>` should be a str or list[RDD], got <arg_type>."
]
},
"NOT_STR_OR_STRUCT" : {
"message" : [
"Argument `<arg_name>` should be a str or structType, got <arg_type>."
]
},
"NOT_WINDOWSPEC" : {
"message" : [
"Argument `<arg_name>` should be a WindowSpec, got <arg_type>."
]
},
"NO_ACTIVE_SESSION" : {
"message" : [
"No active Spark session found. Please create a new Spark session before running the code."
]
},
"ONLY_ALLOWED_FOR_SINGLE_COLUMN" : {
"message" : [
"Argument `<arg_name>` can only be provided for a single column."
]
},
"ONLY_ALLOW_SINGLE_TRIGGER" : {
"message" : [
"Only a single trigger is allowed."
]
},
"PIPE_FUNCTION_EXITED" : {
"message" : [
"Pipe function `<func_name>` exited with error code <error_code>."
]
},
"PYTHON_HASH_SEED_NOT_SET" : {
"message" : [
"Randomness of hash of string should be disabled via PYTHONHASHSEED."
]
},
"PYTHON_VERSION_MISMATCH" : {
"message" : [
"Python in worker has different version <worker_version> than that in driver <driver_version>, PySpark cannot run with different minor versions.",
"Please check environment variables PYSPARK_PYTHON and PYSPARK_DRIVER_PYTHON are correctly set."
]
},
"RDD_TRANSFORM_ONLY_VALID_ON_DRIVER" : {
"message" : [
"It appears that you are attempting to broadcast an RDD or reference an RDD from an ",
"action or transformation. RDD transformations and actions can only be invoked by the ",
"driver, not inside of other transformations; for example, ",
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values ",
"transformation and count action cannot be performed inside of the rdd1.map ",
"transformation. For more information, see SPARK-5063."
]
},
"RESULT_COLUMNS_MISMATCH_FOR_PANDAS_UDF" : {
"message" : [
"Column names of the returned pandas.DataFrame do not match specified schema.<missing><extra>"
]
},
"RESULT_LENGTH_MISMATCH_FOR_PANDAS_UDF" : {
"message" : [
"Number of columns of the returned pandas.DataFrame doesn't match specified schema. Expected: <expected> Actual: <actual>"
]
},
"RESULT_LENGTH_MISMATCH_FOR_SCALAR_ITER_PANDAS_UDF" : {
"message" : [
"The length of output in Scalar iterator pandas UDF should be the same with the input's; however, the length of output was <output_length> and the length of input was <input_length>."
]
},
"SCHEMA_MISMATCH_FOR_PANDAS_UDF" : {
"message" : [
"Result vector from pandas_udf was not the required length: expected <expected>, got <actual>."
]
},
"SESSION_ALREADY_EXIST" : {
"message" : [
"Cannot start a remote Spark session because there is a regular Spark session already running."
]
},
"SESSION_NOT_SAME" : {
"message" : [
"Both Datasets must belong to the same SparkSession."
]
},
"SESSION_OR_CONTEXT_EXISTS" : {
"message" : [
"There should not be an existing Spark Session or Spark Context."
]
},
"SHOULD_NOT_DATAFRAME": {
"message": [
"Argument `<arg_name>` should not be a DataFrame."
]
},
"SLICE_WITH_STEP" : {
"message" : [
"Slice with step is not supported."
]
},
"STATE_NOT_EXISTS" : {
"message" : [
"State is either not defined or has already been removed."
]
},
"STOP_ITERATION_OCCURRED" : {
"message" : [
"Caught StopIteration thrown from user's code; failing the task: <exc>"
]
},
"STOP_ITERATION_OCCURRED_FROM_SCALAR_ITER_PANDAS_UDF" : {
"message" : [
"pandas iterator UDF should exhaust the input iterator."
]
},
"TOO_MANY_VALUES" : {
"message" : [
"Expected <expected> values for `<item>`, got <actual>."
]
},
"UNEXPECTED_RESPONSE_FROM_SERVER" : {
"message" : [
"Unexpected response from iterator server."
]
},
"UNEXPECTED_TUPLE_WITH_STRUCT" : {
"message" : [
"Unexpected tuple <tuple> with StructType."
]
},
"UNKNOWN_EXPLAIN_MODE" : {
"message" : [
"Unknown explain mode: '<explain_mode>'. Accepted explain modes are 'simple', 'extended', 'codegen', 'cost', 'formatted'."
]
},
"UNKNOWN_INTERRUPT_TYPE" : {
"message" : [
"Unknown interrupt type: '<interrupt_type>'. Accepted interrupt types are 'all'."
]
},
"UNKNOWN_RESPONSE" : {
"message" : [
"Unknown response: <response>."
]
},
"UNSUPPORTED_DATA_TYPE" : {
"message" : [
"Unsupported DataType `<data_type>`."
]
},
"UNSUPPORTED_DATA_TYPE_FOR_ARROW" : {
"message" : [
"Single data type <data_type> is not supported with Arrow."
]
},
"UNSUPPORTED_DATA_TYPE_FOR_ARROW_CONVERSION" : {
"message" : [
"<data_type> is not supported in conversion to Arrow."
]
},
"UNSUPPORTED_DATA_TYPE_FOR_ARROW_VERSION" : {
"message" : [
"<data_type> is only supported with pyarrow 2.0.0 and above."
]
},
"UNSUPPORTED_DATA_TYPE_FOR_IGNORE_ROW_ORDER" : {
"message" : [
"Cannot ignore row order because undefined sorting for data type."
]
},
"UNSUPPORTED_JOIN_TYPE" : {
"message" : [
"Unsupported join type: <join_type>. Supported join types include: \\"inner\\", \\"outer\\", \\"full\\", \\"fullouter\\", \\"full_outer\\", \\"leftouter\\", \\"left\\", \\"left_outer\\", \\"rightouter\\", \\"right\\", \\"right_outer\\", \\"leftsemi\\", \\"left_semi\\", \\"semi\\", \\"leftanti\\", \\"left_anti\\", \\"anti\\", \\"cross\\"."
]
},
"UNSUPPORTED_LITERAL" : {
"message" : [
"Unsupported Literal '<literal>'."
]
},
"UNSUPPORTED_NUMPY_ARRAY_SCALAR" : {
"message" : [
"The type of array scalar '<dtype>' is not supported."
]
},
"UNSUPPORTED_OPERATION" : {
"message" : [
"<operation> is not supported."
]
},
"UNSUPPORTED_PARAM_TYPE_FOR_HIGHER_ORDER_FUNCTION" : {
"message" : [
"Function `<func_name>` should use only POSITIONAL or POSITIONAL OR KEYWORD arguments."
]
},
"UNSUPPORTED_SIGNATURE" : {
"message" : [
"Unsupported signature: <signature>."
]
},
"UNSUPPORTED_WITH_ARROW_OPTIMIZATION" : {
"message" : [
"<feature> is not supported with Arrow optimization enabled in Python UDFs. Disable 'spark.sql.execution.pythonUDF.arrow.enabled' to workaround.."
]
},
"VALUE_NOT_ACCESSIBLE": {
"message": [
"Value `<value>` cannot be accessed inside tasks."
]
},
"VALUE_NOT_ANY_OR_ALL" : {
"message" : [
"Value for `<arg_name>` must be 'any' or 'all', got '<arg_value>'."
]
},
"VALUE_NOT_BETWEEN" : {
"message" : [
"Value for `<arg_name>` must be between <min> and <max>."
]
},
"VALUE_NOT_NON_EMPTY_STR" : {
"message" : [
"Value for `<arg_name>` must be a non empty string, got '<arg_value>'."
]
},
"VALUE_NOT_PEARSON" : {
"message" : [
"Value for `<arg_name>` only supports the 'pearson', got '<arg_value>'."
]
},
"VALUE_NOT_POSITIVE" : {
"message" : [
"Value for `<arg_name>` must be positive, got '<arg_value>'."
]
},
"VALUE_NOT_TRUE" : {
"message" : [
"Value for `<arg_name>` must be True, got '<arg_value>'."
]
},
"VALUE_OUT_OF_BOUND" : {
"message" : [
"Value for `<arg_name>` must be greater than <lower_bound> or less than <upper_bound>, got <actual>"
]
},
"WRONG_NUM_ARGS_FOR_HIGHER_ORDER_FUNCTION" : {
"message" : [
"Function `<func_name>` should take between 1 and 3 arguments, but provided function takes <num_args>."
]
},
"WRONG_NUM_COLUMNS" : {
"message" : [
"Function `<func_name>` should take at least <num_cols> columns."
]
}
}
"""
ERROR_CLASSES_MAP = json.loads(ERROR_CLASSES_JSON)
| [
"gurwls223@apache.org"
] | gurwls223@apache.org |
233d40f8e8ad5f80d43b6f72ef1e8e6ef8eca9d2 | 27623ad6f6a62b123926e00ee5a778c221d08867 | /zhstory.py | a322f0435875edd35756463992751f08394f2f2b | [] | no_license | zuoqin/zhaws | 90f17a3d328976a901a8b23bca1ee710c9f9f755 | 631a597bf96a8cd288d3232a9f81a27f8a37656b | refs/heads/master | 2021-01-07T19:37:30.104154 | 2020-07-07T13:10:13 | 2020-07-07T13:10:13 | 241,800,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,501 | py | import requests
from bs4 import BeautifulSoup
import datetime
import json
import urllib3
def scrape(event, context):
qp = '/political/coupgate-localized-civil-war-now-underway-doj'
if 'queryStringParameters' in event and 'url' in event['queryStringParameters']:
qp = event['queryStringParameters']['url']
data = deal_scrape(qp)
html = """
<!DOCTYPE html>
<html>
<head>
<title>{}</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/css/bootstrap.min.css">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.4.1/jquery.min.js"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/js/bootstrap.min.js"></script>
</head>
<body>
<h1>{}</h1>
{}
<p>{}</p>
</body>
</html>
"""
return {
'statusCode': 200,
'headers': {"content-type": "text/html"},
'body': html.format(data['title'], data['title'], data['body'], data['updated'])
}
def deal_scrape(article):
url = 'https://www.zerohedge.com' + str(article)
req = urllib3.PoolManager()
res = req.request('GET', url, headers={
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.106 Safari/537.36'
})
content = BeautifulSoup(res.data, 'html.parser')
div = content.find('div', {'class': "layout-content"})
item = div
title = ''.join(map(str, item.findAll('h1', {'class': "page-title"})[0].span.contents))
body = ''.join(map(str, item.findAll('div', {'class': "node__content"})[0].findAll('div', {'property': "schema:text"})[0].contents))
body = body.replace("https://www.zerohedge.com/news", "https://news.ehedge.xyz/story?url=%2Fnews")
body = body.replace("https://www.zerohedge.com/article", "https://news.ehedge.xyz/story?url=%2Farticle")
body = body.replace("https://www.zerohedge.com/markets", "https://news.ehedge.xyz/story?url=%2Fmarkets")
body = body.replace("https://www.zerohedge.com/health", "https://news.ehedge.xyz/story?url=%2Fhealth")
body = body.replace("https://www.zerohedge.com/economics", "https://news.ehedge.xyz/story?url=%2Feconomics")
body = body.replace("/s3/files", "https://www.zerohedge.com/s3/files")
updated = item.findAll('div', {'class': "submitted-datetime"})[0].span.text
return {'body': body, 'title': title, 'updated': updated}
| [
"zuoqin@mail.ru"
] | zuoqin@mail.ru |
4ebc28f370f7858fb7d74279bc76973b0265a4d1 | aa94ba78ac3d54a183f7aacfeca0622448e0011d | /Third.py | cd69de02b8bda17cd2fce2f97e565340f687265e | [] | no_license | saysiva/Phyton | 2dbba9ab1785400c50bfbc82e664069c50b72546 | 916f5bed1f330f8a03388117d1b18957fed40168 | refs/heads/master | 2021-06-03T05:51:28.062326 | 2017-12-03T02:12:15 | 2017-12-03T02:12:15 | 4,682,404 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 251 | py |
list1 = [1,2,3,4,5]
list2 = [2,3,4,5,6]
list3 = [1,2,3]
print (list1);
print (list2);
print (list3);
dot = 0
for e,f,g in zip(list1,list2,list3):
print (e);
print (f);
print (g);
dot += e*f*g
print ("====" * 10)
print (dot);
| [
"noreply@github.com"
] | noreply@github.com |
7447ea78d23cd5832c5067f68109b57365ce8537 | c901c382df0d8eddcdb8c1d1e98a885d0fea3872 | /my_CartPole/d__PG.py | 12d44fcb1b2b2e4a89214a4705c97cee639e1514 | [] | no_license | FHsong/RL | d8b6a560d04732e1f02e7dc07edbb00bae9ca942 | 7ec21517cf372e88f97acde869601d8774770758 | refs/heads/master | 2022-11-17T03:41:24.937974 | 2020-06-30T14:55:51 | 2020-06-30T14:55:51 | 274,860,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,322 | py | import gym,os
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
# Default parameters for plots
matplotlib.rcParams['font.size'] = 18
matplotlib.rcParams['figure.titlesize'] = 18
matplotlib.rcParams['figure.figsize'] = [9, 7]
matplotlib.rcParams['font.family'] = ['KaiTi']
matplotlib.rcParams['axes.unicode_minus']=False
import tensorflow as tf
import tensorlayer as tl
from tensorflow import keras
from tensorflow.keras import layers,optimizers,losses
RANDOMSEED = 1 # 设置随机种子。建议大家都设置,这样试验可以重现。
# 定义策略网络,生成动作的概率分布
class Network(keras.Model):
def __init__(self):
super(Network, self).__init__()
# 输入为长度为4的向量,输出为左、右2个动作
self.fc1 = layers.Dense(30, kernel_initializer='he_normal', activation='relu')
self.fc2 = layers.Dense(16, kernel_initializer='he_normal', activation='relu')
self.fc3 = layers.Dense(2, kernel_initializer='he_normal', activation='softmax')
def call(self, x):
# 状态输入s的shape为向量:[4]
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
class PolicyGradient():
    """Monte-Carlo policy-gradient (REINFORCE) agent for CartPole."""

    def __init__(self, env, learning_rate, n_episode):
        self.env = env
        # Hyper-parameters.
        self.learning_rate = learning_rate  # optimizer step size
        self.gamma = 0.95                   # reward discount factor
        self.n_episode = n_episode
        # Policy network producing action probabilities.
        self.pi = Network()
        # self.pi = self.get_model([None, env.observation_space.shape[0]])
        # self.pi.train()
        self.optimizer = tf.optimizers.Adam(lr=learning_rate)  # network optimizer
        # Per-episode buffers of states, actions and rewards.
        self.ep_states, self.ep_actions, self.ep_rewards = [], [], []

    def get_model(self, inputs_shape):
        """Build an alternative TensorLayer policy network (unused by default).

        Input: state placeholder shape. Output: a TensorLayer Model that maps
        observations to unnormalized action scores.
        """
        self.tf_obs = tl.layers.Input(inputs_shape, tf.float32, name="observations")
        layer = tl.layers.Dense(
            n_units=30, act=tf.nn.tanh, W_init=tf.random_normal_initializer(mean=0, stddev=0.3),
            b_init=tf.constant_initializer(0.1), name='fc1'
        )(self.tf_obs)
        # fc2
        all_act = tl.layers.Dense(
            n_units=self.env.action_space.n, act=None, W_init=tf.random_normal_initializer(mean=0, stddev=0.3),
            b_init=tf.constant_initializer(0.1), name='all_act'
        )(layer)
        return tl.models.Model(inputs=self.tf_obs, outputs=all_act, name='PG model')

    def choose_action(self, state):
        """Sample an action from the policy's probability distribution.

        Input: a single environment state. Output: an int action index;
        actions with higher probability are sampled more often.
        """
        _logits = self.pi(np.array([state], np.float32)).numpy()
        a = np.random.choice(len(_logits[0]), p=_logits[0])
        return int(a)

    def store_transition(self, state, action, reward):
        """Append one (state, action, reward) transition to the episode buffers."""
        self.ep_states.append(np.array([state], np.float32))
        self.ep_actions.append(action)
        self.ep_rewards.append(reward)

    def train(self):
        """Update the policy network with return-weighted log-likelihood loss."""
        # discounted_ep_rs_norm holds the normalized return G of each visited state.
        discounted_ep_rs_norm = self._discount_and_norm_rewards()
        with tf.GradientTape() as tape:
            # Forward all episode states through the policy network.
            _logits = self.pi(np.vstack(self.ep_states))
            # NOTE(review): self.pi ends in a softmax, so _logits are already
            # probabilities, while sparse_softmax_cross_entropy_with_logits
            # expects unscaled logits -- this applies softmax twice. Kept as-is
            # to preserve behavior; consider a linear head + manual softmax.
            neg_log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=_logits, labels=np.array(self.ep_actions))
            # Weight each step's "distance" by its return G (REINFORCE).
            loss = tf.reduce_mean(neg_log_prob * discounted_ep_rs_norm)
        grad = tape.gradient(loss, self.pi.trainable_weights)
        self.optimizer.apply_gradients(zip(grad, self.pi.trainable_weights))
        self.ep_states, self.ep_actions, self.ep_rewards = [], [], []  # empty episode data
        return discounted_ep_rs_norm

    def _discount_and_norm_rewards(self):
        """Compute normalized discounted returns G for the finished episode."""
        # Explicit float dtype: with integer rewards, np.zeros_like would
        # produce an int array and the in-place normalization below would fail.
        discounted_ep_rs = np.zeros_like(self.ep_rewards, dtype=np.float64)
        running_add = 0
        # Walk backwards through the rewards, accumulating discounted returns.
        for t in reversed(range(0, len(self.ep_rewards))):
            running_add = running_add * self.gamma + self.ep_rewards[t]
            discounted_ep_rs[t] = running_add
        # Normalize G so values are centered (positive and negative),
        # which makes learning easier.
        discounted_ep_rs -= np.mean(discounted_ep_rs)
        discounted_ep_rs /= np.std(discounted_ep_rs)
        return discounted_ep_rs

    def run(self):
        """Train for n_episode episodes, printing a 20-episode average reward."""
        print('----------------- Policy Gradient -----------------')
        total_reward = 0
        returns = []
        np.random.seed(RANDOMSEED)
        tf.random.set_seed(RANDOMSEED)
        self.env.seed(RANDOMSEED)  # seeding the env matters a lot for reproducible results
        for episode in range(self.n_episode):
            state = self.env.reset()
            for step in range(200):
                action = self.choose_action(state)
                next_state, reward, done, _ = self.env.step(action)
                total_reward += reward
                # Bug fix: record the state the action was taken in
                # (previously next_state was stored, which mismatches the
                # log-probabilities computed in train()).
                self.store_transition(state, action, reward)
                if done:
                    self.train()
                    break
                state = next_state
            if episode % 20 == 0:
                returns.append(total_reward / 20)
                total_reward = 0
                print('Episode: {}/{} | Episode Average Reward: {:.4f}'
                      .format(episode, self.n_episode, returns[-1]))
| [
"849628370@qq.com"
] | 849628370@qq.com |
187ef3deb7d3b23abc9b01a4aa9cebc080e7fcce | c1114dd424ee0c072fcbeb3258c3e43f6af783b7 | /NewExampleByMyself.py | 09255c8b00e49141df0bfbf328684c57ef984e56 | [] | no_license | tianyunkeml/TensorFlow-Project | c7c6b9e3849f3f4f5ec5288d5687ccb2907e6c03 | 94e1189166aba092d84206792a3f36580d20a538 | refs/heads/master | 2021-01-01T16:37:05.998918 | 2017-07-30T21:11:06 | 2017-07-30T21:11:06 | 97,872,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,198 | py | # I want to try autoencoder with tensorflow myself
# Basic idea is that: the data is a k * k matrix,
# with row m and column n being 1,
# and all other elements are 0:
# eg. m = 2, n = 6, then data may look like this(I used 10 * 10 for simplicity):
# 0 0 0 0 0 1 0 0 0 0
# 1 1 1 1 1 1 1 1 1 1
# 0 0 0 0 0 1 0 0 0 0
# 0 0 0 0 0 1 0 0 0 0
# 0 0 0 0 0 1 0 0 0 0
# 0 0 0 0 0 1 0 0 0 0
# 0 0 0 0 0 1 0 0 0 0
# 0 0 0 0 0 1 0 0 0 0
# 0 0 0 0 0 1 0 0 0 0
# 0 0 0 0 0 1 0 0 0 0
# and I want to encode it into 2-dimensional layer, because 2-dimensional
# data can completely decide it. Lets see the accuracy here.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import random
import pdb
class myExample(object):
    """Autoencoder demo (TF1-style graph/session API).

    Each sample is a flattened matrixSize x matrixSize matrix in which one
    full row and one full column are 1 and everything else is 0; the model
    compresses it through a dense bottleneck and reconstructs it.
    """
    def __init__(self):
        # parameter settings here
        self.matrixSize = 30
        self.dataSize = 900#00
        self.batchSize = 30#0
        self.training_epochs = 10#0
        self.learning_rate = 0.001
        self.n_input = self.matrixSize ** 2   # flattened input dimension
        self.n_hidden_1 = 300
        self.n_hidden_2 = 100                 # bottleneck width
    # Generate data for feeding with batches
    def dataGenerator(self):
        """Return dataSize/batchSize batches, each a list of flattened matrices
        with a uniformly random highlighted row and column."""
        allBatch = []
        for _ in range(int(self.dataSize / self.batchSize)):
            batch = []
            for i in range(self.batchSize):
                randRow = random.randint(1, self.matrixSize)
                randCol = random.randint(1, self.matrixSize)
                batch.append(self.generateOneData(randRow, randCol))
            allBatch.append(batch)
        return allBatch
    def generateOneData(self, row, col):
        """Build one flattened matrix whose (1-based) `row` and `col` are all 1s."""
        data = np.zeros((self.matrixSize, self.matrixSize))
        for i in range(self.matrixSize):
            data[row - 1][i] = 1
            data[i][col - 1] = 1
        return list(data.flatten())
    def tfModel(self):
        """Build the encoder/decoder graph, train on generated batches, then
        plot original vs. reconstructed samples.

        NOTE(review): uses TF1-only APIs (tf.placeholder, tf.Session,
        tf.random_normal); will not run on TF2 without compat shims.
        """
        def encoder(x):
            # Two sigmoid layers: n_input -> n_hidden_1 -> n_hidden_2.
            with tf.device('/gpu:0'):
                layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, self.weights['encoder_h1']),
                                               self.biases['encoder_b1']))
                layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, self.weights['encoder_h2']),
                                               self.biases['encoder_b2']))
                return layer_2
        def decoder(x):
            # Mirror of the encoder: n_hidden_2 -> n_hidden_1 -> n_input.
            with tf.device('/gpu:0'):
                layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, self.weights['decoder_h1']),
                                               self.biases['decoder_b1']))
                layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, self.weights['decoder_h2']),
                                               self.biases['decoder_b2']))
                return layer_2
        data = self.dataGenerator()
        self.weights = {
            'encoder_h1': tf.Variable(tf.random_normal([self.n_input, self.n_hidden_1])),
            'encoder_h2': tf.Variable(tf.random_normal([self.n_hidden_1, self.n_hidden_2])),
            'decoder_h1': tf.Variable(tf.random_normal([self.n_hidden_2, self.n_hidden_1])),
            'decoder_h2': tf.Variable(tf.random_normal([self.n_hidden_1, self.n_input]))
        }
        self.biases = {
            'encoder_b1': tf.Variable(tf.random_normal([self.n_hidden_1])),
            'encoder_b2': tf.Variable(tf.random_normal([self.n_hidden_2])),
            'decoder_b1': tf.Variable(tf.random_normal([self.n_hidden_1])),
            'decoder_b2': tf.Variable(tf.random_normal([self.n_input]))
        }
        # X is the input matrix data
        X = tf.placeholder('float', [None, self.n_input])
        encoder_op = encoder(X)
        decoder_op = decoder(encoder_op)
        # define prediction and true value
        y_pred = decoder_op
        y_true = X
        # define lost function and optimizer
        # (tf.pow(..., 1) is a no-op, so this is mean absolute error / L1 loss)
        cost = tf.reduce_mean(tf.abs(tf.pow(y_true - y_pred, 1)))
        optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(cost)
        # start session
        with tf.Session(config = tf.ConfigProto(log_device_placement = True)) as sess:
            init = tf.global_variables_initializer()
            sess.run(init)
            total_batch = int(self.dataSize / self.batchSize)
            for epoch in range(self.training_epochs):
                for i in range(total_batch):
                    batch = data[i]
                    # pdb.set_trace()
                    _, c = sess.run([optimizer, cost], feed_dict = {X: batch})
                # `c` is the cost of the last batch in this epoch.
                print('Epoch:', '%04d' % (epoch + 1), 'cost = ', '{:.9f}'.format(c))
            # test
            # NOTE(review): the "test" samples come from the last training
            # batch, so this visualizes training fit, not generalization.
            numOfPics = 10
            testData = data[-1][:numOfPics]
            encode_decode = sess.run(
                y_pred, feed_dict = {X: testData})
            # Top row: originals; bottom row: reconstructions.
            f, a = plt.subplots(2, 10, figsize = (10, 2))
            # pdb.set_trace()
            for i in range(numOfPics):
                a[0][i].imshow(np.reshape(testData[i], (self.matrixSize, self.matrixSize)))
                a[1][i].imshow(np.reshape(encode_decode[i], (self.matrixSize, self.matrixSize)))
            plt.show()
if __name__ == '__main__':
myExample().tfModel() | [
"tianyunke666@gmail.com"
] | tianyunke666@gmail.com |
0b6fd5ae007c182ed45f4091fec779a6be824f0e | 9e64d199f399316f2234283caa338b8264aa8d9d | /bi-matrix/DataStat/settings.py | 43efb0dcae021bd673c09e9ed2486df6e9ec251d | [] | no_license | zwj1314/bigdata | adbc74951c901e4804f1ee2325af33835aeb457f | 3245766b84beec1a19e1556abe5ee0afa1b3dd3d | refs/heads/master | 2022-12-25T09:45:04.605954 | 2019-07-09T01:29:22 | 2019-07-09T01:29:22 | 143,855,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,707 | py | # mysql数据库配置
# MySQL connection settings for the BEHAVIOR schema.
# NOTE(review): credentials are hard-coded in version control throughout this
# module; consider loading them from environment variables or a secrets store.
mysqlArgs = {
    "host": '172.16.2.147',
    "port": 3306,
    "user": "BEHAVIOR_APP",
    "password": "czwssYpiZmV6svsQ",
    "db": "BEHAVIOR",
    "charset": 'utf8mb4',
}
# MySQL connection settings for the BIA schema.
biaArgs = {
    "host": '172.16.2.147',
    "port": 3307,
    "user": "BIA_APP",
    "password": "6wiuysOXgKHGvWbR",
    "db": "BIA",
    "charset": 'utf8mb4',
}
# MySQL connection settings for the db_nono schema.
nonoArgs = {
    "host": '172.16.2.109',
    "port": 3315,
    "user": "bi_etl",
    "password": "eEEna6GO",
    "db": "db_nono",
    "charset": 'utf8mb4',
}
# MySQL connection settings for the usr_member schema.
memberArgs = {
    "host": '172.16.2.109',
    "port": 3321,
    "user": "bi_query",
    "password": "4t89me1e",
    "db": "usr_member",
    "charset": 'utf8mb4',
}
# Redis connection settings.
redisArgs = {
    "host": '172.16.2.157',
    "port": 6379,
    "password": "Mzjf_redis"
}
# How far back to read Redis data (unit: minutes).
nMinAgo = 10
# Endpoint for fetching metric statistics.
metrics_api = r'http://172.16.2.160/bi-apiserver/mrc/metrics'
# Maps a business-line code to the metric ids recorded per operation.
bizcode_metrics_list={
    # Metric ids per operation for the white-collar loan business line.
    "87":{'apply':{'count':1007,'amt':1008},
          'jinjian':{'count':1009,'amt':1010},
          'finish':{'count':1011,'amt':1012}
    },
    # Metric ids per operation for the elite loan business line.
    "140":{'apply':{'count':1000,'amt':1001},
           'jinjian':{'count':1002,'amt':1003},
           'finish':{'count':1004,'amt':1005},
           'invitcode':{'count':1006}
    },
    # Metric ids for the principal-protected wealth product.
    "1":{'zf_chujie':{'count':1016,'amt':1014}
    },
    # Metric ids for the Zengfeng account.
    "2":{'zf_chujie':{'count':1015,'amt':1013}
    }
}
| [
"weijiaojiao1994@gmail.com"
] | weijiaojiao1994@gmail.com |
5f5298a16f93cc4d542ecb2156cb15f41cb7de98 | 15ceed561cf14e1e006b4264df65bd1167579719 | /ch06.py | 2a9213e8e849bcedd5d28251c58230725a60acde | [] | no_license | emptist/ekua | e6c697d2d107cfc5383eaabdfbed5e91e04f98cf | 2fff0b2c1760823aa2a89579becb47a833d3ecb4 | refs/heads/master | 2020-05-07T13:56:41.940214 | 2015-05-23T17:14:44 | 2015-05-23T17:14:44 | 32,388,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | py |
# coding: utf-8
# # Data loading, storage, and file formats
# ## I added this: Mongodb testing
# Notebook export: bare expressions (conn, dbns, df2, ...) were cell outputs.
# http://stackoverflow.com/questions/23300295/cant-connect-to-meteor-with-pymongo
# First, install pymongo:
# conda install -c https://conda.binstar.org/travis pymongo
# In[62]:
import pymongo
from pandas import *
# Use Python to modify the Meteor database; changes show up live in the app.
# testing with my meteor project ekua
# In[19]:
# Connect to Meteor's bundled MongoDB instance.
conn = pymongo.MongoClient(r'mongodb://localhost:3001/meteor')
conn
# In[35]:
# NOTE(review): database_names()/collection_names() and Collection.update()
# below are legacy pymongo APIs removed in pymongo 4 -- confirm the pinned
# pymongo version before running.
dbns = conn.database_names()
dbns
# In[59]:
conn.meteor.collection_names()
# In[60]:
sysidx = conn.meteor.system.indexes.find({})
# In[61]:
#from pandas import *
DataFrame(list(sysidx))
# In[73]:
# Load the Vehicles collection into a DataFrame.
stocks = conn.meteor.Vehicles.find({})
dfsts = DataFrame(list(stocks))
dfsts
# In[74]:
dfsts.數量 = [200,100]
#dfsts
# In[81]:
# New values to merge in (columns: code, quantity, quota, operation).
df2 = DataFrame({'代碼':['150153','150154'],
                 '數量':[200,100],
                 '配額':[0.5,0.5],
                 '操作':['買入','賣出']})
df2
# In[118]:
ndf = dfsts.merge(df2)
ndf
# In[126]:
ndf.T.to_dict()
# In[128]:
# Re-key the merged rows by Mongo _id so they can be written back.
ndict = ndf.set_index('_id').T.to_dict()
ndict
# In[130]:
ndict.keys()
# In[131]:
for i in ndict:
    print(ndict[i])
# In[ ]:
# Write each merged document back into the Vehicles collection.
stocks = conn.meteor.Vehicles
for idx in ndict:
    stocks.update({'_id':idx}, ndict[idx])
# In[136]:
DataFrame(list(stocks.find()))
| [
"jigme1968@gmail.com"
] | jigme1968@gmail.com |
07def4cb2871dff1a7a59d1b63f632293e8344e1 | 3abf46191ac74fbf55444b87f06825d399c88eab | /heyyz/__version__.py | e04e81957b271109ddb1aba4a802345473d8af1d | [
"MIT"
] | permissive | CarlHey/heyyz | 15c15251ea476df3b6ab18c713972788a062e522 | 0fe0c412be4b0abb787bc36a89b6443a37e17d38 | refs/heads/master | 2023-05-03T02:47:58.090989 | 2021-05-26T09:32:03 | 2021-05-26T09:32:03 | 328,415,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | VERSION = (0, 1, 9)
# Dotted version string derived from the VERSION tuple, e.g. (0, 1, 9) -> "0.1.9".
__version__ = ".".join(str(part) for part in VERSION)
| [
"hey@heying.site"
] | hey@heying.site |
330ab40209e6e5f5882ecd62a64b9790681af307 | e16788d0863db2e8e44dd042cff536c7ff3d9006 | /basic_train.py | c7ba03890d5ab9641c1fc802f838b7c5c9a9aa8a | [] | no_license | egroeg121/CNN-Training-Template | 1b2e9abb6929694e9047a348846951428bbfa9de | 4768ec6d45dc4934048b53a14a2e4c679e3add13 | refs/heads/master | 2020-09-22T12:59:27.636856 | 2019-12-01T18:26:30 | 2019-12-01T18:26:30 | 225,206,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,654 | py | import torch
from torch import nn,optim
from torch.nn import functional
from torchvision import datasets,transforms
class trainer:
    """Minimal MNIST training harness wiring model, loss and optimizer
    together with epoch/iteration loops."""

    def __init__(self):
        self.train_loader, self.test_loader = dataloaders(batch_size=32)
        self.model = basicConv()
        self.loss_func = nn.CrossEntropyLoss()
        self.optimizer = optim.SGD(self.model.parameters(), lr=0.001, momentum=0.9)
        self.epoch = 0
        self.train_loss_history = []

    def train_iter(self, data):
        """Run one optimization step on a single batch.

        Args:
            data: a (inputs, labels) pair as yielded by the DataLoader.
        Returns:
            The loss tensor for this batch.
        """
        inputs, labels = data
        # zero the parameter gradients
        self.optimizer.zero_grad()
        # forward + backward + optimize
        outputs = self.model(inputs)
        # Bug fix: the loss must come from the criterion; the original called
        # self.model(outputs, labels), i.e. ran the network a second time.
        loss = self.loss_func(outputs, labels)
        loss.backward()
        self.optimizer.step()
        return loss

    def validate(self):
        """Placeholder for evaluation on self.test_loader (not implemented)."""
        pass

    def train_epoch(self):
        """Train over the whole train_loader once; return the summed loss."""
        self.epoch += 1
        running_loss = 0.0
        for i, data in enumerate(self.train_loader):
            iter_loss = self.train_iter(data)
            # .item() detaches the scalar so the accumulator does not keep
            # every batch's autograd graph alive for the whole epoch.
            running_loss += iter_loss.item()
        self.train_loss_history.append(running_loss)
        return running_loss
def dataloaders(loc='../data', batch_size=1, shuffle=True):
    """Create MNIST train/test DataLoaders.

    The training split is downloaded on first use; both splits use the
    standard MNIST normalization. The test loader is never shuffled.
    """
    def _make_loader(train):
        preprocess = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
        ])
        dataset = datasets.MNIST(loc, train=train, download=train,
                                 transform=preprocess)
        return torch.utils.data.DataLoader(
            dataset, batch_size=batch_size,
            shuffle=shuffle if train else False)

    return _make_loader(True), _make_loader(False)
class basicConv(nn.Module):
    """Small MNIST CNN: two conv layers, dropout, two fully connected
    layers, log-softmax output over 10 classes."""

    def __init__(self):
        super(basicConv, self).__init__()
        # Layer construction order is kept stable so random initialization
        # is reproducible under a fixed seed.
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout2d(0.25)
        self.dropout2 = nn.Dropout2d(0.5)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        """Return per-class log-probabilities for a batch of 1x28x28 images."""
        features = functional.relu(self.conv1(x))
        features = functional.max_pool2d(self.conv2(features), 2)
        features = self.dropout1(features)
        flat = torch.flatten(features, 1)
        hidden = self.dropout2(functional.relu(self.fc1(flat)))
        return functional.log_softmax(self.fc2(hidden), dim=1)
"george.barnett.121@gmail.com"
] | george.barnett.121@gmail.com |
f9a984456905dac28a3adcaa81b7e1929610d9ea | 62adf11c3fca497e4eb90dac671c2ab6188feae6 | /script/deploy_native.py | 3b907c188b77fd836a1de34bef30cdd948a2a49b | [
"Apache-2.0"
] | permissive | LouisCAD/JWM | 5fff028b1d3c56bbdebb88649cc363d50f3ce2ce | 38a27e04bfda410572780235e9f5a66ed0a15548 | refs/heads/main | 2023-06-13T02:23:49.313293 | 2021-07-05T17:56:43 | 2021-07-05T17:56:43 | 383,407,926 | 2 | 0 | Apache-2.0 | 2021-07-06T09:09:19 | 2021-07-06T09:09:19 | null | UTF-8 | Python | false | false | 3,397 | py | #! /usr/bin/env python3
import argparse, build, clean, common, glob, os, platform, revision, subprocess, sys
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--dry-run", action="store_true")
(args, _) = parser.parse_known_args()
# Build
build.build_native()
# Update poms
os.chdir(os.path.dirname(__file__) + "/../" + common.system)
rev = revision.revision()
artifact = "jwm-" + common.system + "-" + common.arch
with open("deploy/META-INF/maven/org.jetbrains.jwm/" + artifact + "/pom.xml", "r+") as f:
pomxml = f.read()
f.seek(0)
f.write(pomxml.replace("0.0.0-SNAPSHOT", rev))
f.truncate()
with open("deploy/META-INF/maven/org.jetbrains.jwm/" + artifact + "/pom.properties", "r+") as f:
pomprops = f.read()
f.seek(0)
f.write(pomprops.replace("0.0.0-SNAPSHOT", rev))
f.truncate()
with open(os.path.join('build', 'jwm.version'), 'w') as f:
f.write(rev)
# .jar
print("Packaging " + artifact + "-" + rev + ".jar")
subprocess.check_call(["jar",
"--create",
"--file", "target/" + artifact + "-" + rev + ".jar",
"-C", "build", build.target_native,
"-C", "build", "jwm.version",
"-C", "target/classes", ".",
"-C", "deploy", "META-INF/maven/org.jetbrains.jwm/" + artifact
])
if not args.dry_run:
print("Deploying", artifact + "-" + rev + ".jar")
subprocess.check_call([
common.mvn,
"--batch-mode",
"--settings", "deploy/settings.xml",
"-Dspace.username=Nikita.Prokopov",
"-Dspace.password=" + os.getenv("SPACE_TOKEN"),
"deploy:deploy-file",
"-Dfile=target/" + artifact + "-" + rev + ".jar",
"-DpomFile=deploy/META-INF/maven/org.jetbrains.jwm/" + artifact + "/pom.xml",
"-DrepositoryId=space-maven",
"-Durl=" + common.space_jwm,
])
# -sources.jar
lombok = common.deps()[0]
print("Delomboking sources")
classpath = common.deps() + ["../shared/target/classes"]
subprocess.check_call([
"java",
"-jar",
lombok,
"delombok",
"java",
"--classpath",
common.classpath_separator.join(classpath),
"-d", "target/delomboked/org/jetbrains/jwm"
])
print("Packaging " + artifact + "-" + rev + "-sources.jar")
subprocess.check_call([
"jar",
"--create",
"--file", "target/" + artifact + "-" + rev + "-sources.jar",
"-C", "target/delomboked", ".",
"-C", "deploy", "META-INF/maven/org.jetbrains.jwm/" + artifact
])
if not args.dry_run:
print("Deploying " + artifact + "-" + rev + "-sources.jar")
mvn = "mvn.cmd" if common.system == "windows" else "mvn"
subprocess.check_call([
mvn,
"--batch-mode",
"--settings", "deploy/settings.xml",
"-Dspace.username=Nikita.Prokopov",
"-Dspace.password=" + os.getenv("SPACE_TOKEN"),
"deploy:deploy-file",
"-Dpackaging=java-source",
"-Dfile=target/" + artifact + "-" + rev + "-sources.jar",
"-DpomFile=deploy/META-INF/maven/org.jetbrains.jwm/" + artifact + "/pom.xml",
"-DrepositoryId=space-maven",
"-Durl=" + common.space_jwm,
])
# Restore poms
with open("deploy/META-INF/maven/org.jetbrains.jwm/" + artifact + "/pom.xml", "w") as f:
f.write(pomxml)
with open("deploy/META-INF/maven/org.jetbrains.jwm/" + artifact + "/pom.properties", "w") as f:
f.write(pomprops)
return 0
# Script entry point: forward main()'s status code to the shell.
if __name__ == "__main__":
  sys.exit(main())
| [
"niki@tonsky.me"
] | niki@tonsky.me |
4e5edd8fae1f0b8969b8d01ebb9cdc696f1cb1e4 | 0abc546a1442cae56ddcdc43f85497b37fc89036 | /scripts/graph_check_transitivity.py | b1793d5f4b9923cd0e824952d489b64036bc0a11 | [] | no_license | yangjl/cgat | 01a535531f381ace0afb9ed8dc3a0fcff6290446 | 01758b19aa1b0883f0e648f495b570f1b6159be4 | refs/heads/master | 2021-01-18T03:55:14.250603 | 2014-02-24T10:32:45 | 2014-02-24T10:32:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,395 | py | '''
graph_check_transitivity.py -
======================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. todo::
describe purpose of the script.
Usage
-----
Example::
python graph_check_transitivity.py --help
Type::
python graph_check_transitivity.py --help
for command line help.
Command line options
--------------------
'''
import os
import sys
import string
import re
import getopt
import time
import optparse
import math
import tempfile
""" program $Id: graph_check_transitivity.py 2782 2009-09-10 11:40:29Z andreas $
python graph_check_transitivity < graph.in
check whether all edges in a graph are transitive, i.e.,
for every two edges A->B and B->C check whether A->C exists.
Edges are taken to be undirected.
"""
import CGAT.Experiment as E
import CGAT.Histogram as Histogram
def main( argv = None ):
    """script main.

    Reads an undirected graph as tab-separated edges from stdin and checks
    transitivity: for every path v1 - v2 - v3 it tests whether the closing
    edge v1 - v3 (in either direction) exists, then prints summary counts.

    parses command line options in sys.argv, unless *argv* is given.
    """

    if argv is None:
        argv = sys.argv

    parser = E.OptionParser( version = "%prog version: $Id: graph_check_transitivity.py 2782 2009-09-10 11:40:29Z andreas $")

    parser.add_option("--filename-missing", dest="filename_missing", type="string",
                      help="missing entries.")
    parser.add_option("--filename-found", dest="filename_found", type="string",
                      help="found entries.")
    parser.add_option("--report-step1", dest="report_step1", type="int",
                      help="report interval for input.")
    parser.add_option("--report-step2", dest="report_step2", type="int",
                      help="report interval for processing.")
    parser.add_option("--use-subsets", dest="subsets", action="store_true",
                      help="do subset calculation. Third field contains a redundancy code.")

    parser.set_defaults(
        filename_missing = None,
        filename_found = None,
        report_step1 = 100000,
        report_step2 = 10000,
        subsets = False,
        )

    (options, args) = E.Start( parser )

    # retrieve data: adjacency lists of (neighbour, code) tuples per vertex
    vals = {}

    niterations = 0
    ninput = 0
    for line in sys.stdin:
        if line[0] == "#": continue
        niterations += 1
        if options.loglevel >= 1 and (niterations % options.report_step1 == 0):
            options.stdlog.write( "# input: %i\n" % (niterations))
            options.stdlog.flush()
        v1, v2, w = line[:-1].split("\t")[:3]
        # skip self-loops
        if v1 == v2: continue
        if v1 not in vals: vals[v1] = []
        if v2 not in vals: vals[v2] = []
        if not options.subsets:
            # without subset codes, every edge gets a unique code (its index)
            w = ninput
        vals[v1].append( (v2, w) )
        vals[v2].append( (v1, w) )
        ninput += 1

    ## make everything unique
    for key, v1 in vals.items():
        vals[key] = tuple(set(v1))

    # sorted() works on both a py2 list and a py3 view (the original
    # vals.keys(); keys.sort() was Python-2-only)
    keys = sorted(vals)

    niterations = 0
    nkeys = len(keys)
    missing = []
    ntotal = 0
    nfound = 0
    # set of already-processed (code, code) pairs so each edge pair is
    # tested only once
    counted = set()
    nremoved = 0

    if options.filename_found:
        outfile_found = open(options.filename_found, "w")

    for v1 in keys:
        niterations += 1
        if options.loglevel >= 1 and (niterations % options.report_step2 == 0):
            options.stdlog.write( "# loop: %i\n" % (niterations))
            options.stdlog.flush()
        for v2, c2 in vals[v1]:
            ## only do half-symmetric test
            for v3, c3 in vals[v2]:
                if (c2, c3) in counted:
                    nremoved += 1
                    continue
                ## do not do self-comparisons
                if v1 == v3: continue
                if c2 == c3: continue
                counted.add( (c2, c3) )
                ntotal += 1
                # is the closing edge v1 - v3 present in either direction?
                if any(n == v3 for n, _ in vals[v1]) or any(n == v1 for n, _ in vals[v3]):
                    nfound += 1
                    if options.filename_found:
                        outfile_found.write( "\t".join( (v1, v2, v3) ) + "\n" )
                else:
                    missing.append( (v1, v2, v3) )

    nmissing = len(missing)

    # guard against empty input to avoid division by zero in the ratios
    denominator = float(ntotal) if ntotal else 1.0

    options.stdout.write( "number of edges\t%i\n" % ninput)
    options.stdout.write( "number of vertices\t%i\n" % nkeys)
    options.stdout.write( "number of removed triplets\t%i\n" % nremoved)
    # NOTE(review): this ratio is always 1.0 (ntotal over itself); possibly
    # ntotal / (ntotal + nremoved) was intended -- kept as in the original.
    options.stdout.write( "number of tested triplets\t%i\t%6.4f\n" % (ntotal, float(ntotal) / denominator))
    options.stdout.write( "number of realized triplets\t%i\t%6.4f\n" % (nfound, float(nfound) / denominator))
    options.stdout.write( "number of incomplete triplets\t%i\t%6.4f\n" % (nmissing, float(nmissing) / denominator))

    if options.filename_missing:
        outfile = open(options.filename_missing, "w")
        for v1, v2, v3 in missing:
            outfile.write( "\t".join( (v1, v2, v3) ) + "\n")
        outfile.close()

    if options.filename_found:
        outfile_found.close()

    E.Stop()
# Script entry point: exit with main()'s return code.
if __name__ == "__main__":
    sys.exit( main( sys.argv) )
| [
"andreas.heger@gmail.com"
] | andreas.heger@gmail.com |
1d9c3616c035da8730928b2c6d124ebe273b931d | afd2087e80478010d9df66e78280f75e1ff17d45 | /torch/distributed/checkpoint/state_dict_saver.py | a99cd129aeb637da7d11cb88ad101de0a72d8c56 | [
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | pytorch/pytorch | 7521ac50c47d18b916ae47a6592c4646c2cb69b5 | a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4 | refs/heads/main | 2023-08-03T05:05:02.822937 | 2023-08-03T00:40:33 | 2023-08-03T04:14:52 | 65,600,975 | 77,092 | 24,610 | NOASSERTION | 2023-09-14T21:58:39 | 2016-08-13T05:26:41 | Python | UTF-8 | Python | false | false | 4,458 | py | from typing import Optional
import torch
import torch.distributed as dist
from .planner import SavePlanner
from .default_planner import DefaultSavePlanner
from .storage import (
StorageWriter,
)
from .metadata import Metadata, STATE_DICT_TYPE
from .utils import _DistWrapper
__all__ = ["save_state_dict"]
def save_state_dict(
    state_dict: STATE_DICT_TYPE,
    storage_writer: StorageWriter,
    process_group: Optional[dist.ProcessGroup] = None,
    coordinator_rank: int = 0,
    no_dist: bool = False,
    planner: Optional[SavePlanner] = None,
) -> Metadata:
    """
    Saves a distributed model in SPMD style.

    This function is different from ``torch.save()`` as it handles
    ``ShardedTensor`` by having each rank only save their local shards.

    .. warning::
        There is no guarantees of Backwards Compatibility across PyTorch versions
        for saved state_dicts.

    .. warning::
        If using the `process_group` argument, make sure that only its ranks
        call `save_state_dict` and that all data in state_dict belong to it.

    .. note::
        When saving checkpoint for FSDP's `ShardingStrategy.HYBRID_SHARD`, only one of
        the shard_group should be calling `save_state_dict` and the corresponding process
        group needs to be passed in.

    .. note::
        This function can be used to save a state_dict without having a process group
        initialized by passing ``no_dist=True``.


    Args:
        state_dict (Dict[str, Any]): The state_dict to save.
        storage_writer (StorageWriter):
            Instance of StorageWrite use to perform writes.
        process_group (ProcessGroup):
            ProcessGroup to be used for cross-rank synchronization.
        coordinator_rank (int): Rank to use to coordinate the checkpoint.
            rank0 is used by default.
        no_dist (bool): If ``True``, distributed checkpoint will not save
            in SPMD style. (Default: ``False``)

    Returns:
        Metadata: Metadata object for the saved checkpoint.

    Example:
        >>> # xdoctest: +SKIP
        >>> my_model = MyModule()

        >>> model_state_dict = my_model.state_dict()

        >>> fs_storage_writer = torch.distributed.checkpoint.FileSystemWriter("/checkpoint/1")
        >>> torch.distributed.checkpoint.save_state_dict(
        >>>     state_dict=model_state_dict,
        >>>     storage_writer=fs_storage_writer,
        >>> )

    .. note::
        save_state_dict uses collectives to coordinate writes across ranks.
        For NCCL-based process groups, internal tensor representations of
        objects must be moved to the GPU device before communication takes place.
        In this case, the device used is given by ``torch.cuda.current_device()``
        and it is the user's responsibility to ensure that this is set so that
        each rank has an individual GPU, via ``torch.cuda.set_device()``.
    """
    torch._C._log_api_usage_once("torch.distributed.checkpoint.save_state_dict")

    # Wraps the process group (or runs without collectives when no_dist) and
    # records which rank coordinates the planning/finish steps below.
    distW = _DistWrapper(process_group, not no_dist, coordinator_rank)
    if planner is None:
        planner = DefaultSavePlanner()
    assert planner is not None

    # Filled in on the coordinator during global_step; consumed when
    # finishing the checkpoint. (Spelling kept as-is.)
    global_metatadata = None

    def local_step():
        # Phase 1 (every rank): build a local save plan and let the storage
        # layer amend it.
        assert planner is not None
        planner.set_up_planner(state_dict, distW.is_coordinator)
        storage_writer.set_up_storage_writer(distW.is_coordinator)
        local_plan = planner.create_local_plan()
        local_plan = storage_writer.prepare_local_plan(local_plan)
        return local_plan

    def global_step(all_local_plans):
        # Phase 2 (coordinator only): merge all local plans into a global
        # plan plus the checkpoint metadata, then scatter plans back.
        nonlocal global_metatadata

        assert planner is not None
        all_local_plans, global_metatadata = planner.create_global_plan(
            all_local_plans
        )
        all_local_plans = storage_writer.prepare_global_plan(all_local_plans)
        return all_local_plans

    # reduce_scatter: gather local plans on the coordinator, run global_step
    # there, and hand each rank its finalized plan.
    central_plan = distW.reduce_scatter("plan", local_step, global_step)

    def write_data():
        # Phase 3 (every rank): perform the actual (async) writes for this
        # rank's portion of the plan and wait for completion.
        assert planner is not None
        final_local_plan = planner.finish_plan(central_plan)
        all_writes = storage_writer.write_data(final_local_plan, planner)

        all_writes.wait()
        return all_writes.value()

    def finish_checkpoint(all_results):
        # Phase 4 (coordinator only): commit the checkpoint metadata once all
        # ranks report their write results.
        assert global_metatadata is not None
        storage_writer.finish(metadata=global_metatadata, results=all_results)
        return global_metatadata

    # all_reduce: every rank writes, then the coordinator finishes and the
    # resulting Metadata is broadcast to all ranks.
    return distW.all_reduce("write", write_data, finish_checkpoint)
| [
"pytorchmergebot@users.noreply.github.com"
] | pytorchmergebot@users.noreply.github.com |
b695dc1cd6ac27aeb81909e86ad63a50c0fac5c4 | 23611933f0faba84fc82a1bc0a85d97cf45aba99 | /google-cloud-sdk/lib/surface/compute/instance_groups/describe.py | 8a88e0e197d87deb862c3ee4c7fd71f847b772b4 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | KaranToor/MA450 | 1f112d1caccebdc04702a77d5a6cee867c15f75c | c98b58aeb0994e011df960163541e9379ae7ea06 | refs/heads/master | 2021-06-21T06:17:42.585908 | 2020-12-24T00:36:28 | 2020-12-24T00:36:28 | 79,285,433 | 1 | 1 | Apache-2.0 | 2020-12-24T00:38:09 | 2017-01-18T00:05:44 | Python | UTF-8 | Python | false | false | 1,985 | py | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for describing instance groups."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import instance_groups_utils
class Describe(base_classes.MultiScopeDescriber):
  """Describe an instance group."""

  SCOPES = (base_classes.ScopeType.regional_scope,
            base_classes.ScopeType.zonal_scope)

  @property
  def global_service(self):
    """Instance groups have no global API service."""
    return None

  @property
  def global_resource_type(self):
    """Instance groups have no global resource collection."""
    return None

  @property
  def regional_service(self):
    """API service used for region-scoped instance groups."""
    return self.compute.regionInstanceGroups

  @property
  def regional_resource_type(self):
    """Resource collection name for region-scoped instance groups."""
    return 'regionInstanceGroups'

  @property
  def zonal_service(self):
    """API service used for zone-scoped instance groups."""
    return self.compute.instanceGroups

  @property
  def zonal_resource_type(self):
    """Resource collection name for zone-scoped instance groups."""
    return 'instanceGroups'

  @staticmethod
  def Args(parser):
    """Register the region/zone scope flags for this command."""
    base_classes.MultiScopeDescriber.AddScopeArgs(
        parser, 'instanceGroups', Describe.SCOPES)

  def ComputeDynamicProperties(self, args, items):
    """Annotate each listed group with its manager-membership information."""
    return instance_groups_utils.ComputeInstanceGroupManagerMembership(
        compute=self.compute,
        project=self.project,
        http=self.http,
        batch_url=self.batch_url,
        items=items,
        filter_mode=instance_groups_utils.InstanceGroupFilteringMode.ALL_GROUPS)


Describe.detailed_help = base_classes.GetMultiScopeDescriberHelp(
    'instance group', Describe.SCOPES)
| [
"toork@uw.edu"
] | toork@uw.edu |
de542b456aefd96706241bd989b44dee32e69555 | 08c4578e0ac25c33ecb56652e15a1cf76fe1dfd1 | /Snakefile | ac767cc73650764ae7a359f77ca107ab8f725af0 | [
"MIT"
] | permissive | explodecomputer/covid-uob-pooling | 45ac3ac555c399faaa9be207b5e761be7d244922 | 79ad1440f2dd8ebf4b3d0385366a6019ca821495 | refs/heads/master | 2023-03-22T09:03:21.849186 | 2021-03-12T22:13:17 | 2021-03-12T22:13:17 | 299,046,322 | 0 | 1 | null | 2020-12-19T22:37:14 | 2020-09-27T14:10:55 | R | UTF-8 | Python | false | false | 1,216 | import os
# Ensure the expected output directories exist before any rule runs.
os.makedirs("data", exist_ok=True)
os.makedirs("results", exist_ok=True)
os.makedirs("docs", exist_ok=True)

# Default target: the rendered simulation report.
rule all:
    input: "docs/sim.html"

# Parse the living-circles spreadsheet into an R data file.
rule data:
    input: "data/Living Circles Count Update.xlsx"
    output: "data/circles.rdata"
    shell:
        "cd scripts; Rscript data.r"

# Precompute containment parameters.
rule containment:
    input: "scripts/containment.r"
    output: "data/containment.rdata"
    shell:
        "cd scripts; Rscript containment.r"

# Positive-predictive-value report.
# NOTE(review): this rule has no shell/script directive, so Snakemake cannot
# produce its output -- presumably rendered manually; confirm.
rule ppv:
    input: "docs/ppv.rmd"
    output: "docs/ppv.html"

# Render the Ct-value report; also emits efficiency parameters for sim.
rule ct:
    input: "docs/ct.rmd"
    output: "data/efficiency_params.rdata", "docs/ct.html"
    shell:
        "cd docs; Rscript -e 'rmarkdown::render(\"ct.rmd\", output_format=\"all\")'"

# Render the lateral-flow-device report; also emits the LFD model fit.
rule lfd:
    input: "docs/lfd.Rmd"
    output: "data/lfd_fit.rdata", "docs/lfd.html"
    shell:
        "cd docs; Rscript -e 'rmarkdown::render(\"lfd.Rmd\", output_format=\"all\")'"

# Run the pooling simulation using all precomputed inputs.
rule sim:
    input: "data/circles.rdata", "scripts/functions.r", "scripts/sim.r", "data/lfd_fit.rdata", "data/efficiency_params.rdata", "data/containment.rdata"
    output: "results/sim.rdata"
    shell:
        "cd scripts; Rscript sim.r"

# Render the final simulation report from the simulation results.
rule sim_doc:
    input: "results/sim.rdata", "docs/sim.rmd"
    output: "docs/sim.html"
    shell:
        "cd docs; Rscript -e 'rmarkdown::render(\"sim.rmd\", output_format=\"all\")'"
"explodecomputer@gmail.com"
] | explodecomputer@gmail.com | |
a591d050d766e7e68ee405e98d8330371871d6f9 | 001f70eb08670694976e182794cb842c1c1536a8 | /tests/test_int.py | 3b4b8f02950832634a08ae4f8e0b63ecdbd35a68 | [] | no_license | hitesh70738/Final_project_crud_app | 0c04623739b25b1b7527374f4faf8c3f7ba35975 | 6292c4673a63bc7862d9964ba4650004ce46dd6e | refs/heads/main | 2023-02-09T07:26:37.289238 | 2021-01-05T15:07:12 | 2021-01-05T15:07:12 | 323,407,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,502 | py | import unittest
import time
from flask import url_for
from urllib.request import urlopen
from os import getenv
from flask_testing import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from application import app, db
from application.models import Teams, Players
class TestBase(LiveServerTestCase):
    """Shared Selenium fixture: boots the Flask app on a live server and
    drives it with headless Chromium; resets the database around each test."""

    def create_app(self):
        # Configure the app from environment variables before the live
        # server is started by flask_testing.
        app.config['SQLALCHEMY_DATABASE_URI'] = getenv("DATABASE_URI")
        app.config['SECRET_KEY'] = getenv("SECRET_KEY")
        return app

    def setUp(self):
        """Setup the test driver and create test users"""
        print("--------------------------NEXT-TEST----------------------------------------------")
        chrome_options = Options()
        chrome_options.binary_location = "/usr/bin/chromium-browser"
        chrome_options.add_argument("--headless")
        # NOTE(review): the chromedriver path is machine-specific and the
        # `chrome_options=` keyword is deprecated in newer Selenium releases
        # (use `options=`) -- confirm against the pinned selenium version.
        self.driver = webdriver.Chrome(executable_path="/home/hites/chromedriver", chrome_options=chrome_options)
        self.driver.get("http://localhost:5000")
        # Recreate the schema so every test starts with an empty database.
        db.session.commit()
        db.drop_all()
        db.create_all()

    def tearDown(self):
        # Shut the browser down after each test.
        self.driver.quit()
        print("--------------------------END-OF-TEST----------------------------------------------\n\n\n-------------------------UNIT-AND-SELENIUM-TESTS----------------------------------------------")

    def test_server_is_up_and_running(self):
        # Smoke test: the live server answers with HTTP 200.
        response = urlopen("http://localhost:5000")
        self.assertEqual(response.code, 200)
class TestCreateTeam(TestBase):
def create_team(self):
self.driver.find_element_by_xpath("/html/body/a[2]").click()
time.sleep(1)
self.driver.find_element_by_xpath('//*[@id="team_name"]').send_keys('Arsenal')
self.driver.find_element_by_xpath('//*[@id="sponsor"]').send_keys('Emirates')
self.driver.find_element_by_xpath('//*[@id="submit"]').click()
time.sleep(1)
assert url_for('home') in self.driver.current_url
assert Teams.query.filter_by(id=1).first().team_name == 'Arsenal'
assert Teams.query.filter_by(id=1).first().sponsor == 'Emirates'
class TestAddPlayer(TestCreateTeam):
def add_player(self):
self.driver.find_element_by_xpath('/html/body/form[3]/input').click()
time.sleep(1)
self.driver.find_element_by_xpath('//*[@id="name"]').send_keys('Thomas Partey')
self.driver.find_element_by_xpath('//*[@id="position"]').send_keys('CDM')
self.driver.find_element_by_xpath('//*[@id="club"]').send_keys('Arsenal FC')
self.driver.find_element_by_xpath('//*[@id="height"]').send_keys('1.8')
self.driver.find_element_by_xpath('//*[@id="submit"]').click()
time.sleep(1)
assert url_for('add') in self.driver.current_url
assert Players.query.filter_by(id=1).first().name == 'Thomas Partey'
assert Players.query.filter_by(id=1).first().position == 'CDM'
assert Players.query.filter_by(id=1).first().position == 'Arsenal FC'
assert Players.query.filter_by(id=1).first().position == '1.8'
class TestAddPlayer1(TestCreateTeam):
def add_player(self):
self.driver.find_element_by_xpath('/html/body/form[3]/input').click()
time.sleep(1)
self.driver.find_element_by_xpath('//*[@id="name"]').send_keys('Thomas')
self.driver.find_element_by_xpath('//*[@id="position"]').send_keys('CM')
self.driver.find_element_by_xpath('//*[@id="club"]').send_keys('Watford FC')
self.driver.find_element_by_xpath('//*[@id="height"]').send_keys('1.2')
self.driver.find_element_by_xpath('//*[@id="submit"]').click()
time.sleep(1)
assert url_for('add') in self.driver.current_url
assert Players.query.filter_by(id=1).first().name == 'Thomas'
assert Players.query.filter_by(id=1).first().position == 'CM'
assert Players.query.filter_by(id=1).first().position == 'Watford FC'
assert Players.query.filter_by(id=1).first().position == '1.2'
class TestAddPlayer2(TestCreateTeam):
def add_player(self):
self.driver.find_element_by_xpath('/html/body/form[3]/input').click()
time.sleep(1)
self.driver.find_element_by_xpath('//*[@id="name"]').send_keys('Partey')
self.driver.find_element_by_xpath('//*[@id="position"]').send_keys('CB')
self.driver.find_element_by_xpath('//*[@id="club"]').send_keys('')
self.driver.find_element_by_xpath('//*[@id="height"]').send_keys('')
self.driver.find_element_by_xpath('//*[@id="submit"]').click()
time.sleep(1)
assert url_for('add') in self.driver.current_url
assert Players.query.filter_by(id=1).first().name == 'Partey'
assert Players.query.filter_by(id=1).first().position == 'CB'
assert Players.query.filter_by(id=1).first().position == ''
assert Players.query.filter_by(id=1).first().position == ''
class TestAddPlayer3(TestCreateTeam):
def add_player(self):
self.driver.find_element_by_xpath('/html/body/form[3]/input').click()
time.sleep(1)
self.driver.find_element_by_xpath('//*[@id="name"]').send_keys('Bale')
self.driver.find_element_by_xpath('//*[@id="position"]').send_keys('LW')
self.driver.find_element_by_xpath('//*[@id="club"]').send_keys('')
self.driver.find_element_by_xpath('//*[@id="height"]').send_keys('')
self.driver.find_element_by_xpath('//*[@id="submit"]').click()
time.sleep(1)
assert url_for('add') in self.driver.current_url
assert Players.query.filter_by(id=1).first().name == 'Bale'
assert Players.query.filter_by(id=1).first().position == 'LW'
assert Players.query.filter_by(id=1).first().position == ''
assert Players.query.filter_by(id=1).first().position == ''
class TestAddPlayer4(TestCreateTeam):
def add_player(self):
self.driver.find_element_by_xpath('/html/body/form[3]/input').click()
time.sleep(1)
self.driver.find_element_by_xpath('//*[@id="name"]').send_keys('Messi')
self.driver.find_element_by_xpath('//*[@id="position"]').send_keys('RW')
self.driver.find_element_by_xpath('//*[@id="club"]').send_keys('Barca')
self.driver.find_element_by_xpath('//*[@id="height"]').send_keys('')
self.driver.find_element_by_xpath('//*[@id="submit"]').click()
time.sleep(1)
assert url_for('add') in self.driver.current_url
assert Players.query.filter_by(id=1).first().name == 'Messi'
assert Players.query.filter_by(id=1).first().position == 'RW'
assert Players.query.filter_by(id=1).first().position == 'Barca'
assert Players.query.filter_by(id=1).first().position == ''
class TestUpdateTeam(TestCreateTeam):
def create_team(self):
self.driver.find_element_by_xpath("/html/body/a[1]").click()
time.sleep(1)
self.driver.find_element_by_xpath('//*[@id="team_name"]').send_keys('New team')
self.driver.find_element_by_xpath('//*[@id="sponsor"]').send_keys('New sponsor')
self.driver.find_element_by_xpath('//*[@id="submit"]').click()
assert url_for('home') in self.driver.current_url
class TestDeleteTeam(TestCreateTeam):
def create_team(self):
self.driver.find_element_by_xpath("/html/body/form[2]/input").click()
time.sleep(1)
assert url_for('delete') in self.driver.current_url
if __name__ == '__main__':
unittesting.main(port=5000) | [
"hitesh70738@gmail.com"
] | hitesh70738@gmail.com |
e310587519e72a83f6f1e4449d2a4bbda201c315 | 3221f4e12a1e0c6fb574a71113732914e367efad | /tests/test_calculator_oop.py | a643c1c437d5bf78b9510b290595bed74fe00e9f | [] | no_license | akinleries/python-works | d618fd05011f970e73054cdb153f11cf82f83038 | 0d4e2e41fe8636d7d5cd49d562c5bd43d50955e2 | refs/heads/master | 2023-04-11T15:32:41.201671 | 2021-04-30T15:08:33 | 2021-04-30T15:08:33 | 363,176,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,222 | py | import unittest
from calculate_oop import Calculator
class calculator_oop_test(unittest.TestCase):
def test_add_result(self):
self.assertEqual(Calculator.add(2, 3), 5)
self.assertEqual(Calculator.add(-2, -3), -5)
self.assertEqual(Calculator.add(-2, 3), 1)
self.assertEqual(Calculator.add(5345676540, 323456745), 5345676540 + 323456745)
def test_add_result_type(self):
self.assertIsInstance(Calculator.add(2, 3), int)
self.assertIsInstance(Calculator.add(-2, -3), int)
self.assertIsInstance(Calculator.add(2, -3), int)
self.assertIsInstance(Calculator.add(5345676540, 323456745), int)
def test_add_non_int_type(self):
with self.assertRaises(TypeError):
Calculator.add(2, "wer")
def test_subtract_result(self):
self.assertEqual(Calculator.subtract(5, 2), 3)
self.assertEqual(Calculator.subtract(3, 2), 1)
self.assertEqual(Calculator.subtract(34, 24), 10)
self.assertEqual(Calculator.subtract(5345676540, 323456745), 5345676540 - 323456745)
def test_subtract_result_type(self):
self.assertIsInstance(Calculator.subtract(5, 2), int)
self.assertIsInstance(Calculator.subtract(3, 2), int)
self.assertIsInstance(Calculator.subtract(34, 24), int)
self.assertIsInstance(Calculator.subtract(5345676540, 323456745), int)
def test_add_none_int_type(self):
with self.assertRaises(TypeError):
Calculator.subtract(2, "wer")
def test_multiply_result(self):
self.assertEqual(Calculator.multiply(5, 2), 10)
self.assertEqual(Calculator.multiply(3, 2), 6)
self.assertEqual(Calculator.multiply(10, 10), 100)
self.assertEqual(Calculator.multiply(5345676540, 323456745), 5345676540 * 323456745)
def test_multiply_result_type(self):
self.assertIsInstance(Calculator.multiply(5, 2), int)
self.assertIsInstance(Calculator.multiply(-3, -2), int)
self.assertIsInstance(Calculator.multiply(34, 24), int)
self.assertIsInstance(Calculator.multiply(5345676540, 323456745), int)
def test_multiply_non_int_type(self):
with self.assertRaises(TypeError):
Calculator.multiply(2, "wer")
def test_divide_result(self):
self.assertEqual(Calculator.divide(10, 2), 5)
self.assertEqual(Calculator.divide(6, 3), 2)
self.assertEqual(Calculator.divide(10, 5), 2)
self.assertEqual(Calculator.divide(5345676540, 323456745), 5345676540 / 323456745)
def test_divide_result_type(self):
self.assertIsInstance(Calculator.divide(5, 2), float)
self.assertIsInstance(Calculator.divide(6, 2), float)
self.assertIsInstance(Calculator.divide(10, 2), float)
self.assertIsInstance(Calculator.divide(5345676540, 323456745), float)
def test_divide_non_int_type(self):
with self.assertRaises(TypeError):
Calculator.divide(2, "wer")
if __name__ == '__main__':
unittest.main()
| [
"ezekielakintunde18@gmail.com"
] | ezekielakintunde18@gmail.com |
1ccbf579f9ecd74af6ca97e873ff405282991e42 | b0391f6746cdf4344d03f616e1e82588b54adeb6 | /nishatScrapping.py | 3692858a806ed95c9829b787790ffef6828a7303 | [] | no_license | Ziatariq/WEBHYPE_SCRAPPER | a523205188618a99f5fa3e898bb7800b0a5fbfa7 | 43062c4f88075de1679313de9d74a1357655ef9f | refs/heads/master | 2022-09-29T11:04:20.497468 | 2020-06-03T15:21:56 | 2020-06-03T15:21:56 | 268,561,969 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,345 | py | import requests
import random
from datetime import datetime
import requests
import pymongo
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["fypDb"]
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
ua = UserAgent()
header = {'user-agent':ua.chrome}
type_array = ['women','men','kids','accessories']
type_temp =0
while(type_temp < len(type_array)):
if(type_array[type_temp] == "men"):
pagecount=2
else:
pagecount=5
while (pagecount > 0):
print("pagecount = ", pagecount)
url = "https://nishatlinen.com/pk/" + type_array[type_temp] + ".html?p="+str(pagecount)
response = requests.get(url,headers=header)
print(url)
soup = BeautifulSoup(response.content, 'html.parser')
nishat = soup.findAll("li", {"class": "item product product-item"})
for nish in nishat:
price = nish.find('span', {'class': 'price'}).text
buy_url = nish.find('a')['href']
title = nish.find('a', {'class': "product-item-link"}).text.strip()
# print("Type = ", type_array[type_temp])
# print("Title = ", title)
# print("Price = ", price)
# print("Buy URL = ", buy_url)
imageUrl = nish.find('img')['src']
dataObject = {
"id": random.choice(list(range(0, 100000))) + random.choice(list(range(77, 15400))) + random.choice(
list(range(55, 5000))),
'name': title,
'pictures': [imageUrl],
'stock': 'N/A',
'price': price,
'discount': 0,
'salePrice': 0,
'description': '',
'tags': [type_array[type_temp], title],
'rating': random.choice(list(range(3, 5))),
'category': type_array[type_temp],
'colors': [],
'size': [],
'buyUrl': buy_url,
'gender': type_array[type_temp],
'brand': title,
'date': datetime.today(),
'mainBrand': 'nishat'
}
print(dataObject)
# mydb.products.insert_one(dataObject)
pagecount -=1
type_temp += 1 | [
"noreply@github.com"
] | noreply@github.com |
63343e5590bf39482f37a91203e0c8ba6b022a08 | 08ca401b4f1d4973ea6c8b653509ae65e04f7208 | /users/migrations/0001_initial.py | 2f7c3746e13e5c770b426b2006020b9b02ea765f | [] | no_license | Paul-C3/Group_project | e6006f24a3633b8733a101bd66232c988c8b55e9 | fc06d4ff7cee4cbd80f2859401ca614688c94f99 | refs/heads/master | 2022-11-23T15:42:13.864462 | 2020-07-29T01:58:05 | 2020-07-29T01:58:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | # Generated by Django 2.2 on 2020-07-29 01:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(default='default.jpg', upload_to='profile_pics')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"dezzuttia@gmail.com"
] | dezzuttia@gmail.com |
5e0d645e8d8db30e316d5aab006e9160adad1df9 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_impromptus.py | fee549bf75880518cd29b8bb36287ecde035b251 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py |
#calss header
class _IMPROMPTUS():
def __init__(self,):
self.name = "IMPROMPTUS"
self.definitions = impromptu
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['impromptu']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
6e1d01f79dda195d47526e92c52367add9bbc673 | eeabaaf9731e8231e6ab5a35d2de945b382255e7 | /gosenProject/products/serializers/common.py | 93b9ce69f6a791922370b11a63fef676e1dca5b0 | [] | no_license | ActoSoft/Gosen-Backend | a1253e01d9e8c78a9f212c700d9876be0b3997bb | ec5536e382d6311122a6c5e8f93cc300ed22e7fd | refs/heads/master | 2022-12-13T12:20:08.930177 | 2020-04-18T19:48:43 | 2020-04-18T19:48:43 | 191,101,673 | 0 | 0 | null | 2022-12-08T07:27:11 | 2019-06-10T05:21:05 | Python | UTF-8 | Python | false | false | 1,119 | py | from rest_framework import serializers
from ..models import Product, ProductStock, ProductImage
from stocks.serializers.nested import BasicStockSerializer
class ProductImageSerializer(serializers.ModelSerializer):
class Meta:
model = ProductImage
fields = ['id', 'image']
class ProductStockSerializer(serializers.ModelSerializer):
stock = BasicStockSerializer(many=False, required=True)
class Meta:
model = ProductStock
fields = ['id', 'stock', 'qty', 'deleted']
class ProductListSerializer(serializers.ModelSerializer):
images = ProductImageSerializer(many=True, required=False)
class Meta:
model = Product
fields = ['id', 'name', 'description', 'images']
class ProductDetailSerializer(serializers.ModelSerializer):
images = ProductImageSerializer(many=True, required=False)
stocks = ProductStockSerializer(many=True, required=True)
class Meta:
model = Product
fields = '__all__'
class ProductCreateSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = '__all__'
| [
"martin.melo.dev.97@gmail.com"
] | martin.melo.dev.97@gmail.com |
f691ee2989bae7c40b5a2f82be950381023c2c20 | ed51f4726d1eec4b7fec03a1ebaa32d983f1008d | /gardens/apps.py | ffc10b571d2b501f8f6b438e5e9dbaf6f81e5928 | [] | no_license | kaczuchg711/OrdiTree | ececbbb13fa48364441ebdde7f52980b2e1175fe | 2b87535b8a60b9aca83674e5975f39f3f832c58a | refs/heads/master | 2021-05-18T03:36:50.498916 | 2020-06-13T12:42:44 | 2020-06-13T12:42:44 | 251,085,715 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | from django.apps import AppConfig
class GardensConfig(AppConfig):
name = 'gardens'
| [
"you@example.com"
] | you@example.com |
83c689ae14f37b8c74cb167ffd86dd167b97207b | 4ef57c9aa197d02a1053e476db56d49e2b9abfc1 | /api/servicemanager/pgrest.py | 844cdd37b347b83c60665af213e49aa6f3abf84c | [
"MIT"
] | permissive | radomd92/botjagwar | 62c1734a41e8b2128a790211d133fa223fcc7e4a | 46d38c10dda69871f6b3d2b89bcf787cba49a764 | refs/heads/master | 2023-09-03T01:49:31.438859 | 2023-08-18T08:51:38 | 2023-08-18T08:51:38 | 87,109,884 | 10 | 0 | MIT | 2023-08-18T08:51:39 | 2017-04-03T18:58:33 | Python | UTF-8 | Python | false | false | 6,908 | py | from logging import getLogger
from random import randint
import requests
from api.config import BotjagwarConfig
log = getLogger('pgrest')
config = BotjagwarConfig()
class BackendError(Exception):
pass
class Backend(object):
postgrest = config.get('postgrest_backend_address')
def check_postgrest_backend(self):
if not self.postgrest:
raise BackendError(
'No Postgrest defined. '
'set "postgrest_backend_address" to use this. '
'Expected service port is 8100'
)
class StaticBackend(Backend):
@property
def backend(self):
self.check_postgrest_backend()
return 'http://' + self.postgrest + ':8100'
class DynamicBackend(Backend):
backends = ["http://" + Backend.postgrest + ":81%s" %
(f'{i}'.zfill(2)) for i in range(16)]
@property
def backend(self):
self.check_postgrest_backend()
bkd = self.backends[randint(0, len(self.backends) - 1)]
return bkd
class PostgrestBackend(object):
backend = StaticBackend()
def __init__(self, use_postgrest: [bool, str] = 'automatic'):
"""
Translate templates
:param use_postgrest: True or False to fetch on template_translations table.
Set this argument to 'automatic' to use `postgrest_backend_address` only if it's filled
"""
if use_postgrest == 'automatic':
try:
self.online = True if self.backend.backend else False
except BackendError:
self.online = False
else:
assert isinstance(use_postgrest, bool)
self.online = use_postgrest
class TemplateTranslation(PostgrestBackend):
"""
Controller to fetch already-defined template name mappings
from the Postgres database through PostgREST.
"""
def get_mapped_template_in_database(self, title, target_language='mg'):
if self.online:
return self.postgrest_get_mapped_template_in_database(title, target_language)
def add_translated_title(self, title, translated_title, source_language='en', target_language='mg'):
if self.online:
return self.postgrest_add_translated_title(
title, translated_title, source_language, target_language)
def postgrest_get_mapped_template_in_database(self, title, target_language='mg'):
response = requests.get(self.backend.backend + '/template_translations', params={
'source_template': 'eq.' + title,
'target_language': 'eq.' + target_language
})
data = response.json()
if response.status_code == 200: # HTTP OK
if 'target_template' in data:
return data['target_template']
elif response.status_code == 404: # HTTP Not found
return None
else: # other HTTP error:
raise BackendError(f'Unexpected error: HTTP {response.status_code}; ' + response.text)
def postgrest_add_translated_title(self, title, translated_title, source_language='en', target_language='mg'):
response = requests.post(self.backend.backend + '/template_translations', json={
'source_template': title,
'target_template': translated_title,
'source_language': source_language,
'target_language': target_language
})
if response.status_code in (400, 500): # HTTP Bad request or HTTP server error:
raise BackendError(f'Unexpected error: HTTP {response.status_code}; ' + response.text)
return None
class JsonDictionary(PostgrestBackend):
def __init__(self, use_postgrest: [bool, str] = 'automatic', use_materialised_view: [bool] = True):
super(JsonDictionary, self).__init__(use_postgrest)
if use_materialised_view:
self.endpoint_name = '/json_dictionary'
else:
self.endpoint_name = '/vw_json_dictionary'
def look_up_dictionary(self, w_language, w_part_of_speech, w_word):
params = {
'language': 'eq.' + w_language,
'part_of_speech': 'eq.' + w_part_of_speech,
'word': 'eq.' + w_word
}
resp = requests.get(self.backend.backend + self.endpoint_name, params=params)
data = resp.json()
return data
def look_up_word(self, language, part_of_speech, word):
params = {
'language': 'eq.' + language,
'part_of_speech': 'eq.' + part_of_speech,
'word': 'eq.' + word
}
log.debug(params)
resp = requests.get(self.backend.backend + '/word', params=params)
data = resp.json()
return data
class ConvergentTranslations(PostgrestBackend):
endpoint = '/convergent_translations'
def get_convergent_translation(self, target_language, en_definition=None, fr_definition=None,
suggested_definition=None, part_of_speech=None):
params = {
# 'language': 'eq.' + target_language
}
if part_of_speech is not None:
params['part_of_speech'] = 'eq.' + part_of_speech
if en_definition is not None:
params['en_definition'] = 'eq.' + en_definition
if fr_definition is not None:
params['fr_definition'] = 'eq.' + fr_definition
if suggested_definition is not None:
params['suggested_definition'] = 'eq.' + suggested_definition
if len(params) < 2:
raise BackendError("Expected at least one of 'en_definition', 'fr_definition' or 'suggested_definition'")
response = requests.get(self.backend.backend + self.endpoint, params=params)
data = response.json()
if response.status_code == 200: # HTTP OK
return data
if response.status_code == 404: # HTTP Not found
return None
# other HTTP error:
raise BackendError(f'Unexpected error: HTTP {response.status_code}; ' + response.text)
def get_suggested_translations_fr_mg(self, target_language, definition=None,
suggested_definition=None, part_of_speech=None):
params = {
# 'language': 'eq.' + target_language
}
if part_of_speech is not None:
params['part_of_speech'] = 'eq.' + part_of_speech
if definition is not None:
params['definition'] = 'eq.' + definition
if suggested_definition is not None:
params['suggested_definition'] = 'eq.' + suggested_definition
response = requests.get(self.backend.backend + '/suggested_translations_fr_mg', params=params)
data = response.json()
if response.status_code == 200: # HTTP OK
return data
# other HTTP error:
raise BackendError(f'Unexpected error: HTTP {response.status_code}; ' + response.text)
| [
"rado.md92@gmail.com"
] | rado.md92@gmail.com |
f4cd065a1176b5f99a4b4063581c8fc397b5a8c4 | d935ee142cf32fd54285a78b57588202cdd74a3c | /HW10prog1_C_n_k.py | ee20d5e9c4b44afaf339a65662a681bee168e560 | [] | no_license | mattfisc/cs240 | 7fc799f1dc223463b88172595119de8ddd51c916 | cc40eb75051da2b88bc14e2d13e35e5ff93203ac | refs/heads/master | 2022-11-07T23:34:35.743083 | 2020-06-24T21:06:45 | 2020-06-24T21:06:45 | 274,768,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py |
def coeffient(n , k):
# termination
if k==0 or k ==n :
return 1
# recursive call
return coeffient(n-1 , k-1) + coeffient(n-1 , k)
n = 6
k = 2
print "Value of C(%d,%d) is (%d)" %(n , k , coeffient(n , k))
# ----- OUTPUT --------
# Value of C(6,2) is (15)
| [
"mattfisc@gmail.com"
] | mattfisc@gmail.com |
ef10da8008259ff805e21eee9608bb81a3c91f16 | cefe9e52dacb3fd89eb413a5b982bf179c1cb671 | /bukkitadmin/plugins.py | 654af7a6d06341a7d826800602a4315a3556f039 | [] | no_license | andrepl/bukkitadmin | e06921a53f39f46f721a51dd3d7466c9741bd743 | 33c97593540371df1c0946c6f627f731801b45a3 | refs/heads/master | 2020-08-09T20:11:43.522155 | 2014-08-02T15:52:54 | 2014-08-02T15:52:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,296 | py | from __future__ import absolute_import
import logging
import os
import shutil
import yaml
from . import jenkins, bukkitdev
import itertools
from bukkitadmin.util import extract_plugin_info, hashfile, download_file, query_yes_no, prompt_choices, format_search_result
from bukkitadmin.versionparser import parse_version
class InvalidPlugin(Exception):
pass
class PluginFile(object):
@property
def shasum(self):
return hashfile(path=self.jarpath)
def __init__(self, jarpath):
self.jarpath = jarpath
self._plugin_yml = extract_plugin_info(self.jarpath)
if self._plugin_yml is None:
raise InvalidPlugin("%s is not a valid plugin file." % (self.jarpath,));
def __repr__(self):
return "%s-%s" % (self.name, self.version)
def reload(self):
self._plugin_yml = extract_plugin_info(self.jarpath)
@classmethod
def is_valid_plugin(cls, jarpath):
return extract_plugin_info(jarpath) is not None
@property
def authors(self):
authors = set()
for key in ('author', 'authors'):
if key in self._plugin_yml:
if isinstance(self._plugin_yml[key], basestring):
authors.add(self._plugin_yml[key])
else:
for author in self._plugin_yml[key]:
authors.add(author)
return list(authors)
@property
def name(self):
return self._plugin_yml['name']
@property
def main(self):
return self._plugin_yml['main']
@property
def version(self):
return self._plugin_yml['version']
@property
def dependencies(self):
if 'depend' not in self._plugin_yml:
return []
return self._plugin_yml['depend']
def _get_meta_path(self):
return os.path.splitext(self.jarpath)[0] + ".yml"
def get_meta(self):
if not self.has_meta():
return {}
with open(self._get_meta_path()) as metafile:
data = yaml.load(metafile)
return data
def set_meta(self, meta):
with open(self._get_meta_path(), 'w') as metafile:
yaml.dump(meta, metafile)
def has_meta(self):
return os.path.exists(self._get_meta_path())
def has_correct_name(self):
return os.path.basename(self.jarpath) == "%s.jar" % (self.name,)
def newer_than(self, other):
my_version = parse_version(self.version)
other_version = parse_version(other.version)
if my_version == other_version:
return self.shasum != other.shasum
return my_version > other_version
def is_live(self):
return os.path.basename(os.path.dirname(self.jarpath)) == 'plugins'
def rename_jar(self):
meta = None
if self.has_meta():
meta = self.get_meta()
if os.path.exists(self._get_meta_path()):
os.unlink(self._get_meta_path())
newjarpath = os.path.join(os.path.dirname(self.jarpath), "%s.jar" % (self.name,))
if os.path.exists(newjarpath):
raise IOError("File %s already exists" % (newjarpath,))
shutil.move(self.jarpath, newjarpath)
self.jarpath = newjarpath
if meta is not None:
self.set_meta(meta)
class PluginNotFound(Exception):
pass
class NoPluginSource(Exception):
pass
class Library(object):
"""The bukkit plugin registry"""
VALID_SOURCE_TYPES = {
"jenkins": jenkins.PluginSource
}
@classmethod
def get(cls, rootdir=None):
if rootdir is None:
rootdir = os.getcwd()
libdir = os.path.join(rootdir, "plugin-library")
if not os.path.exists(libdir):
raise IOError("Plugin library not found.")
return Library(libdir)
def __init__(self, path):
self.path = path
self.reload_sources()
self.reload()
def reload_sources(self):
sources_file = os.path.join(self.path, ".sources.yml")
if not os.path.exists(sources_file):
with open(sources_file, 'w') as sources:
yaml.dump({}, sources)
self.sources = {'bukkitdev': bukkitdev.PluginSource()}
sources = yaml.load(open(sources_file))
for source_name, source_cfg in sources.iteritems():
source_type = source_cfg.pop('type', None)
if source_type is None or source_type not in self.VALID_SOURCE_TYPES:
logging.warn("Unknown source type %s" % (source_type,))
continue
self.add_source(source_name, source_type, **source_cfg)
def add_source(self, name, type='jenkins', **kwargs):
if name in self.sources:
raise KeyError("source %s is already registered" % (name,))
if type not in self.VALID_SOURCE_TYPES:
raise KeyError("Unknown source type %s" % (type,))
source = self.VALID_SOURCE_TYPES[type](name, **kwargs)
self.sources[name] = source
def remove_source(self, name):
if name in self.sources:
return self.sources.pop(name)
def save_sources(self):
cfg = {}
for source_name, source in self.sources.iteritems():
if source.source_type == 'bukkitdev':
continue # don't save the bukkitdev one, its default
cfg[source_name] = source.serialize()
yaml.dump(cfg, open(os.path.join(self.path, ".sources.yml"), 'w'))
def reload(self):
self._cached = []
for _file in os.listdir(self.path):
if not _file.endswith(".jar"):
continue
try:
plugin = PluginFile(os.path.join(self.path, _file))
self._cached.append(plugin)
except InvalidPlugin as e:
logging.warn("Invalid jar file found in plugin registry: %s" % (_file,))
continue
logging.debug("Found %s" % (_file,))
def update_plugin(self, plugin):
if isinstance(plugin, basestring):
plugin = self.get_plugin(plugin)
source = self.get_plugin_source(plugin)
if source is None:
raise NoPluginSource()
url = source.get_download_url(plugin)
meta = plugin.get_meta()
if meta.get('last_download_url', '') == url:
return False
filename = download_file(url)
pf = PluginFile(filename)
meta['last_download_url'] = url
plugin.set_meta(meta)
ret = False
if pf.newer_than(plugin):
ret = True
shutil.move(filename, plugin.jarpath)
plugin.reload()
return ret
def register_new_plugin(self, name, source=None, jarpath=None):
info = None
dest = None
meta = {}
plugin = self.get_plugin(name)
if plugin is not None:
raise KeyError("Plugin %s is already in the registry")
if jarpath:
info = extract_plugin_info(jarpath)
dest = os.path.join(self.path, "%s.jar" %(info['name'],))
shutil.copy(jarpath, dest)
else:
if source is None:
source = self.sources['bukkitdev']
elif isinstance(source, basestring):
source = self.sources[source]
peek, results = itertools.tee(source.search(name))
only = None
second = None
try:
only = next(peek)
second = next(peek)
except StopIteration:
if only:
choice = only
else:
raise PluginNotFound(name)
_generator_cache = {'choices': results}
def get_choices():
_generator_cache['choices'], choices = itertools.tee(_generator_cache['choices'])
return choices
if second:
choice = prompt_choices(get_choices, choice_formatter=format_search_result,
header="Found multiple matches for '%s' on source %s" % (name, source.name))
if not choice:
return 0
download_url, meta = source.search_result_url(choice)
file = download_file(download_url)
info = extract_plugin_info(file)
dest = os.path.join(self.path, "%s.jar" %(info['name'],))
shutil.move(file, dest)
pluginfile = PluginFile(dest)
pluginfile.set_meta(meta)
self._cached.append(pluginfile)
if not jarpath:
self.get_plugin_dependencies(pluginfile)
def get_plugin_dependencies(self, plugin):
if isinstance(plugin, basestring):
plugin = self.get_plugin(plugin)
if plugin.dependencies:
print "Checking dependencies for %s" % (plugin.name,)
for dep in plugin.dependencies:
depjar = self.get_plugin(dep)
if depjar is None:
source = self.get_plugin_source(plugin)
print " %s not registered, searching %s" % (dep, source.name)
try:
self.register_new_plugin(dep, source=self.get_plugin_source(plugin))
except PluginNotFound:
if not isinstance(source, bukkitdev.PluginSource):
print "%s not found on %s, searching bukkitdev" % (dep, source.name)
self.register_new_plugin(dep)
else:
raise
else:
print " %s - Dependency Satisfied." % (depjar.name,)
def unregister_plugin(self, pluginname, clean_unused_dependencies=True):
removed = []
plugin = None
if isinstance(pluginname, basestring):
plugin = self.get_plugin(pluginname)
else:
plugin = pluginname
pluginname = plugin.name
if plugin is None:
raise PluginNotFound("%s is not a registered plugin" % (pluginname,))
self._cached.remove(plugin)
if os.path.exists(plugin._get_meta_path()):
os.unlink(plugin._get_meta_path())
os.unlink(plugin.jarpath)
removed.append(plugin)
if clean_unused_dependencies or clean_unused_dependencies is None:
unused = []
all_dependencies = self.get_remaining_dependencies()
for dep in plugin.dependencies:
dep_plugin = self.get_plugin(dep)
if dep_plugin is None:
print "dependency %s is not registered." % (dep,)
continue
for ad in all_dependencies:
if ad.name == dep_plugin.name:
# existing dependency is still required.
break;
else:
unused.append(dep)
if unused and clean_unused_dependencies is None:
print "The following dependencies are no longer required: %s" % (", ".join([repr(p) for p in unused]),)
if not query_yes_no("Remove unused dependencies?"):
return removed
for up in unused:
print "removing unused dependency: %s" % (up,)
removed += self.unregister_plugin(up, clean_unused_dependencies=clean_unused_dependencies)
return removed
def get_remaining_dependencies(self):
deps = set()
for p in self.plugins:
for dep in p.dependencies:
dp = self.get_plugin(dep)
if dp is not None:
deps.add(dp)
return list(deps)
def get_plugin_source(self, plugin):
if isinstance(plugin, basestring):
plugin = self.get_plugin(plugin)
if not plugin.has_meta():
return self.sources['bukkitdev']
meta = plugin.get_meta()
if 'source' in meta:
return self.sources.get(meta['source'], None)
return self.sources['bukkitdev']
    @property
    def plugins(self):
        # Hand out a copy so callers cannot mutate the internal cache.
        return list(self._cached)
def get_plugin(self, name):
for p in self._cached:
if p.name.lower() == name.lower():
return p
| [
"andrepleblanc@gmail.com"
] | andrepleblanc@gmail.com |
e36d5216b192e842d632a87650507221796a33e3 | bcee50b3cbaf7a8000dffb7326cf467ae432b626 | /basic/15650/nm_2_dfs.py | cfe1b263b0584c44a10c3b12c47bba7fd97e0bce | [] | no_license | entrekid/algorithms | 53e5e563f6350b76047d8163ecd6e623dbe6e8d1 | 64377821718b3e44faf6a05be4d3ebf99b674489 | refs/heads/master | 2022-04-06T21:49:42.081981 | 2020-03-03T14:58:52 | 2020-03-03T14:58:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | n, m = map(int, input().split())
# check_list[i]: whether num_list[i] is already used on the current path.
check_list = [False] * n
# Candidate numbers 1..n.
num_list = [elem + 1 for elem in range(n)]
# Numbers chosen so far (the combination currently being built).
result_list = []
def nm_dfs2(num):
    """Print every length-m increasing combination of 1..n (BOJ 15650)."""
    # num: how many numbers are already on the current path.
    if num == m:
        print(*result_list)
        return
    for iter in range(n):
        if check_list[iter] == True:
            continue
        check_list[iter] = True
        result_list.append(num_list[iter])
        nm_dfs2(num + 1)
        result_list.pop()
        # Re-open only the slots after `iter`; slot `iter` stays marked so this
        # level never revisits it, which keeps the printed combinations ascending.
        for j in range(iter + 1, n):
            check_list[j] = False
nm_dfs2(0) | [
"root@LAPTOP-S2FAKB33.localdomain"
] | root@LAPTOP-S2FAKB33.localdomain |
0399c62994e2f73d779c420bad0683c1d981a66e | de83c4b5caa71e0bf2f3709d8e92187916fd73ea | /pomodoro/views.py | 7cfd8ba6a799eed3259928facc38f742a6e7f2c2 | [] | no_license | onurcankurum/pomodoro | 4abee3002c7b15cad60ba282410202300a0695cd | 026e955a1fccf20e0ab1302d5a654432fc266ad8 | refs/heads/master | 2023-02-07T06:05:42.189973 | 2020-12-27T20:10:12 | 2020-12-27T20:10:12 | 324,838,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,450 | py | import datetime
from django.shortcuts import render
from django.http import HttpResponse
from chronometer.models import Timer
from django.views.generic import View
from chronometer.views import Start,Resume,Pause,Stop,TimerView
from .forms import SegmentForm
import json
import time
from chronometer.quaries import query,totalTime,ranking,details
from django.core.exceptions import ObjectDoesNotExist
import json
class Hometest(View):
    """Pomodoro home view: lets the user add subjects and start/stop timed segments."""
    # Shared chronometer sub-views used as helpers for timer actions.
    start =Start()
    pause=Pause()
    resume=Resume()
    stop=Stop()
    def lazim(self,request,choosen):
        """Ensure the user owns a Timer and leave the `choosen` segment paused."""
        try:
            # Pause any segment that is still running.
            self.timer = self.pause.post(request,request.user.pk,choosen)
        except ObjectDoesNotExist :
            # The user has no Timer row yet: create one, then start and
            # immediately pause a segment so a record exists for `choosen`.
            newUser = Timer(user=request.user,status='stopped')
            newUser.save()
            # Calling start before the Timer exists used to raise, because
            # the user record was empty.
            self.timer = self.start.post(request,request.user.pk,choosen)
            self.timer = self.pause.post(request,request.user.pk,choosen)
    def get(self,request):
        """Render the home page with the user's subject list."""
        context=self.loadata(self.request)
        print(context)
        return render(request,'hometest.html',context)
    def post(self,request):
        """Handle the add/start/stop actions posted from the home page."""
        context=self.loadata(self.request)
        if 'add' in request.POST:
            # NOTE(review): pause/resume/pause run back-to-back here; the original
            # author flagged this for later review — confirm intended.
            self.timer = self.pause.post(request,request.user.pk,dict(request.POST)['add'][0])
            self.timer = self.resume.post(request,request.user.pk,dict(request.POST)['add'][0])
            self.timer = self.pause.post(request,request.user.pk,dict(request.POST)['add'][0])
            return render(request,'hometest.html',context)
        if 'start' in request.POST:
            self.lazim(request,dict(request.POST)['select'][0])
            self.timer = self.resume.post(request,request.user.pk,dict(request.POST)['select'][0])
        if 'stop' in request.POST:
            self.lazim(request,dict(request.POST)['select'][0])
            self.timer = self.pause.post(request,request.user.pk,dict(request.POST)['select'][0])
        return render(request,'hometest.html',context)
    def loadata(self,request):
        """Build the template context; creates a Timer on first use."""
        if (request.user.is_authenticated):
            try:
                context={"dersler":query(request.user)}
            except ObjectDoesNotExist:
                self.lazim(request,'seçilmedi')
                context={"dersler":query(request.user)}
            print(context)
        else:
            context={"dersler":['derslerinizi','görmek için','giriş yapın']}
            print("giriş başarısız")
        return context
    def newTask(self,request):
        # Placeholder for creating a new task.
        pass
    def isNew(self,user):
        # Bug fix: the original referenced the undefined name `suser`
        # (guaranteed NameError); the `user` parameter was clearly intended.
        # TODO: unfinished stub — fetches the Timer and discards it.
        timer = Timer.objects.get(user=user)
class Statistics(View):
    """Statistics page: overall ranking plus per-subject totals for the user."""
    def post(self,request):
        # POST simply renders the page with an empty context.
        return render(request,'statistics.html',{})
    def get(self,request):
        print(totalTime(request.user))
        # Build the context locally per request. The original stored it in a
        # mutable class attribute, which is shared by every request handled by
        # this process and therefore leaked one user's data into another's page.
        context = {'ranking': ranking()}
        liste = totalTime(request.user)
        details = ','.join([str(i[0]) for i in liste])
        details2 = ','.join([str(i[1]) for i in liste])
        context['details'] = details
        context['details2'] = details2
        print(context)
        return render(request,'statistics.html',context)
def home(request):
    """Legacy function-based home view (see Hometest for the class-based one)."""
    start =Start()
    pause=Pause()
    resume=Resume()
    stop=Stop()
    if (request.user.is_authenticated):
        print("giriş başarılı")
        context={"dersler":query(request.user.pk)}
        if(request.method!='GET'):# On GET this is a fresh page load: no course is selected, carlist would be empty, so this block must not run.
            if(dict(request.POST)['add-task'][0]!=''):
                choosen=dict(request.POST)['add-task'][0]
            else:
                choosen=dict(request.POST)['carlist'][0]
            try:
                timer = pause.post(request,request.user.pk,choosen)# Pause any segment that is still running.
            except AttributeError: # The user has no timer object yet: create one.
                newUser = Timer(user=request.user,status='stopped')
                newUser.save()
                timer = start.post(request,request.user.pk,choosen) # Calling start before the user record exists raises, because the user object is empty.
                timer = pause.post(request,request.user.pk,choosen)
                timer['current']='start'
            try:
                # Toggle between pause and resume depending on the posted state.
                if(dict(request.POST)['current'][0]=='pause'):
                    timer=pause.post(request,request.user.pk,choosen)
                    timer['current']='start'
                else:
                    timer = resume.post(request,request.user.pk,choosen)
                    timer['current']='pause'
            except KeyError:
                timer['current']='start'
            timer['min']=time.gmtime(timer['duration']).tm_min
            timer['sec']=time.gmtime(timer['duration']).tm_sec
            #timer['selected']=dict(request.POST)['carlist'][0]
            timer['allders']=totalTime(request.user.pk)
            timer['focused']=query(request.user.pk)
    else:
        timer={"focused":query(request.user.pk)}
        timer['ranks']=ranking()
        timer['username']=request.user.username
    # NOTE(review): an authenticated GET request never assigns `timer`, so the
    # lines below raise NameError in that case — `context` was probably
    # intended instead; confirm.
    timer['username']=request.user.username
    timer['ranks']=ranking()
    return render(request,'home.html',timer)
| [
"dragokula@gmail.com"
] | dragokula@gmail.com |
a53414492e195973f5a92c78c45bdc10b98e1b69 | f9364a60157f2d4eaaddb053980055d12ecf4c9e | /recurrent/myrlcopy.py | d5295657595461ba6d60f45add2d17c7ef77c32c | [] | no_license | lorenzoviva/Tesi | de038a2d1b69076bc9e9f15b4c34a76d5e6e0c50 | a8a439e32f1d7484ff6b0bf96a0cfe877001640c | refs/heads/master | 2023-07-19T00:22:06.155791 | 2020-03-05T16:52:00 | 2020-03-05T16:52:00 | 245,185,242 | 0 | 0 | null | 2023-07-06T21:47:40 | 2020-03-05T14:31:28 | Python | UTF-8 | Python | false | false | 2,817 | py | import numpy as np
import gym
from dnc.dnc import DNC
import torch
import torch.optim as optim
import torch.nn as nn
gamma = 0.99 # discount factor for reward
def repackage_hidden(h):
    """Recursively detach hidden-state tensors from their autograd history.

    Tensors come back detached, lists stay lists, and any other iterable
    (e.g. a tuple of states) is rebuilt as a tuple.
    """
    if isinstance(h, torch.Tensor):
        return h.detach()
    if isinstance(h, list):
        return [repackage_hidden(element) for element in h]
    return tuple(repackage_hidden(element) for element in h)
def repackage_hidden_dnc(h):
    """Detach a DNC hidden state tuple `(chx, memory_hidden, read_vectors)`.

    None passes through unchanged; the read-vector slot of the result is
    always reset to None.
    """
    if h is None:
        return None
    chx, mhxs, _ = h
    detached_chx = repackage_hidden(chx)
    def detach_memory(state):
        # Detach every tensor held in one memory-hidden dict.
        return dict([(key, repackage_hidden(value)) for key, value in state.items()])
    if type(mhxs) is list:
        detached_mhxs = [detach_memory(state) for state in mhxs]
    else:
        detached_mhxs = detach_memory(mhxs)
    return (detached_chx, detached_mhxs, None)
def discount_rewards(r, gamma=0.99):
    """Take a 1-D (or column) float array of rewards and compute discounted returns.

    Args:
        r: numpy array of per-step rewards.
        gamma: discount factor. Defaults to 0.99, the value of the module-level
            constant the original implementation read implicitly; exposing it
            as a parameter generalizes the function without changing callers.

    Returns:
        Array of the same shape where entry t is the discounted sum of all
        rewards from step t onward.
    """
    discounted_r = np.zeros_like(r)
    running_add = 0
    for t in reversed(range(0, r.size)):
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    return discounted_r
# CartPole policy-gradient training loop driven by a DNC policy network.
env = gym.make('CartPole-v0')
input_size = 4
policy = DNC(4,200,gpu_id=0, output_size=1)
hidden = None
observation = env.reset()
episode_number = 0
reward_sum = 0
reset = False
done_reward_stack, x_stack, y_stack, done_stack = [], [], [], []
while episode_number <= 5000:
    env.render()
    # Reshape the 4-value observation to (1, 1, 4) — batch of 1, sequence of 1.
    x = torch.from_numpy(np.reshape(observation,[1,4])).unsqueeze(1)
    x = x.type(torch.FloatTensor)
    x = x.cuda()
    hidden = repackage_hidden_dnc(hidden)
    left_prob, hidden = policy(x, hidden, reset_experience=reset)
    reset = False
    # Sample the action from the predicted probability.
    action = 1 if np.random.uniform() < left_prob.item() else 0
    # record various intermediates (needed later for backprop)
    x_stack.append(x)
    y = 1 if action == 0 else 0
    y_stack.append(y)
    observation, reward, done, info = env.step(action)
    reward_sum += reward
    done_stack.append(done * 1)
    done_reward_stack.append(reward) # record reward (has to be done after we call step() to get reward for previous action)
    # NOTE(review): a fresh Adam optimizer is constructed on every step, which
    # discards its moment estimates — presumably unintended; confirm.
    optimizer = optim.Adam(policy.parameters(), lr=0.0001, eps=1e-9, betas=[0.9, 0.98]) # 0.0001
    if done:
        episode_number += 1
        observation = env.reset()
        reset = True
        # stack together all inputs, hidden states, action gradients, and rewards for this episode
        epx = np.vstack(x_stack)
        epy = np.vstack(y_stack)
        epr = np.vstack(done_reward_stack)
        epd = np.vstack(done_stack)
        x_stack, done_reward_stack, y_stack, done_stack = [], [], [], [] # reset array memory
        # Normalize the discounted returns to zero mean / unit variance.
        discounted_epr = discount_rewards(epr).astype('float32')
        discounted_epr -= np.mean(discounted_epr)
        discounted_epr /= np.std(discounted_epr)
"lorenzoviva@github.com"
] | lorenzoviva@github.com |
82a0e0d28994984b8a494fad02e967299d94d678 | eb817a5a5fd66d00906d2ac2574e2ef749780877 | /defining_classes/demos_metaclasses.py | b2fd1f30b2205a42c5e9f106569b3de0e8110ce2 | [
"MIT"
] | permissive | Minkov/python-oop-2021-02 | 5afcc356f59196fdfcfd217b455b8621176f578b | bd387dde165f4338eed66c4bc0b4b516ee085340 | refs/heads/main | 2023-04-01T08:07:39.096457 | 2021-04-05T18:24:40 | 2021-04-05T18:24:40 | 341,306,261 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | class Singleton(type):
    # Cache: one instance per class that uses this metaclass.
    __instances = {}
    def __new__(cls, *args, **kwargs):
        # Class creation itself is untouched; only instantiation is intercepted.
        return super().__new__(cls, *args, **kwargs)
    def __call__(cls, *args, **kwargs):
        # Construct the instance on the first call only; afterwards always hand
        # back the cached object, making every using class a singleton.
        if cls not in cls.__instances:
            cls.__instances[cls] = super().__call__(*args, **kwargs)
        return cls.__instances[cls]
class PersonFactory(metaclass=Singleton):
    # Every instantiation of this class yields the same cached object.
    pass
p = PersonFactory()
# Each PersonFactory() call returns the cached instance, so every line prints True.
print(PersonFactory() == PersonFactory())
print(PersonFactory() == PersonFactory())
print(PersonFactory() == PersonFactory())
print(PersonFactory() == PersonFactory())
print(PersonFactory() == PersonFactory())
print(PersonFactory() == PersonFactory())
| [
"DonchoMinkov@gmail.com"
] | DonchoMinkov@gmail.com |
f611c298ac38c70e4845c780d01c9b58bd3c5387 | f87c6dbecfa3501c57cc121f9849269228c818be | /A1/lotto.py | 772766e37031bd212b9e4cf251f3d173eb70498e | [] | no_license | jason-lui/A00930386_1510_assignments | 69a4f66adac0e33e2ea79cd21336ec95ea565569 | 503e636293a0f150da4644ea482d44020f02bf19 | refs/heads/master | 2022-03-11T19:05:05.585468 | 2019-11-24T04:51:49 | 2019-11-24T04:51:49 | 210,057,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,071 | py | import doctest
def number_generator():
    """
    Print 6 unique numbers in ascending order on a single line.

    Numbers range from [1, 49].
    :postconditions: generates 6 unique numbers in [1, 49]
    """
    import random
    # Sample 6 distinct values from 1..49 inclusive. The original sampled
    # range(50), i.e. [0, 49], which contradicts the documented [1, 49] range
    # (a drawn 0 is not a valid lottery number).
    res = random.sample(range(1, 50), 6)
    # Sort in ascending order, then print space-separated on one line.
    res.sort()
    print(*res)
def main():
    """
    Drive the program: run the doctests, then print the lottery numbers.
    """
    # Bug fix: the file previously defined main() twice; the second definition
    # shadowed the first, so number_generator() was never invoked. The two
    # bodies are merged here.
    doctest.testmod()
    number_generator()


if __name__ == '__main__':
    main()
# Component(s) of computational thinking
# Pattern Matching and Data Representation
# I chose to represent the randomly generated numbers in a list because the sort() method
# can organize the list in ascending order.
# Algorithms and Automation
# number_generator() uses the sample() function from the random module to generate random numbers.
# The sort() method is also applied to format the list as required.
| [
"mrjasonlui@gmail.com"
] | mrjasonlui@gmail.com |
78b0d82b027f95caf78dbfefa9926b6566ac2e1e | 988419b3d285b50f6038da17a1a4100857707e8b | /scripts/measure_rpi.py | c79cea97c11f8a7c6046c6409b2820e77c9b7db3 | [
"BSD-2-Clause"
] | permissive | saman-aghazadeh/Riptide | 58fec5677aeab92a174644693321de0c87a11d38 | bda83182f8bcdea7e06743f68bc1e9b4d675cd61 | refs/heads/master | 2022-06-20T00:19:25.686522 | 2020-05-13T18:18:41 | 2020-05-13T18:18:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,782 | py | import os
import numpy as np
import tensorflow as tf
import argparse
import tvm
from tvm import autotvm
from tvm import relay
from tvm.relay import testing
import tvm.relay.testing.tf as tf_testing
from tvm.autotvm.tuner import XGBTuner, GATuner, GridSearchTuner
from tvm.contrib.util import tempdir
from tvm.contrib import util
#import tvm.contrib.graph_runtime as runtime
import tvm.contrib.debugger.debug_runtime as runtime
import riptide.models
from riptide.get_models import get_model
from riptide.binary.binary_layers import Config, DQuantize, XQuantize
# Run TensorFlow on CPU only; compilation targets the remote Raspberry Pi.
os.environ["CUDA_VISIBLE_DEVICES"] = ''
# RPC device key and TVM targets for a Raspberry Pi 3B (ARM, NEON enabled).
device_key = 'rpi3b'
target = tvm.target.arm_cpu("rasp3b")
target_host = 'llvm -device=arm_cpu -target=arm-linux-gnueabihf -mattr=+neon'
# Command-line interface.
parser = argparse.ArgumentParser()
parser.add_argument(
    '--activation_bits',
    type=int,
    default=1,
    help='number of activation bits',
    required=False)
parser.add_argument(
    '--model',
    type=str,
    choices=['vggnet', 'vgg11', 'resnet18', 'alexnet_normal', 'alexnet', 'darknet', 'squeezenet', 'squeezenet_normal', 'squeezenet_batchnorm', 'vggnet_normal'],
    help='neural network model',
    required=True)
parser.add_argument(
    '--log_file',
    type=str,
    default='log.log',
    help='logfile to store tuning results',
    required=False)
parser.add_argument(
    '--unipolar',
    action='store_false',
    help='Whether to use bipolar or unipolar quantization')
args = parser.parse_args()
model = args.model
activation_bits = args.activation_bits
log_file = args.log_file
# Quantization configuration applied while the Keras model is constructed.
config = Config(
    actQ=DQuantize,
    weightQ=XQuantize,
    bits=activation_bits,
    use_act=False,
    use_bn=False,
    use_maxpool=True,
    bipolar=args.unipolar)
with config:
    model = get_model(model)
    #model = riptide.models.vggnet_normal.vggnet()
# Init model shapes.
test_input = tf.keras.Input(shape=[224, 224, 3], batch_size=1, dtype='float32')
output = model(test_input)
# Parse model to relay
with target:
    net, params = relay.frontend.from_keras(
        model, shape={
            'input_1': [1, 224, 224, 3]
        }, layout='NHWC')
num_threads = 4
os.environ["TVM_NUM_THREADS"] = str(num_threads)
# Compile using the best schedules recorded during auto-tuning (log_file).
with autotvm.apply_history_best(log_file):
    print("Compile...")
    with relay.build_config(opt_level=3):
        graph, lib, params = relay.build_module.build(
            net, target=target, params=params)
    batch_size = 1
    num_class = 1000
    image_shape = (224, 224, 3)
    data_shape = (batch_size, ) + image_shape
    tmp = util.tempdir()
    lib_fname = tmp.relpath('net.tar')
    lib.export_library(lib_fname)
    # Upload module to device
    print("Upload...")
    #remote = autotvm.measure.request_remote(
    #    device_key, 'fleet.cs.washington.edu', 9190, timeout=10000)
    remote = tvm.rpc.connect('jwfromm-rpi', 9090)
    # upload the library to remote device and load it
    remote.upload(lib_fname)
    rlib = remote.load_module('net.tar')
    # create the remote runtime module
    ctx = remote.cpu(0)
    module = runtime.create(graph, rlib, ctx)
    # set parameter (upload params to the remote device. This may take a while)
    module.set_input(**params)
    # Feed one random input image (batch of 1) to exercise the model.
    module.set_input(
        'input_1',
        tvm.nd.array(
            np.random.uniform(size=image_shape).astype('float32')))
    module.run()
    # Evaluate
    print("Evaluate inference time cost...")
    ftimer = module.module.time_evaluator("run", ctx, number=1, repeat=1)
    prof_res = np.array(ftimer().results) * 1000  # Convert to milliseconds
    print("Mean inference time (std dev): %.2f ms (%.2f ms)" %
          (np.mean(prof_res), np.std(prof_res)))
| [
"jwfromm@cs.washington.edu"
] | jwfromm@cs.washington.edu |
005e6a8d7f20ae9bcc7a387f6cf8b691bc2da6d2 | aaa3ab0c89f558a33ddcad9bcc5a687049dbc599 | /backend/src/websocket/socket.py | c7efe44db002bc33abccdeaebe9cf23e1008b529 | [] | no_license | vetordev/Hypersup | 5d059282971bf45f54f8be49071984371f98aabe | 961ac24209a3772fef5016ca851f82bc2fc40bd1 | refs/heads/master | 2021-02-16T18:40:06.197712 | 2020-03-18T22:20:13 | 2020-03-18T22:20:13 | 245,034,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | from flask import request
class Socket:
    """Registers websocket event handlers on a Flask-SocketIO instance."""
    def __init__(self, socket, app):
        # Keep references to the socket server and the Flask app so that
        # handlers can be attached later via run().
        self.socket = socket
        self.app = app
    def run(self):
        """Attach the 'connect' event handler to the wrapped socket."""
        @self.socket.on('connect')
        def handle_connect():
            print('New Connection; Id: ${id}'.format(id=request.sid))
| [
"you@example.com"
] | you@example.com |
f1e42d10133a550e216b7326118d6d44d7a683d9 | d9652e4f7292aa6f7031ff867d7c5ad99c99c64d | /2020/day-3/challenge.py | e24ea8be3c74ad1d9fd3e6dc5fa4ab1ad2d78ca1 | [] | no_license | jbgury/adventofcode | f55d941af657661df42e7669a1dee1afb98e6c59 | 10d80f9862895654871f9bc0ede7fbcb3ff1889c | refs/heads/master | 2023-01-23T09:39:49.426544 | 2020-12-06T21:03:44 | 2020-12-06T21:03:44 | 319,130,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import operator
# Read the tree map from stdin, one row per line (trailing newline stripped).
lines = []
for line in sys.stdin:
    lines.append(line.rstrip('\n'))
def count_tree((move_right, move_down), lines):
    # Python 2 tuple-parameter unpacking: the slope arrives as one tuple.
    # Counts '#' cells hit while sliding (move_right, move_down) per step.
    position_r = 0
    nb_trees = 0
    # Take every move_down-th row; advance move_right columns per row,
    # wrapping with modulo because the pattern repeats horizontally.
    for l in lines[::move_down]:
        if l[position_r] == '#':
            nb_trees += 1
        position_r += move_right
        position_r = position_r % len(l)
    return nb_trees
print("--- PART 1 ---")
slope = (3, 1)
l1 = count_tree(slope, lines)
print("Key = %s" % l1)

print("--- PART 2 ---")
slopes = ((1, 1), (3, 1), (5, 1), (7, 1), (1, 2))
# Python 2: map() returns a list here, so result_list can be read twice below.
result_list = map(lambda x: count_tree(x, lines), slopes)
print("values are : %s " % (','.join(map(str, result_list))))
# reduce() is a builtin in Python 2; multiplies the five tree counts together.
print("Key to solve the puzzle is %s " % reduce(operator.mul, result_list))
| [
"jean-baptiste.gury@cambiatech.com"
] | jean-baptiste.gury@cambiatech.com |
f104b8460b85216dc55272488848f1390cfc68a8 | 54adc2ec1eb7972dc8b8101d969176d45f2a949d | /analyse.py | d76982023669808194bbfd3b91e68deb04310b54 | [] | no_license | Ngugi1/csoe | 5d8939b953cdb6a1707c8ec99ed117562fdf3b5f | 5ad1b9d8add4a5d684cabc9a23823099bca18ab2 | refs/heads/master | 2022-03-12T06:41:54.503548 | 2019-11-24T13:07:10 | 2019-11-24T13:07:10 | 223,015,916 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,409 | py | # Load repos
# Get first and last commit
# Split the timestamps into 10 periods
# Get LOC of c in P
# Get complexity
from datetime import datetime
from pydriller import RepositoryMining, GitRepository
import re
# Repositories and their main branches
repos = [("aries", "trunk")]
# Prepare repo for mining
def configure_repo(repository,
                   reverse=None,
                   repo_start=None,
                   repo_end=None,
                   file_types=None):
    """Build a RepositoryMining over the repository's main branch.

    repository: (name, branch) tuple identifying the repo and branch to mine.
    reverse: when True, commits are traversed newest-first.
    repo_start / repo_end: optional datetime bounds for the traversal.
    file_types: extensions commits must touch; defaults to Java sources.
        (A None sentinel replaces the original mutable `[".java"]` default,
        avoiding the shared-mutable-default pitfall.)
    """
    if file_types is None:
        file_types = [".java"]
    # Mine the main branch and only look for commits
    # for java files
    name, branch = repository
    return RepositoryMining(name,
                            only_in_branch=branch,
                            only_modifications_with_file_types=file_types,
                            reversed_order=reverse,
                            since=repo_start,
                            to=repo_end)
# Traverse repo commits
def traverse(repository):
    """Return the commit iterator of a RepositoryMining configuration."""
    return repository.traverse_commits()
# Initialize GitRepository
def init_git_repo(path):
    """Open a pydriller GitRepository at `path` (used for SZZ blame queries)."""
    return GitRepository(path)
# Given a unix timestamp, give back a date
def unix_timestamp_to_date(timestamp):
    """Convert a unix timestamp to a datetime (interpreted in local time)."""
    return datetime.fromtimestamp(timestamp)
# Given a date, convert to unix timestamp
def date_string_to_timestamp(date):
    """Convert a datetime to an integer unix timestamp.

    Uses datetime.timestamp() instead of the original strftime("%s"):
    "%s" is a non-standard glibc extension (unavailable on e.g. Windows),
    while timestamp() gives the same local-time interpretation portably.
    """
    return int(date.timestamp())
# Get Bug Inducing Commits
def get_bug_inducing_commits(git_repo, current_commit, current_modification, current_period):
    """Return commits that last touched the fixed lines, limited to current_period.

    git_repo: path of the repository to open.
    current_commit / current_modification: the bug-fixing commit and the
        modified file whose changed lines are traced back (SZZ-style blame).
    current_period: (start, end) unix timestamps; only blamed commits whose
        committer date falls inside this window are returned.
    """
    current_period_start, current_period_end = current_period
    bic = []
    working_repo = init_git_repo(git_repo)
    # Get bug introducing changes only within this time period
    for commit_list in \
        list(working_repo.get_commits_last_modified_lines(
            commit=current_commit,
            modification=current_modification).values()):
        for commit_hash in commit_list:
            bug_inducing_commit = working_repo.get_commit(commit_hash)
            commit_date = bug_inducing_commit.committer_date
            if date_string_to_timestamp(commit_date) >= current_period_start and \
                    (date_string_to_timestamp(commit_date) <= current_period_end):
                bic.append(bug_inducing_commit)
    return bic
# get the initial and last commit dates
# These will be used to split the time into periods
def get_repo_life(chronological, reversed_chronological):
    """Return (first_commit_date, last_commit_date) of the project.

    `chronological` and `reversed_chronological` are two mining
    configurations over the same repository — oldest-first and newest-first;
    one commit is pulled from each to bound the project's lifetime.
    """
    oldest_first = traverse(chronological)
    newest_first = traverse(reversed_chronological)
    first_commit = next(oldest_first)
    last_commit = next(newest_first)
    return first_commit.author_date, last_commit.author_date
# Given the project lifetime, split it into portions defined by
# period length
def split_time_periods(project_start, project_end, period_length_months):
    """Split [project_start, project_end] into consecutive fixed-size windows.

    Each window is a (start, end) unix-timestamp tuple spanning
    period_length_months * 30 days; successive starts are offset by one
    extra second so adjacent windows never overlap. (The original comment
    intended to drop the trailing remainder, but a final full-length window
    starting before project_end is still emitted — behavior preserved here.)
    """
    window_seconds = 86400 * 30 * period_length_months
    windows = []
    cursor = project_start
    while cursor <= project_end:
        windows.append((cursor, cursor + window_seconds))
        cursor = cursor + window_seconds + 1
    return windows
# Get lines of code for all files
def get_lines_of_code(modification):
    """Return the file's nloc (lines of code) for this modification."""
    # modification : Modification
    return modification.nloc
# Get the cyclomatic complexity
def get_complexity(modification):
    """Return the file's cyclomatic complexity for this modification."""
    # modification : Modification (the original comment said CommitObject, which was wrong)
    return modification.complexity
# Get the filename
def get_file_name(modification):
    """Return the modified file's name (used as the metrics dictionary key)."""
    return modification.filename
def update_number_of_developers(dictionary, key, developer):
    """Return the developer set for `key` with `developer` added.

    When `key` already has a metrics row, the set stored at index 3 is
    updated in place and returned; otherwise a fresh one-element set is
    returned for the caller to store. (A leftover debug print of the
    internal set was removed, and the key_exists helper inlined.)
    """
    if key in dictionary:
        developers = dictionary[key][3]  # index 3 holds the developer set
        developers.add(developer)
        return developers
    return {developer}
# Update number of changes
def update_number_of_file_changes(dictionary, key):
    """Return the updated change count for `key` (stored at metrics index 2).

    The first recorded change counts as 1. The original returned 0 for a
    file's first change, so every file's change count — and the
    change-probability later derived from it — was off by one.
    """
    if key in dictionary:
        return dictionary[key][2] + 1
    return 1
# does a key exist in the dictionary?
def key_exists(dictionary, key):
    """Return True when `key` is present in `dictionary`, else False."""
    return key in dictionary
# Commit message Regex
fixed_regex = re.compile(r'fix(e[ds])?[ \t]*(for)[ ]*?(bugs?)?(defects?)?(pr)?[# \t]*')
patched_regex = re.compile(r'patch(ed)?[ \t]*(for)[ ]*?(bugs?)?(defects?)?(pr)?[# \t]*')
bugs_regex = re.compile(r'(\sbugs?\s|\spr\s|\sshow_bug\.cgi\?id=)[# \t]*')


# Heuristic: does the commit message describe a bug fix?
def is_bug_fixing_commit(commit):
    """Return True when the commit message matches any fix/patch/bug pattern.

    Echoes the matching message to stdout (as the original did) before
    returning True; patterns are tried in the original order.
    """
    message = commit.msg
    for pattern in (fixed_regex, patched_regex, bugs_regex):
        if pattern.search(message):
            print(message)
            return True
    return False
# Set past faults
def set_past_faults(p_faults, filename):
    """Return the previous-period fault count for `filename` (0 if unseen).

    Idiom cleanup: dict.get with a default replaces the key_exists helper
    plus explicit branch, with identical behavior.
    """
    return p_faults.get(filename, 0)
# Calculate all metrics for a given time period
def process_time_period(project_name, period, past_faults=None):
    """Compute per-file metrics for one time window of the repository.

    project_name: repository path (passed to the SZZ blame step).
    period: (start, end) unix timestamps delimiting the window.
    past_faults: mapping filename -> fault count from the previous window.
        (A None sentinel replaces the original mutable `{}` default.)

    Per-file metrics layout: [nloc, complexity, change_count, developer_set,
    total_changes_so_far, past_faults, (optional bug marker)].

    NOTE(review): commit mining below reads the module-global `repo` set by
    the driver loop rather than `project_name` — confirm intended.
    """
    if past_faults is None:
        past_faults = {}
    # buffer for faults in this period, they will be used in the next time period
    # as past faults
    current_faults = {}
    # Period : (start_period, end_period)
    start_period, end_period = period
    # Metrics is a dictionary to keep track of metrics for this period
    metrics = {}
    # total changes over this period
    total_changes = 0
    # Traverse all commits in the time period
    for commit_c in traverse(configure_repo(repo, repo_start=unix_timestamp_to_date(start_period),
                                            repo_end=unix_timestamp_to_date(end_period))):
        # Find all modifications
        for modification in commit_c.modifications:
            # Update the metrics
            if get_file_name(modification).endswith('.java') and modification.change_type.name != "DELETE"\
                    and modification.change_type.name != "RENAME":
                print("----mmmmmmmmmmmmmmmmmm-----")
                print(modification.change_type.name)
                print("----mmmmmmmmmmmmmmmmmm-----")
                total_changes += 1
                metrics[get_file_name(modification)] = [get_lines_of_code(modification),
                                                        get_complexity(modification),
                                                        update_number_of_file_changes(metrics,
                                                                                      get_file_name(modification)),
                                                        update_number_of_developers(metrics,
                                                                                    get_file_name(modification),
                                                                                    commit_c.author.email
                                                                                    # Developer count
                                                                                    ),
                                                        total_changes,  # Total changes
                                                        set_past_faults(past_faults, get_file_name(modification))]
            # Check if this commit is fixing a bug and find commits that introduced the bug
            if is_bug_fixing_commit(commit_c):
                for buggy_commit in get_bug_inducing_commits(project_name, commit_c, modification, period):
                    for buggy_modification in buggy_commit.modifications:
                        # Increase the number of faults in this file
                        if key_exists(current_faults, get_file_name(buggy_modification)):
                            current_faults[get_file_name(buggy_modification)] = \
                                current_faults[get_file_name(buggy_modification)] + 1
                        else:
                            current_faults[get_file_name(buggy_modification)] = 1
                        # Also mark the file as buggy if it is in our metrics
                        print("\n+++++++++++++++++++++++")
                        print(get_file_name(buggy_modification))
                        print("\n++++++++++++++++++++++++")
                        # NOTE(review): the duplicated .endswith('.java') test and the
                        # mixed use of `modification` vs `buggy_modification` below look
                        # suspicious — confirm which file the bug marker should land on.
                        if get_file_name(modification).endswith('.java'):
                            if get_file_name(modification).endswith('.java') and \
                                    key_exists(metrics, get_file_name(buggy_modification)):
                                metrics[get_file_name(buggy_modification)].append(1)
                            else:
                                metrics[get_file_name(modification)] = [0, 0, 0, set(), 0, 0, 1]
    # Compute entropy of changes - probability that the file will change
    for filename in metrics:
        # Add entropy for every file
        metrics[filename][3] = len(metrics[filename][3])
        # Guard: entries can be created by the buggy-file branch above even when
        # total_changes is 0, which previously raised ZeroDivisionError.
        if total_changes:
            metrics[filename][4] = float(format(metrics[filename][2] / total_changes, ".6f"))
        else:
            metrics[filename][4] = 0.0
    print(metrics)
# Process all repos
for repo in repos:
    # Project lifetime: dates of the first and the last commit.
    start, end = get_repo_life(configure_repo(repo), configure_repo(repo, reverse=True))
    # Three-month windows over the whole lifetime.
    for p_period in split_time_periods(int(start.strftime("%s")), int(end.strftime("%s")), 3):
        # Basic metrics for a period
        repo_name, _ = repo
        process_time_period(repo_name, p_period)
        # NOTE(review): exit() aborts after the first period — presumably a
        # debugging leftover; confirm before relying on full-history output.
        exit()
| [
"ngugindunguapps@gmail.com"
] | ngugindunguapps@gmail.com |
92cb0ab3f70161b2d376b8784ed7d75a50826a68 | 5e6989edb5efd56df912647515c8fabdde16a80b | /sendmail.py | d20b264b3168c0df82dfb1e52ddfe865354fc2bd | [] | no_license | MaciejWiatr/flask-api | fdb65d2d9a90fd6dc7daee37fe851fdcab888ecd | 835cf3478d13076b315c7e03d6dd9aee7e4cb0f3 | refs/heads/master | 2020-06-01T07:52:55.278238 | 2019-06-30T18:09:02 | 2019-06-30T18:09:02 | 190,707,767 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,422 | py | import smtplib
import codecs
import os
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Directory containing this script; used to locate the HTML e-mail template.
fullpath = os.path.realpath(__file__)
folder = os.path.dirname(fullpath)
def send_mail(bot_email, bot_password, my_email, name, email):
    """Send the 'contact request' HTML e-mail through Gmail SMTP.

    bot_email / bot_password: credentials of the sending account.
    my_email: the envelope recipient the message is actually delivered to.
    name / email: values substituted for the *!* and +!+ placeholders in
    html/email.html.
    """
    # me == my email address
    # you == recipient's email address
    me = my_email
    # NOTE(review): the To header below is hard-coded while the actual SMTP
    # recipient is my_email — header and envelope disagree; confirm which
    # address is really intended.
    you = "maciekwiatr17@gmail.com"
    # Create message container - the correct MIME type is multipart/alternative.
    msg = MIMEMultipart('alternative')
    msg['Subject'] = "Prosba o kontakt"
    msg['From'] = me
    msg['To'] = you
    # Create the body of the message (a plain-text and an HTML version).
    with codecs.open((folder +'/html/email.html'), 'r', 'utf-8') as file:
        html_file = file.read()
        print(html_file)
    # Substitute the caller's name and e-mail into the template placeholders.
    html = html_file.replace('*!*', name).replace('+!+', email)
    # Record the MIME types of both parts - text/plain and text/html.
    part2 = MIMEText(html.encode('utf-8'), 'html', 'utf-8')
    # Attach parts into message container.
    # According to RFC 2046, the last part of a multipart message, in this case
    # the HTML message, is best and preferred.
    msg.attach(part2)
    # Send the message via local SMTP server.
    mail = smtplib.SMTP('smtp.gmail.com', 587)
    mail.ehlo()
    mail.starttls()
    mail.login(bot_email, bot_password)
    mail.sendmail(bot_email, my_email, msg.as_string())
    mail.quit()
| [
"maciekwiatr17@gmail.com"
] | maciekwiatr17@gmail.com |
9c1182f4dec607a9cf5b2d60ff5929ead162678b | 9470eccaf472b1ff6ff4e267321f0dd68b55bcab | /app/tests/test_examples.py | 76ceabfcc9ae09c8c68853e1d9f258b0f7ad73f2 | [
"MIT"
] | permissive | PittRgz/auto-testing | 1f57fef5791a26a491e8b86b698b19b47a922a1e | 1d035a81f17aea3bf76836dc92a7a91977a4218f | refs/heads/main | 2023-07-04T19:16:55.911996 | 2021-08-03T19:45:01 | 2021-08-03T19:45:01 | 392,406,097 | 0 | 0 | MIT | 2021-08-03T19:47:09 | 2021-08-03T17:51:01 | Python | UTF-8 | Python | false | false | 468 | py | """
This is a simple tests file
"""
def test_sum():
    """Testing a sum of two numbers"""
    first, second = 4, 3
    expected_total = 7
    assert first + second == expected_total
def test_strings_equal():
    """Testing that two strings are equal"""
    left = 'string'
    right = 'string'
    assert left == right
def test_multiplication():
    """Testing multiplication of two numbers"""
    val_1 = 4
    val_2 = 3
    res = 12
    # Bug fix: the original asserted val_1 + val_2 == res (4 + 3 == 12),
    # which failed unconditionally; a multiplication test must multiply.
    assert val_1 * val_2 == res
| [
"pedrorodriguez@MacBook-Pro-de-Pedro.local"
] | pedrorodriguez@MacBook-Pro-de-Pedro.local |
c69eb01195ec0cf56f62751cbb04390378955500 | c6d94fad0eb1ca7342de1fc3491f18233ae32b81 | /cwb/data/source/earthquake_info.py | 6d59a39c8c0e67a302fa262d818d73406ec62e25 | [] | no_license | shengtai0201/OpenDataCollection | f16e04085f266851b078dcef06384f9f95f6216c | 064f73e1973ffff338913048dff429b62a6a63a7 | refs/heads/master | 2021-01-25T06:45:27.554847 | 2017-06-24T09:44:21 | 2017-06-24T09:44:21 | 93,606,728 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | # 地震規模標籤
class Magnitude:
    """Earthquake magnitude label: the scale used plus the measured value."""
    def __init__(self):
        # Magnitude scale type
        self.magnitude_type = None
        # Magnitude value
        self.magnitude_value = None
# 地震資訊
class EarthquakeInfo:
    """A single earthquake report."""
    def __init__(self, origin_time, epicenter, depth, magnitude):
        # Origin time of the event
        self.origin_time = origin_time
        # Epicenter data
        self.epicenter = epicenter
        # Focal depth, in kilometers
        self.depth = depth
        # Magnitude information: scale type and magnitude value
        self.magnitude = magnitude
        # Agency/unit that published the earthquake parameters
        self.source = None
| [
"shengtai0201@gmail.com"
] | shengtai0201@gmail.com |
884626558656a25eb482c9309b7910cfdf19bd55 | 83e27a3a1680db233378e7dfb96d5b95a2410f9c | /flask-大型网站架构4-显示-修改资料/tests/test-user_model.py | 03919195527bb164b700de0622acf52e5d16d4f1 | [] | no_license | dcl1994/Flask_Study | 2425af269b601a9291b61455442e1a6d765defab | 8353f26a1b393b4adfd50ff24ce4267fb9857065 | refs/heads/master | 2020-03-21T08:04:44.471648 | 2017-10-22T02:54:55 | 2017-10-22T02:54:55 | 138,318,578 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | import unittest
from app.models import User
class UserModelTestCase(unittest.TestCase):
    """Tests for the User model's password hashing behaviour."""
    def test_password_setter(self):
        # Assigning a password must populate password_hash.
        u=User(password='cat')
        self.assertTrue(u.password_hash is not None)
    def test_no_password_getter(self):
        # The raw password must not be readable back from the model.
        u=User(password='cat')
        with self.assertRaises(AttributeError):
            u.password
    def test_password_verification(self):
        # verify_password accepts the right password and rejects others.
        u=User(password='cat')
        self.assertTrue(u.verify_password('cat'))
        self.assertFalse(u.verify_password('dog'))
    def test_password_salts_are_random(self):
        # The same password set twice must yield different hashes (salting).
        u=User(password='cat')
        u2=User(password='cat')
        self.assertTrue(u.password_hash!=u2.password_hash)
"834930269@qq.com"
] | 834930269@qq.com |
6e8c5bb4be321f6d97fe22346495d10f7176c10e | 900dac4c57f71f7ec9171178efa31bd95c29b002 | /core/network/nets/resnet_utils.py | 764108eaf11058a25b158bb9f185a9e870ba75e1 | [
"MIT"
] | permissive | atranitell/TensorGate | 9c6ad6f4d02b8de7de9d4e0b65e56436d06c6c63 | 1c134d86f4db6f0e44ec3b0c28d50cc493fb3aeb | refs/heads/master | 2021-03-22T02:53:20.498174 | 2018-03-02T05:05:47 | 2018-03-02T05:05:47 | 84,150,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,677 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains building blocks for various versions of Residual Networks.
Residual networks (ResNets) were proposed in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385, 2015
More variants were introduced in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027, 2016
We can obtain different ResNet variants by changing the network depth, width,
and form of residual unit. This module implements the infrastructure for
building them. Concrete ResNet units and full ResNet networks are implemented in
the accompanying resnet_v1.py and resnet_v2.py modules.
Compared to https://github.com/KaimingHe/deep-residual-networks, in the current
implementation we subsample the output activations in the last residual unit of
each block, instead of subsampling the input activations in the first residual
unit of each block. The two implementations give identical results but our
implementation is more memory efficient.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
slim = tf.contrib.slim
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
  """A (scope, unit_fn, args) record describing one ResNet block.

  Fields:
    scope: The variable-scope name used for this `Block`.
    unit_fn: Callable implementing a single ResNet unit; it takes an input
      `Tensor` and returns the unit's output `Tensor`.
    args: One entry per unit in the block — a (depth, depth_bottleneck,
      stride) tuple passed as arguments to `unit_fn`.
  """
def subsample(inputs, factor, scope=None):
  """Subsamples `inputs` along its spatial dimensions.

  Uses a 1x1 max pool with the given stride, which simply picks every
  `factor`-th activation without mixing values.

  Args:
    inputs: A `Tensor` of size [batch, height_in, width_in, channels].
    factor: The subsampling factor; 1 is a no-op.
    scope: Optional variable_scope for the pooling op.

  Returns:
    A `Tensor` of size [batch, height_out, width_out, channels] — the input
    unchanged when factor == 1, otherwise the max-pooled result.
  """
  if factor == 1:
    return inputs
  return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
  """Strided 2-D convolution with 'SAME'-style padding.

  For stride == 1 this is an ordinary 'SAME' convolution. For stride > 1 it
  zero-pads explicitly and convolves with 'VALID' padding, which keeps the
  output aligned with `subsample(slim.conv2d(..., padding='SAME'), stride)`
  even when the input height or width is even — plain strided 'SAME'
  convolution differs in that case.

  Args:
    inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
    num_outputs: An integer, the number of output filters.
    kernel_size: An int with the kernel_size of the filters.
    stride: An integer, the output stride.
    rate: An integer, rate for atrous convolution.
    scope: Scope.

  Returns:
    output: A 4-D tensor of size [batch, height_out, width_out, channels]
      with the convolution output.
  """
  if stride == 1:
    return slim.conv2d(inputs, num_outputs, kernel_size, stride=1, rate=rate,
                       padding='SAME', scope=scope)
  # Atrous convolution widens the effective kernel footprint.
  effective_kernel = kernel_size + (kernel_size - 1) * (rate - 1)
  pad_before = (effective_kernel - 1) // 2
  pad_after = (effective_kernel - 1) - pad_before
  spatial_pad = [pad_before, pad_after]
  padded = tf.pad(inputs, [[0, 0], spatial_pad, spatial_pad, [0, 0]])
  return slim.conv2d(padded, num_outputs, kernel_size, stride=stride,
                     rate=rate, padding='VALID', scope=scope)
@slim.add_arg_scope
def stack_blocks_dense(net, blocks, output_stride=None,
                       outputs_collections=None):
  """Stacks ResNet `Blocks` and controls output feature density.

  Creates scopes of the form 'block_name/unit_1', 'block_name/unit_2', etc.,
  and lets the caller pin the ratio of input to output spatial resolution
  (`output_stride`) — useful for dense prediction tasks such as semantic
  segmentation. Once the accumulated stride reaches `output_stride`, further
  units run with stride 1 and their nominal strides are folded into an
  atrous-convolution rate instead, so resolution is preserved.

  Args:
    net: A `Tensor` of size [batch, height, width, channels].
    blocks: A list of ResNet `Block` objects, one per block, describing the
      units to stack.
    output_stride: If `None`, the output is computed at the nominal network
      stride. Otherwise, the requested input/output resolution ratio, which
      must equal the product of unit strides up to some level of the network.
    outputs_collections: Collection to add the ResNet block outputs.

  Returns:
    net: Output tensor with stride equal to the specified output_stride.

  Raises:
    ValueError: If the target output_stride is not valid.
  """
  # Effective stride of the activations produced so far; once this would
  # exceed output_stride we must switch to atrous convolution instead.
  current_stride = 1
  # Dilation rate applied after the target output_stride is reached.
  rate = 1
  for block in blocks:
    with tf.variable_scope(block.scope, 'block', [net]) as sc:
      for unit_num, unit in enumerate(block.args, start=1):
        if output_stride is not None and current_stride > output_stride:
          raise ValueError('The target output_stride cannot be reached.')
        with tf.variable_scope('unit_%d' % unit_num, values=[net]):
          unit_stride = unit.get('stride', 1)
          if output_stride is not None and current_stride == output_stride:
            # Target density reached: run the unit with stride 1 and fold
            # its nominal stride into the dilation rate for later layers.
            net = block.unit_fn(net, rate=rate, **dict(unit, stride=1))
            rate *= unit_stride
          else:
            net = block.unit_fn(net, rate=1, **unit)
            current_stride *= unit_stride
      net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
  if output_stride is not None and current_stride != output_stride:
    raise ValueError('The target output_stride cannot be reached.')
  return net
def resnet_arg_scope(weight_decay=0.0001,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True,
                     activation_fn=tf.nn.relu,
                     use_batch_norm=True):
  """Builds the default arg_scope for ResNet models.

  The batch-normalization defaults match the reference ResNet models
  released at https://github.com/KaimingHe/deep-residual-networks; they may
  need tuning when training from scratch.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_decay: The moving average decay when estimating layer
      activation statistics in batch normalization.
    batch_norm_epsilon: Small constant to prevent division by zero when
      normalizing activations by their variance in batch normalization.
    batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale
      the activations in the batch normalization layer.
    activation_fn: The activation function which is used in ResNet.
    use_batch_norm: Whether or not to use batch normalization.

  Returns:
    An `arg_scope` to use for the resnet models.
  """
  batch_norm_params = {
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'updates_collections': None
  }
  normalizer = slim.batch_norm if use_batch_norm else None
  with slim.arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=slim.variance_scaling_initializer(),
      activation_fn=activation_fn,
      normalizer_fn=normalizer,
      normalizer_params=batch_norm_params):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
      # 'SAME' padding for pool1 keeps feature alignment simple for dense
      # prediction tasks (as in https://github.com/facebook/fb.resnet.torch).
      # The original 'Deep Residual Learning' code uses 'VALID' instead;
      # switch via slim.arg_scope([slim.max_pool2d], padding='VALID').
      with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
        return arg_sc
| [
"atranitell@gmail.com"
] | atranitell@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.