blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 246
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6d36440509a5ce458028cddd6f11386e9c9f9b3e | 37c189ce1dfe146df449d61add792de639d04321 | /api/can_api_v2_definition.py | e4ceaddb9c7ebf78ed14e4f9807f171fc516f0fe | [
"MIT",
"CC-BY-4.0"
] | permissive | yangyijane/covid-data-model | 2f9d1db874e2b883f4c856989d1e0876ab293a77 | 3d7d8ec65fbabf1ce5e1749f328c96cdf6199905 | refs/heads/main | 2023-08-22T06:39:33.401142 | 2021-10-25T15:59:53 | 2021-10-25T15:59:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,290 | py | from typing import List, Optional, Dict
import enum
import textwrap
from libs.datasets.dataset_utils import AggregationLevel
from libs import base_model
from libs.datasets import timeseries
import pydantic
import datetime
from covidactnow.datapublic.common_fields import GetByValueMixin
CDC_TRANSMISSION_LEVEL_DESCRIPTION = textwrap.dedent(
"""
Community transmission level for region, calculated using the CDC definition.
Possible values:
- 0: Low
- 1: Moderate
- 2: Substantial
- 3: High
- 4: Unknown
See [definitions of CDC community transmission levels](
https://covid.cdc.gov/covid-data-tracker/#cases_community) for more
details.
Note that the value may differ from what the CDC website reports
given we have different data sources. We have also introduced an
"Unknown" level for when both case data and test positivity data are
missing for at least 15 days. The CDC does not have an "Unknown"
level and instead will designate a location as "Low" when case and
test positivity data are missing.
"""
)
class TestPositivityRatioMethod(GetByValueMixin, enum.Enum):
    """Method used to determine test positivity ratio."""

    # NOTE(review): GetByValueMixin presumably enables lookup of members by
    # their string value — confirm in covidactnow common_fields.
    # Member values are the strings that appear in serialized output.
    CMSTesting = "CMSTesting"
    CDCTesting = "CDCTesting"
    HHSTesting = "HHSTesting"
    VALORUM = "Valorum"
    COVID_TRACKING = "covid_tracking"
    OTHER = "other"
class FieldSourceType(GetByValueMixin, enum.Enum):
    """The data source of a field (metric or actual). This enumeration lists the places from which
    CAN fetches data. The source is tracked on a per field and region timeseries basis."""

    # Member values are the strings used for `FieldSource.type` in API output.
    NYTimes = "NYTimes"
    CMSTesting = "CMSTesting"
    CDCTesting = "CDCTesting"
    HHSTesting = "HHSTesting"
    HHSHospital = "HHSHospital"
    VALORUM = "Valorum"
    COVID_TRACKING = "covid_tracking"
    USA_FACTS = "USAFacts"
    TestAndTrace = "TestAndTrace"
    CANScrapersStateProviders = "CANScrapersStateProviders"
    OTHER = "other"
class TestPositivityRatioDetails(base_model.APIBaseModel):
    """Details about how the test positivity ratio was calculated."""

    # Required field: the method/source that produced the ratio.
    source: TestPositivityRatioMethod = pydantic.Field(
        ..., description="Source data for test positivity ratio."
    )
class DemographicDistributions(base_model.APIBaseModel):
    """Distributions of demographic data.

    Note that different regions may have different demographic distributions for
    the same field. For instance, health departments in different states may report
    different age ranges.
    The data provided matches the source distributions.
    """

    # Each mapping goes from a source-reported bucket name (e.g. an age range)
    # to an integer value — presumably a count; confirm against the data source.
    age: Optional[Dict[str, int]] = pydantic.Field(None)
    race: Optional[Dict[str, int]] = pydantic.Field(None)
    ethnicity: Optional[Dict[str, int]] = pydantic.Field(None)
    sex: Optional[Dict[str, int]] = pydantic.Field(None)
class HospitalResourceUtilization(base_model.APIBaseModel):
    """Capacity and current usage for one hospital resource.

    Used for both acute beds (`Actuals.hospitalBeds`) and ICU beds
    (`Actuals.icuBeds`).
    """

    capacity: Optional[int] = pydantic.Field(..., description="Total capacity for resource.")
    currentUsageTotal: Optional[int] = pydantic.Field(
        ..., description="Currently used capacity for resource by all patients (COVID + Non-COVID)"
    )
    currentUsageCovid: Optional[int] = pydantic.Field(
        ..., description="Currently used capacity for resource by COVID "
    )
class Actuals(base_model.APIBaseModel):
    """Known actuals data.

    Fields declared with a required sentinel (``...``) must always appear in
    serialized output, though most are ``Optional`` and may be null; fields
    with a ``None`` default may be omitted.
    """

    cases: Optional[int] = pydantic.Field(
        ..., description="Cumulative confirmed or suspected cases."
    )
    deaths: Optional[int] = pydantic.Field(
        ...,
        description=(
            "Cumulative deaths that are suspected or confirmed to have been caused by COVID-19."
        ),
    )
    positiveTests: Optional[int] = pydantic.Field(
        ..., description="Cumulative positive test results to date"
    )
    negativeTests: Optional[int] = pydantic.Field(
        ..., description="Cumulative negative test results to date"
    )
    contactTracers: Optional[int] = pydantic.Field(..., description="Number of Contact Tracers")
    hospitalBeds: Optional[HospitalResourceUtilization] = pydantic.Field(
        ...,
        description="""
Information about acute bed utilization details.
Fields:
 * capacity - Current staffed acute bed capacity.
 * currentUsageTotal - Total number of acute beds currently in use
 * currentUsageCovid - Number of acute beds currently in use by COVID patients.
""",
    )
    icuBeds: Optional[HospitalResourceUtilization] = pydantic.Field(
        ...,
        description="""
Information about ICU bed utilization details.
Fields:
 * capacity - Current staffed ICU bed capacity.
 * currentUsageTotal - Total number of ICU beds currently in use
 * currentUsageCovid - Number of ICU beds currently in use by COVID patients.
""",
    )
    newCases: Optional[int] = pydantic.Field(
        ...,
        description="""
New confirmed or suspected cases.
New cases are a processed timeseries of cases - summing new cases may not equal
the cumulative case count.
Processing steps:
 1. If a region does not report cases for a period of time but then begins reporting again,
we will exclude the first day that reporting recommences. This first day likely includes
multiple days worth of cases and can be misleading to the overall series.
 2. We remove any days with negative new cases.
 3. We apply an outlier detection filter to the timeseries, which removes any data
points that seem improbable given recent numbers. Many times this is due to
backfill of previously unreported cases.
""",
    )
    newDeaths: Optional[int] = pydantic.Field(
        ...,
        description="""
New confirmed or suspected COVID-19 deaths.
New deaths is an estimate of deaths per day; summing new deaths may not equal the
cumulative death count.
Processing steps:
 1. If a region does not report deaths for a period of time but then begins reporting again,
we will exclude the first day that reporting recommences. This first day likely includes
multiple days worth of deaths and can be misleading to the overall series.
 2. We remove any days with negative new deaths.
 3. We apply an outlier detection filter to the timeseries, which removes any data
points that seem improbable given recent numbers. Many times this is due to
backfill of previously unreported deaths.
""",
    )
    vaccinesDistributed: Optional[int] = pydantic.Field(
        None, description="Number of vaccine doses distributed."
    )
    vaccinationsInitiated: Optional[int] = pydantic.Field(
        None,
        description="""
Number of vaccinations initiated.
This value may vary by type of vaccine, but for Moderna and Pfizer this indicates
number of people vaccinated with the first dose.
""",
    )
    vaccinationsCompleted: Optional[int] = pydantic.Field(
        None,
        description="""
Number of vaccinations completed.
This value may vary by type of vaccine, but for Moderna and Pfizer this indicates
number of people vaccinated with both the first and second dose.
""",
    )
    vaccinesAdministered: Optional[int] = pydantic.Field(
        None, description="Total number of vaccine doses administered."
    )
    vaccinesAdministeredDemographics: Optional[DemographicDistributions] = pydantic.Field(
        None, description="Demographic distributions for administered vaccines."
    )
    vaccinationsInitiatedDemographics: Optional[DemographicDistributions] = pydantic.Field(
        None, description="Demographic distributions for initiated vaccinations."
    )
    # When adding a new "actual" field here remember to add a `FieldAnnotations` in `Annotations`.
class ActualsTimeseriesRow(Actuals):
    """Actual data for a specific day.

    Extends `Actuals` with the observation date.
    """

    date: datetime.date = pydantic.Field(..., description="Date of timeseries data point")
class AnomalyAnnotation(base_model.APIBaseModel):
    """A single observation flagged as anomalous on a given date."""

    date: datetime.date = pydantic.Field(..., description="Date of anomaly")
    type: timeseries.TagType = pydantic.Field(..., description="Type of annotation")
    original_observation: float = pydantic.Field(
        ..., description="Original value on this date detected as anomalous."
    )
class FieldSource(base_model.APIBaseModel):
    """Provenance information for the data behind one field."""

    type: Optional[FieldSourceType] = pydantic.Field(
        None, description="The type of data source from a CAN list of data source types"
    )
    url: Optional[str] = pydantic.Field(
        None, description="URL of a webpage containing the data at the source"
    )
    name: Optional[str] = pydantic.Field(None, description="A human readable name of the source")
class FieldAnnotations(base_model.APIBaseModel):
    """Annotations associated with one field."""

    # Data provenance entries and observations flagged as anomalous.
    sources: List[FieldSource]
    anomalies: List[AnomalyAnnotation]
class Annotations(base_model.APIBaseModel):
    """Annotations for each field.

    Each attribute mirrors a field of `Actuals` or `Metrics` and carries the
    sources/anomalies recorded for that field.
    """

    # Keep this list of fields in sync with the fields in `Actuals`
    cases: Optional[FieldAnnotations] = pydantic.Field(None, description="Annotations for cases")
    deaths: Optional[FieldAnnotations] = pydantic.Field(None, description="Annotations for deaths")
    positiveTests: Optional[FieldAnnotations] = pydantic.Field(
        None, description="Annotations for positiveTests"
    )
    negativeTests: Optional[FieldAnnotations] = pydantic.Field(
        None, description="Annotations for negativeTests"
    )
    contactTracers: Optional[FieldAnnotations] = pydantic.Field(
        None, description="Annotations for contactTracers"
    )
    hospitalBeds: Optional[FieldAnnotations] = pydantic.Field(
        None, description="Annotations for hospitalBeds"
    )
    icuBeds: Optional[FieldAnnotations] = pydantic.Field(
        None, description="Annotations for icuBeds"
    )
    newCases: Optional[FieldAnnotations] = pydantic.Field(
        None, description="Annotations for newCases"
    )
    newDeaths: Optional[FieldAnnotations] = pydantic.Field(
        None, description="Annotations for newDeaths"
    )
    vaccinesDistributed: Optional[FieldAnnotations] = pydantic.Field(
        None, description="Annotations for vaccinesDistributed"
    )
    vaccinationsInitiated: Optional[FieldAnnotations] = pydantic.Field(
        None, description="Annotations for vaccinationsInitiated"
    )
    vaccinationsCompleted: Optional[FieldAnnotations] = pydantic.Field(
        None, description="Annotations for vaccinationsCompleted"
    )
    vaccinesAdministered: Optional[FieldAnnotations] = pydantic.Field(
        None, description="Annotations for vaccinesAdministered"
    )
    # Added to restore sync with `Actuals` (see comment above): these two
    # demographic fields existed in `Actuals` but were missing here.
    vaccinesAdministeredDemographics: Optional[FieldAnnotations] = pydantic.Field(
        None, description="Annotations for vaccinesAdministeredDemographics"
    )
    vaccinationsInitiatedDemographics: Optional[FieldAnnotations] = pydantic.Field(
        None, description="Annotations for vaccinationsInitiatedDemographics"
    )
    # Keep this list of fields in sync with the fields in `Metrics`
    testPositivityRatio: Optional[FieldAnnotations] = pydantic.Field(
        None, description="Annotations for testPositivityRatio"
    )
    caseDensity: Optional[FieldAnnotations] = pydantic.Field(
        None, description="Annotations for caseDensity"
    )
    contactTracerCapacityRatio: Optional[FieldAnnotations] = pydantic.Field(
        None, description="Annotations for contactTracerCapacityRatio"
    )
    infectionRate: Optional[FieldAnnotations] = pydantic.Field(
        None, description="Annotations for infectionRate"
    )
    infectionRateCI90: Optional[FieldAnnotations] = pydantic.Field(
        None, description="Annotations for infectionRateCI90"
    )
    icuCapacityRatio: Optional[FieldAnnotations] = pydantic.Field(
        None, description="Annotations for icuCapacityRatio"
    )
    vaccinationsInitiatedRatio: Optional[FieldAnnotations] = pydantic.Field(
        None, description="Annotations for vaccinationsInitiatedRatio"
    )
    vaccinationsCompletedRatio: Optional[FieldAnnotations] = pydantic.Field(
        None, description="Annotations for vaccinationsCompletedRatio"
    )
class Metrics(base_model.APIBaseModel):
    """Calculated metrics data based on known actuals."""

    testPositivityRatio: Optional[float] = pydantic.Field(
        ...,
        description="Ratio of people who test positive calculated using a 7-day rolling average.",
    )
    testPositivityRatioDetails: Optional[TestPositivityRatioDetails] = pydantic.Field(None)
    caseDensity: Optional[float] = pydantic.Field(
        ...,
        description="The number of cases per 100k population calculated using a 7-day rolling average.",
    )
    contactTracerCapacityRatio: Optional[float] = pydantic.Field(
        ...,
        description=(
            "Ratio of currently hired tracers to estimated "
            "tracers needed based on 7-day daily case average."
        ),
    )
    infectionRate: Optional[float] = pydantic.Field(
        ..., description="R_t, or the estimated number of infections arising from a typical case."
    )
    infectionRateCI90: Optional[float] = pydantic.Field(
        ...,
        description="90th percentile confidence interval upper endpoint of the infection rate.",
    )
    icuCapacityRatio: Optional[float] = pydantic.Field(
        ...,
        description="Ratio of staffed intensive care unit (ICU) beds that are currently in use.",
    )
    vaccinationsInitiatedRatio: Optional[float] = pydantic.Field(
        None, description=("Ratio of population that has initiated vaccination.")
    )
    vaccinationsCompletedRatio: Optional[float] = pydantic.Field(
        None, description=("Ratio of population that has completed vaccination.")
    )

    @staticmethod
    def empty():
        """Return a Metrics object with every required metric explicitly null."""
        # Fields declared with a None default (details and vaccination ratios)
        # are deliberately omitted here.
        return Metrics(
            testPositivityRatio=None,
            caseDensity=None,
            contactTracerCapacityRatio=None,
            infectionRate=None,
            infectionRateCI90=None,
            icuCapacityRatio=None,
        )
@enum.unique
class RiskLevel(enum.Enum):
    """COVID Risk Level.
    ## Risk Level Definitions
     *Low* - On track to contain COVID
     *Medium* - Slow disease growth
     *High* - At risk of outbreak
     *Critical* - Active or imminent outbreak
     *Unknown* - Risk unknown
     *Extreme* - Severe outbreak
    """

    # NOTE(review): these integer values presumably appear in serialized API
    # output (the enum is used as a field type in RiskLevels) — do not renumber.
    LOW = 0
    MEDIUM = 1
    HIGH = 2
    CRITICAL = 3
    UNKNOWN = 4
    EXTREME = 5
@enum.unique
class CDCTransmissionLevel(enum.Enum):
    """CDC community transmission level.

    Values follow CDC_TRANSMISSION_LEVEL_DESCRIPTION: 0=Low through 3=High,
    with 4=Unknown as a CAN-specific addition.
    """

    LOW = 0
    MODERATE = 1
    SUBSTANTIAL = 2
    HIGH = 3
    UNKNOWN = 4
class RiskLevels(base_model.APIBaseModel):
    """COVID risk levels for a region."""

    overall: RiskLevel = pydantic.Field(..., description="Overall risk level for region.")
    testPositivityRatio: RiskLevel = pydantic.Field(
        ..., description="Test positivity ratio risk level."
    )
    caseDensity: RiskLevel = pydantic.Field(..., description="Case density risk level.")
    contactTracerCapacityRatio: RiskLevel = pydantic.Field(
        ..., description="Contact tracer capacity ratio risk level."
    )
    infectionRate: RiskLevel = pydantic.Field(..., description="Infection rate risk level.")
    icuCapacityRatio: RiskLevel = pydantic.Field(..., description="ICU capacity ratio risk level.")

    @classmethod
    def empty(cls) -> "RiskLevels":
        """Return a RiskLevels object with every level set to LOW."""
        return RiskLevels(
            overall=RiskLevel.LOW,
            testPositivityRatio=RiskLevel.LOW,
            caseDensity=RiskLevel.LOW,
            contactTracerCapacityRatio=RiskLevel.LOW,
            infectionRate=RiskLevel.LOW,
            icuCapacityRatio=RiskLevel.LOW,
        )
# Additional class used for bulk timeseries where we are not including all risk levels
# right now, only the overall risk level.
class RiskLevelsRow(base_model.APIBaseModel):
    """Subset of risk levels exposed on bulk timeseries rows."""

    overall: RiskLevel = pydantic.Field(..., description="Overall risk level for region.")
    caseDensity: RiskLevel = pydantic.Field(..., description="Case density risk level for region.")
class RiskLevelTimeseriesRow(RiskLevelsRow):
    """Timeseries data for risk levels. Currently only surfacing overall risk level for region.

    Extends `RiskLevelsRow` with the observation date.
    """

    date: datetime.date = pydantic.Field(..., description="Date of timeseries data point")
class MetricsTimeseriesRow(Metrics):
    """Metrics data for a specific day.

    Extends `Metrics` with the observation date.
    """

    date: datetime.date = pydantic.Field(..., description="Date of timeseries data point")
class CdcTransmissionLevelTimeseriesRow(base_model.APIBaseModel):
    """CDC community transmission level for a specific day."""

    date: datetime.date = pydantic.Field(..., description="Date of timeseries data point")
    cdcTransmissionLevel: CDCTransmissionLevel = pydantic.Field(
        ..., description=CDC_TRANSMISSION_LEVEL_DESCRIPTION
    )
class RegionSummary(base_model.APIBaseModel):
    """Summary of actual and prediction data for a single region.

    Identification fields (fips, country, state, county, level, locationId,
    lat/long, population) are followed by the nested data payloads
    (metrics, riskLevels, actuals, annotations).
    """

    fips: str = pydantic.Field(
        ...,
        description=(
            "FIPS Code. FIPS codes are either 2-digit state codes, "
            "5-digit county codes, 5-digit CBSA codes, or 1-digit '0' for the entire USA."
        ),
    )
    country: str = pydantic.Field(..., description="2-letter ISO-3166 Country code.")
    state: Optional[str] = pydantic.Field(
        ..., description="2-letter ANSI state code. For CBSA regions, state is omitted."
    )
    county: Optional[str] = pydantic.Field(..., description="County name")
    level: AggregationLevel = pydantic.Field(..., description="Level of region.")
    lat: Optional[float] = pydantic.Field(
        ..., description="Latitude of point within the state or county. Currently a placeholder."
    )
    locationId: str = pydantic.Field(
        ...,
        description="Location ID as defined here: https://github.com/covidatlas/li/blob/master/docs/reports-v1.md#general-notes",
    )
    long: Optional[float] = pydantic.Field(
        ..., description="Longitude of point within the state or county. Currently a placeholder."
    )
    population: int = pydantic.Field(
        ..., description="Total Population in geographic region.", gt=0
    )
    metrics: Metrics = pydantic.Field(...)
    riskLevels: RiskLevels = pydantic.Field(..., description="Risk levels for region.")
    cdcTransmissionLevel: CDCTransmissionLevel = pydantic.Field(
        ..., description=CDC_TRANSMISSION_LEVEL_DESCRIPTION
    )
    actuals: Actuals = pydantic.Field(...)
    annotations: Annotations = pydantic.Field(...)
    lastUpdatedDate: datetime.date = pydantic.Field(..., description="Date of latest data")
    url: Optional[str] = pydantic.Field(
        ..., description="URL linking to Covid Act Now location page."
    )
class RegionSummaryWithTimeseries(RegionSummary):
    """Summary data for a region with prediction timeseries data and actual timeseries data."""

    metricsTimeseries: List[MetricsTimeseriesRow] = pydantic.Field(...)
    actualsTimeseries: List[ActualsTimeseriesRow] = pydantic.Field(...)
    riskLevelsTimeseries: List[RiskLevelTimeseriesRow] = pydantic.Field(...)
    cdcTransmissionLevelTimeseries: List[CdcTransmissionLevelTimeseriesRow] = pydantic.Field(...)

    @property
    def region_summary(self) -> RegionSummary:
        """Project this object down to a plain `RegionSummary`."""
        # Iterating through self does not force any conversion
        # https://pydantic-docs.helpmanual.io/usage/exporting_models/#dictmodel-and-iteration
        data = {
            field: value for field, value in self if field in RegionSummary.__fields__
        }
        return RegionSummary(**data)
class AggregateRegionSummary(base_model.APIBaseModel):
    """Summary data for multiple regions."""

    # Pydantic custom root type: the model serializes as a bare JSON array.
    __root__: List[RegionSummary] = pydantic.Field(...)
class AggregateRegionSummaryWithTimeseries(base_model.APIBaseModel):
    """Timeseries and summary data for multiple regions."""

    # Pydantic custom root type: the model serializes as a bare JSON array.
    __root__: List[RegionSummaryWithTimeseries] = pydantic.Field(...)
class RegionTimeseriesRowWithHeader(base_model.APIBaseModel):
    """Prediction timeseries row with location information.

    One day of data for one region, flattened together with the region's
    identifying header fields.
    """

    date: datetime.date = pydantic.Field(..., description="Date of timeseries data point")
    country: str = pydantic.Field(..., description="2-letter ISO-3166 Country code.")
    state: Optional[str] = pydantic.Field(..., description="2-letter ANSI state code.")
    county: Optional[str] = pydantic.Field(..., description="County name")
    fips: str = pydantic.Field(
        ...,
        description=(
            "FIPS Code. FIPS codes are either 2-digit state codes, "
            "5-digit county codes, 5-digit CBSA codes, or 1-digit '0' for the entire USA."
        ),
    )
    lat: Optional[float] = pydantic.Field(
        ..., description="Latitude of point within the state or county"
    )
    long: Optional[float] = pydantic.Field(
        ..., description="Longitude of point within the state or county"
    )
    locationId: str = pydantic.Field(
        ...,
        description="Location ID as defined here: https://github.com/covidatlas/li/blob/master/docs/reports-v1.md#general-notes",
    )
    actuals: Optional[Actuals] = pydantic.Field(..., description="Actuals for given day")
    metrics: Optional[Metrics] = pydantic.Field(..., description="Metrics for given day")
    riskLevels: Optional[RiskLevelsRow] = pydantic.Field(
        ..., description="Risk Levels for given day"
    )
    cdcTransmissionLevel: Optional[CDCTransmissionLevel] = pydantic.Field(
        ..., description=CDC_TRANSMISSION_LEVEL_DESCRIPTION
    )
class AggregateFlattenedTimeseries(base_model.APIBaseModel):
    """Flattened timeseries data for multiple regions."""

    # Pydantic custom root type: the model serializes as a bare JSON array.
    __root__: List[RegionTimeseriesRowWithHeader] = pydantic.Field(...)
| [
"noreply@github.com"
] | yangyijane.noreply@github.com |
c3eaff4220b08fc56b7f529974b9922daf3908ab | 765e0744b33ee9e9b85829d8af542141ed0d1210 | /mproxy.py | c24eedd38d4254ff9ff84835e9aada4814eba6f5 | [] | no_license | tylerwowen/mitm_proxy | 0b5a0ebb13424c3993dce2c431b1ce54c5e8cd66 | 367d190b238c6f497ce4791abd3360e0c6447c1c | refs/heads/master | 2023-04-07T23:33:38.094417 | 2023-03-28T22:35:47 | 2023-03-28T22:35:47 | 55,040,650 | 2 | 0 | null | 2023-03-28T22:35:48 | 2016-03-30T07:07:37 | Python | UTF-8 | Python | false | false | 27 | py | import src
src.__init__()
| [
"ouyang@cs.ucsb.edu"
] | ouyang@cs.ucsb.edu |
b301f0492fba0e2d359e1d51edb2d2c889cdd464 | 7836becef5fce2c55bef3ac08765f856f1ab2689 | /lx_blog/api_v1/profile.py | e0d7fd6f9b83eef6a1333c73621f3eb50556aa1f | [] | no_license | 2218997647/Blog_Test_LX | b049acd392c146fc054c918d32aa8976aca70b9e | d5e0c3e16e10a22e1f9547375256785a86f055b7 | refs/heads/master | 2020-04-05T21:48:19.407676 | 2018-11-12T15:37:42 | 2018-11-12T15:37:42 | 157,233,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,803 | py | # coding:utf-8
from . import api
from lx_blog.utils.commons import login_required
from flask import g, current_app, jsonify, request, session
from lx_blog.utils.response_code import RET
from lx_blog.models import User
from lx_blog import db, constants
@api.route("/users/name", methods=["PUT"])
@login_required
def change_user_name():
    """Update the logged-in user's display name."""
    # login_required stores the authenticated user's id on flask.g.
    user_id = g.user_id
    # Read the requested name from the JSON request body.
    req_data = request.get_json()
    if not req_data:
        return jsonify(errno=RET.PARAMERR, errmsg="参数不完整")
    name = req_data.get("name")  # the name the user wants to set
    if not name:
        return jsonify(errno=RET.PARAMERR, errmsg="名字不能为空")
    # Persist the name; duplicate names are rejected by the database's
    # unique index, which triggers the exception branch.
    try:
        User.query.filter_by(id=user_id).update({"name": name})
        db.session.commit()
    except Exception as e:
        current_app.logger.error(e)
        db.session.rollback()
        return jsonify(errno=RET.DBERR, errmsg="设置用户错误")
    # Keep the cached name in the session in sync with the database.
    session["name"] = name
    return jsonify(errno=RET.OK, errmsg="OK", data={"name": name})
@api.route("/user", methods=["GET"])
@login_required
def get_user_profile():
    """Return the logged-in user's profile information."""
    user_id = g.user_id
    # Load the user record from the database.
    try:
        user = User.query.get(user_id)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="获取用户信息失败")
    if user is None:
        return jsonify(errno=RET.NODATA, errmsg="无效操作")
    return jsonify(errno=RET.OK, errmsg="OK", data=user.to_dict())
| [
"2218997647@qq.com"
] | 2218997647@qq.com |
db1facd386d242b1b380c5534537494db308c88d | 002aca3621afbe787f5f2133d3dbe8af6e5e9e33 | /contraClientes.py | dfaa170150fe8c98060e42d61179b22d6a65867c | [] | no_license | Toti848/Joselyn | 5ce55f38ac3092b5f283ddad47a0351ad78eefe3 | 735e123ef7a80770d54a26afe8ef9f112b37e7be | refs/heads/master | 2022-12-02T06:59:42.999170 | 2020-08-20T03:35:21 | 2020-08-20T03:35:21 | 284,575,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,978 | py | from tkinter import *
from tkinter import font
from tkinter import messagebox as msg
from tkinter import ttk
class Main:
    """Login window that gates access to the client chat."""

    def __init__(self):
        # Root window
        self.raiz = Tk()
        self.raiz.title ("Acceso a los Clientes")
        self.raiz.geometry('600x200')
        # Bold font used for the title
        self.fuente = font.Font(weight="bold")
        self.user = StringVar()
        self.pasw = StringVar()
        # Title label
        self.lb_tituloPantalla = Label(self.raiz, text = "ACCESO DEL LOS CLIENTES", font = self.fuente)
        self.lb_tituloPantalla.place(x = 180, y = 20)
        # Username label and entry
        self.lb_User = Label(self.raiz, text = "User:")
        self.lb_User.place(x = 100, y = 60)
        self.txt_User = Entry(self.raiz, textvariable=self.user, justify="right", width = 30)
        self.txt_User.place(x = 230, y = 60)
        # Password label and entry
        self.lb_Password = Label(self.raiz, text = "Password:")
        self.lb_Password.place(x = 100, y = 90)
        self.txt_Password = Entry(self.raiz, textvariable=self.pasw, justify="right", width = 30)
        self.txt_Password.place(x = 230, y = 90)
        # Clear button
        self.bt_borrar = Button(self.raiz, text="Limpiar", width=15, command = self.Limpiar)
        self.bt_borrar.place(x = 190, y = 130)
        # Log-in button
        self.bt_enviar = Button(self.raiz, text="Acceder", width=15, command = self.Acceder)
        self.bt_enviar.place(x = 310, y = 130)
        # Blocks until the window is closed or destroyed by Acceder.
        self.raiz.mainloop()

    def Acceder(self):
        """Validate the credentials and open the client chat on success."""
        # NOTE(review): credentials are hard-coded in client source — consider
        # moving validation to the server side.
        if(self.user.get() == "Cliente" and self.pasw.get() == "p34"):
            from Cliente_Socket import Chat_C
            self.raiz.destroy()
            Chat_C()
        else:
            msg.showinfo("Error", "La contrasena o ususario es incorrecta")

    def Limpiar(self):
        """Clear both entry fields."""
        self.user.set("")
        self.pasw.set("")
def main():
    """Entry point: launch the client login window (blocks until closed)."""
    Main()
    return 0


if __name__ == "__main__":
    main()
"noreply@github.com"
] | Toti848.noreply@github.com |
856043c72dfa18187c13e630e6c9e58fcc3c660b | a56a74b362b9263289aad96098bd0f7d798570a2 | /venv/lib/python3.8/site-packages/matplotlib/_pylab_helpers.py | 2407b573c4aabbe64132bc3a0ae71163132785bc | [
"MIT"
] | permissive | yoonkt200/ml-theory-python | 5812d06841d30e1068f6592b5730a40e87801313 | 7643136230fd4f291b6e3dbf9fa562c3737901a2 | refs/heads/master | 2022-12-21T14:53:21.624453 | 2021-02-02T09:33:07 | 2021-02-02T09:33:07 | 132,319,537 | 13 | 14 | MIT | 2022-12-19T17:23:57 | 2018-05-06T08:17:45 | Python | UTF-8 | Python | false | false | 3,445 | py | """
Manage figures for pyplot interface.
"""
import atexit
import gc
class Gcf:
    """
    Singleton registry for a set of integer-numbered figures.

    This class is never instantiated; all state lives in two class
    attributes (a list and a dictionary) that are manipulated through
    the classmethods below.

    Attributes
    ----------
    figs
        dictionary of the form {*num*: *manager*, ...}
    _activeQue
        list of *managers*, with active one at the end
    """
    _activeQue = []
    figs = {}

    @classmethod
    def get_fig_manager(cls, num):
        """
        Return the manager of figure *num* after making it the active
        figure, or *None* when no such figure is registered.
        """
        mgr = cls.figs.get(num)
        if mgr is None:
            return None
        cls.set_active(mgr)
        return mgr

    @classmethod
    def destroy(cls, num):
        """
        Try to remove all traces of figure *num*.

        In the interactive backends, this is bound to the
        window "destroy" and "delete" events.
        """
        if num not in cls.figs:
            return
        mgr = cls.figs[num]
        mgr.canvas.mpl_disconnect(mgr._cidgcf)
        cls._activeQue.remove(mgr)
        cls.figs.pop(num)
        mgr.destroy()
        gc.collect(1)

    @classmethod
    def destroy_fig(cls, fig):
        "*fig* is a Figure instance"
        for mgr in list(cls.figs.values()):
            if mgr.canvas.figure == fig:
                cls.destroy(mgr.num)
                return

    @classmethod
    def destroy_all(cls):
        # this is need to ensure that gc is available in corner cases
        # where modules are being torn down after install with easy_install
        import gc  # noqa
        for mgr in list(cls.figs.values()):
            mgr.canvas.mpl_disconnect(mgr._cidgcf)
            mgr.destroy()
        cls._activeQue = []
        cls.figs.clear()
        gc.collect(1)

    @classmethod
    def has_fignum(cls, num):
        """Return *True* if figure *num* exists."""
        return num in cls.figs

    @classmethod
    def get_all_fig_managers(cls):
        """Return a list of all registered figure managers."""
        return list(cls.figs.values())

    @classmethod
    def get_num_fig_managers(cls):
        """Return the number of figures being managed."""
        return len(cls.figs)

    @classmethod
    def get_active(cls):
        """Return the manager of the active figure, or *None*."""
        return cls._activeQue[-1] if cls._activeQue else None

    @classmethod
    def set_active(cls, manager):
        """Make the figure corresponding to *manager* the active one."""
        cls._activeQue = [m for m in cls._activeQue if m != manager]
        cls._activeQue.append(manager)
        cls.figs[manager.num] = manager

    @classmethod
    def draw_all(cls, force=False):
        """Redraw every stale registered figure (all of them if *force*)."""
        for mgr in cls.get_all_fig_managers():
            if force or mgr.canvas.figure.stale:
                mgr.canvas.draw_idle()


atexit.register(Gcf.destroy_all)
| [
"kitae.yoon@deliveryhero.co.kr"
] | kitae.yoon@deliveryhero.co.kr |
6af792a1f4600d4fa1047802376523a08b69a6c1 | 7f2b233462e38f7fd2094bd4e304ca1b7525ec24 | /practice sessin.py | 7cc2b376eca0f3676554c0f2627c86df150aab19 | [] | no_license | vin531999/vin531999 | 919ec99b8dc21a3a8e19357967b72d16211dd733 | 647fb7d4e91b8d6a966a61c150831168c67677be | refs/heads/master | 2021-04-12T18:35:29.594555 | 2020-03-24T04:31:55 | 2020-03-24T04:31:55 | 249,100,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | n = int(input("enter the numbers :"))
# Collect the n entered integers and accumulate their product.
values = []
product = 1
for _ in range(n):
    entry = int(input(" "))
    values.append(entry)
    product = product * entry
print(product)
| [
"noreply@github.com"
] | vin531999.noreply@github.com |
62098dedacaa8ca0444becbb5aa5a5b7341645ea | d11f36debe9c5c2b5af87221782eebb4d6968d2e | /lesson19.py | fa9276880d4cc7e112e9e6e78b74ae6fef529263 | [] | no_license | OmorovAzat/lesson1 | cde12fcc19d86b22139ca9ac0773b059fd1165f8 | ad1d113c56bbc0961b0543c45b4df94eea3c0314 | refs/heads/master | 2023-05-07T08:31:58.450064 | 2021-05-30T08:54:51 | 2021-05-30T08:54:51 | 372,166,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | #Решение домашнего задания
# a = [1, 2, 3]
# b = [i * 2 for i in a]
# print(b)
# l1 = [1, 2, 3]
# res = 0
# for num in l1:
# res += num ** 2
# print(res)
# time1 = 3
# time2 = 6.7
# time3 = 11.8
#
# print(time1 // 2)
# print(time2 // 2)
# print(time3 // 2)
# s = 'Hello,world'
# if ' ' in s:
# s = s.upper()
# else:
# s = s.lower()
#
# print(s)
| [
"bdante025@gmail.com"
] | bdante025@gmail.com |
0d1f8ac232ce4709e84ae40a42d8f4df1accaf4b | d6b4f2a17eacfda27a2823e180c45ae783cea43b | /introducao-python/knn/iris-knn.py | 6778b550102d6ea70867ead5f0fa9ce434993cc0 | [] | no_license | leonardoFiedler/data-science-course | ba31d9c4b0cced259d554b00786981c8b4a6f1c9 | fe7f576a66091bc9d2db4e28e9368d05575315cd | refs/heads/master | 2020-09-04T19:30:03.941010 | 2019-11-22T21:56:13 | 2019-11-22T21:56:13 | 219,868,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
# Load the iris data and hold out 20% of it for evaluation.
(X, y) = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)

# Number of nearest neighbours that vote on each prediction.
k = 5

predictions = []
for query in X_test:
    # Squared Euclidean distance from the query point to every training row.
    sq_dist = np.square(X_train - query).sum(axis=1)
    # Labels of the k closest training samples, nearest first.
    neighbour_labels = [y_train[idx] for idx in np.argsort(sq_dist)[:k]]
    # Majority vote among the neighbours.
    predictions.append(pd.value_counts(neighbour_labels).idxmax())

print(predictions)
print('Score:', accuracy_score(y_test, predictions))
"leonardo.fiedler.96@gmail.com"
] | leonardo.fiedler.96@gmail.com |
337238a653f2c421c1f017238cbef58842b56a43 | 567ecf4ea5afbd7eb3003f7e14e00c7b9289b9c6 | /ax/storage/json_store/decoders.py | 7a586e03ddb3b32b0a5780c941e67e791e29d11a | [
"MIT"
] | permissive | danielrjiang/Ax | f55ef168a59381b5a03c6d51bc394f6c72ed0f39 | 43014b28683b3037b5c7307869cb9b75ca31ffb6 | refs/heads/master | 2023-03-31T12:19:47.118558 | 2019-12-02T16:47:39 | 2019-12-02T16:49:36 | 225,493,047 | 0 | 0 | MIT | 2019-12-03T00:09:52 | 2019-12-03T00:09:51 | null | UTF-8 | Python | false | false | 3,501 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from datetime import datetime
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from ax.core.arm import Arm
from ax.core.base_trial import TrialStatus
from ax.core.batch_trial import AbandonedArm, BatchTrial, GeneratorRunStruct
from ax.core.generator_run import GeneratorRun
from ax.core.runner import Runner
from ax.core.trial import Trial
if TYPE_CHECKING:
# import as module to make sphinx-autodoc-typehints happy
from ax import core # noqa F401 # pragma: no cover
def batch_trial_from_json(
experiment: "core.experiment.Experiment",
index: int,
trial_type: Optional[str],
status: TrialStatus,
time_created: datetime,
time_completed: Optional[datetime],
time_staged: Optional[datetime],
time_run_started: Optional[datetime],
abandoned_reason: Optional[str],
run_metadata: Optional[Dict[str, Any]],
generator_run_structs: List[GeneratorRunStruct],
runner: Optional[Runner],
abandoned_arms_metadata: Dict[str, AbandonedArm],
num_arms_created: int,
status_quo: Optional[Arm],
status_quo_weight_override: float,
optimize_for_power: Optional[bool],
) -> BatchTrial:
"""Load Ax BatchTrial from JSON.
Other classes don't need explicit deserializers, because we can just use
their constructors (see decoder.py). However, the constructor for Batch
does not allow us to exactly recreate an existing object.
"""
batch = BatchTrial(experiment=experiment)
batch._index = index
batch._trial_type = trial_type
batch._status = status
batch._time_created = time_created
batch._time_completed = time_completed
batch._time_staged = time_staged
batch._time_run_started = time_run_started
batch._abandoned_reason = abandoned_reason
batch._run_metadata = run_metadata or {}
batch._generator_run_structs = generator_run_structs
batch._runner = runner
batch._abandoned_arms_metadata = abandoned_arms_metadata
batch._num_arms_created = num_arms_created
batch._status_quo = status_quo
batch._status_quo_weight_override = status_quo_weight_override
batch.optimize_for_power = optimize_for_power
return batch
def trial_from_json(
experiment: "core.experiment.Experiment",
index: int,
trial_type: Optional[str],
status: TrialStatus,
time_created: datetime,
time_completed: Optional[datetime],
time_staged: Optional[datetime],
time_run_started: Optional[datetime],
abandoned_reason: Optional[str],
run_metadata: Optional[Dict[str, Any]],
generator_run: GeneratorRun,
runner: Optional[Runner],
num_arms_created: int,
) -> Trial:
"""Load Ax trial from JSON.
Other classes don't need explicit deserializers, because we can just use
their constructors (see decoder.py). However, the constructor for Trial
does not allow us to exactly recreate an existing object.
"""
trial = Trial(experiment=experiment, generator_run=generator_run)
trial._index = index
trial._trial_type = trial_type
trial._status = status
trial._time_created = time_created
trial._time_completed = time_completed
trial._time_staged = time_staged
trial._time_run_started = time_run_started
trial._abandoned_reason = abandoned_reason
trial._run_metadata = run_metadata or {}
trial._runner = runner
trial._num_arms_created = num_arms_created
return trial
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
278d818322d05275fb24dbbd4ce90fbb73f4aad5 | cbd347d69f4ae9725ec479795e21ef45d6ccf41a | /tests/constants.py | 968375f731522300035d26c8d7a6828c743fad50 | [
"Apache-2.0"
] | permissive | pyni/perception | 9d1e398964312d8ebdef0374e1e089fa7ff28397 | 81262bd05524e9d28568d55107718783023ae14c | refs/heads/master | 2020-03-24T02:23:21.647095 | 2018-07-19T20:05:00 | 2018-07-19T20:05:00 | 142,373,747 | 1 | 0 | Apache-2.0 | 2018-07-26T01:46:36 | 2018-07-26T01:46:36 | null | UTF-8 | Python | false | false | 122 | py | IM_HEIGHT = 100
IM_WIDTH = 100
NUM_POINTS = 100
NUM_ITERS = 500
BINARY_THRESH = 127
COLOR_IM_FILEROOT = 'data/test_color'
| [
"jmahler@berkeley.edu"
] | jmahler@berkeley.edu |
63d15beb9622fc5048da342646160ea270b446d8 | c4d379713ad8133c61d427c07b29f4121dcd86c5 | /workspace/root/topology/tani_utils.py | 1d93da9c1c42e8e0c77a07b025ee1c6ebc8e4cf3 | [] | no_license | agiulianomirabella/melanoma-detector | 80fe02d4ca18034ee0119e2c797df886a00ceb9e | 33ab38e3e559505f0225c86ba455a02636d3b839 | refs/heads/master | 2022-12-25T04:54:27.443543 | 2020-09-23T16:23:23 | 2020-09-23T16:23:23 | 292,315,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,546 | py | from root.utils import * # pylint: disable= unused-wildcard-import
from root.utils import makeUnique, getGrayValueCoordinates
import numpy as np
from copy import copy
from scipy.ndimage.morphology import generate_binary_structure
'''
A helpful auxiliary permutation list for subcells computation
'''
#Maximum value for spaceDimension:
maximumSpaceDimension = 2
permutations1 = [[], [[-0.5], [0.5]], [[-0.5, -0.5], [-0.5, 0], [-0.5, 0.5], [0, -0.5], [0, 0.5], [0.5, -0.5], [0.5, 0], [0.5, 0.5]]]
'''
This module will compute cell's features, such as dimension, or subcells.
- A cell is a numpy array
'''
def dim(cell):
return len([i for i in range(len(cell)) if cell[i]%1 == 0])
def getRationalIndices(cell):
return [i for i in range(len(cell)) if cell[i]%1 != 0]
def getSubCells(cell): #return a list of subCells
out = []
x = permutations1[len(cell)]
for l in x:
if all(l[i]==0 for i in getRationalIndices(cell)):
a = cell + np.array(l)
out.append(a)
return makeUnique(out)
'''
This module will define functions to extract CCs eulerchar feature.
- A CC is a list of arrays (coordinates of cells belonging to the CC)
'''
def getAllCells(cc):
out = copy(cc)
for cell in cc:
out = out + getSubCells(cell)
return makeUnique(out)
def euler(cc):
if len(cc)==0:
return 0
out = 0
allCells = getAllCells(cc)
for d in range(len(cc[0])+1):
out = out + ((-1)**d) * len([c for c in allCells if dim(c) == d])
return out
| [
"giulianomirabella@gmail.com"
] | giulianomirabella@gmail.com |
ef058a2f7e1c06430d246fe4dc5decaa6c3441d5 | 19e9939c91674b51c7574c7103d9abb12b3a56bb | /examples/BingAdsPythonConsoleExamples/BingAdsPythonConsoleExamples/v11/bulk_keywords_ads.py | 940766c36b292a5ada4817e24525b47af81bf4af | [
"MIT"
] | permissive | dariusmb/BingAds-Python-SDK | 0257225d304948aa41caff42d7dd7972e1bd7457 | bd5814ed66cf5ff809bea8f3231460cc3724c942 | refs/heads/master | 2020-04-01T14:22:15.911884 | 2018-10-16T15:45:26 | 2018-10-16T15:45:26 | 153,291,404 | 0 | 0 | null | 2018-10-16T13:36:08 | 2018-10-16T13:36:07 | null | UTF-8 | Python | false | false | 17,874 | py | from auth_helper import *
from bulk_service_manager_helper import *
from output_helper import *
# You must provide credentials in auth_helper.py.
def main(authorization_data):
errors=[]
try:
# Let's create a new budget and share it with a new campaign.
upload_entities=[]
bulk_budget=BulkBudget()
bulk_budget.client_id='YourClientIdGoesHere'
budget=set_elements_to_none(campaign_service.factory.create('Budget'))
budget.Amount=50
budget.BudgetType='DailyBudgetStandard'
budget.Id=BUDGET_ID_KEY
budget.Name="My Shared Budget " + strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
bulk_budget.budget=budget
upload_entities.append(bulk_budget)
bulk_campaign=BulkCampaign()
# The client_id may be used to associate records in the bulk upload file with records in the results file. The value of this field
# is not used or stored by the server; it is simply copied from the uploaded record to the corresponding result record.
# Note: This bulk file Client Id is not related to an application Client Id for OAuth.
bulk_campaign.client_id='YourClientIdGoesHere'
campaign=set_elements_to_none(campaign_service.factory.create('Campaign'))
# When using the Campaign Management service, the Id cannot be set. In the context of a BulkCampaign, the Id is optional
# and may be used as a negative reference key during bulk upload. For example the same negative reference key for the campaign Id
# will be used when adding new ad groups to this new campaign, or when associating ad extensions with the campaign.
campaign.Id=CAMPAIGN_ID_KEY
campaign.Name="Summer Shoes " + strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
campaign.Description="Summer shoes line."
# You must choose to set either the shared budget ID or daily amount.
# You can set one or the other, but you may not set both.
campaign.BudgetId=BUDGET_ID_KEY
campaign.DailyBudget=None
campaign.BudgetType=None
campaign.TimeZone='PacificTimeUSCanadaTijuana'
campaign.Status='Paused'
# You can set your campaign bid strategy to Enhanced CPC (EnhancedCpcBiddingScheme)
# and then, at any time, set an individual ad group or keyword bid strategy to
# Manual CPC (ManualCpcBiddingScheme).
# For campaigns you can use either of the EnhancedCpcBiddingScheme or ManualCpcBiddingScheme objects.
# If you do not set this element, then ManualCpcBiddingScheme is used by default.
campaign_bidding_scheme=set_elements_to_none(campaign_service.factory.create('EnhancedCpcBiddingScheme'))
campaign.BiddingScheme=campaign_bidding_scheme
# Used with FinalUrls shown in the expanded text ads that we will add below.
campaign.TrackingUrlTemplate="http://tracker.example.com/?season={_season}&promocode={_promocode}&u={lpurl}"
bulk_campaign.campaign=campaign
bulk_ad_group=BulkAdGroup()
bulk_ad_group.campaign_id=CAMPAIGN_ID_KEY
ad_group=set_elements_to_none(campaign_service.factory.create('AdGroup'))
ad_group.Id=AD_GROUP_ID_KEY
ad_group.Name="Women's Red Shoes"
ad_group.AdDistribution='Search'
end_date=campaign_service.factory.create('Date')
end_date.Day=31
end_date.Month=12
end_date.Year=strftime("%Y", gmtime())
ad_group.EndDate=end_date
search_bid=campaign_service.factory.create('Bid')
search_bid.Amount=0.09
ad_group.SearchBid=search_bid
ad_group.Language='English'
# For ad groups you can use either of the InheritFromParentBiddingScheme or ManualCpcBiddingScheme objects.
# If you do not set this element, then InheritFromParentBiddingScheme is used by default.
ad_group_bidding_scheme=set_elements_to_none(campaign_service.factory.create('ManualCpcBiddingScheme'))
ad_group.BiddingScheme=ad_group_bidding_scheme
# You could use a tracking template which would override the campaign level
# tracking template. Tracking templates defined for lower level entities
# override those set for higher level entities.
# In this example we are using the campaign level tracking template.
ad_group.TrackingUrlTemplate=None
bulk_ad_group.ad_group=ad_group
# In this example only the first 3 ads should succeed.
# The Title of the fourth ad is empty and not valid,
# and the fifth ad is a duplicate of the second ad
bulk_expanded_text_ads=[]
for index in range(5):
bulk_expanded_text_ad=BulkExpandedTextAd()
bulk_expanded_text_ad.ad_group_id=AD_GROUP_ID_KEY
expanded_text_ad=set_elements_to_none(campaign_service.factory.create('ExpandedTextAd'))
expanded_text_ad.TitlePart1='Contoso'
expanded_text_ad.TitlePart2='Fast & Easy Setup'
expanded_text_ad.Text='Huge Savings on red shoes.'
expanded_text_ad.Path1='seattle'
expanded_text_ad.Path2='shoe sale'
expanded_text_ad.Type='ExpandedText'
expanded_text_ad.Status=None
expanded_text_ad.EditorialStatus=None
# With FinalUrls you can separate the tracking template, custom parameters, and
# landing page URLs.
final_urls=campaign_service.factory.create('ns4:ArrayOfstring')
final_urls.string.append('http://www.contoso.com/womenshoesale')
expanded_text_ad.FinalUrls=final_urls
# Final Mobile URLs can also be used if you want to direct the user to a different page
# for mobile devices.
final_mobile_urls=campaign_service.factory.create('ns4:ArrayOfstring')
final_mobile_urls.string.append('http://mobile.contoso.com/womenshoesale')
expanded_text_ad.FinalMobileUrls=final_mobile_urls
# You could use a tracking template which would override the campaign level
# tracking template. Tracking templates defined for lower level entities
# override those set for higher level entities.
# In this example we are using the campaign level tracking template.
expanded_text_ad.TrackingUrlTemplate=None
# Set custom parameters that are specific to this ad,
# and can be used by the ad, ad group, campaign, or account level tracking template.
# In this example we are using the campaign level tracking template.
url_custom_parameters=campaign_service.factory.create('ns0:CustomParameters')
parameters=campaign_service.factory.create('ns0:ArrayOfCustomParameter')
custom_parameter1=campaign_service.factory.create('ns0:CustomParameter')
custom_parameter1.Key='promoCode'
custom_parameter1.Value='PROMO' + str(index)
parameters.CustomParameter.append(custom_parameter1)
custom_parameter2=campaign_service.factory.create('ns0:CustomParameter')
custom_parameter2.Key='season'
custom_parameter2.Value='summer'
parameters.CustomParameter.append(custom_parameter2)
url_custom_parameters.Parameters=parameters
expanded_text_ad.UrlCustomParameters=url_custom_parameters
bulk_expanded_text_ad.ad=expanded_text_ad
bulk_expanded_text_ads.append(bulk_expanded_text_ad)
bulk_expanded_text_ads[1].ad.Title="Quick & Easy Setup"
bulk_expanded_text_ads[2].ad.Title="Fast & Simple Setup"
bulk_expanded_text_ads[3].ad.Title=''
bulk_expanded_text_ads[4].ad.Title="Quick & Easy Setup"
# In this example only the second keyword should succeed. The Text of the first keyword exceeds the limit,
# and the third keyword is a duplicate of the second keyword.
bulk_keywords=[]
for index in range(3):
bulk_keyword=BulkKeyword()
bulk_keyword.ad_group_id=AD_GROUP_ID_KEY
keyword=set_elements_to_none(campaign_service.factory.create('Keyword'))
keyword.Bid=set_elements_to_none(campaign_service.factory.create('Bid'))
keyword.Bid.Amount=0.47
keyword.Param2='10% Off'
keyword.MatchType='Broad'
keyword.Text='Brand-A Shoes'
# For keywords you can use either of the InheritFromParentBiddingScheme or ManualCpcBiddingScheme objects.
# If you do not set this element, then InheritFromParentBiddingScheme is used by default.
keyword_bidding_scheme=set_elements_to_none(campaign_service.factory.create('InheritFromParentBiddingScheme'))
keyword.BiddingScheme=keyword_bidding_scheme
bulk_keyword.keyword=keyword
bulk_keywords.append(bulk_keyword)
bulk_keywords[0].keyword.Text=(
"Brand-A Shoes Brand-A Shoes Brand-A Shoes Brand-A Shoes Brand-A Shoes "
"Brand-A Shoes Brand-A Shoes Brand-A Shoes Brand-A Shoes Brand-A Shoes "
"Brand-A Shoes Brand-A Shoes Brand-A Shoes Brand-A Shoes Brand-A Shoes"
)
# Write the entities created above, to temporary memory.
# Dependent entities such as BulkKeyword must be written after any dependencies,
# for example the BulkCampaign and BulkAdGroup.
upload_entities.append(bulk_campaign)
upload_entities.append(bulk_ad_group)
for bulk_expanded_text_ad in bulk_expanded_text_ads:
upload_entities.append(bulk_expanded_text_ad)
for bulk_keyword in bulk_keywords:
upload_entities.append(bulk_keyword)
output_status_message("\nAdding campaign, budget, ad group, keywords, and ads . . .")
download_entities=write_entities_and_upload_file(bulk_service_manager, upload_entities)
budget_results=[]
campaign_results=[]
adgroup_results=[]
keyword_results=[]
for entity in download_entities:
if isinstance(entity, BulkBudget):
budget_results.append(entity)
output_bulk_budgets([entity])
if isinstance(entity, BulkCampaign):
campaign_results.append(entity)
output_bulk_campaigns([entity])
if isinstance(entity, BulkAdGroup):
adgroup_results.append(entity)
output_bulk_ad_groups([entity])
if isinstance(entity, BulkExpandedTextAd):
output_bulk_expanded_text_ads([entity])
if isinstance(entity, BulkKeyword):
keyword_results.append(entity)
output_bulk_keywords([entity])
# Here is a simple example that updates the keyword bid to use the ad group bid.
update_bulk_keyword=BulkKeyword()
update_bulk_keyword.ad_group_id=adgroup_results[0].ad_group.Id
update_keyword=campaign_service.factory.create('Keyword')
update_keyword.Id=next((keyword_result.keyword.Id for keyword_result in keyword_results if
keyword_result.keyword.Id is not None and keyword_result.ad_group_id==update_bulk_keyword.ad_group_id), None)
# You can set the Bid.Amount property to change the keyword level bid.
update_keyword.Bid=campaign_service.factory.create('Bid')
update_keyword.Bid.Amount=0.46
# The keyword bid will not be updated if the Bid property is not specified or if you create
# an empty Bid.
#update_keyword.Bid=campaign_service.factory.create('Bid')
# The keyword level bid will be deleted ("delete_value" will be written in the bulk upload file), and
# the keyword will effectively inherit the ad group level bid if you explicitly set the Bid property to None.
#update_keyword.Bid=None
# It is important to note that the above behavior differs from the Bid settings that
# are used to update keywords with the Campaign Management servivce.
# When using the Campaign Management service with the Bing Ads Python SDK, if the
# Bid property is not specified or is set explicitly to None, your keyword bid will not be updated.
# For examples of how to use the Campaign Management service for keyword updates, please see KeywordsAds.py.
update_bulk_keyword.keyword=update_keyword
upload_entities=[]
upload_entities.append(update_bulk_keyword)
output_status_message("\nUpdating the keyword bid to use the ad group bid . . .")
download_entities=write_entities_and_upload_file(bulk_service_manager, upload_entities)
for entity in download_entities:
if isinstance(entity, BulkKeyword):
output_bulk_keywords([entity])
# Here is a simple example that updates the campaign budget.
download_parameters=DownloadParameters(
download_entities=[
'Budgets',
'Campaigns'
],
result_file_directory=FILE_DIRECTORY,
result_file_name=DOWNLOAD_FILE_NAME,
overwrite_result_file=True,
last_sync_time_in_utc=None
)
upload_entities=[]
get_budget_results=[]
get_campaign_results=[]
# Download all campaigns and shared budgets in the account.
download_entities=download_file(bulk_service_manager, download_parameters)
output_status_message("Downloaded all campaigns and shared budgets in the account.\n")
for entity in download_entities:
if isinstance(entity, BulkBudget):
get_budget_results.append(entity)
output_bulk_budgets([entity])
if isinstance(entity, BulkCampaign):
get_campaign_results.append(entity)
output_bulk_campaigns([entity])
# If the campaign has a shared budget you cannot update the Campaign budget amount,
# and you must instead update the amount in the Budget record. If you try to update
# the budget amount of a Campaign that has a shared budget, the service will return
# the CampaignServiceCannotUpdateSharedBudget error code.
for entity in get_budget_results:
if entity.budget.Id > 0:
# Increase budget by 20 %
entity.budget.Amount *= Decimal(1.2)
upload_entities.append(entity)
for entity in get_campaign_results:
if entity.campaign.BudgetId == None or entity.campaign.BudgetId <= 0:
# Increase budget by 20 %
entity.campaign.DailyBudget *= 1.2
upload_entities.append(entity)
if len(upload_entities) > 0:
output_status_message("Changed local campaign budget amounts. Starting upload.\n")
download_entities=write_entities_and_upload_file(bulk_service_manager, upload_entities)
for entity in download_entities:
if isinstance(entity, BulkBudget):
get_budget_results.append(entity)
output_bulk_budgets([entity])
if isinstance(entity, BulkCampaign):
get_campaign_results.append(entity)
output_bulk_campaigns([entity])
else:
output_status_message("No campaigns or shared budgets in account.\n")
# Delete the campaign, ad group, keywords, and ads that were previously added.
# You should remove this region if you want to view the added entities in the
# Bing Ads web application or another tool.
upload_entities=[]
for budget_result in budget_results:
budget_result.status='Deleted'
upload_entities.append(budget_result)
for campaign_result in campaign_results:
campaign_result.campaign.Status='Deleted'
upload_entities.append(campaign_result)
output_status_message("\nDeleting campaign, budget, ad group, ads, and keywords . . .")
download_entities=write_entities_and_upload_file(bulk_service_manager, upload_entities)
for entity in download_entities:
if isinstance(entity, BulkBudget):
output_bulk_budgets([entity])
if isinstance(entity, BulkCampaign):
output_bulk_campaigns([entity])
output_status_message("Program execution completed")
except WebFault as ex:
output_webfault_errors(ex)
except Exception as ex:
output_status_message(ex)
# Main execution
if __name__ == '__main__':
print("Python loads the web service proxies at runtime, so you will observe " \
"a performance delay between program launch and main execution...\n")
authorization_data=AuthorizationData(
account_id=None,
customer_id=None,
developer_token=DEVELOPER_TOKEN,
authentication=None,
)
bulk_service_manager=BulkServiceManager(
authorization_data=authorization_data,
poll_interval_in_milliseconds=5000,
environment=ENVIRONMENT,
)
campaign_service=ServiceClient(
service='CampaignManagementService',
authorization_data=authorization_data,
environment=ENVIRONMENT,
version=11,
)
# You should authenticate for Bing Ads production services with a Microsoft Account,
# instead of providing the Bing Ads username and password set.
authenticate(authorization_data)
main(authorization_data)
| [
"eur@microsoft.com"
] | eur@microsoft.com |
070fec483cc8b3618847116669bce6bd58c2f158 | b290100dc3f40cc7867e21080c92135a75bca06b | /labwork/labwork/urls.py | 6b1f3561f1545c492a3cecbffcdb95dbef43ecd2 | [] | no_license | Kunduzha/labwork | a13d69eaff5ca3cad8a17bb038f85683648cc82f | 366af69bbbffcdf0422fe0ea83021e52ca82a61e | refs/heads/master | 2023-04-05T01:43:17.305601 | 2021-03-31T10:55:05 | 2021-03-31T10:55:05 | 353,324,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | """labwork URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| [
"kuzanai@mail.ru"
] | kuzanai@mail.ru |
52e97583338ee135280976c809802e41e82f2615 | 4321285ff5eed67fbca253ba7647235032700dfe | /RequestService/liquid_requests.py | fd208f5fa6a8659798a0d78942ac7498d37a46d0 | [] | no_license | TheBigGinge/Analytics | e8fd5ce3f04ce8ce32458500a264c10682dfbdc5 | 27c82bfdd4f06b9e80ee8f7ac7226370c62c4eb2 | refs/heads/master | 2020-12-18T22:19:56.904634 | 2016-08-08T19:53:55 | 2016-08-08T19:53:55 | 34,743,144 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,305 | py | import requests
import json
import os
import getpass
import base64
class LiquidPlannerRequest:
base_uri = 'https://app.liquidplanner.com/api'
workspace_id = None
project_id = None
email = 'ryanm@payscale.com'
password = 'Huge-Large1978'
session = None
def __init__(self, email=None, password=None):
if email is not None:
self.email = email
self.password = password
def get_workspace_id(self):
return self.workspace_id
def set_workspace_id(self, workspace_id):
self.workspace_id = workspace_id
def set_project_id(self, project_id):
self.project_id = project_id
def get(self, uri, options={}):
return requests.get(self.base_uri + uri,
data=options,
headers={'Content-Type': 'application/json'},
auth=(self.email, self.password))
def post(self, uri, options={}):
return requests.post(self.base_uri + uri,
data=options,
headers={'Content-Type': 'application/json'},
auth=(self.email, self.password))
def put(self, uri, options={}):
return requests.put(self.base_uri + uri,
data=options,
headers={'Content-Type': 'application/json'},
auth=(self.email, self.password))
def account(self):
"""
Returns a dictionary with information about the current user.
"""
return json.loads(self.get('/account').content)
def workspaces(self):
"""
Returns a list of dictionaries, each a workspace in which the user is a member
Workspaces are the root directory
"""
return json.loads(self.get('/workspaces').content)
def packages(self):
"""
Returns a dictionary of all packages
A workspace is made up of packages
"""
return json.loads(self.get('/workspaces/' + str(self.workspace_id) +
'/packages').content)
def projects(self):
"""
Returns a list of dictionaries, each a project in a workspace
A package can be made up of projects and tasks
"""
return json.loads(self.get('/workspaces/' + str(self.workspace_id) +
'/projects').content)
def pull_all_tasks(self):
"""
Returns a list of dictionaries, each a task in a workspace
Tasks can live in projects or packages
"""
return json.loads(self.get('/workspaces/' + str(self.workspace_id) +
'/tasks').content)
def create_task(self, data):
"""
Creates a task by POSTing data
:params data:
Commands for the api
"""
return json.loads(self.post('/workspaces/' + str(self.workspace_id) +
'/tasks', json.dumps({'task': data})).content)
def update_task(self, data):
"""
Updates a task by PUTing data
:params data:
Commands for the api
"""
return json.loads(self.put('/workspaces/' + str(self.workspace_id) +
'/tasks/' + str(data['id']), json.dumps({'task': data})).content)
def write_task_comment(self, task_id, comment):
"""
Writes a comment to a task
"""
return json.loads(self.post('/workspaces/' + str(self.workspace_id) +
'/tasks/' + str(task_id) + '/comments', json.dumps({'comment': comment})).content)
def check_for_task_changes(self):
return json.loads(self.get('/workspaces/' + str(self.workspace_id)
+ '/changes').content)
def pull_task_by_id(self, id_number):
"""
Returns a list of dictionaries, each a task in a workspace
Tasks can live in projects or packages
"""
return json.loads(self.get('/workspaces/' + str(self.workspace_id) +
'/tasks/' + str(id_number)).content)
def pull_task_note(self, task_id):
return self.get('/workspaces/' + str(self.workspace_id) +
'/tasks/' + str(task_id) + '/note').content | [
"rphm78@gmail.com"
] | rphm78@gmail.com |
8f9f79f922e726902af3535889af3665812574bd | 63cf7b2d363d3d4a371f7214f20b5503b401b5ee | /ene-jun-2020/joe tareas y parciales/ordinario/ordinario_busqueda.py | 263816de6626eea036c7531f294391a226f47535 | [] | no_license | joeguerrero735/AlgoritmosSistemas | fdfb03b1072835c5b40e4c5f1e954d0f5c740d72 | ed3132d7738ee31af40c99b2811488ea84c04d6a | refs/heads/master | 2022-09-06T22:45:01.039420 | 2020-05-30T05:03:18 | 2020-05-30T05:03:18 | 263,782,689 | 0 | 0 | null | 2020-05-14T01:17:54 | 2020-05-14T01:17:53 | null | UTF-8 | Python | false | false | 1,481 | py | def binarySearch(arreglo, inicial, final, x):
medio = (inicial + final)//2
if inicial > final:
return -1
if arreglo[medio][0] == x :
if x < arreglo[medio][0]:
return binarySearch(arreglo, inicial, medio-1, x)
else:
return binarySearch(arreglo, medio+1, final, x)
entrada_datos = input().split()
# número de cosa que va a comprar
tamaño=int(entrada_datos[0])
#artículo que busca
palabra=entrada_datos[1]
## arreglo
arreglo = []
for i in range(tamaño):
b = input().split()
arreglo.append(b)
def quickSort(arr,start,end):
pivot = start
point = end
while pivot != point:
if len(arr[point]) < len(arr[pivot]) and point > pivot:
# Si la regla no se cumple, cambio.
arr[point], arr[pivot] = arr[pivot], arr[point]
pivot, point = point, pivot
elif len(arr[point]) > len(arr[pivot]) and point < pivot:
# Si la regla no se cumple, cambio.
arr[point], arr[pivot] = arr[pivot], arr[point]
pivot, point = point, pivot
if pivot > point:
point += 1
else:
point -= 1
# Izquierda.
if pivot != start:
quickSort(arr, start, pivot-1)
# Derecha.
if pivot != end:
quickSort(arr, pivot+1, end)
quickSort(arreglo,0,len(arreglo)-1)
#print(arreglo)
print(binarySearch(arreglo, 0, len(arreglo), palabra)+1) | [
"yoyom_wwe@hotmail.com"
] | yoyom_wwe@hotmail.com |
62e62e7af49f8e9e5000474f3e2369b29eac4b01 | 76fd9a2d3b732a73b688a0c227bfe07219ca1ace | /wp3db/__init__.py | 951dc19a9d4263b1eba80b26bb6f304589845284 | [] | no_license | wp3-wearable/dbmodels | 61d1add4b09eae1f7be9d36a8db73df8bf9b240d | f881f98e6091649d8b8a79b5b9bc895792f23d54 | refs/heads/master | 2020-04-13T14:25:25.887785 | 2018-12-27T11:14:51 | 2018-12-27T11:14:51 | 163,262,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24 | py | from .db import Session
| [
"dev.trk.9001@gmail.com"
] | dev.trk.9001@gmail.com |
2e89c56bda2683eb9350f7721d4902cb212adae9 | 0bd2de354c696939fedcecb72c924eaa95feb5b2 | /job/views.py | 41186e674e7661a4fa1918f1f8466c05446a4a6b | [] | no_license | RishabhVerma098/personel-portfolio | 94a0643e522d8c43082712b8c0ec5f9094abebc8 | c11aca5b878a5d7490c7159f3b447a45013a5d04 | refs/heads/master | 2021-07-17T09:45:38.546492 | 2019-02-09T14:28:20 | 2019-02-09T14:28:20 | 169,574,563 | 0 | 0 | null | 2020-06-07T14:32:31 | 2019-02-07T13:18:29 | HTML | UTF-8 | Python | false | false | 190 | py | from django.shortcuts import render
# Create your views here.
from .models import jobs
def home(request):
job = jobs.objects
return render(request, 'job/home.html', {'jobs': job})
| [
"vermarishabh0987@gmail.com"
] | vermarishabh0987@gmail.com |
9b1e0c1c235c51767a012be75e7b2a6729897c0d | e9da16d2d1468a47de78c66e4397216038e7ba88 | /zscdumin/谷歌翻译/python代码/getICAPSUrlList.py | e8dee91a1bf115825a8623c2154bb110ff77687d | [] | no_license | ZSCDumin/Spider | 9b28beb5d5166365c3b954c32d28a409a79fb085 | ba63f7122ce8e530cdfaaee56670dd5624d4c864 | refs/heads/master | 2021-06-27T02:04:29.622633 | 2020-09-13T01:20:18 | 2020-09-13T01:20:18 | 137,719,217 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,269 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-01-28 20:30:54
# @Author : 杜敏 (2712220318@qq.com)
# @Link : https://github.com/ZSCDumin
# @Version : $Id$
import requests
from bs4 import BeautifulSoup
def getHTMLText(url):
try:
r = requests.get(url, timeout=30)
r.raise_for_status()
r.encoding = r.apparent_encoding
return r.text
except:
return "爬取失败"
def getUrlList(urlList, downLoadList, url):
html = getHTMLText(url)
soup = BeautifulSoup(html, 'html.parser')
urls = soup.find_all('a')
for url in urls:
try:
href = str(url.attrs['href']) # 获取照片路径
if "view" in href and "paper" in href and len(href) > 66:
paperInfoUrl = href.replace("view", "viewPaper")
print(paperInfoUrl)
paperDownloadUrl = href.replace("view", "download")
print(paperDownloadUrl)
urlList.append(paperInfoUrl)
downLoadList.append(paperDownloadUrl)
except:
continue
def saveUrlAsFile(urlList, downLoadList, fPath, num):
path = fPath + "\\" + "ICAPS_20" + num + "_PaperInfoUrl.txt"
with open(path, 'w') as f: # 写入文件
for url in urlList:
print(url)
f.write(url + "\n")
f.close()
print("Url列表文件保存成功")
path = fPath + "\\" + "ICAPS_20" + num + "_DownloadUrl.txt"
with open(path, 'w') as f: # 写入文件
for url in downLoadList:
print(url)
f.write(url + "\n")
f.close()
print("Download列表文件保存成功")
def main():
urlList = [] # URL列表
downLoadList = [] # 下载列表
fPath = "F:\\接单项目\\谷歌翻译\\论文数据\\ICAPS" # 文件存储路径
for i in range(9, 19):
if i < 10:
num = "0" + str(i)
else:
num = str(i)
url = "https://www.aaai.org/ocs/index.php/ICAPS/ICAPS" + num + "/schedConf/presentations" # 爬取页面URL
print(url)
getUrlList(urlList, downLoadList, url)
saveUrlAsFile(urlList, downLoadList, fPath, num)
urlList.clear()
downLoadList.clear()
main()
| [
"2712220318@qq.com"
] | 2712220318@qq.com |
dfd3789007df10b47fed17eb4ecef1dbbe054537 | 2edbdd6763f86aca4f6ee67ced390fb477ed0e44 | /udf/extract_type_modify_law.py | b2453cfaf4db58be6073271509e34dc2f95f64b6 | [] | no_license | nhatan172/deepdive | f498901c3faa474d3ab5166ef7a694e1626e2f01 | 492afee641436e4d5a068a7cec1ff3969d964518 | refs/heads/master | 2021-04-06T02:05:17.095127 | 2018-04-12T04:07:45 | 2018-04-12T04:07:45 | 124,988,988 | 0 | 0 | null | 2018-03-21T23:19:06 | 2018-03-13T03:49:20 | Python | UTF-8 | Python | false | false | 12,317 | py | #!/usr/bin/env python
# -*- coding:utf8 -*-
from deepdive import *
import re
import handle_string
import divlaw
def lenIterator(list):
    """Return the number of items produced by *list* (any iterable).

    Note: the parameter name shadows the ``list`` builtin; it is kept for
    backward compatibility with existing callers.  When given an iterator
    (e.g. the result of ``re.finditer``) this consumes it.
    """
    # sum over a generator counts the items in one pass, in C speed,
    # instead of the manual accumulator loop.
    return sum(1 for _ in list)
def getTitle(string):
    """Return the title portion of a law fragment.

    The title is everything before the first colon that introduces a quoted
    passage (``:`` optionally followed by whitespace/markup characters and
    then ``"`` or ``“``); if no such marker exists the whole string is
    returned.
    """
    # A single re.search replaces the original double finditer scan (the
    # first iterator was consumed by lenIterator, forcing a re-scan): only
    # the first match position is ever used.
    match = re.search(r"\:(\s|\n|\*|\_|\#)*(\“|\")", string, re.DOTALL)
    if match is not None:
        return string[:match.start()]
    return string
def get_numerical_symbol(title):
    """Extract a legal document number (e.g. ``12/2017/QH14``) from a title.

    Quoted passages are stripped first so numbers cited inside quotations
    are ignored.  If the title contains a Vietnamese "của ... (đã) được"
    clause (naming the document being amended), the number is taken from
    that clause; otherwise the first document number found anywhere in the
    title is returned.  Returns None when no number can be found.
    """
    # Drop quoted spans ("..." or “...”) so their contents are not matched.
    # BUGFIX: the original passed re.M|re.DOTALL as re.sub's positional
    # *count* argument (limiting it to 24 substitutions and applying no
    # flags at all); it is now passed as *flags* as intended.
    title = re.sub(r'(\“(.(?!\“|\”))+.{2})|(\"(.(?!\"))+.{2})', "", title,
                   flags=re.M | re.DOTALL)
    # Document number: digits, optional /digits groups, then one or more
    # /ABBR or -ABBR issuer codes; the search variant requires a trailing
    # delimiter so partial numbers at end-of-token are not picked up.
    number_pattern = r'[0-9]+(/[0-9]+)*((/|-)[A-ZĐƯ]+[0-9]*)+'
    get_title1 = re.search(r'(của\s.*)\s(đã được|được)', title)
    get_title = re.search(number_pattern + r'(\s|\_|\#|\*|\.|\\)', title, re.M | re.I)
    if get_title1 is not None:
        # Prefer the number inside the "của ... được" clause.
        number = re.search(number_pattern + r'(\s|\_|\#|\*|\.|\\)', get_title1.group())
        if number is not None:
            return re.search(number_pattern, number.group(), re.U | re.I).group()
        # Falls through to an implicit None when the clause holds no number,
        # matching the original behaviour.
    elif get_title is not None:
        return re.search(number_pattern, get_title.group(), re.U | re.I).group()
    else:
        return None
@tsv_extractor
@returns(lambda
        law_id ="text",
        type = "int",
        doc_content_update = "text",
        symbol = "text",
        position = "text",
        modified_law_date_release = "text"
    :[])
def extract(
        law_id = "text",
        totalLaw = "int",
        law_content = "text",
        law_len = "int",
        totalItem = "int",
        item_content = "text",
        item_len = "int",
        totalpoint = "int",
        point_content = "text",
        part_index ="int",
        chap_index ="int",
        sec_index ="int",
        law_index ="int",
        item_index ="int",
        point_index ="int",
        numerical_symbol = "text",
        date_released ="text"
    ):
    """DeepDive UDF: classify one clause of an amending legal document.

    Examines the most specific text available (point, else item, else
    article/"law") and, when the text is an amendment clause, yields one
    row describing the amendment.  Type codes (from the Vietnamese
    patterns matched below):

        1 -- "sửa đổi, bổ sung" (amend / supplement)
        2 -- "bãi bỏ" (repeal), only when found outside a quoted passage
        3 -- "bổ sung cụm từ / từ" (insert a phrase)
        4 -- "từ ... bằng ... từ" (replace one phrase with another)
        5 -- "tên của ... thành" (rename ... to)
        6 -- "sửa đổi tên" (amend a name/title)
        7 -- "bỏ cụm từ / từ" (delete a phrase)

    When the clause title names the amended document, its number replaces
    the inherited ``numerical_symbol``; otherwise the caller-supplied
    symbol and release date pass through unchanged.
    """
    doc_content_update = None
    # Truncate the payload columns to their declared lengths.
    if law_content is not None:
        # law_content = handle_string.to_unicode(law_content)
        law_content = law_content[:law_len]
        # pass
        # law_content = law_content.encode('utf-8')
    if (item_content is not None) :
        # # item_content = handle_string.to_unicode(item_content)
        # # if item_len != len(item_content):
        item_content = item_content[:item_len]
        # pass
        # item_content = item_content.encode('utf-8')
    number = None
    type = 0   # amendment type code (0 = not an amendment clause); NOTE: shadows the builtin
    point = 0  # flag: set to 1 once a clause has been classified
    # p  : any "sửa đổi / bổ sung" (amend/supplement) mention.
    # p1 : the same mention preceded by "đã (được)" -- a reference to an
    #      ALREADY amended document, which must not count as a new amendment.
    p = re.compile(r'((((S|s)ửa đổi)(\s|\,)*((b|B)ổ sung)*)|((b|B)ổ sung))')
    p1= re.compile(r'(đã\s|đã được\s)((((S|s)ửa đổi)(\s|\,)*((b|B)ổ sung)*)|((b|B)ổ sung))')
    position = "0_0_0_0_0_0"
    # --- Point level: classify the point text when a point exists. ---
    if(totalpoint > 0):
        number = get_numerical_symbol(getTitle(point_content))
        if(number is not None):
            # The clause names the amended document itself: override the
            # inherited symbol and drop the inherited release date.
            numerical_symbol = number
            date_released = None
        # 1-based part_chapter_section_article_item_point coordinates.
        position = "{}_{}_{}_{}_{}_{}".format(part_index+1,chap_index+1,sec_index+1,law_index+1,item_index+1,point_index+1)
        type_modify = re.search(r'(((b|B)ổ sung cụm từ)|((b|B)ổ sung từ))',point_content)
        if(type_modify is not None):
            type = 3
            doc_content_update = point_content
            point = 1
        else :
            type_change_name = re.search(r'(S|s)ửa đổi tên',point_content)
            if(type_change_name is not None):
                type = 6
                doc_content_update = point_content
                point = 1
            else:
                type_delete = re.search(r'(b|B)ãi bỏ',point_content)
                inQuote = False
                if type_delete is not None :
                    # "bãi bỏ" inside a quotation is quoted text, not an action.
                    inQuote = divlaw.itemInQuote(point_content,type_delete.start())
                if(type_delete is not None) and not inQuote:
                    type = 2
                    doc_content_update = point_content
                    point = 1
                else:
                    type_delete_text = re.search(r'(((b|B)ỏ cụm từ)|((b|B)ỏ từ))',point_content)
                    if(type_delete_text is not None):
                        type = 7
                        doc_content_update = point_content
                        point = 1
                    else:
                        # Count amend/supplement mentions vs. those preceded
                        # by "đã (được)"; a surplus means an active amendment.
                        type_add_text = p.finditer(point_content)
                        type_add_text1 = p1.finditer(point_content)
                        len1 = lenIterator(type_add_text)
                        len2 = lenIterator(type_add_text1)
                        if( (len1 != len2) and (len1 > 0)):
                            type = 1
                            doc_content_update = point_content
                            point = 1
                        else :
                            # type_change_text = re.search(r'(t|T)hay\s.*cụm từ',point_content)
                            type_change_text = re.search(r'((t|T)hay\s)*(cụm\s)*từ\s.*(được\s)*(thay\s)*bằng\s(cụm\s)*từ',point_content)
                            if(type_change_text is not None):
                                type = 4
                                doc_content_update = point_content
                                point = 1
                            else :
                                type_name_to_name = re.search(r'((t|T)ên của\s).+(((S|s)ửa đổi\s)*(\,\s)*((b|B)ổ sung\s)*)(thành)',point_content)
                                if(type_name_to_name is not None):
                                    type = 5
                                    doc_content_update = point_content
                                    point = 1
                                else :
                                    point = 0
    # --- Item level: only when no point was classified above. ---
    if(totalItem > 0 and point == 0):
        number = get_numerical_symbol(getTitle(item_content))
        if(number is not None):
            numerical_symbol = number
            date_released = None
        # Point coordinate is 0 at item level.
        position = "{}_{}_{}_{}_{}_{}".format(part_index+1,chap_index+1,sec_index+1,law_index+1,item_index+1,0)
        type_modify = re.search(r'(b|B)ổ sung cụm từ',item_content)
        if(type_modify is not None):
            type = 3
            doc_content_update = item_content
            point = 1
        else:
            type_change_name = re.search(r'(S|s)ửa đổi tên',item_content)
            if(type_change_name is not None):
                type = 6
                doc_content_update = item_content
                point = 1
            else:
                type_delete = re.search(r'(b|B)ãi bỏ',item_content)
                inQuote = False
                if type_delete is not None :
                    inQuote = divlaw.itemInQuote(item_content,type_delete.start())
                if(type_delete is not None) and not inQuote:
                    type = 2
                    doc_content_update = item_content
                    point = 1
                else:
                    type_delete_text = re.search(r'(((b|B)ỏ cụm từ)|((b|B)ỏ từ))',item_content)
                    if(type_delete_text is not None):
                        type = 7
                        doc_content_update = item_content
                        point = 1
                    else:
                        # type_add_text = re.search(r'((((S|s)ửa đổi)(\s|\,)*((b|B)ổ sung)*)|((b|B)ổ sung))',item_content)
                        # if(type_add_text is not None):
                        type_add_text = p.finditer(item_content)
                        type_add_text1 = p1.finditer(item_content)
                        len1 = lenIterator(type_add_text)
                        len2 = lenIterator(type_add_text1)
                        if( (len1 != len2) and (len1 > 0)):
                            type = 1
                            doc_content_update = item_content
                            point=1
                        else:
                            # type_change_text = re.search(r'(t|T)hay\s.*cụm từ',item_content)
                            type_change_text = re.search(r'((t|T)hay\s)*(cụm\s)*từ\s.*(được\s)*(thay\s)*bằng\s(cụm\s)*từ',item_content)
                            if(type_change_text is not None):
                                type = 4
                                doc_content_update = item_content
                                point = 1
                            else :
                                type_name_to_name = re.search(r'((t|T)ên của\s).+(((S|s)ửa đổi\s)*(\,\s)*((b|B)ổ sung\s)*)(thành)',item_content)
                                if(type_name_to_name is not None):
                                    type = 5
                                    doc_content_update = item_content
                                    point = 1
                                else :
                                    point = 0
    # if(totalpoint > 0 and point == 1 ):
    #     doc_content_update = point_content
    # --- Article ("law") level: only when neither point nor item matched. ---
    if(totalLaw >0 and point == 0 ):
        number = get_numerical_symbol(getTitle(law_content))
        if(number is not None):
            numerical_symbol = number
            date_released = None
        # Item and point coordinates are 0 at article level.
        position = "{}_{}_{}_{}_{}_{}".format(part_index+1,chap_index+1,sec_index+1,law_index+1,0,0)
        type_modify = re.search(r'(b|B)ổ sung cụm từ',law_content)
        if(type_modify is not None):
            type = 3
            doc_content_update = law_content
            point = 1
        else:
            type_change_name = re.search(r'(S|s)ửa đổi tên',law_content)
            if(type_change_name is not None):
                type = 6
                doc_content_update = law_content
                point = 1
            else:
                type_delete = re.search(r'(b|B)ãi bỏ',law_content)
                inQuote = False
                if type_delete is not None :
                    inQuote = divlaw.itemInQuote(law_content,type_delete.start())
                if(type_delete is not None) and not inQuote:
                    type = 2
                    doc_content_update = law_content
                    point = 1
                else:
                    type_delete_text = re.search(r'(((b|B)ỏ cụm từ)|((b|B)ỏ từ))',law_content)
                    if(type_delete_text is not None):
                        type = 7
                        doc_content_update = law_content
                        point = 1
                    else:
                        type_add_text = p.finditer(law_content)
                        type_add_text1 = p1.finditer(law_content)
                        len1 = lenIterator(type_add_text)
                        len2 = lenIterator(type_add_text1)
                        if( (len1 != len2) and (len1 > 0)):
                            type = 1
                            doc_content_update = law_content
                            point = 1
                        else:
                            type_change_text = re.search(r'((t|T)hay\s)*(cụm\s)*từ\s.*(được\s)*(thay\s)*bằng\s(cụm\s)*từ',law_content)
                            if(type_change_text is not None):
                                type = 4
                                doc_content_update = law_content
                                point = 1
                            else :
                                type_name_to_name = re.search(r'((t|T)ên của\s).+(((S|s)ửa đổi\s)*(\,\s)*((b|B)ổ sung\s)*)(thành)',law_content)
                                if(type_name_to_name is not None):
                                    type = 5
                                    doc_content_update = law_content
                                    point = 1
                                else :
                                    point = 0
    # if(totalItem > 0):
    #     doc_content_update = item_content
    # Emit one output row only when a clause was classified.
    if(point == 1):
        yield[
            law_id,
            type,
            doc_content_update,
            numerical_symbol,
            position,
            date_released
        ]
| [
"nhatan172@gmail.com"
] | nhatan172@gmail.com |
d56aaa1b76881e1998052b9a341d91955fab83a2 | 1dd6726ebfef9736fea9d4b69c18333909197417 | /New folder/project_test/manage.py | 3a32ab96c4075d09f7fb2b70bdb53957ec7a5dd6 | [] | no_license | Uday-Kiran/My-Scribbles | c7255371ef4c15172dc8acb511d13143533032f4 | fc2383c5877b78dfa4751d4c114762f84504cc48 | refs/heads/master | 2022-01-19T02:36:08.964361 | 2019-07-21T16:40:35 | 2019-07-21T16:40:35 | 198,078,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks for the project_test project."""
    # Point Django at this project's settings unless already configured.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project_test.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint about the usual causes
        # (Django missing or the virtualenv not activated).
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the sub-command named on the command line (runserver, ...).
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | Uday-Kiran.noreply@github.com |
ee1a31f88eeb3c7e9f45e9d6e74e4f4ac8581dbf | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_381/ch15_2020_09_14_14_10_44_836878.py | 03149e67069cc3803fab0866de1f386bfbe66feb | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | def chris(nome):
if chris == nome:
return 'Todo mundo odeia o Chris'
else:
return 'Olá, {0}'.format(nome)
nome = input('Qual seu nome?')  # prompt for the user's name ("What is your name?")
# NOTE(review): the exercise file ends without ever calling chris(nome) --
# presumably the grader invokes it; confirm.
| [
"you@example.com"
] | you@example.com |
4b6acdbc898c6ac10a32ac3ebabe2a347cac037f | a5846332d42c8705d054792bdac219560d77d32f | /Test/test.py | 5dfe4d8d72022058c8dfbfa854f2b08bdaab2bc1 | [] | no_license | Jinsung-Jeon/Deep_Learning_Code | 2927a65adc9c7ef125603d186d4d5f38cebcfade | a87658c4beb823f9c34019e28cad736b259a7747 | refs/heads/master | 2020-12-13T02:35:52.306209 | 2020-03-06T07:01:11 | 2020-03-06T07:01:11 | 234,288,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,159 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 23 14:32:43 2020
@author: Jinsung
"""
#Chap1 test
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap1-Regression analysis Estimation of the number of rings in abalone/abalone.py',
wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap1-Regression analysis Estimation of the number of rings in abalone')
abalone_exec()
#Chap2 test
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap2-Binary Classification predicting a pulsar star/pulsar.py',
wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap2-Binary Classification predicting a pulsar star')
pulsar_exec()
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap2-Binary Classification predicting a pulsar star/pulsar_ext.py',
wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap2-Binary Classification predicting a pulsar star')
pulsar_exec(adjust_ratio=True)
#Chap3 test
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap3-Multi Classification/steel_test.py',
wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap3-Multi Classification')
steel_exec()
#Chap4 test
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap4-MLP based structure/mlp.py',
wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap4-MLP based structure')
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap1-Regression analysis Estimation of the number of rings in abalone/abalone.py',
wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap1-Regression analysis Estimation of the number of rings in abalone')
set_hidden([])
abalone_exec(epoch_count=50, report=10)
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap4-MLP based structure/mlp.py',
wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap4-MLP based structure')
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap2-Binary Classification predicting a pulsar star/pulsar.py',
wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap2-Binary Classification predicting a pulsar star')
set_hidden(6)
pulsar_exec(epoch_count=50, report=10)
set_hidden([12,6])
pulsar_exec(epoch_count=50, report=10)
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap4-MLP based structure/mlp.py',
wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap4-MLP based structure')
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap2-Binary Classification predicting a pulsar star/pulsar_ext.py',
wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap2-Binary Classification predicting a pulsar star')
set_hidden([12,6])
pulsar_exec(epoch_count=50, report=10, adjust_ratio=True)
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap4-MLP based structure/mlp.py',
wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap4-MLP based structure')
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap3-Multi Classification/steel_test.py',
wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap3-Multi Classification')
LEARNING_RATE = 0.0001
set_hidden([12,6,4])
steel_exec(epoch_count=50, report=10)
#Chap5 test
ad = AbaloneDataset()
am = MlpModel('abalone_model',ad,[])
am.exec_all(epoch_count=10, report=2)
pd = PulsarDataset()
pm = MlpModel('pulsar_model', pd, [4])
pm.exec_all()
pm.visualize(5)
sd = SteelDataset()
sm = MlpModel('steel_model', sd, [12,7])
sm.exec_all(epoch_count=50, report=10)
psd = PulsarSelectDataset()
psm = MlpModel('pulsar_select_model', psd, [4])
psm.exec_all()
fd = FlowersDataset()
fm = MlpModel('flowers_model_1', fd, [10]) #같은 추정확률분포를 통해 언제나 민들레라는 답을 냈다.
fm.exec_all(epoch_count=10, report=2)
fm2 = MlpModel('flowers_model_2', fd, [30,10])
fm2.exec_all(epoch_count=10, report=2)
#Chap6 test
od = Office31Dataset()
om1 = MlpModel('office31_model_1', od, [10])
om1.exec_all(epoch_count=20, report=10)
om2 = MlpModel('office31_model_2', od, [64,32,10])
om2.exec_all(epoch_count=20, report=10, learning_rate=0.0001)
om3 = MlpModel('office31_model_3', od, [64,32,10])
om3.use_adam = True
om3.exec_all(epoch_count=50, report=10, learning_rate=0.0001)
#Chap7 test
fd = FlowersDataset([96, 96], [96, 96, 3])
od = Office31Dataset([96, 96], [96, 96, 3])
fm1 = CnnBasicModel('flowers_model_1', fd, [30, 10])
fm1.exec_all(epoch_count=10, report=2)
fm2 = CnnBasicModel('flowers_model_2', fd, [['full', {'width':30}],['full', {'width':10}]])
fm2.use_adam=False
fm2.exec_all(epoch_count = 10, report = 2)
fm3 = CnnBasicModel('flowers_model_3', fd, [['conv', {'ksize':5, 'chn':6}],
['max', {'stride':4}],
['conv', {'ksize':3, 'chn':12}],
['avg', {'stride':2}]],
True)
fm3.exec_all(epoch_count = 10, report=2)
om1 = CnnBasicModel('officie31_model_1', od,
[['conv', {'ksize':3, 'chn':6}],
['max', {'stride':2}],
['conv', {'ksize':3, 'chn':12}],
['max', {'stride':2}],
['conv', {'ksize':3, 'chn':24}],
['avg', {'stride':3}]])
om1.exec_all(epoch_count=10, report =2)
om2 = CnnBasicModel('officie31_model_2', od,
[['conv', {'ksize':3, 'chn':6, 'actfunc':'sigmoid'}],
['max', {'stride':2}],
['conv', {'ksize':3, 'chn':12, 'actfunc':'sigmoid'}],
['max', {'stride':2}],
['conv', {'ksize':3, 'chn':24, 'actfunc':'sigmoid'}],
['avg', {'stride':3}]])
om2.exec_all(epoch_count=10, report =2)
om3 = CnnBasicModel('officie31_model_3', od,
[['conv', {'ksize':3, 'chn':6, 'actfunc':'tanh'}],
['max', {'stride':2}],
['conv', {'ksize':3, 'chn':12, 'actfunc':'tanh'}],
['max', {'stride':2}],
['conv', {'ksize':3, 'chn':24, 'actfunc':'tanh'}],
['avg', {'stride':3}]])
om3.exec_all(epoch_count=10, report =2)
#Chap8 test
fd = FlowersDataset([96, 96], [96, 96, 3])
od = Office31Dataset([96, 96], [96, 96, 3])
fm1 = CnnRegModel('flowers_model_1', fd, [30, 10])
fm1.exec_all(epoch_count=10, report=2, show_params=True)
fm2 = CnnRegModel('flowers_model_2', fd, [30, 10], l2_decay=0.1)
fm2.exec_all(epoch_count=10, show_cnt=0, show_params=True)
fm3 = CnnRegModel('flowers_model_3', fd, [30, 10], l1_decay=0.1)
fm3.exec_all(epoch_count=10, show_cnt=0, show_params=True)
cnn1 = [['conv', {'ksize':3, 'chn':6}],
['max', {'stride':2}],
['conv', {'ksize':3, 'chn':12}],
['max', {'stride':2}],
['conv', {'ksize':3, 'chn':24}],
['avg', {'stride':3}]]
fcnn1 = CnnRegModel('flowers_cnn_1')
fcnn1.exec_all(epoch_count=10, report=2)
cnn2 = [['conv', {'ksize':3, 'chn':6}],
['max', {'stride':2}],
['dropout', {'keep_prob':0.6}],
['conv', {'ksize':3, 'chn':12}],
['max', {'stride':2}],
['dropout', {'keep_prob':0.6}],
['conv', {'ksize':3, 'chn':24}],
['avg', {'stride':3}],
['dropout', {'keep_prob':0.6}]]
fcnn2 = CnnRegModel('flowers_cnn_2',fd, cnn2)
fcnn2.exec_all(epoch_count=10, report=2, show_cnt=0)
cnn3 = [['noise', {'type':'normal','mean':0,'std':0.01}],
['conv', {'ksize':3, 'chn':6}],
['max', {'stride':2}],
['noise', {'type':'normal','mean':0,'std':0.01}],
['conv', {'ksize':3, 'chn':12}],
['max', {'stride':2}],
['noise', {'type':'normal','mean':0,'std':0.01}],
['conv', {'ksize':3, 'chn':24}],
['avg', {'stride':3}]]
fcnn3 = CnnRegModel('flowers_cnn_3',fd, cnn3)
fcnn3.exec_all(epoch_count=10, report=2, show_cnt=0)
cnn4 = [['batch_normal'],
['conv', {'ksize':3, 'chn':6}],
['max', {'stride':2}],
['batch_normal'],
['conv', {'ksize':3, 'chn':12}],
['max', {'stride':2}],
['batch_normal'],
['conv', {'ksize':3, 'chn':24}],
['avg', {'stride':3}]]
fcnn4 = CnnRegModel('flowers_cnn_4',fd, cnn4)
fcnn4.exec_all(epoch_count=10, report=2, show_cnt=0)
od = Office31Dataset([96, 96], [96, 96, 3])
ocnn1 = CnnRegModel('office31_cnn_1', od, cnn1)
ocnn2 = CnnRegModel('office31_cnn_2', od, cnn1)
ocnn3 = CnnRegModel('office31_cnn_3', od, cnn1)
ocnn4 = CnnRegModel('office31_cnn_4', od, cnn1)
ocnn1.exec_all(epoch_count=10, show_cnt=0)
ocnn2.exec_all(epoch_count=10, show_cnt=0)
ocnn3.exec_all(epoch_count=10, show_cnt=0)
ocnn4.exec_all(epoch_count=10, show_cnt=0)
# Chap9
# inception-v3 model
imagenet = DummyDataset('imagenet', 'select', [299,299,3], 200)
CnnExtModel.set_macro('v3_preproc',
['serial',
['conv', {'ksize':3, 'stride':2, 'chn':32, 'padding':'VALID'}],
['conv', {'ksize':3, 'chn':32, 'padding':'VALID'}],
['conv', {'ksize':3, 'chn':64, 'padding':'SAME'}],
['max', {'ksize':3, 'stride':2, 'padding':'VALID'}],
['conv', {'ksize':1, 'chn':80, 'padding':'VALID'}],
['max', {'ksize':3, 'stride':2, 'padding':'VALID'}]])
CnnExtModel.set_macro('v3_inception1',
['parallel',
['conv', {'ksize':1, 'chn':64}],
['serial',
['conv', {'ksize':1, 'chn':48}],
['conv', {'ksize':5, 'chn':64}]],
['serial',
['conv', {'ksize':1, 'chn':64}],
['conv', {'ksize':3, 'chn':96}],
['conv', {'ksize':3, 'chn':96}]],
['serial',
['avg', {'ksize':3, 'stride':1}],
['conv', {'ksize':1, 'chn':'#chn'}]]])
CnnExtModel.set_macro('v3_resize1',
['parallel',
['conv', {'ksize':1, 'stride':2, 'chn':384}],
['serial',
['conv', {'ksize':1, 'chn':64}],
['conv', {'ksize':3, 'chn':96}],
['conv', {'ksize':3, 'stride':2, 'chn':96}]],
['max', {'ksize':3, 'stride':2}]])
CnnExtModel.set_macro('v3_inception2',
['parallel',
['conv', {'ksize':1, 'chn':192}],
['serial',
['conv', {'ksize':[1,1], 'chn':'#chn'}],
['conv', {'ksize':[1,7], 'chn':'#chn'}],
['conv', {'ksize':[7,1], 'chn':192}]],
['serial',
['conv', {'ksize':[1,1], 'chn':'#chn'}],
['conv', {'ksize':[7,1], 'chn':'#chn'}],
['conv', {'ksize':[1,7], 'chn':'#chn'}],
['conv', {'ksize':[7,1], 'chn':'#chn'}],
['conv', {'ksize':[1,7], 'chn':192}]],
['serial',
['avg', {'ksize':3, 'stride':1}],
['conv', {'ksize':1, 'chn':192}]]])
CnnExtModel.set_macro('v3_resize2',
['parallel',
['serial',
['conv', {'ksize':1, 'chn':192}],
['conv', {'ksize':3, 'stride':2, 'chn':320}]],
['serial',
['conv', {'ksize':[1,1], 'chn':192}],
['conv', {'ksize':[1,7], 'chn':192}],
['conv', {'ksize':[7,1], 'chn':192}],
['conv', {'ksize':[3,3], 'stride':[2,2], 'chn':192}]],
['max', {'ksize':3, 'stride':2}]])
CnnExtModel.set_macro('v3_inception3',
['parallel',
['conv', {'ksize':1, 'chn':320}],
['serial',
['conv', {'ksize':[3,3], 'chn':384}],
['parallel',
['conv', {'ksize':[1,3], 'chn':384}],
['conv', {'ksize':[3,1], 'chn':384}]]],
['serial',
['conv', {'ksize':[1,1], 'chn':448}],
['conv', {'ksize':[3,3], 'chn':384}],
['parallel',
['conv', {'ksize':[1,3], 'chn':384}],
['conv', {'ksize':[3,1], 'chn':384}]]],
['serial',
['avg', {'ksize':3, 'stride':1}],
['conv', {'ksize':1, 'chn':192}]]])
CnnExtModel.set_macro('v3_postproc',
['serial',
['avg', {'stride':8}],
['dropout', {'keep_prob':0.7}]])
CnnExtModel.set_macro('inception_v3',
['serial',
['custom', {'name':'v3_preproc'}],
['custom', {'name':'v3_inception1', 'args':{'#chn':32}}],
['custom', {'name':'v3_inception1', 'args':{'#chn':64}}],
['custom', {'name':'v3_inception1', 'args':{'#chn':64}}],
['custom', {'name':'v3_resize1'}],
['custom', {'name':'v3_inception2', 'args':{'#chn':128}}],
['custom', {'name':'v3_inception2', 'args':{'#chn':160}}],
['custom', {'name':'v3_inception2', 'args':{'#chn':160}}],
['custom', {'name':'v3_inception2', 'args':{'#chn':192}}],
['custom', {'name':'v3_resize2'}],
['custom', {'name':'v3_inception3'}],
['custom', {'name':'v3_inception3'}],
['custom', {'name':'v3_postproc'}]])
inception_v3 = CnnExtModel('inception_v3', imagenet, [['custom', {'name':'inception_v3'}]], dump_structure=True)
fd = FlowersDataset([96, 96], [96, 96, 3])
CnnExtModel.set_macro('flower_preproc',
['serial',
['conv', {'ksize':3, 'stride':2, 'chn':6, 'actions':'#act'}]])
CnnExtModel.set_macro('flower_inception1',
['parallel',
['conv', {'ksize':1, 'chn':4, 'actions':'#act'}],
['conv', {'ksize':3, 'chn':6, 'actions':'#act'}],
['serial',
['conv', {'ksize':3, 'chn':6, 'actions':'#act'}],
['conv', {'ksize':3, 'chn':6, 'actions':'#act'}]],
['serial',
['avg', {'ksize':3, 'stride':1}],
['conv', {'ksize':1, 'chn':4, 'actions':'#act'}]]])
CnnExtModel.set_macro('flower_resize',
['parallel',
['conv', {'ksize':1, 'stride':2, 'chn':12, 'actions':'#act'}],
['serial',
['conv', {'ksize':3, 'chn':12, 'actions':'#act'}],
['conv', {'ksize':3, 'stride':2, 'chn':12, 'actions':'#act'}]],
['avg', {'ksize':3, 'stride':2}]])
CnnExtModel.set_macro('flower_inception2',
['parallel',
['conv', {'ksize':1, 'chn':8, 'action':'#act'}],
['serial',
['conv', {'ksize':[3,3], 'chn':8, 'actions':'#act'}],
['parallel',
['conv', {'ksize':[1,3], 'chn':8, 'actions':'#act'}],
['conv', {'ksize':[3,1], 'chn':8, 'actions':'#act'}]]],
['serial',
['conv', {'ksize':[1,1], 'chn':8, 'actions':'#act'}],
['conv', {'ksize':[3,3], 'chn':8, 'actions':'#act'}],
['parallel',
['conv', {'ksize':[1,3], 'chn':8, 'actions':'#act'}],
['conv', {'ksize':[3,1], 'chn':8, 'actions':'#act'}]]],
['serial',
['avg', {'ksize':3, 'stride':1}],
['conv', {'ksize':1, 'chn':8, 'actions':'#act'}]]])
CnnExtModel.set_macro('flower_postproc',
['serial',
['avg', {'stride':6}],
['dropout', {'keep_prob':0.7}]])
CnnExtModel.set_macro('inception_flower',
['serial',
['custom', {'name':'flower_preproc', 'args':{'#act':'#act'}}],
['custom', {'name':'flower_inception1', 'args':{'#act':'#act'}}],
['custom', {'name':'flower_resize', 'args':{'#act':'#act'}}],
['custom', {'name':'flower_inception1', 'args':{'#act':'#act'}}],
['custom', {'name':'flower_resize', 'args':{'#act':'#act'}}],
['custom', {'name':'flower_inception2', 'args':{'#act':'#act'}}],
['custom', {'name':'flower_resize', 'args':{'#act':'#act'}}],
['custom', {'name':'flower_inception2', 'args':{'#act':'#act'}}],
['custom', {'name':'flower_postproc', 'args':{'#act':'#act'}}]])
conf_flower_LA = ['custom', {'name':'inception_flower', 'args':{'#act':'LA'}}]
model_flower_LA = CnnExtModel('model_flower_LA', fd, conf_flower_LA, dump_structure=True)
model_flower_LA.exec_all(report=2)
conf_flower_LAB = ['custom', {'name':'inception_flower', 'args':{'#act':'LAB'}}]
model_flower_LAB = CnnExtModel('model_flower_LAB', fd, conf_flower_LAB, dump_structure=False)
model_flower_LAB.exec_all(epoch_count=10, report=2)
#Chap10
ad = AutomataDataset()
am_4 = RnnBasicModel('am_4', ad, ['rnn', {'recur_size':4, 'outseq':False}])
am_16 = RnnBasicModel('am_16', ad, ['rnn', {'recur_size':16, 'outseq':False}])
am_64 = RnnBasicModel('am_64', ad, ['rnn', {'recur_size':64, 'outseq':False}])
am_4.exec_all(epoch_count=10, report=2)
am_16.exec_all(epoch_count=10, report=2)
am_64.exec_all(epoch_count=10, report=2)
am_64_drop = RnnBasicModel('am_64_drop', ad, [['rnn', {'recur_size':64, 'outseq':False}],['dropout', {'keep_prob':0.5}]])
am_64_drop.exec_all(epch_count=10, report=2)
#Chap11
ad = AutomataDataset()
am_4 = RnnLstmModel('am_4', ad, ['lstm', {'recur_size':64, 'outseq':False}])
am_4.exec_all(epoch_count=10, report=2)
usd_10_10 = UrbanSoundDataset(10, 10)
usd_10_100 = UrbanSoundDataset(10, 100)
conf_basic = ['rnn', {'recur_size':20, 'outseq':False}]
conf_lstm = ['lstm', {'recur_size':20, 'outseq':False}]
conf_state = ['lstm', {'recur_size':20, 'outseq':False, 'use_state':True}]
us_basic_10_10 = RnnLstmModel('us_basic_10_10', usd_10_10, conf_basic)
us_lstm_10_10 = RnnLstmModel('us_lstm_10_10', usd_10_10, conf_lstm)
us_state_10_10 = RnnLstmModel('us_state_10_10', usd_10_10, conf_state)
us_basic_10_100 = RnnLstmModel('us_basic_10_100', usd_10_100, conf_basic)
us_lstm_10_100 = RnnLstmModel('us_lstm_10_100', usd_10_100, conf_lstm)
us_state_10_100 = RnnLstmModel('us_state_10_100', usd_10_100, conf_state)
us_basic_10_10.exec_all(epoch_count=10, report=2)
us_lstm_10_10.exec_all(epoch_count=10, report=2)
us_state_10_10.exec_all(epoch_count=10, report=2, show_cnt=0)
#Chap12
vsd = np.load('C:\\Users\\Jinsung\\Documents\\Deep_Learning_Code\\Datasets\\chap12\\cache\\AstarIsBorn1937.mp4.npy')
conf1 = [['seqwrap', ['avg', {'stride':30}],
['conv', {'ksize':3, 'chn':12}],
['full', {'width':16}]],
['lstm', {'recur_size':8}]]
vsm1 = RnnExtModel('vsm1', vsd, conf1)
vsm1.exec_all(epoch_count=10, report=2, show_cnt=3)
vsd.shape
#Chap13
mset_all = MnistAutoDataset(train_ratio=1.00)
mset_1p = MnistAutoDataset(train_ratio=0.01)
conf_mlp = [['full',{'width':10}]]
mnist_mlp_all = RnnExtModel('mnist_mlp_all', mset_all, conf_mlp)
mnist_mlp_all.exec_all(epoch_count=10, report=2)
conf_auto = {
'encoder': [['full', {'width':10}]],
'decoder': [['full', {'width':784}]],
'supervised': [['full', {'width':10}]]
}
mnist_auto_1 = Autoencoder('mnist_auto_1',mset_1p, conf_auto)
mnist_auto_1.autoencode(epoch_count=10, report=2)
mnist_auto_1.exec_all(epoch_count=10, report=2)
mnist_auto_fix = Autoencoder('mnist_auto_fix', mset_1p, conf_auto, fix_encoder=True)
mnist_auto_fix.autoencode(epoch_count=10, report=5)
mnist_auto_fix.exec_all(epoch_count=10, report=5)
conf_auto_2 = {
'encoder': [['full', {'width':64}], ['full', {'width':10}]],
'decoder': [['full', {'width':64}], ['full', {'width':784}]],
'supervised': [['full', {'width':10}]]
}
mnist_auto_2 = Autoencoder('mnist_auto_2',mset_1p, conf_auto_2)
mnist_auto_2.autoencode(epoch_count=10, report=2)
mnist_auto_2.exec_all(epoch_count=10, report=2)
conf_hash_1 = {
'encoder': [['full', {'width':10, 'actfunc':'sigmoid'}]],
'decoder': [['full', {'width':784}]],
'supervised': []
}
mnist_hash_1 = Autoencoder('mnist_hash_1',mset_1p, conf_hash_1)
mnist_hash_1.autoencode(epoch_count=10, report=2)
mnist_hash_1.semantic_hashing_index()
mnist_hash_1.semantic_hashing_search()
conf_hash_2 = {
'encoder': [['full', {'width':64}],['full', {'width':10, 'actfunc':'sigmoid'}]],
'decoder': [['full', {'width':64}],['full', {'width':784}]],
'supervised': []
}
mnist_hash_2 = Autoencoder('mnist_hash_2',mset_1p, conf_hash_2)
mnist_hash_2.autoencode(epoch_count=10, report=2)
mnist_hash_2.semantic_hashing_index()
mnist_hash_2.semantic_hashing_search()
mnist_hash_2.autoencode(epoch_count=40, report=10)
mnist_hash_2.semantic_hashing_index()
mnist_hash_2.semantic_hashing_search()
#Chap14
mnist_eng = MnistEngDataset()
conf_eng1 = {
'encoder': [['full', {'width':10}]],
'decoder': [['lstm', {'recur_size':32, 'inseq':False,
'outseq':True, 'timesteps':6}],
['seqwrap', ['full', {'width':27, 'actfunc':'none'}]]]
}
encdec_eng1 = EncoderDecoder('encdec_eng1', mnist_eng, conf_eng1)
encdec_eng1.exec_1_step(epoch_count=10, report=2)
conf_eng2 = {
'encoder': [['full', {'width':10}],
['batch_normal'],
['full', {'width':10}]],
'decoder': [['lstm', {'recur_size':32, 'inseq':False,
'outseq':True, 'timesteps':6}],
['seqwrap', ['full', {'width':27, 'actfunc':'none'}]]]
}
encdec_eng2 = EncoderDecoder('encdec_eng2', mnist_eng, conf_eng2)
encdec_eng2.exec_1_step(epoch_count=10, report=2)
encdec_eng2_2 = EncoderDecoder('encdec_eng2_2', mnist_eng, conf_eng2)
encdec_eng2_2.exec_2_step(epoch_count=10, report=5)
encdec_eng2_3 = EncoderDecoder('encdec_eng2_3', mnist_eng, conf_eng2)
encdec_eng2_3.exec_3_step(epoch_count=10, report=5)
#Chap15
dset_pic_gogh = GanDatasetPicture('gogh.jpg')
dset_pic_jungsun = GanDatasetPicture('jungsun.jpg')
print(dset_pic_gogh)
print(dset_pic_jungsun)
conf_pic = {
'seed_shape': [16],
'generator': [['full', {'width':64}],
['full', {'width':32*32*3, 'actfunc':'sigmoid'}]],
'discriminor': [['full', {'width':64}],
['full', {'width':1, 'actfunc':'none'}]]
}
gan_pic_gogh = Gan("gan_pic_gogh", dset_pic_gogh, conf_pic, dump_structure=True)
gan_pic_gogh.exec_all(epoch_count=100, report=20)
| [
"jjsjjs0902@naver.com"
] | jjsjjs0902@naver.com |
cac26fe53126cbc8d9f1890a5eb47aa89c59b35b | d998988cb9f6f73be4f5b3cc302424119f66c856 | /MGITGatePass/basic_app/migrations/0005_auto_20191217_2052.py | 78882130aea3599e56d61a033f94c7640406fed3 | [] | no_license | sai-sambhu/MGITGatePass | cab9ea545f8b8c5fd959613b364dd161c416fab5 | a14ad7fb79163c1c53d81a5732d2ac0e1ec5544e | refs/heads/master | 2020-11-26T00:51:49.287696 | 2019-12-18T20:02:49 | 2019-12-18T20:02:49 | 228,912,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | # Generated by Django 2.2.5 on 2019-12-17 15:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add StudentProfileInfo.roll and give profile_pic a default image.

    Auto-generated by Django's makemigrations; edit with care.
    """

    # Must be applied after the previous basic_app migration.
    dependencies = [
        ('basic_app', '0004_auto_20191217_2031'),
    ]

    operations = [
        # New roll-number column; the default fills existing rows.
        migrations.AddField(
            model_name='studentprofileinfo',
            name='roll',
            field=models.CharField(default='17261A0551', max_length=10),
        ),
        # profile_pic becomes optional and falls back to a default image.
        migrations.AlterField(
            model_name='studentprofileinfo',
            name='profile_pic',
            field=models.ImageField(blank=True, default='default.jpeg', upload_to='profile_pics_students'),
        ),
    ]
| [
"saisambhuprasad@gmail.com"
] | saisambhuprasad@gmail.com |
8bdf10e79cd099752ace442f1093c5d4cfb72eca | a985414206b78dd6e15b485f1881287da6f67895 | /app/__init__.py | 4ad6d80e134bc4d66d4ae9840d13c461da368b7e | [] | no_license | arangurenalonso/hackathon_sem11 | 927873c0010e02f5247b47eae2cbc882b3f03042 | 1d2810871f491e96cd414390234af9f6f560421f | refs/heads/main | 2023-03-22T00:47:33.853778 | 2021-03-12T21:10:20 | 2021-03-12T21:10:20 | 347,198,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | from flask import Flask
from pathlib import Path
from config import Config
from flask_restx import Api
from app.category.categoryResource import CategoryResource, CategoriesResource, category_ns, categories_ns
from app.producto.productoResource import ProductResource, producto_ns
# Flask application object for the Pachaqtec blog REST API.
app = Flask(__name__)
app.config.from_object(Config)

# Swagger security scheme: clients send the token in an
# "Authorization" header (apiKey style).
authorizations = {
    'Bearer Auth': {
        'type': 'apiKey',
        'in': 'header',
        'name': 'Authorization'
    }
}

# Root Flask-RESTX Api: mounts every endpoint under /api/ and serves the
# interactive Swagger UI at /swagger/.
api = Api(app,
    title='Pachaqtec Blog',
    version='v1',
    description='RESTApi Blog',
    prefix='/api/', doc='/swagger/',
    contact='Jeancarlos De la cruz',
    security='Bearer Auth',
    authorizations=authorizations,
    contact_url='https://www.linkedin.com/in/jeancarlosdelacruz/')

# Category endpoints: single-category lookup by id plus the collection route.
api.add_namespace(category_ns)
category_ns.add_resource(CategoryResource, '/<int:id>')
api.add_namespace(categories_ns)
categories_ns.add_resource(CategoriesResource, '')

# Product endpoints: one namespace mounted at its root route.
api.add_namespace(producto_ns)
producto_ns.add_resource(ProductResource,'')
from app.category import categoryModel
from app.producto import productoModel
| [
"aranguren.alonso@gmail.com"
] | aranguren.alonso@gmail.com |
5ab61d683509ed50cb0997df3f6350787fa30c58 | e30afd7f5166afed9ebfd740765d74a152175f54 | /algorithm-40case/leetcode_exercise/add_two_numbers.py | 9851649b651ca875717c0399a4c9ef6a092d3e53 | [] | no_license | wqdchn/geektime | 214c4ee627023723585ddbc66f3066f816a4fc76 | d953f631d93a4493aed26392954ca1f18a440938 | refs/heads/master | 2021-12-27T16:19:03.680654 | 2021-12-26T11:21:35 | 2021-12-26T11:21:35 | 174,132,464 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | # @program: PyDemo
# @description: https://leetcode.com/problems/add-two-numbers/
# @author: wqdong
# @create: 2019-10-01 14:31
class ListNode:
    """Singly linked list node holding one value and a `next` pointer."""

    def __init__(self, x):
        # New nodes start detached; callers link them via `next`.
        self.next = None
        self.val = x
class Solution:
    def addTwoNumbers(self, l1: ListNode, l2: ListNode):
        """Add two numbers stored as reversed-digit linked lists.

        Each input node holds one decimal digit, least significant first.
        Returns the head of a new list holding the digit-by-digit sum.
        """
        sentinel = tail = ListNode(0)
        carry = 0
        # Keep consuming digits while either list has nodes or a carry
        # remains to be emitted.
        while l1 is not None or l2 is not None or carry:
            total = carry
            if l1 is not None:
                total += l1.val
                l1 = l1.next
            if l2 is not None:
                total += l2.val
                l2 = l2.next
            carry, digit = divmod(total, 10)
            tail.next = tail = ListNode(digit)
        return sentinel.next
# Ad-hoc driver: adds 342 + 465 (digits stored least-significant first)
# and prints the result digits 7, 0, 8 one per line.
s = Solution()
l1 = ListNode(2)
l1.next = ListNode(4)
l1.next.next = ListNode(3)
l2 = ListNode(5)
l2.next = ListNode(6)
l2.next.next = ListNode(4)
res = s.addTwoNumbers(l1, l2)
# Walk the result list and print each digit.
while res:
    print(res.val)
    res = res.next
| [
"wqdong.chn@gmail.com"
] | wqdong.chn@gmail.com |
191a36449075d84cb10df8758f93f23b02e0ef64 | cfb33b4a1216387c8417af9d04058b87e5d557a9 | /Neutrons/Fast/1SourceMLEM.py | 0d28aff7eb59e63f5c07e39fb4835a1fb576910b | [] | no_license | loomisdevon/DRRSMask | 8c3428acbab7095e8df29008d73d5ab4e7d17b0c | 181955f68b0043a02b38e14b29654779a220b40a | refs/heads/master | 2023-01-15T03:46:43.478169 | 2020-11-26T06:37:36 | 2020-11-26T06:37:36 | 277,006,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,054 | py | # Dual Rotating Radiation Scattering Mask MCNP Code
# Rotate neutron and gamma flux from 0 to 2PI around mask and return signal received from detector
# Authors: Ivan Novikov and Devon Loomis
import os
import os.path
import math
import fileinput
import shutil
import subprocess
import io
import string
import numpy as np
import matplotlib.pyplot as plt
import time
import math
from decimal import Decimal
import csv
from tqdm import *
########### GLOBALS #############
#Configuration Name (this is name of input file without .txt)
CONFIG_NAME = 'DDRS3_rand2_absorber1Source'
SOURCE_NAME = 'source3'
tMATRIXCONFIG_NAME = 'DDRS3_rand2_absorber'
tMatrixFilename = tMATRIXCONFIG_NAME + "tMatrix.csv"
#####################
# Source Info
R = 100
Phi = 45
Theta = 140
##############
#################################
###################################### creating all MCNP input files for simulation of the increasing relative distance between source and detector #######################
''' Parameters:
file_name: MCNP input template
y-pos: y position of center of circle of source path
r: radius of circle of source path
init: inital angle away from xy-plane
final angle away from xy-plane
angle step size
limit: longest distance between the two
'''
def smoothing(fluxArray, smoothingParameter):
    """Return a centered moving average of ``fluxArray``.

    Each output element ``i`` is the mean of the input values whose index
    lies within ``smoothingParameter`` positions of ``i``, with the window
    clipped at the array boundaries.

    Fixes two defects of the previous implementation: the center sample was
    added twice in every branch (biasing each average toward the raw value),
    and ``range(i, 0, -1)`` excluded index 0 from windows near the left edge.

    :param fluxArray: sequence of numeric flux values.
    :param smoothingParameter: half-width (radius) of the averaging window.
    :return: list of averaged values, same length as the input.
    """
    smoothed = []
    n = len(fluxArray)
    for i in range(n):
        lo = max(0, i - smoothingParameter)
        hi = min(n, i + smoothingParameter + 1)
        window = fluxArray[lo:hi]
        smoothed.append(sum(window) / len(window))
    return smoothed
def createFiles(file_name, phi, rad, init, final, step_size):
    """Generate one MCNP input file per source angle.

    For every theta in ``range(init, final, step_size)`` (degrees), compute
    the source position on a sphere of radius ``rad`` at polar angle ``phi``,
    rewrite the template's SDEF card with that position and a unit vector
    pointing back at the origin, and write the result to a new input file.

    :param file_name: path to the MCNP input template containing an SDEF card.
    :param phi: polar angle (degrees) of the source circle.
    :param rad: radius (source-to-origin distance).
    :param init: first theta angle in degrees.
    :param final: last theta angle (exclusive) in degrees.
    :param step_size: theta increment in degrees.
    :return: list of the written input file names.
    """
    fileList = []
    marker=0
    rad_phi = math.radians(phi)
    for new_theta in range(init, final, step_size):
        # Re-scan the template each iteration for the line starting "SDEF".
        # NOTE(review): if no SDEF line exists, text_search stays None and
        # str.replace() below would raise TypeError — TODO confirm templates
        # always contain one.
        text_search = None
        f =open(file_name)
        for line in f:
            words = line
            sdef = words[0:4]
            if (sdef == "SDEF"):
                text_search = words
                break
        f.close()
        # Spherical-to-Cartesian source position, rounded to 3 decimals.
        rad_theta = math.radians(new_theta)
        x_pos = round(rad * np.cos(rad_theta)*np.sin(rad_phi),3)
        y_pos = round(rad * np.sin(rad_theta)*np.sin(rad_phi),3)
        z_pos = round(rad * np.cos(rad_phi),3)
        # Unit vector from the source toward the origin (beam direction).
        r_mag = np.sqrt(x_pos**2+y_pos**2+z_pos**2)
        vecx_pos = round(-x_pos/r_mag,3)
        vecy_pos = round(-y_pos/r_mag,3)
        vecz_pos = round(-z_pos/r_mag,3)
        #theta_rad = np.arctan(z_pos/r)
        #vecz_pos = round(-1 * (theta_rad/(np.pi/2)),5)
        #replacement_text = sdef + " ERG = 1.42 POS " + str(x_pos) + " " + str(y_pos) + " " + str(z_pos) + " VEC= " + str(vecx_pos) + " " + str(vecy_pos) + " " + str(vecz_pos) + " DIR=d1 par=n" + "\n"
        replacement_text = sdef + " ERG = 2.0 POS " + str(x_pos) + " " + str(y_pos) + " " + str(z_pos) + " VEC= " + str(vecx_pos) + " " + str(vecy_pos) + " " + str(vecz_pos) + " DIR=d1 WGT 20 par=n" + "\n"
        #replacement_text = sdef + " ERG = 1.42 POS " + str(x_pos) + " " + str(y_pos) + " " + str(z_pos) + " par=n" + "\n"
        # Copy the template line-by-line, substituting the rebuilt SDEF card.
        read_name = file_name
        write_name = CONFIG_NAME + SOURCE_NAME + "_" + str(new_theta) + ".txt"
        f1 = open(read_name, 'r')
        f2 = open(write_name, 'w')
        for lines in f1:
            f2.write(lines.replace(text_search, replacement_text))
        f1.close()
        f2.close()
        fileList.append(write_name)
    return (fileList)
################################# delete runtpe files after every set of commands and delete all output files and input files after program run #######################
''' Parameters
directory: directory containing all files
file: KSEF_2 #####################
remove_all: test to determine whether to delete all files or only runtpe files
'''
def removeFiles(directory, file1, file2, file3, outfile, initfile, t_file, save_one, remove_all):
    """Clean up generated MCNP files in ``directory``.

    Always deletes MCNP runtpe files (names starting with "binRun"). When
    ``remove_all`` is True, deletes every generated input/output file that
    shares the template's name prefix (keeping the template ``file1``).
    When ``save_one`` is True, does the same but additionally keeps one
    input/output pair (``file2``/``file3``). ``initfile`` and ``t_file`` are
    never touched.

    :param directory: directory to sweep.
    :param file1: template input file name (preserved).
    :param file2: input file to preserve when save_one is True.
    :param file3: output file to preserve when save_one is True.
    :param outfile: output-name template; its prefix marks generated outputs.
    :param initfile: configuration file to always preserve.
    :param t_file: transmission-matrix CSV to always preserve.
    :param save_one: keep one input/output pair while deleting the rest.
    :param remove_all: delete every generated file except the template.
    """
    dir_name = directory
    for fname in os.listdir(dir_name):
        if (fname != initfile and fname != t_file):
            if fname.startswith("binRun"):
                os.remove(os.path.join(dir_name, fname))
            # [:-4] strips the ".txt" extension to form the shared prefix.
            if (fname.startswith(file1[:-4]) or fname.startswith(outfile[:-4])) and remove_all:
                if (fname != file1):
                    os.remove(os.path.join(dir_name, fname))
            if (fname.startswith(file1[:-4]) or fname.startswith(outfile[:-4])) and save_one:
                if (fname != file1 and fname != file2 and fname != file3):
                    os.remove(os.path.join(dir_name, fname))
####################### read MCNP output file, find and return flux value #########################
#######################_file_: MCNP output file name ##################################
'''
def readFlux(_file_):
flux_ = 0
error_ = 0
with open(_file_, 'r') as outfile:
for line in outfile:
if ('+ *Gamma flux in detector*' in line):
lines = [outfile.readline() for i in range(9)] #this reads 9 lines after the fc4 comment
spectrum = [outfile.readline() for i in range(13)] #this reads 13 lines which contain spectrum
#each line has an index [0]-[12]
#print(type(spectrum[1]))
#print(spectrum[1])
#print(float(spectrum[1].split()[1])) #this splits spectrum[i] using spaces
#each spectrum[i].split() has three new indeces [0]-[2]
#float converts each string to float
#Neutron energy is in [0]
#Neutron counts are in [1]
#Error is in [2]
#tmp = 0.0
#print (spectrum)
for j in range(13):
flux_ += float(spectrum[j].split()[1])
error_ += float(spectrum[j].split()[2])
#Fluxin3[i] = tmp
return flux_, error_
'''
def readFlux(_file_, energyBin, binWrite):
    """Parse an MCNP output file and return one (flux, error) pair.

    Scans the file for the '+ *Neutron Flux In Detector*' tally banner,
    skips the 9 header lines that follow it, then reads ``energyBin + 1``
    spectrum rows of the form "<energy> <counts> <rel-error>".

    :param _file_: path to the MCNP output file.
    :param energyBin: index of the last spectrum row (rows read = energyBin+1).
    :param binWrite: row index whose values are reported; 0 selects the
        final row (row ``energyBin`` — presumably the total line; verify
        against the tally layout).
    :return: tuple (flux, relative error) for the selected row, or (0, 0)
        if the banner never appears.
    """
    flux_ = 0
    error_ = 0
    with open(_file_, 'r') as outfile:
        for line in outfile:
            if '+ *Neutron Flux In Detector*' in line:
                # Skip the 9 lines of tally header after the banner.
                for i in range(9):
                    outfile.readline()
                # Each spectrum row: energy [0], counts [1], error [2].
                spectrum = [outfile.readline() for i in range(energyBin + 1)]
                for j in range(energyBin + 1):
                    if binWrite == j and binWrite != 0:
                        flux_ = float(spectrum[j].split()[1])
                        # Bug fix: the relative error lives in column [2];
                        # the old code copied the flux column [1] here.
                        error_ = float(spectrum[j].split()[2])
                if binWrite == 0:
                    flux_ = float(spectrum[energyBin].split()[1])
                    error_ = float(spectrum[energyBin].split()[2])
    return flux_, error_
def initialize(_file_):
    """Load simulation parameters from the init file into module globals.

    The file is parsed positionally: each readline() strips a fixed-width
    label prefix (e.g. ``[12:]`` skips "intensity = " — presumably; the
    offsets must match the init.txt layout exactly, TODO confirm) and the
    remainder is converted to int/float. Blank readline() calls skip
    section-separator lines.

    :param _file_: path to the init.txt configuration file.
    """
    global intensity, activity, nps, t
    global radius, init_theta, final_theta, step_theta
    global init_phi, final_phi, step_phi
    global packet
    with open(_file_,"r", newline='') as file:
        file.readline()
        # Source/run parameters.
        intensity = float(file.readline()[12:])
        activity = float(file.readline()[11:])
        nps = float(file.readline()[6:])
        t = float(file.readline()[4:])
        file.readline()
        # Geometry sweep parameters (angles in degrees).
        radius = float(file.readline()[9:])
        init_theta = int(file.readline()[13:])
        final_theta = int(file.readline()[14:])
        step_theta = int(file.readline()[13:])
        init_phi = float(file.readline()[11:])
        final_phi = float(file.readline()[12:])
        step_phi = float(file.readline()[11:])
        file.readline()
        file.readline()
        # Number of MCNP processes to launch in parallel.
        packet = int(file.readline()[9:])
#**********************MAIN**************************
# Pipeline: build per-angle MCNP inputs, run them in parallel batches,
# parse the detector flux from each output, smooth/scale the result,
# and write counts + error CSVs for later MLEM reconstruction.
#dir_ = 'C:\\Users\\devon\\Documents\\DRRSMask\\Working_Version\\MLEM\\'
dir_ = os.path.dirname(os.path.abspath(CONFIG_NAME+SOURCE_NAME)) + "\\"
file_ = CONFIG_NAME + SOURCE_NAME + '.txt'
outFile_ = CONFIG_NAME + SOURCE_NAME + '_out.txt'
file_name_ = dir_ + file_
outFile_name_ = dir_ + outFile_
keepInFile = CONFIG_NAME + SOURCE_NAME + '_0.txt'
keepOutFile = CONFIG_NAME + SOURCE_NAME + '_out0.txt'
init_file= dir_ + 'init.txt'
t_file = CONFIG_NAME + "tMatrix.csv"
# Defaults overwritten by initialize() below (it writes these globals).
intensity, activity, nps, t = 0,0,0,0
radius,init_theta,final_theta,step_theta = 0,0,0,0
init_phi,final_phi,step_phi = 0,0,0
packet = 0
initialize(init_file)
originalThetaCountsArray = []
originalThetaErrorArray = []
# Override the init-file theta range with the module-level source angle:
# sweep one full revolution starting at Theta.
init_theta = Theta
final_theta = init_theta+360
transmissionMatrix = []
start = time.time()
removeFiles(dir_, file_, keepInFile, keepOutFile, outFile_, init_file, t_file, False, True) # purge directory of any existing MCNP files from previous run
#files = createFiles(file_name_, z, radius, init_ang, final_ang, step)
files = createFiles(file_name_, Phi, R, init_theta, final_theta, step_theta) # create all MCNP input files
commands = []
outFileList = []
j = init_theta
#create set of commands for subprocess of all input files
for i in range(int((final_theta - init_theta) / step_theta)):
    binFile = "binRun" + str(j) + ".r"
    outFile = (CONFIG_NAME + SOURCE_NAME + "_out" + str(j) + ".txt")
    commands.append("mcnp6 i=" + files[i] + " o=" + outFile + " runtpe=" + binFile)
    outFileList.append(outFile)
    j += step_theta
print("Simulating...")
# give subprocess pak amount of parallel programs to execute until all commands are executed
for x in tqdm(range(0,int((final_theta - init_theta) / step_theta),(packet))):
    if (x < (len(commands) - packet)):
        commandsub = commands[x:(x+packet)]
    else:
        commandsub = commands[x:]
    processes = [subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, cwd=dir_) for cmd in commandsub]
    removeFiles(dir_, file_, keepInFile, keepOutFile, outFile_, init_file, t_file, False, False) # remove runtpe files
    for p in processes:
        p.wait()
print ("Checkpoint")
theta = init_theta
fluxList = []
errorList = []
sourceThetaList = []
#use for neutrons
#energyBinOfInterest = 13
#use for gammas
energyBinOfInterest = 100
############################read and gather flux values and source distances for each output file and add them to lists###################################
for f in outFileList:
    flux, error = readFlux(f, energyBinOfInterest,40)
    fluxList.append(flux)
    errorList.append(error)
    rad_theta = math.radians(theta)
    sourceThetaList.append(rad_theta)
    theta += step_theta
removeFiles(dir_, file_, keepInFile, keepOutFile, outFile_, init_file, t_file, True, False)
end = time.time()
print("Runtime: ", round((end - start)/60, 2), " mins")
rawFluxArray = np.array(fluxList)
#print (rawFluxArray)
# Smooth the angular flux profile with a half-width-8 moving average.
fluxArray = np.array(smoothing(fluxList, 8))
#fluxArray = np.array(fluxList)
errorArray = np.array(errorList)
#print (errorArray)
thetaArray = np.array(sourceThetaList)
# Convert flux to expected detector counts for the given source intensity
# and acquisition time.
countsArray = fluxArray * intensity * t
countsSum = np.sum(countsArray)
#normalizedCountsArray = countsArray / countsSum
normalizedCountsArray = np.copy(countsArray)
normalizedCountsErr = np.sqrt((1/countsArray) + (1/countsSum))
normalizedCountsErrorArray = np.multiply(normalizedCountsArray, normalizedCountsErr)
# Persist the counts profile and its error estimate for reconstruction.
with open(CONFIG_NAME + SOURCE_NAME + "data.csv","w+", newline='') as file:
    writer=csv.writer(file,delimiter=',')
    for a in normalizedCountsArray:
        writer.writerow([a])
with open(CONFIG_NAME + SOURCE_NAME + "background.csv","w+", newline='') as file:
    writer=csv.writer(file,delimiter=',')
    for b in normalizedCountsErrorArray:
        writer.writerow([b])
###########################END MAIN###############################
"devon.loomis@scientic.com"
] | devon.loomis@scientic.com |
af6c43ee70e8b9d1ae987c97a80ae8707f4b001e | 59dbbdf5d29d2490ec8a697dc137aa7456479e89 | /usage/meta.py | 492a6b0640a3b1458ec28a2c5f9d8bdf040928ea | [
"Apache-2.0"
] | permissive | absalon-james/usage | 15d424599528bec7d3184a72b5e9754c325e46ed | a67ceddda8a14244526b3b3a40c0c3feec7035d2 | refs/heads/master | 2021-01-21T14:58:06.023114 | 2016-10-03T20:56:16 | 2016-10-03T20:57:46 | 57,158,746 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | version = '0.1.2'
description = "Python tool for collecting usage information from ceilometer."
| [
"james.absalon@rackspace.com"
] | james.absalon@rackspace.com |
6e615abdf2ddd030aea3917e7b4d7214899e693e | e249e4bb6e3cb2aabf592bcd3f7ec07b7c080eb8 | /cvp_modules/library/cv_server_provision.py | 2d2d507f668d27dc109e4d45be3d50c71cef71db | [] | no_license | arista-eosplus/ansible-cloudvision | c87e230e5286628c3a2f162efab585f4b16ab4c7 | abe124577d1ebeb3dd7b493102fd15795f4a4506 | refs/heads/master | 2021-01-19T08:23:23.139937 | 2017-07-21T03:15:41 | 2017-07-21T03:15:41 | 72,491,582 | 5 | 0 | null | 2017-06-06T13:31:26 | 2016-11-01T01:09:52 | Python | UTF-8 | Python | false | false | 24,752 | py | #!/usr/bin/env python
#
# Copyright (c) 2017, Arista Networks EOS+
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cv_server_provision
version_added: "2.4"
author: "EOS+ CS (ansible-dev@arista.com) (@mharista)"
short_description:
Provision server port by applying or removing template configuration to a
configlet
description:
- This module allows a server team to provision server network ports for
new servers without having to access Arista CVP or asking the network team
to do it for them. Provide the information for connecting to CVP, switch
rack, port the new server is connected to, optional vlan, and an action
and the module will apply the configuration to the switch port via CVP.
Actions are add (applies template config to port),
remove (defaults the interface config) and
show (returns the current port config).
options:
host:
description:
- The hostname or IP address of the CVP node being connected to.
required: true
port:
description:
- The port number to use when making API calls to the CVP node. This
will default to the default port for the specified protocol. Port 80
for http and port 443 for https.
default: None
protocol:
description:
- The protocol to use when making API calls to CVP. CVP defaults to https
and newer versions of CVP no longer support http.
default: https
choices: [https, http]
username:
description:
- The user that will be used to connect to CVP for making API calls.
required: true
password:
description:
- The password of the user that will be used to connect to CVP for API
calls.
required: true
server_name:
description:
- The hostname or identifier for the server that is having it's switch
port provisioned.
required: true
switch_name:
description:
- The hostname of the switch is being configured for the server being
provisioned.
required: true
switch_port:
description:
- The physical port number on the switch that the new server is
connected to.
required: true
port_vlan:
description:
- The vlan that should be applied to the port for this server.
This parameter is dependent on a proper template that supports single
vlan provisioning with it. If a port vlan is specified by the template
specified does not support this the module will exit out with no
changes. If a template is specified that requires a port vlan but no
port vlan is specified the module will exit out with no changes.
default: None
template:
description:
- A path to a Jinja formatted template file that contains the
configuration block that will be applied to the specified switch port.
This template will have variable fields replaced by the module before
being applied to the switch configuration.
required: true
action:
description:
- The action for the module to take. The actions are add, which applies
the specified template config to port, remove, which defaults the
specified interface configuration, and show, which will return the
current port configuration with no changes.
default: show
choices: [show, add, remove]
auto_run:
description:
- Flag that determines whether or not the module will execute the CVP
task spawned as a result of changes to a switch configlet. When an
add or remove action is taken which results in a change to a switch
configlet, CVP will spawn a task that needs to be executed for the
configuration to be applied to the switch. If this option is True then
the module will determined the task number created by the configuration
change, execute it and wait for the task to complete. If the option
is False then the task will remain in the Pending state in CVP for
a network administrator to review and execute.
default: False
type: bool
notes:
requirements: [Jinja2, cvprac >= 0.7.0]
'''
EXAMPLES = '''
- name: Get current configuration for interface Ethernet2
cv_server_provision:
host: cvp_node
username: cvp_user
password: cvp_pass
protocol: https
server_name: new_server
switch_name: eos_switch_1
switch_port: 2
template: template_file.j2
action: show
- name: Remove existing configuration from interface Ethernet2. Run task.
cv_server_provision:
host: cvp_node
username: cvp_user
password: cvp_pass
protocol: https
server_name: new_server
switch_name: eos_switch_1
switch_port: 2
template: template_file.j2
action: remove
auto_run: True
- name: Add template configuration to interface Ethernet2. No VLAN. Run task.
cv_server_provision:
host: cvp_node
username: cvp_user
password: cvp_pass
protocol: https
server_name: new_server
switch_name: eos_switch_1
switch_port: 2
template: single_attached_trunk.j2
action: add
auto_run: True
- name: Add template with VLAN configuration to interface Ethernet2. Run task.
cv_server_provision:
host: cvp_node
username: cvp_user
password: cvp_pass
protocol: https
server_name: new_server
switch_name: eos_switch_1
switch_port: 2
port_vlan: 22
template: single_attached_vlan.j2
action: add
auto_run: True
'''
RETURN = '''
changed:
description: Signifies if a change was made to the configlet
returned: success
type: bool
sample: true
currentConfigBlock:
description: The current config block for the user specified interface
returned: when action = show
type: string
sample: "interface Ethernet4\n!"
newConfigBlock:
description: The new config block for the user specified interface
returned: when action = add or remove
type: string
sample: "interface Ethernet3\n description example\n no switchport\n!"
oldConfigBlock:
description: The current config block for the user specified interface
before any changes are made
returned: when action = add or remove
type: string
sample: "interface Ethernet3\n!"
fullConfig:
description: The full config of the configlet after being updated
returned: when action = add or remove
type: string
sample: "!\ninterface Ethernet3\n!\ninterface Ethernet4\n!"
updateConfigletResponse:
description: Response returned from CVP when configlet update is triggered
returned: when action = add or remove and configuration changes
type: string
sample: "Configlet veos1-server successfully updated and task initiated."
portConfigurable:
description: Signifies if the user specified port has an entry in the
configlet that Ansible has access to
returned: success
type: bool
sample: true
switchConfigurable:
description: Signifies if the user specified switch has a configlet
applied to it that CVP is allowed to edit
returned: success
type: bool
sample: true
switchInfo:
description: Information from CVP describing the switch being configured
returned: success
type: dictionary
sample: {"architecture": "i386",
"bootupTimeStamp": 1491264298.21,
"complianceCode": "0000",
"complianceIndication": "NONE",
"deviceInfo": "Registered",
"deviceStatus": "Registered",
"fqdn": "veos1",
"hardwareRevision": "",
"internalBuildId": "12-12",
"internalVersion": "4.17.1F-11111.4171F",
"ipAddress": "192.168.1.20",
"isDANZEnabled": "no",
"isMLAGEnabled": "no",
"key": "00:50:56:5d:e5:e0",
"lastSyncUp": 1496432895799,
"memFree": 472976,
"memTotal": 1893460,
"modelName": "vEOS",
"parentContainerId": "container_13_5776759195930",
"serialNumber": "",
"systemMacAddress": "00:50:56:5d:e5:e0",
"taskIdList": [],
"tempAction": null,
"type": "netelement",
"unAuthorized": false,
"version": "4.17.1F",
"ztpMode": "false"}
taskCompleted:
description: Signifies if the task created and executed has completed successfully
returned: when action = add or remove, and auto_run = true,
and configuration changes
type: bool
sample: true
taskCreated:
description: Signifies if a task was created due to configlet changes
returned: when action = add or remove, and auto_run = true or false,
and configuration changes
type: bool
sample: true
taskExecuted:
description: Signifies if the automation executed the spawned task
returned: when action = add or remove, and auto_run = true,
and configuration changes
type: bool
sample: true
taskId:
description: The task ID created by CVP because of changes to configlet
returned: when action = add or remove, and auto_run = true or false,
and configuration changes
type: string
sample: "500"
'''
import re
import time
from jinja2 import meta
import jinja2
from ansible.module_utils.basic import AnsibleModule
from cvprac.cvp_client import CvpClient
from cvprac.cvp_client_errors import CvpLoginError, CvpApiError
def connect(module):
    ''' Connects to CVP device using user provided credentials from playbook.

    :param module: Ansible module with parameters and client connection.
    :return: CvpClient object with connection instantiated, or exit with
             failure if the login is rejected.
    '''
    client = CvpClient()
    try:
        client.connect([module.params['host']],
                       module.params['username'],
                       module.params['password'],
                       protocol=module.params['protocol'],
                       port=module.params['port'])
    # 'except ... as e' works on Python 2.6+ and Python 3; the previous
    # comma form ('except CvpLoginError, e') is a SyntaxError on Python 3.
    except CvpLoginError as e:
        module.fail_json(msg=str(e))
    return client
def switch_info(module):
    ''' Look up the CVP device record for the user specified switch.

    :param module: Ansible module with parameters and client connection.
    :return: Dict of switch info from CVP or exit with failure if no
             info for device is found.
    '''
    name = module.params['switch_name']
    info = module.client.api.get_device_by_name(name)
    if not info:
        module.fail_json(msg=str("Device with name '%s' does not exist."
                                 % name))
    return info
def switch_in_compliance(module, sw_info):
    ''' Verify the switch is currently in compliance with CVP.

    :param module: Ansible module with parameters and client connection.
    :param sw_info: Dict of switch info.
    :return: Nothing or exit with failure if device is not in compliance.
    '''
    resp = module.client.api.check_compliance(sw_info['key'],
                                              sw_info['type'])
    code = resp['complianceCode']
    # '0000' is the in-compliance code; anything else aborts the run.
    if code != '0000':
        module.fail_json(msg=str('Switch %s is not in compliance. Returned'
                                 ' compliance code %s.'
                                 % (sw_info['fqdn'], code)))
def server_configurable_configlet(module, sw_info):
    ''' Find the server-port configlet Ansible may edit on this switch.

    The editable configlet is identified by the naming convention
    "<switch_name>-server" among the configlets assigned to the device.

    :param module: Ansible module with parameters and client connection.
    :param sw_info: Dict of switch info.
    :return: Dict of configlet information or None.
    '''
    target_name = module.params['switch_name'] + '-server'
    assigned = module.client.api.get_configlets_by_device_id(sw_info['key'])
    found = None
    for candidate in assigned:
        if candidate['name'] == target_name:
            found = candidate
    return found
def port_configurable(module, configlet):
    ''' Report whether the configlet holds an entry for the specified port,
        meaning Ansible is allowed to configure it on this switch.

    :param module: Ansible module with parameters and client connection.
    :param configlet: Dict of configlet info.
    :return: True or False.
    '''
    pattern = r'^interface Ethernet%s' % module.params['switch_port']
    # A line beginning with the interface header marks the port as managed.
    return any(re.match(pattern, config_line)
               for config_line in configlet['config'].split('\n'))
def configlet_action(module, configlet):
    ''' Perform the user requested action against the switch configlet.

    For 'show', simply return the current config block for the port. For
    'add' or 'remove', build the new block, splice it into the configlet,
    push the update to CVP, and report what changed.

    :param module: Ansible module with parameters and client connection.
    :param configlet: Dict of configlet info.
    :return: Dict of information to updated results with.
    '''
    action = module.params['action']
    current_block = current_config(module, configlet['config'])
    if action == 'show':
        return {'currentConfigBlock': current_block}
    if action == 'add':
        new_block = config_from_template(module)
    else:
        # 'remove' defaults the interface back to an empty stanza.
        new_block = ('interface Ethernet%s\n!'
                     % module.params['switch_port'])
    outcome = {'newConfigBlock': new_block,
               'oldConfigBlock': current_block}
    outcome['fullConfig'] = updated_configlet_content(module,
                                                      configlet['config'],
                                                      new_block)
    resp = module.client.api.update_configlet(outcome['fullConfig'],
                                              configlet['key'],
                                              configlet['name'])
    if 'data' in resp:
        outcome['updateConfigletResponse'] = resp['data']
        # CVP mentions 'task' in the response when the update spawned one.
        if 'task' in resp['data']:
            outcome['changed'] = True
            outcome['taskCreated'] = True
    return outcome
def current_config(module, config):
    ''' Extract the configuration block for the user specified port out of
        the full configlet configuration.

    :param module: Ansible module with parameters and client connection.
    :param config: Full config to parse specific port config from.
    :return: String of current config block for user specified port.
    '''
    pattern = r'^interface Ethernet%s' % module.params['switch_port']
    header = re.search(pattern, config, re.M)
    if not header:
        module.fail_json(msg=str('interface section not found - %s'
                                 % config))
    start, after_header = header.start(), header.end()
    # The block runs through the first '!' after the interface header; if
    # none exists, the block extends to the end of the config.
    terminator = re.search(r'!', config[after_header:], re.M)
    if not terminator:
        return config[start:]
    end = after_header + terminator.end()
    return config[start:end]
def valid_template(port, template):
    ''' Check that the rendered template configures the expected port.

    :param port: User specified port.
    :param template: Contents of Jinja template.
    :return: True or False
    '''
    pattern = r'^interface Ethernet%s' % port
    return re.match(pattern, template, re.M) is not None
def config_from_template(module):
    ''' Load the Jinja template and apply user provided parameters in necessary
        places. Fail if template is not found. Fail if rendered template does
        not reference the correct port. Fail if the template requires a VLAN
        but the user did not provide one with the port_vlan parameter.

    :param module: Ansible module with parameters and client connection.
    :return: String of Jinja template rendered with parameters or exit with
             failure.
    '''
    # Templates are resolved relative to a ./templates directory next to
    # the playbook's working directory.
    template_loader = jinja2.FileSystemLoader('./templates')
    # DebugUndefined leaves unknown placeholders in the output instead of
    # raising, so missing variables can be detected by inspection below.
    env = jinja2.Environment(loader=template_loader,
                             undefined=jinja2.DebugUndefined)
    template = env.get_template(module.params['template'])
    # NOTE(review): jinja2's get_template raises TemplateNotFound rather
    # than returning None, so this guard likely never fires — confirm.
    if not template:
        module.fail_json(msg=str('Could not find template - %s'
                                 % module.params['template']))
    data = {'switch_port': module.params['switch_port'],
            'server_name': module.params['server_name']}
    # Parse the raw template source to discover which variables it uses.
    temp_source = env.loader.get_source(env, module.params['template'])[0]
    parsed_content = env.parse(temp_source)
    temp_vars = list(meta.find_undeclared_variables(parsed_content))
    # Only pass port_vlan when the template actually references it, and
    # fail if the template needs it but the user did not supply one.
    if 'port_vlan' in temp_vars:
        if module.params['port_vlan']:
            data['port_vlan'] = module.params['port_vlan']
        else:
            module.fail_json(msg=str('Template %s requires a vlan. Please'
                                     ' re-run with vlan number provided.'
                                     % module.params['template']))
    template = template.render(data)
    # Sanity-check the rendered output configures the requested interface.
    if not valid_template(module.params['switch_port'], template):
        module.fail_json(msg=str('Template content does not configure proper'
                                 ' interface - %s' % template))
    return template
def updated_configlet_content(module, existing_config, new_config):
    ''' Splice the new port section into the existing configlet text.

    :param module: Ansible module with parameters and client connection.
    :param existing_config: String of current configlet configuration.
    :param new_config: String of configuration for user specified port to
                       replace in the existing config.
    :return: String of the full updated configuration.
    '''
    pattern = r'^interface Ethernet%s' % module.params['switch_port']
    header = re.search(pattern, existing_config, re.M)
    if not header:
        module.fail_json(msg=str('interface section not found - %s'
                                 % existing_config))
    start, after_header = header.start(), header.end()
    # Everything before the old block, then the replacement block.
    result = existing_config[:start] + new_config
    # Re-attach whatever followed the old block's closing '!\n', if any.
    terminator = re.search(r'!\n', existing_config[after_header:], re.M)
    if terminator:
        tail_start = after_header + terminator.end()
        result += '\n%s' % existing_config[tail_start:]
    return result
def configlet_update_task(module):
    ''' Poll CVP up to three times for the task spawned by the configlet
        update; task creation can lag the update by a moment.

    :param module: Ansible module with parameters and client connection.
    :return: Task ID or None.
    '''
    for _ in range(3):
        device = switch_info(module)
        if 'taskIdList' in device and len(device['taskIdList']) > 0:
            for task in device['taskIdList']:
                # Only a configlet-push assignment task belongs to us.
                if ('Configlet Assign' in task['description'] and
                        task['data']['WORKFLOW_ACTION'] == 'Configlet Push'):
                    return task['workOrderId']
        time.sleep(1)
    return None
def wait_for_task_completion(module, task):
    ''' Block until the executed CVP task finishes. There is currently no
        timeout. Exits with failure if the task is Failed or Cancelled.

    :param module: Ansible module with parameters and client connection.
    :param task: Task ID to poll for completion.
    :return: True or exit with failure if task is cancelled or fails.
    '''
    while True:
        status = module.client.api.get_task_by_id(
            task)['workOrderUserDefinedStatus']
        if status == 'Completed':
            return True
        if status in ('Failed', 'Cancelled'):
            module.fail_json(msg=str('Task %s has reported status %s. Please'
                                     ' consult the CVP admins for more'
                                     ' information.' % (task, status)))
        time.sleep(2)
def main():
    """ main entry point for module execution

    Validates parameters, gathers switch info from CVP, applies the
    requested configlet action, and (optionally) executes the spawned task.
    """
    argument_spec = dict(
        host=dict(required=True),
        port=dict(required=False, default=None),
        protocol=dict(default='https', choices=['http', 'https']),
        username=dict(required=True),
        password=dict(required=True, no_log=True),
        server_name=dict(required=True),
        switch_name=dict(required=True),
        switch_port=dict(required=True),
        port_vlan=dict(required=False, default=None),
        # BUG FIX: was `require=True` (an unknown key Ansible ignores), so
        # the template parameter was silently optional.
        template=dict(required=True),
        action=dict(default='show', choices=['show', 'add', 'remove']),
        auto_run=dict(type='bool', default=False))
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=False)
    result = dict(changed=False)
    module.client = connect(module)

    try:
        result['switchInfo'] = switch_info(module)
        # Only mutating actions require the switch to be in compliance.
        if module.params['action'] in ['add', 'remove']:
            switch_in_compliance(module, result['switchInfo'])
        switch_configlet = server_configurable_configlet(module,
                                                         result['switchInfo'])
        if not switch_configlet:
            module.fail_json(msg=str('Switch %s has no configurable server'
                                     ' ports.' % module.params['switch_name']))
        result['switchConfigurable'] = True
        if not port_configurable(module, switch_configlet):
            module.fail_json(msg=str('Port %s is not configurable as a server'
                                     ' port on switch %s.'
                                     % (module.params['switch_port'],
                                        module.params['switch_name'])))
        result['portConfigurable'] = True
        result['taskCreated'] = False
        result['taskExecuted'] = False
        result['taskCompleted'] = False
        result.update(configlet_action(module, switch_configlet))
        # Optionally execute any task the configlet change spawned.
        if module.params['auto_run'] and module.params['action'] != 'show':
            task_id = configlet_update_task(module)
            if task_id:
                result['taskId'] = task_id
                note = ('Update config on %s with %s action from Ansible.'
                        % (module.params['switch_name'],
                           module.params['action']))
                module.client.api.add_note_to_task(task_id, note)
                module.client.api.execute_task(task_id)
                result['taskExecuted'] = True
                task_completed = wait_for_task_completion(module, task_id)
                if task_completed:
                    result['taskCompleted'] = True
            else:
                result['taskCreated'] = False
    # FIX: `except CvpApiError, e:` is Python-2-only syntax; `as` works on
    # Python 2.6+ and Python 3.
    except CvpApiError as e:
        module.fail_json(msg=str(e))
    module.exit_json(**result)
# Run the module entry point only when executed as a script.
if __name__ == '__main__':
    main()
| [
"mhartzel@arista.com"
] | mhartzel@arista.com |
975d016f867e515a5d0d42fecfb3e22ccc3a61ff | 55ddcae82338890a7101b2ff6db0856463702314 | /perfectcushion/shop/admin.py | 1ac35dfa7e64e770e7c84c4021ff8ced72dc2336 | [] | no_license | rixinhaha/DjangoEcommerce | d31d6e8c7a4a40ba3f32d0e27ef203c59475c1dc | 0e3a188e8276bbfb63901747f553dd2ab483c284 | refs/heads/master | 2020-08-03T21:18:14.750498 | 2019-09-30T15:30:09 | 2019-09-30T15:30:09 | 211,887,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | from django.contrib import admin
from .models import Category,Product
# Register your models here.
class CategoryAdmin(admin.ModelAdmin):
    """Admin configuration for Category: list columns and slug auto-fill."""
    # Columns shown on the category change-list page.
    list_display = ['name', 'slug']
    # Auto-populate the slug from the name while typing in the admin form.
    prepopulated_fields = {'slug':['name',]}
admin.site.register(Category, CategoryAdmin)
class ProductAdmin(admin.ModelAdmin):
    """Admin configuration for Product: listing, inline editing, paging."""
    # Columns shown on the product change-list page.
    list_display= ['name', 'price', 'stock', 'available', 'created', 'updated']
    # These columns can be edited directly in the change list.
    list_editable= ['price', 'stock', 'available']
    # Auto-populate the slug from the name while typing in the admin form.
    prepopulated_fields = {'slug':['name',]}
    # Paginate the change list 20 products at a time.
    list_per_page = 20
admin.site.register(Product,ProductAdmin)
| [
"rixinhaha@gmail.com"
] | rixinhaha@gmail.com |
739c0ed4a80c4bad6b0788a2f025475c8e864f1c | 3ec9ace491cd5d06b5b998e7e309a13bd86c7126 | /tests/system/conftest.py | 535e9d9a46ea535834835fff3376b8c958e75f58 | [
"Apache-2.0"
] | permissive | Jitsusama/lets-do-dns | 64467664f42df053b535156fc773be7e874d0bf5 | faff4bf45e9a4be438e15afbe5caa249fe1e5210 | refs/heads/master | 2021-01-20T02:38:10.953843 | 2017-07-21T03:08:30 | 2017-07-21T03:08:30 | 89,433,363 | 8 | 0 | Apache-2.0 | 2019-10-02T15:07:28 | 2017-04-26T03:24:48 | Python | UTF-8 | Python | false | false | 1,843 | py | try:
import ConfigParser as configparser
except ImportError:
import configparser
import os
import pytest
from requests import post
@pytest.fixture(autouse=True)
def os_environ_reset():
    """Reset os.environ in between test runs."""
    # Snapshot the environment before the test runs...
    original_env = os.environ.copy()
    yield
    # ...then restore it, discarding any variables the test set or changed.
    os.environ.clear()
    os.environ.update(original_env)
@pytest.fixture(scope='module')
def test_configuration():
    """Read test configuration from :file:`config.ini` file.

    The INI file must have a ``[DEFAULT]`` section containing the following
    parameters:

    * ``do_api_key``
    * ``do_domain``
    * ``do_hostname``
    """
    # Locate config.ini next to this conftest.py, regardless of the cwd
    # pytest was launched from.
    file_path = os.path.realpath(__file__)
    directory_path = os.path.dirname(file_path)
    config_file = '%s/config.ini' % directory_path
    config = configparser.ConfigParser()
    config.read(config_file)
    return config
@pytest.fixture
def create_response(
        do_base_uri, do_auth_header, do_domain, do_hostname, request):
    """POST a TXT record for the test hostname and return the response.

    The record's data field is the requesting test's function name, so
    each test creates a distinguishable record.
    """
    records_url = '%s/%s/records' % (do_base_uri, do_domain)
    payload = {'type': 'TXT',
               'name': do_hostname,
               'data': request.function.__name__}
    return post(records_url, headers=do_auth_header, json=payload)
@pytest.fixture()
def do_api_key(test_configuration):
    """DigitalOcean API key, read from config.ini's [DEFAULT] section."""
    return test_configuration.get('DEFAULT', 'do_api_key')
@pytest.fixture
def do_auth_header(test_configuration):
    """Bearer-token Authorization header for the DigitalOcean API."""
    return {'Authorization': 'Bearer %s' % do_api_key}
@pytest.fixture
def do_base_uri():
    """Base URI of the DigitalOcean v2 domains API."""
    return 'https://api.digitalocean.com/v2/domains'
@pytest.fixture
def do_domain(test_configuration):
    """Domain under test, read from config.ini's [DEFAULT] section."""
    return test_configuration.get('DEFAULT', 'do_domain')
@pytest.fixture
def do_hostname(test_configuration):
    """Hostname under test, read from config.ini's [DEFAULT] section."""
    return test_configuration.get('DEFAULT', 'do_hostname')
@pytest.fixture
def do_record_id(create_response):
    """ID of the DNS record created by the create_response fixture."""
    return create_response.json()['domain_record']['id']
| [
"joel@grrbrr.ca"
] | joel@grrbrr.ca |
9c17bdc0d3beabedf1313533658ffff019329cce | 8f5d2fb45d6452fc6df00b12fa0bd45446d1029b | /lessons/models.py | 1227de579706645ec9701f1a78da6f80fcc40145 | [] | no_license | luminyanko/renshuu | 4bcbd0f10e0a4a175ab92783e5346ac4b1c44927 | 54a98f59a7d2971e3ba20d5406b3e7ad8c482408 | refs/heads/master | 2023-06-01T20:40:30.090022 | 2021-06-21T08:47:19 | 2021-06-21T08:47:19 | 377,913,945 | 0 | 0 | null | 2021-06-21T08:44:52 | 2021-06-17T17:39:11 | Python | UTF-8 | Python | false | false | 734 | py | from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.contrib.auth.models import User
# Create your models here.
class Tag(models.Model):
tag_name = models.CharField(max_length=50)
def __str__(self):
return self.tag_name
class Lesson(models.Model):
title = models.CharField(max_length=150)
content = models.TextField()
date_created = models.DateTimeField(default=timezone.now)
author = models.ForeignKey(User, on_delete=models.CASCADE)
tag = models.ForeignKey(Tag, on_delete=models.PROTECT)
def get_absolute_url(self):
return reverse('lesson-detail', kwargs={'pk': self.pk})
def __str__(self):
return self.title
| [
"luminyanko@gmail.com"
] | luminyanko@gmail.com |
2a4a81a565fab19cc75a574eb4d85c9994bb0767 | c67f2d0677f8870bc1d970891bbe31345ea55ce2 | /zippy/lib-python/3/test/test_file.py | e9a1ceeeb8a48606fa1ad65140adac2fd3689d05 | [
"BSD-3-Clause"
] | permissive | securesystemslab/zippy | a5a1ecf5c688504d8d16128ce901406ffd6f32c2 | ff0e84ac99442c2c55fe1d285332cfd4e185e089 | refs/heads/master | 2022-07-05T23:45:36.330407 | 2018-07-10T22:17:32 | 2018-07-10T22:17:32 | 67,824,983 | 324 | 27 | null | null | null | null | UTF-8 | Python | false | false | 11,928 | py | import sys
import os
import unittest
from array import array
from weakref import proxy
import io
import _pyio as pyio
from test.support import TESTFN, run_unittest, gc_collect
from collections import UserList
class AutoFileTests(unittest.TestCase):
    """Tests that run against a file object opened fresh for each test.

    Subclasses bind ``open`` to either the C (``io``) or the pure-Python
    (``_pyio``) implementation.
    """
    # file tests for which a test file is automatically set up

    def setUp(self):
        self.f = self.open(TESTFN, 'wb')

    def tearDown(self):
        if self.f:
            self.f.close()
        os.remove(TESTFN)

    def testWeakRefs(self):
        # verify weak references
        p = proxy(self.f)
        p.write(b'teststring')
        self.assertEqual(self.f.tell(), p.tell())
        self.f.close()
        self.f = None
        gc_collect()
        # once the file object is collected the proxy must raise on access
        self.assertRaises(ReferenceError, getattr, p, 'tell')

    def testAttributes(self):
        # verify expected attributes exist
        f = self.f
        f.name # merely shouldn't blow up
        f.mode # ditto
        f.closed # ditto

    def testReadinto(self):
        # verify readinto
        self.f.write(b'12')
        self.f.close()
        a = array('b', b'x'*10)
        self.f = self.open(TESTFN, 'rb')
        n = self.f.readinto(a)
        self.assertEqual(b'12', a.tobytes()[:n])

    def testReadinto_text(self):
        # verify readinto refuses text files
        a = array('b', b'x'*10)
        self.f.close()
        self.f = self.open(TESTFN, 'r')
        if hasattr(self.f, "readinto"):
            self.assertRaises(TypeError, self.f.readinto, a)

    def testWritelinesUserList(self):
        # verify writelines with instance sequence
        l = UserList([b'1', b'2'])
        self.f.writelines(l)
        self.f.close()
        self.f = self.open(TESTFN, 'rb')
        buf = self.f.read()
        self.assertEqual(buf, b'12')

    def testWritelinesIntegers(self):
        # verify writelines with integers
        self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])

    def testWritelinesIntegersUserList(self):
        # verify writelines with integers in UserList
        l = UserList([1,2,3])
        self.assertRaises(TypeError, self.f.writelines, l)

    def testWritelinesNonString(self):
        # verify writelines with non-string object
        class NonString:
            pass

        self.assertRaises(TypeError, self.f.writelines,
                          [NonString(), NonString()])

    def testErrors(self):
        f = self.f
        self.assertEqual(f.name, TESTFN)
        self.assertTrue(not f.isatty())
        self.assertTrue(not f.closed)

        if hasattr(f, "readinto"):
            self.assertRaises((IOError, TypeError), f.readinto, "")
        f.close()
        self.assertTrue(f.closed)

    def testMethods(self):
        # every file method must raise ValueError once the file is closed
        methods = [('fileno', ()),
                   ('flush', ()),
                   ('isatty', ()),
                   ('__next__', ()),
                   ('read', ()),
                   ('write', (b"",)),
                   ('readline', ()),
                   ('readlines', ()),
                   ('seek', (0,)),
                   ('tell', ()),
                   ('write', (b"",)),
                   ('writelines', ([],)),
                   ('__iter__', ()),
                   ]
        methods.append(('truncate', ()))

        # __exit__ should close the file
        self.f.__exit__(None, None, None)
        self.assertTrue(self.f.closed)

        for methodname, args in methods:
            method = getattr(self.f, methodname)
            # should raise on closed file
            self.assertRaises(ValueError, method, *args)

        # file is closed, __exit__ shouldn't do anything
        self.assertEqual(self.f.__exit__(None, None, None), None)
        # it must also return None if an exception was given
        try:
            1/0
        except:
            self.assertEqual(self.f.__exit__(*sys.exc_info()), None)

    def testReadWhenWriting(self):
        self.assertRaises(IOError, self.f.read)
class CAutoFileTests(AutoFileTests):
    # Exercise the C implementation of the io stack.
    open = io.open
class PyAutoFileTests(AutoFileTests):
    # Exercise the pure-Python (_pyio) implementation of the io stack.
    open = staticmethod(pyio.open)
class OtherFileTests(unittest.TestCase):
    """File tests that manage their own files (no automatic setUp fixture).

    Subclasses bind ``open`` to either the C (``io``) or the pure-Python
    (``_pyio``) implementation.
    """

    def testModeStrings(self):
        # check invalid mode strings
        for mode in ("", "aU", "wU+"):
            try:
                f = self.open(TESTFN, mode)
            except ValueError:
                pass
            else:
                f.close()
                self.fail('%r is an invalid file mode' % mode)

    def testStdin(self):
        # This causes the interpreter to exit on OSF1 v5.1.
        if sys.platform != 'osf1V5':
            self.assertRaises((IOError, ValueError), sys.stdin.seek, -1)
        else:
            print((
                '  Skipping sys.stdin.seek(-1), it may crash the interpreter.'
                ' Test manually.'), file=sys.__stdout__)
        self.assertRaises((IOError, ValueError), sys.stdin.truncate)

    def testBadModeArgument(self):
        # verify that we get a sensible error message for bad mode argument
        bad_mode = "qwerty"
        try:
            f = self.open(TESTFN, bad_mode)
        except ValueError as msg:
            if msg.args[0] != 0:
                s = str(msg)
                if TESTFN in s or bad_mode not in s:
                    self.fail("bad error message for invalid mode: %s" % s)
            # if msg.args[0] == 0, we're probably on Windows where there may be
            # no obvious way to discover why open() failed.
        else:
            f.close()
            self.fail("no error for invalid mode: %s" % bad_mode)

    def testSetBufferSize(self):
        # make sure that explicitly setting the buffer size doesn't cause
        # misbehaviour especially with repeated close() calls
        for s in (-1, 0, 1, 512):
            try:
                f = self.open(TESTFN, 'wb', s)
                f.write(str(s).encode("ascii"))
                f.close()
                f.close()
                f = self.open(TESTFN, 'rb', s)
                d = int(f.read().decode("ascii"))
                f.close()
                f.close()
            except IOError as msg:
                self.fail('error setting buffer size %d: %s' % (s, str(msg)))
            self.assertEqual(d, s)

    def testTruncateOnWindows(self):
        # SF bug <http://www.python.org/sf/801631>
        # "file.truncate fault on windows"

        os.unlink(TESTFN)
        f = self.open(TESTFN, 'wb')

        try:
            f.write(b'12345678901')   # 11 bytes
            f.close()

            f = self.open(TESTFN,'rb+')
            data = f.read(5)
            if data != b'12345':
                self.fail("Read on file opened for update failed %r" % data)
            if f.tell() != 5:
                self.fail("File pos after read wrong %d" % f.tell())

            f.truncate()
            if f.tell() != 5:
                self.fail("File pos after ftruncate wrong %d" % f.tell())

            f.close()
            size = os.path.getsize(TESTFN)
            if size != 5:
                self.fail("File size after ftruncate wrong %d" % size)
        finally:
            f.close()
            os.unlink(TESTFN)

    def testIteration(self):
        # Test the complex interaction when mixing file-iteration and the
        # various read* methods.
        dataoffset = 16384
        filler = b"ham\n"
        assert not dataoffset % len(filler), \
            "dataoffset must be multiple of len(filler)"
        nchunks = dataoffset // len(filler)
        testlines = [
            b"spam, spam and eggs\n",
            b"eggs, spam, ham and spam\n",
            b"saussages, spam, spam and eggs\n",
            b"spam, ham, spam and eggs\n",
            b"spam, spam, spam, spam, spam, ham, spam\n",
            b"wonderful spaaaaaam.\n"
        ]
        methods = [("readline", ()), ("read", ()), ("readlines", ()),
                   ("readinto", (array("b", b" "*100),))]

        try:
            # Prepare the testfile
            bag = self.open(TESTFN, "wb")
            bag.write(filler * nchunks)
            bag.writelines(testlines)
            bag.close()
            # Test for appropriate errors mixing read* and iteration
            for methodname, args in methods:
                f = self.open(TESTFN, 'rb')
                if next(f) != filler:
                    # BUG FIX: was `self.fail, "Broken testfile"` — a no-op
                    # tuple expression that never reported the failure.
                    self.fail("Broken testfile")
                meth = getattr(f, methodname)
                meth(*args)  # This simply shouldn't fail
                f.close()

            # Test to see if harmless (by accident) mixing of read* and
            # iteration still works. This depends on the size of the internal
            # iteration buffer (currently 8192,) but we can test it in a
            # flexible manner.  Each line in the bag o' ham is 4 bytes
            # ("h", "a", "m", "\n"), so 4096 lines of that should get us
            # exactly on the buffer boundary for any power-of-2 buffersize
            # between 4 and 16384 (inclusive).
            f = self.open(TESTFN, 'rb')
            for i in range(nchunks):
                next(f)
            testline = testlines.pop(0)
            try:
                line = f.readline()
            except ValueError:
                self.fail("readline() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if line != testline:
                self.fail("readline() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            testline = testlines.pop(0)
            buf = array("b", b"\x00" * len(testline))
            try:
                f.readinto(buf)
            except ValueError:
                self.fail("readinto() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            line = buf.tobytes()
            if line != testline:
                self.fail("readinto() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))

            testline = testlines.pop(0)
            try:
                line = f.read(len(testline))
            except ValueError:
                self.fail("read() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if line != testline:
                self.fail("read() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            try:
                lines = f.readlines()
            except ValueError:
                self.fail("readlines() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if lines != testlines:
                self.fail("readlines() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            f.close()

            # Reading after iteration hit EOF shouldn't hurt either
            f = self.open(TESTFN, 'rb')
            try:
                for line in f:
                    pass
                try:
                    f.readline()
                    f.readinto(buf)
                    f.read()
                    f.readlines()
                except ValueError:
                    self.fail("read* failed after next() consumed file")
            finally:
                f.close()
        finally:
            os.unlink(TESTFN)
class COtherFileTests(OtherFileTests):
    # Exercise the C implementation of the io stack.
    open = io.open
class PyOtherFileTests(OtherFileTests):
    # Exercise the pure-Python (_pyio) implementation of the io stack.
    open = staticmethod(pyio.open)
def test_main():
    """Run both the C (io) and pure-Python (_pyio) test-suite variants."""
    # Historically, these tests have been sloppy about removing TESTFN.
    # So get rid of it no matter what.
    try:
        run_unittest(CAutoFileTests, PyAutoFileTests,
                     COtherFileTests, PyOtherFileTests)
    finally:
        if os.path.exists(TESTFN):
            os.unlink(TESTFN)
# Allow running this test module directly.
if __name__ == '__main__':
    test_main()
| [
"thezhangwei@gmail.com"
] | thezhangwei@gmail.com |
94f4897b31040b8abc8c30479a440a8e0af48906 | e793abb16a44eff7b48df2c774883d7a469f2005 | /local_code/adam/test.py | 1b238455a5659ce30f9888884b4d2cf00e25d092 | [] | no_license | Junyinghuang/DS4S_group2 | a25f2fe6d31f66452fc03ad02594c9c15a4f0017 | 2a3bcd0fad858e7ef00d69342dbdd47853525b83 | refs/heads/master | 2022-06-20T23:43:11.420048 | 2020-05-12T07:07:56 | 2020-05-12T07:07:56 | 257,669,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | from local_code.adam.new_parameters import get_new_parameters
def test_parameter_creation():
    '''
    Sanity-check get_new_parameters: with initial guesses of zero, a draw
    more than four sigma from zero would be very surprising, so treat any
    such draw as a failure.
    '''
    sigmas = [1, 2, 3, 4]
    initial_guesses = [0, 0, 0, 0]
    new_guesses = get_new_parameters(sigmas, initial_guesses)
    outliers = sum(
        1 for guess, sigma in zip(new_guesses, sigmas)
        if abs(guess) > 4 * sigma
    )
    assert outliers == 0, "get_new_parameters yields surprising results."
    print('Test of parameter creation complete.')
# whether or not a plot shows up. | [
"apkunesh@ucdavis.edu"
] | apkunesh@ucdavis.edu |
3cd6e2099d0754d1a415a93ab25595f0ada97a68 | 77909a8a93f60759e0fd32fb632d937c7c8d4d68 | /curso_em_video_exercises/desafio10.py | c1c5bc683719ab4335f0251c26bf81cd73603c19 | [] | no_license | euricoteles/python | 13e39bf0b5916b69794dac39dc55a213b5443718 | dae10d87a9923646dd8257a2ce3da91dc355b603 | refs/heads/master | 2021-09-04T14:51:47.147309 | 2018-01-19T16:57:27 | 2018-01-19T16:57:27 | 116,609,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | # Crie um script que leia quanto dinheiro uma pessoa tem na carteira e converter em dolares.
# Amount received from the user (prompt kept in the original Portuguese).
valor = int(input('Qual o valor para converter em dolares:'))
# NOTE(review): the original computed (1.18*valor)/1 — the "/1" was a no-op
# and is removed here. The hard-coded 1.18 factor (and its direction) should
# be confirmed against the intended exchange rate.
dolares = 1.18 * valor
# Show the converted amount.
print('O valor em dolares fica : {}'.format(dolares))
"euriconaz@hotmail.com"
] | euriconaz@hotmail.com |
d01f9d1b57765a72c85ec040eab037e9d12c89bb | ca77e9e45d666771c7b0897e7e3093b3d3c12f65 | /scripts/trigger/add_prices.py | ec79a8be575fe0f59c9b16754b18afc1910a7a29 | [] | no_license | 2gDigitalPost/custom | 46175d3a3fc4c3be21dc20203ff0a48fb93b5639 | 6a3a804ef4ef6178044b70ad1e4bc5c56ab42d8d | refs/heads/master | 2020-04-04T07:40:17.962611 | 2016-12-28T18:35:28 | 2016-12-28T18:35:28 | 39,648,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,857 | py | """
This file was generated automatically from a custom script found in Project -> Script Editor.
The custom script was moved to a file so that it could be integrated with GitHub.
"""
__author__ = 'Topher.Hughes'
__date__ = '04/08/2015'
import traceback
def main(server=None, input=None):
    """
    The main function of the custom script. The entire script was copied
    and pasted into the body of the try statement in order to add some
    error handling. It's all legacy code, so edit with caution.
    :param server: the TacticServerStub object
    :param input: a dict with data like search_key, search_type, sobject, and update_data
    :return: None
    """
    # NOTE(review): the parameter name `input` shadows the builtin; kept for
    # backward compatibility with existing trigger callers.
    if not input:
        input = {}
    try:
        # CUSTOM_SCRIPT00035
        # Matthew Tyler Misenhimer
        # This is used to have the prices on projects trickle up to titles, then orders
        # This is DEPRECATED
        sobj = input.get('sobject')
        # NOTE(review): `sk` is assigned but never used below.
        sk = input.get('search_key')
        # Treat a missing/empty price on the task sobject as zero.
        price_str = sobj.get('price')
        price = 0
        if price_str not in [None,'']:
            price = float(price_str)
        # Add the task price onto its parent project's running total.
        proj = server.eval("@SOBJECT(twog/proj['code','%s'])" % sobj.get('proj_code'))[0]
        current_proj_price_str = proj.get('price')
        current_proj_price = 0
        if current_proj_price_str not in [None,'']:
            current_proj_price = float(current_proj_price_str)
        new_proj_price = current_proj_price + price
        server.update(proj.get('__search_key__'), {'price': new_proj_price})
        # Trickle the same amount up to the project's title...
        title = server.eval("@SOBJECT(twog/title['code','%s'])" % proj.get('title_code'))[0]
        current_title_price_str = title.get('price')
        current_title_price = 0
        if current_title_price_str not in [None,'']:
            current_title_price = float(current_title_price_str)
        new_title_price = current_title_price + price
        server.update(title.get('__search_key__'), {'price': new_title_price})
        # ...and finally up to the title's order.
        order = server.eval("@SOBJECT(twog/order['code','%s'])" % title.get('order_code'))[0]
        current_order_price_str = order.get('price')
        current_order_price = 0
        if current_order_price_str not in [None,'']:
            current_order_price = float(current_order_price_str)
        new_order_price = current_order_price + price
        server.update(order.get('__search_key__'), {'price': new_order_price})
    except AttributeError as e:
        traceback.print_exc()
        print str(e) + '\nMost likely the server object does not exist.'
        raise e
    except KeyError as e:
        traceback.print_exc()
        print str(e) + '\nMost likely the input dictionary does not exist.'
        raise e
    except Exception as e:
        traceback.print_exc()
        print str(e)
        raise e
# Run the trigger entry point only when executed as a script.
if __name__ == '__main__':
    main()
| [
"topher.hughes@2gdigital.com"
] | topher.hughes@2gdigital.com |
41e2a093e82ce2eb956957ab8d320d97248524fb | 1a1a10576c4fabe2879feb00393fe8d3f5211d9b | /todo/settings.py | dfb08b572fb8286e00d5acfcf40919ff7f4a9f4c | [] | no_license | Machele-codez/todo-by-machele | 8a50e7e6f9e560ba40dd7816f1c34324cf4c4f8d | f96903f24a5fdfb587cbf04977b0c2aaf5a92879 | refs/heads/master | 2022-09-13T02:42:36.410948 | 2020-06-03T19:59:33 | 2020-06-03T19:59:33 | 269,172,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,336 | py | """
Django settings for todo project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gk(c+=qm^h6-b-)g%=ej0%kzmgnbwl=k^35uupjpspq)ql^uw2'
# NOTE(review): this key is committed to version control; rotate it and load
# it from an environment variable before any production deployment.

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Hosts this site may serve; must be populated once DEBUG is False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'apps.accounts',
'apps.tasks',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
LOGIN_REDIRECT_URL = 'tasks:all_tasks ' | [
"smithbeblack@gmail.com"
] | smithbeblack@gmail.com |
f4b58e180e2f6563fbc03f28f8137aab6bf58d1a | f9d9afad08272fc077f9809eedb8e920fc0fe883 | /Observation/Task.py | a53536d6b3a8175d44f7420b06921eebde12790a | [] | no_license | firekg/is-it-optimal | 1c640a350d25057cf7b32602467d063e5ea65799 | 202b95ae370db7170a12ee942d6e1f0be29cca43 | refs/heads/master | 2020-05-01T13:31:35.441495 | 2019-10-27T15:34:49 | 2019-10-27T15:34:49 | 177,493,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,205 | py | import numpy
import Observe
import Init
import copy
import Teach
import Learn
# Eq. 6a), 6b)
def Knowledgeability_Task(hypo, feature, label, p_teacher_xy_h, p_teacher_x_h, p_y_xh, delta_g_h, phx, num_iteration):
    """Alternate Eq. 6a (teacher) and Eq. 6b (learner) updates and return
    the learner's posterior table.

    Starts from Learn.Init_step; the p_teacher_* tables are presumably
    updated in place by the Teach/Learn helpers — confirm in those modules.
    """
    p_learner_h_xy = Learn.Init_step(hypo, feature, label, p_y_xh, phx)
    for loop in range(num_iteration):
        # Calculate teacher's table
        Teach.K_PTeacher_xh(hypo, feature, label, p_teacher_xy_h, p_teacher_x_h, p_learner_h_xy, delta_g_h)
        # Calculate learner's table
        Learn.K_PLearner_h_xy(hypo, feature, label, p_y_xh, p_learner_h_xy, p_teacher_x_h, phx)
    return p_learner_h_xy
# hypo_map: The map of the hypothesis
# return: a map from hypothesis to observation * probability
def Probability_Task(hypo_table, number_hypo, number_feature, number_label, p_teacher_x_h, knowledgeability, iter=100):
    """For each candidate true hypothesis, observe every feature once and
    record the learner's belief phx after each observation.

    NOTE(review): the parameter name `iter` shadows the builtin, and
    `p_teacher_x_h` is rebound by Init.Set below, so the passed-in value is
    effectively unused — confirm against the callers.
    """
    feature_set = []
    # New knowledgeability table
    # Axis 1: index of observations
    # Axis 2~3: the delta knowledegeability table
    new_knowledgeability_delta_table = numpy.zeros((number_feature + 1, number_hypo, number_hypo), dtype=float)
    # Assume there is a true hypo = hypo
    # Get all posible hypothesis in the hypo map
    for hypo_idx in range(len(hypo_table)):
        # Get the observable feature set
        for f in range(number_feature):
            feature_set.append(f)
        obs = 0
        # Set the environment
        num_hypo, num_feature, num_label, p_teacher_x_h, p_teacher_xy_h, p_learner_h_xy, p_y_xh, delta_g_h, phx = Init.Set(hypo_table, knowledgeability=knowledgeability)
        while True:
            # Record the learner's current belief before this observation.
            for h in range(number_hypo):
                new_knowledgeability_delta_table[obs][hypo_idx][h] = phx[h]
            # Get the PT
            p_learner_h_xy = Knowledgeability_Task(num_hypo, num_feature, num_label, p_teacher_xy_h, p_teacher_x_h, p_y_xh, delta_g_h, phx, iter)
            # Choose a feature
            feature = Observe.Get_Feature(feature_set, hypo_idx, p_teacher_x_h)
            obs += 1
            prob_find, true_label = Observe.Observe(hypo_table, hypo_idx, feature, p_learner_h_xy)
            # Assign the p_learner_h_xy to phx
            for h in range(number_hypo):
                phx[h] = p_learner_h_xy[h][feature][true_label]
            # remove the feature in the feature set,
            # make the same feature only be observed once
            feature_set.remove(feature)
            if (len(feature_set) == 0):
                # All features observed: record the final belief and stop.
                for h in range(number_hypo):
                    new_knowledgeability_delta_table[obs][hypo_idx][h] = phx[h]
                break
    return new_knowledgeability_delta_table
def Average_Hypo(prob_map, number_hypos, number_observations):
    """Average the per-hypothesis probability curves across hypotheses.

    :param prob_map: mapping hypothesis index -> sequence of probabilities,
        one value per observation count.
    :param number_hypos: divisor used for the average (typically len(prob_map)).
    :param number_observations: number of observation steps to average over.
    :return: list of length number_observations with the mean value at each
        observation count.
    """
    y = []
    for obs in range(number_observations):
        # The original shadowed the builtin `sum` with a local accumulator;
        # use the builtin directly instead.
        total = sum(prob_map[hypo_index][obs] for hypo_index in prob_map)
        y.append(total / number_hypos)
    return y
| [
"noreply@github.com"
] | firekg.noreply@github.com |
ebbf514c6ec2b30cad1b8c147dfef1377c951533 | 82e5c56806e75a51097a51cb721df1ef07376e6f | /initial_freia/fun.py | 6b89a7f268c642c02b65a655544d8257951ff747 | [] | no_license | hahahasan/MSc-pre-process | 77a2fcdf795830d2b37b2d538e3638d29bd55e80 | ba69d58bc1f9dd7becea431ce9a328b2843d8b66 | refs/heads/master | 2020-03-21T05:43:07.501035 | 2018-06-21T14:14:30 | 2018-06-21T14:14:30 | 138,175,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 13 11:06:10 2018
@author: hm1234
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
rng = np.linspace(0.01, np.sqrt(2), 500)   # bases a in (0, sqrt(2)]
rng2 = np.arange(10, 500)                  # tower heights (number of '**a' terms)
max_rng = np.amax(rng2)


def _power_tower(a, n):
    """Return the right-associative power tower a**a**...**a with n+1 a's.

    Replaces the original approach of eval()-ing a generated
    "a**a**...**a" string per grid point, which was slow and unsafe.
    For a <= sqrt(2) < e**(1/e) the tower converges, so no overflow.
    """
    val = a
    for _ in range(n):
        val = a ** val
    return val


hi = []
for j in rng2:
    hi.append([_power_tower(i, j) for i in rng])
    if j % 23 == 0:
        # Rough progress report as a percentage of heights processed.
        print('{0:1.0f}'.format(j/max_rng *100), '%')
print('Done!')
# Build the coordinate grid and render the tower values as a 3-D surface.
X_rng, Y_rng = np.meshgrid(rng2, rng)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# hi is indexed [height][base]; transpose to match the meshgrid's shape.
ax.plot_surface(X_rng, Y_rng, np.array(hi).T)
| [
"hm1234@york.ac.uk"
] | hm1234@york.ac.uk |
cc2cf6e6ef6fb653f7cb8de7cc061e8c3a300512 | 34192b4a3964c6dec6ff34bddc06c3690c02bf98 | /Social/feed/models.py | 23c6456108e74c85bdff44c8203513abb97e3ed5 | [] | no_license | Aditya-23/The-Social | 6806882f8688f577047d79d0a02b3092cf3c4a4b | aea48c2ef816196839cad75f9dced50fa85af6f2 | refs/heads/master | 2022-10-22T11:33:22.849389 | 2019-08-11T19:31:07 | 2019-08-11T19:31:07 | 201,806,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,468 | py | from django.db import models
from django.db.models import Q
# Create your models here.
class Question_query(models.query.QuerySet):
    """QuerySet adding a simple text search over the question field."""
    def search(self, query = None):
        # Case-insensitive substring match; Q allows OR-ing in more fields
        # later without changing callers.
        return self.filter(
            Q(question__icontains = query)
        )
class Search(models.Manager):
    """Manager exposing Question_query's search() on the model."""
    # NOTE(review): the `query` parameter here is unused — get_queryset is
    # called without it below; kept for signature compatibility.
    def get_queryset(self, query = None):
        return Question_query(self.model, using = self._db)
    def search(self, query = None):
        return self.get_queryset().search(query)
class user(models.Model):
    """A site member account for the Q&A feed."""
    # NOTE(review): passwords are stored in plain text here — this should
    # use Django's built-in auth User model / password hashing instead.
    username = models.CharField(max_length = 20, blank = False)
    password = models.CharField(max_length = 20, blank = False)
    firstname = models.CharField(max_length = 40, blank = False)
    lastname = models.CharField(max_length = 40, blank = False)
    email = models.EmailField(max_length = 40, blank = False)
    #search_user = Search()
    def __str__(self):
        return self.username
class Question(models.Model):
    """A question asked by a user."""
    question = models.CharField(max_length = 100, blank = False)
    asked_user = models.ForeignKey('user', on_delete = models.CASCADE)
    # Updated on every save (auto_now), not just on creation.
    datetime = models.DateTimeField(auto_now = True)
    # Default manager plus the custom search manager defined above.
    objects = models.Manager()
    search_question = Search()
    def __str__(self):
        return self.question
class Answer(models.Model):
    """An answer posted by a user to a Question."""
    answer = models.CharField(max_length = 1000, blank = False)
    # Updated on every save (auto_now), not just on creation.
    datetime = models.DateTimeField(auto_now = True)
    question = models.ForeignKey('Question', on_delete = models.CASCADE)
    answered_user = models.ForeignKey('user', on_delete = models.CASCADE)
    def __str__(self):
        # FIX: the final line was corrupted by stray delimiter text fused
        # onto it, which made the file a syntax error.
        return self.answer
"adityamysore002@gmail.com"
] | adityamysore002@gmail.com |
7dfcead14cfcc41518ec35eaa9c96ca9cfbc0be3 | 8fb846f4f4ac5fd417489d731eae8a8a1bdc77c3 | /rllab/misc/console.py | b32d21a249a3d389e0aef97f641591cdb13bb35a | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | zhongwen/rllab | 0a9f9ea2d8995037b83aaae5853a299d5cf9e432 | d8239c05179fcc55d865db7ce933defa3baae24d | refs/heads/master | 2021-01-14T08:36:37.272071 | 2016-08-17T12:29:00 | 2016-08-17T12:29:00 | 65,801,245 | 1 | 1 | null | 2016-08-16T08:18:47 | 2016-08-16T08:18:46 | null | UTF-8 | Python | false | false | 5,514 | py | import sys
import time
import os
import errno
import shlex
import pydoc
import inspect
# ANSI foreground color codes, keyed by human-readable color name.
color2num = {
    'gray': 30,
    'red': 31,
    'green': 32,
    'yellow': 33,
    'blue': 34,
    'magenta': 35,
    'cyan': 36,
    'white': 37,
    'crimson': 38,
}


def colorize(string, color, bold=False, highlight=False):
    """Wrap *string* in ANSI escape codes for the given color name.

    highlight selects the background variant of the color (+10) and bold
    adds the bold attribute. Returns the decorated string.
    """
    code = color2num[color]
    if highlight:
        code += 10
    attrs = [str(code)]
    if bold:
        attrs.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(attrs), string)
def mkdir_p(path):
    """Create *path* like ``mkdir -p``: make intermediate directories and
    silently accept an already-existing directory."""
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        # An existing directory is fine; anything else is re-raised.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def log(s):  # , send_telegram=False):
    # Write the message to stdout and flush immediately so output is not
    # lost when stdout is piped or buffered. (Python 2 print statement.)
    print s
    sys.stdout.flush()
class SimpleMessage(object):
    """Context manager that prints a message on entry and, on exit, logs
    how long the wrapped block took (noting whether it raised)."""
    def __init__(self, msg, logger=log):
        self.msg = msg
        self.logger = logger

    def __enter__(self):
        print self.msg
        # Record the start time so __exit__ can report the elapsed time.
        self.tstart = time.time()

    def __exit__(self, etype, *args):
        # etype is None on a clean exit; anything else means an exception
        # propagated out of the with-block.
        maybe_exc = "" if etype is None else " (with exception)"
        self.logger("done%s in %.3f seconds" %
                    (maybe_exc, time.time() - self.tstart))
# Current nesting depth of active Message context managers; used to indent
# nested messages.
MESSAGE_DEPTH = 0


class Message(object):
    """Context manager printing an indented, colorized message on entry and
    the elapsed time on exit; nesting increases the indentation."""
    def __init__(self, msg):
        self.msg = msg

    def __enter__(self):
        global MESSAGE_DEPTH  # pylint: disable=W0603
        print colorize('\t' * MESSAGE_DEPTH + '=: ' + self.msg, 'magenta')
        self.tstart = time.time()
        MESSAGE_DEPTH += 1

    def __exit__(self, etype, *args):
        global MESSAGE_DEPTH  # pylint: disable=W0603
        MESSAGE_DEPTH -= 1
        maybe_exc = "" if etype is None else " (with exception)"
        print colorize('\t' * MESSAGE_DEPTH + "done%s in %.3f seconds" % (maybe_exc, time.time() - self.tstart), 'magenta')
def prefix_log(prefix, logger=log):
return lambda s: logger(prefix + s)
def tee_log(file_name):
f = open(file_name, 'w+')
def logger(s):
log(s)
f.write(s)
f.write('\n')
f.flush()
return logger
def collect_args():
splitted = shlex.split(' '.join(sys.argv[1:]))
return {arg_name[2:]: arg_val
for arg_name, arg_val in zip(splitted[::2], splitted[1::2])}
def type_hint(arg_name, arg_type):
def wrap(f):
meta = getattr(f, '__tweak_type_hint_meta__', None)
if meta is None:
f.__tweak_type_hint_meta__ = meta = {}
meta[arg_name] = arg_type
return f
return wrap
def tweak(fun_or_val, identifier=None):
if callable(fun_or_val):
return tweakfun(fun_or_val, identifier)
return tweakval(fun_or_val, identifier)
def tweakval(val, identifier):
if not identifier:
raise ValueError('Must provide an identifier for tweakval to work')
args = collect_args()
for k, v in args.iteritems():
stripped = k.replace('-', '_')
if stripped == identifier:
log('replacing %s in %s with %s' % (stripped, str(val), str(v)))
return type(val)(v)
return val
def tweakfun(fun, alt=None):
"""Make the arguments (or the function itself) tweakable from command line.
See tests/test_misc_console.py for examples.
NOTE: this only works for the initial launched process, since other processes
will get different argv. What this means is that tweak() calls wrapped in a function
to be invoked in a child process might not behave properly.
"""
cls = getattr(fun, 'im_class', None)
method_name = fun.__name__
if alt:
cmd_prefix = alt
elif cls:
cmd_prefix = cls + '.' + method_name
else:
cmd_prefix = method_name
cmd_prefix = cmd_prefix.lower()
args = collect_args()
if cmd_prefix in args:
fun = pydoc.locate(args[cmd_prefix])
if type(fun) == type:
argspec = inspect.getargspec(fun.__init__)
else:
argspec = inspect.getargspec(fun)
# TODO handle list arguments
defaults = dict(
zip(argspec.args[-len(argspec.defaults or []):], argspec.defaults or []))
replaced_kwargs = {}
cmd_prefix += '-'
if type(fun) == type:
meta = getattr(fun.__init__, '__tweak_type_hint_meta__', {})
else:
meta = getattr(fun, '__tweak_type_hint_meta__', {})
for k, v in args.iteritems():
if k.startswith(cmd_prefix):
stripped = k[len(cmd_prefix):].replace('-', '_')
if stripped in meta:
log('replacing %s in %s with %s' % (stripped, str(fun), str(v)))
replaced_kwargs[stripped] = meta[stripped](v)
elif stripped not in argspec.args:
raise ValueError(
'%s is not an explicit parameter of %s' % (stripped, str(fun)))
elif stripped not in defaults:
raise ValueError(
'%s does not have a default value in method %s' % (stripped, str(fun)))
elif defaults[stripped] is None:
raise ValueError(
'Cannot infer type of %s in method %s from None value' % (stripped, str(fun)))
else:
log('replacing %s in %s with %s' % (stripped, str(fun), str(v)))
# TODO more proper conversions
replaced_kwargs[stripped] = type(defaults[stripped])(v)
def tweaked(*args, **kwargs):
all_kw = dict(zip(argspec[0], args) +
kwargs.items() + replaced_kwargs.items())
return fun(**all_kw)
return tweaked
| [
"dementrock@gmail.com"
] | dementrock@gmail.com |
d34828fbd987211cc81fd1989af985a0e9374a74 | 128104139a52489f21df67918fae50b647f27cbe | /printswapneg.py | e3d6c3796765ea2755a55619b285b3480622b3fb | [] | no_license | AdamRichey/Python | aabb254742cf46c4d958b8aa55c26bdca100cdde | 00f1be1e235f6f791c624e3496b78d8863355e81 | refs/heads/master | 2020-03-23T05:54:10.976815 | 2018-07-16T18:15:12 | 2018-07-16T18:15:12 | 141,174,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | arr=[-1]
def neg(arr):
for i in arr:
if arr[i]<0:
arr[i]="dojo"
print arr
print neg(arr) | [
"adamrichey88@gmail.com"
] | adamrichey88@gmail.com |
de9ceaa3537c1f1edf2a30fedb2a4f538e0eec02 | a2efa9d89a721aae5016280ca166caffab97e94f | /exercices_EDX_W1.py | 44e668445b4b6b51f8543dd707285978c0d16491 | [] | no_license | samthib/python_edx | 3dc8378c92092eedfb70b0890ee801465e3a4cd9 | 2970745f6671a0d314b2cd32261928c7a55da3f9 | refs/heads/master | 2022-07-04T19:00:19.037593 | 2020-05-05T20:54:57 | 2020-05-05T20:54:57 | 258,567,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,555 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 20 21:10:42 2020
@author: Sam
"""
##--------------------------------##
#Exercice 1
import string
alphabet = string.ascii_letters
sentence = 'Jim quickly realized that the beautiful gowns are expensive'
count_letters = {}
for i in range(len(sentence)):
if sentence[i] in count_letters:
count_letters[sentence[i]] += 1
else:
count_letters[sentence[i]] = 1
def counter(input_string):
alphabet = string.ascii_letters
for i in range(len(input_string)):
if input_string[i] in alphabet:
if input_string[i] in count_letters:
count_letters[input_string[i]] += 1
else:
count_letters[input_string[i]] = 1
return count_letters
#print(counter(sentence))
address = """Four score and seven years ago our fathers brought forth on this continent, a new nation,
conceived in Liberty, and dedicated to the proposition that all men are created equal. Now we are engaged in a
great civil war, testing whether that nation, or any nation so conceived and so dedicated, can long endure.
We are met on a great battle-field of that war. We have come to dedicate a portion of that field, as a final
resting place for those who here gave their lives that that nation might live. It is altogether fitting and proper
that we should do this. But, in a larger sense, we can not dedicate -- we can not consecrate -- we can not hallow --
this ground. The brave men, living and dead, who struggled here, have consecrated it, far above our poor power to add
or detract. The world will little note, nor long remember what we say here, but it can never forget what they did here.
It is for us the living, rather, to be dedicated here to the unfinished work which they who fought here have thus far so
nobly advanced. It is rather for us to be here dedicated to the great task remaining before us -- that from these honored
dead we take increased devotion to that cause for which they gave the last full measure of devotion -- that we here
highly resolve that these dead shall not have died in vain -- that this nation, under God, shall have a new birth of
freedom -- and that government of the people, by the people, for the people, shall not perish from the earth."""
address_count = counter(address)
#print(address_count)
count_letters_max = max(address_count, key=address_count.get)
#print(count_letters_max)
##--------------------------------##
#Excercie 2
import math
#print(math.pi/4)
import random
random.seed(1) # Fixes the see of the random number generator.
def rand():
return random.uniform(-1, 1)
rand()
def distance(x, y):
diff_x = y[0]-x[0]
diff_y = y[1]-x[1]
distance = math.sqrt(diff_x**2 + diff_y**2)
return distance
x=(0,0)
y=(1,1)
distance(x, y)
def in_circle(x, origin = [0,0]):
radius = distance(x, origin)
if radius < 1:
return True
else:
return False
in_circle((1,1))
random.seed(1)
R=10000
inside = []
count_true = 0
for i in range(R):
point = in_circle((rand(),rand()))
inside.append(point)
if point:
count_true += 1
#print(count_true / R)
difference = (math.pi / 4) - (count_true / R)
#print(difference)
##--------------------------------##
# Exercice 3
"""
Corection
def moving_window_average(x, n_neighbors=1):
n = len(x)
width = n_neighbors*2 + 1
x = [x[0]]*n_neighbors + x + [x[-1]]*n_neighbors
return [sum(x[i:(i+width)]) / width for i in range(n)]
x = [0,10,5,3,1,5]
#print(sum(moving_window_average(x, 1)))
"""
def moving_window_average(x, n_neighbors=1):
width = n_neighbors*2 + 1
x = [x[0]]*n_neighbors + x + [x[-1]]*n_neighbors
n = len(x)
list_x=[]
for i in range(n_neighbors,n-n_neighbors):
sum_x=0
for j in range(-n_neighbors, n_neighbors+1):
sum_x = sum_x + x[i+j]
mean = sum_x / width
list_x.append(mean)
return list_x
x = [0,10,5,3,1,5]
#print(moving_window_average(x, 1))
#print(sum(moving_window_average(x, 1)))
R = 1000
Y = []
x = []
ranges = []
random.seed(1)
for i in range(R):
x.append(random.uniform(0,1))
for i in range(1, 10):
Y.append(moving_window_average(x, i))
for i in range(9):
ranges.append(max(Y[i])-min(Y[i]))
#print(Y[5][9])
#print(ranges)
| [
"noreply@github.com"
] | samthib.noreply@github.com |
9f800cece6947820a5e6c1324c4b395ea3efdf40 | a1c397a8b4dccfbef8b91a67d6910a58ff6aa98e | /02-19-Cuma/tryExcept.py | 13bb01e466c735f27fe0a613acc049813ea2ed45 | [
"MIT"
] | permissive | hhsalik/staj | f0c9a17064c444fa084a102e4c36050ff62fdb4e | d0ee95d5e77a7d7a1f16611d49c87be429a25b31 | refs/heads/master | 2023-04-29T15:17:17.928023 | 2021-05-10T19:25:35 | 2021-05-10T19:25:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | try:
answer = 10 / 0
number = int(input("Enter a number: "))
print(number)
except ZeroDivisionError as err:
print(err)
except ValueError:
print("invalid input") | [
"cihat02020202@gmail.com"
] | cihat02020202@gmail.com |
80aa265104445e40404828f18f155d2af0bd25b2 | c2285444392e8eb1904255b82f62b49f317aca07 | /scripts/load_tags.py | 59a372e1337682c26b6ce0ab3ed0e6c41f62fb28 | [] | no_license | tlskr/tagger | ced0ed36437bb29fe488eb2fae8b03314c5a9558 | 1230a1f36b91bd7ef2d57840dcfa013ca07e5a4a | refs/heads/master | 2022-12-16T00:44:36.798298 | 2018-08-17T13:23:02 | 2018-08-17T13:26:37 | 145,027,493 | 0 | 0 | null | 2022-12-08T02:46:37 | 2018-08-16T18:55:02 | Python | UTF-8 | Python | false | false | 545 | py | #!/usr/bin/env python
# pylint: disable=wrong-import-position
"""
Script loading tags from JSON file to data
Invocation (from project root)
./scripts/load_tags.py
"""
import os
import sys
import gflags
sys.path.append(os.getcwd())
from scripts.main_gflag import main_gflagged
from tagger.load_json import insert_tags
FLAGS = gflags.FLAGS
gflags.DEFINE_string(
"datafile", None, "file holding json data"
)
def main():
insert_tags(FLAGS.datafile)
if __name__ == "__main__":
sys.exit(main_gflagged(sys.argv, main))
| [
"gordon@practicalhorseshoeing.com"
] | gordon@practicalhorseshoeing.com |
0aca43944e8543cb2dde41c31bfa4b1db2c4dc93 | 969be4b7959617a4def52267595ed22a67caeaaa | /wsgi/closetBackend/Invetory/views.py | f0b1837c8705c2e6196fc3f5e170a4867ea44c0a | [] | no_license | pjryan93/closet | 0589d493958e5f9a63c760ebc4c52588622da913 | 04dcf33f0991c547cc27b7214a81ab0d3f149ff3 | refs/heads/master | 2021-01-10T06:15:36.359576 | 2016-03-20T22:02:52 | 2016-03-20T22:02:52 | 54,337,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,952 | py | from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic import TemplateView
from django.template.context_processors import csrf
from django.shortcuts import render_to_response
from django.template import Context
from django.template.loader import get_template
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.contrib.auth import logout
from django.views.decorators.csrf import csrf_protect
from django.contrib.auth.decorators import user_passes_test
from django.conf import settings
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import TokenAuthentication
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.authtoken.models import Token
from models import *
from serializers import *
from rest_framework.renderers import JSONRenderer
import base64, uuid
import cStringIO
import sys
from django.core.files.base import ContentFile
from base64 import b64decode
from django.core.files.images import ImageFile
class HomeView(APIView):
authentication_classes = (TokenAuthentication,)
def post(self,request,format=None):
return Response({'detail': "I suppose you are authenticated"})
def get(self,request,format=None):
print request.user
return Response({'detail': "I suppose you are authenticated"})
class CreateCloset(APIView):
authentication_classes = (TokenAuthentication,)
def get(self,request,format=None):
closets = Closet.objects.filter(owner=request.user)
serializer = ClosetSerializer(closets, many=True)
json = JSONRenderer().render(serializer.data)
defaults = DefaultAmounts.objects.filter(current_closet=closets[0].id)
serializer = ClosetDefaultsSerializer(defaults,many=False)
json_defaults = JSONRenderer().render(serializer.data)
print 'here'
return Response({'closets': json,'defaults':json_defaults})
def post(self,request,format=None):
user = request.user
print request.data
print request.data['name']
print 'done'
if 'name' in request.data and 'gender' in request.data and 'age' in request.data:
closetName = request.data['name']
age = request.data['age']
gender = request.data['gender']
cleaned_gender= 'Male'
if gender == "Male":
cleaned_gender = "M"
elif gender == "Female":
cleaned_gender = "F"
if 'closet_id' in request.data:
current_closet = Closet.objects.get(id = request.data["closet_id"])
current_closet.name = closetName
current_closet.age = age
current_closet.sex = gender
current_closet.save()
return Response({'success': "updated",'name':closetName,'id':current_closet.id})
elif Closet.objects.filter(owner=request.user,name = closetName).count() == 0:
new_closet = Closet(owner = request.user,name = closetName,age = age,sex = cleaned_gender)
new_closet.save()
defaults = DefaultAmounts(current_closet =new_closet )
defaults.save()
sizes = DefaultSizes(current_closet=new_closet)
sizes.setAll(age)
print 'created'
return Response({'success': "created",'name':closetName,'id':new_closet.id})
else:
return Response({'failure':'You have a closet with this name'})
return Response({'failure':'not created'})
class ClosetItem(APIView):
authentication_classes = (TokenAuthentication,)
def get(self,request,format=None):
print request.body
print request.user
closets = Closet.objects.filter(owner=request.user)
if len(closets) == 0:
defaults = DefaultAmounts()
serializer = ClosetDefaultsSerializer(defaults,many=False)
json = JSONRenderer().render(serializer.data)
return Response({'closets': json,'message':'no closets'})
else:
return self.getResponseNoId(request)
if 'id' in request.GET:
closets = Closet.objects.filter(owner=request.user)[0]
serializer1 = ClosetSerializer(closets, many=False)
json_defaults = JSONRenderer().render(serializer1.data)
defaults = DefaultAmounts.objects.filter(current_closet=closets.id)
serializer = ClosetDefaultsSerializer(defaults,many=True)
json = JSONRenderer().render(serializer.data)
clothing_items = ClothingItem.objects.filter(current_closet=closets.id)
item_serializer = ItemSerializer(clothing_items,many=True)
item_json = JSONRenderer().render(item_serializer.data)
size_defaults= DefaultSizes.objects.filter(current_closet=closets.id)
if len(size_defaults) == 0:
default_size = DefaultSizes(current_closet = closets)
default_size .save()
size_defaults= DefaultSizes.objects.filter(current_closet=closets.id)
size_serializer = ItemSizeSerializer(size_defaults,many=True)
size_json = JSONRenderer().render(size_serializer.data)
print 'size_json'
print size_json
return Response({'closets': json, 'defaults':json_defaults,'items':item_json,'sizes':size_json,'message':'success'})
def getResponseNoId(self,request):
closets = Closet.objects.filter(owner=request.user)[0]
serializer1 = ClosetSerializer(closets, many=False)
json_defaults = JSONRenderer().render(serializer1.data)
defaults = DefaultAmounts.objects.filter(current_closet=closets.id)
serializer = ClosetDefaultsSerializer(defaults,many=True)
json = JSONRenderer().render(serializer.data)
clothing_items = ClothingItem.objects.filter(current_closet=closets.id)
item_serializer = ItemSerializer(clothing_items,many=True)
item_json = JSONRenderer().render(item_serializer.data)
size_defaults= DefaultSizes.objects.filter(current_closet=closets.id)
if len(size_defaults) == 0:
default_size = DefaultSizes(current_closet = closets)
default_size .save()
size_defaults= DefaultSizes.objects.filter(current_closet=closets.id)
size_serializer = ItemSizeSerializer(size_defaults,many=True)
size_json = JSONRenderer().render(size_serializer.data)
print 'size_json'
print size_json
return Response({'closets': json, 'defaults':json_defaults,'items':item_json,'sizes':size_json,'message':'success'})
def post(self,request,format=None):
item_name = request.data['name']
item_type = request.data['type']
item_size = request.data['size']
closet_id = request.data['closet_id']
photoDataString = request.data['photoData']
image_output = cStringIO.StringIO()
image_output.write(photoDataString.decode('base64'))
image_output.read()
image_output.seek(0) # Write decoded image to buffer
current_closet = Closet.objects.get(id=closet_id)
x = ClothingItem(name=item_name,clothing_type = item_type,size=item_size,current_closet=current_closet)
file_name = str(x.id) + '.png'
image_data = b64decode(photoDataString)
uploadedImage = ContentFile(image_data,file_name)
print uploadedImage
x.save()
x.item_image = uploadedImage
x.save()
print x.id
return Response({'failure': 'no id' }) | [
"pjryan@my.okcu.edu"
] | pjryan@my.okcu.edu |
2566347edc4da7664cffa99c4c72f58678e8c26a | 648384ac8ff1e1a414c41c1ed19c6d54d7f1aeb2 | /pygame_basic/4_keyboard_event.py | 98eb5facda5d2b3900695aa414f57d47e2adf207 | [] | no_license | Online-abayss/-- | dadb65b372ed7c39c391bc0a85355a314028ce19 | 4ac61ad20167a784419365317d61722d5bbddf3b | refs/heads/main | 2023-08-03T13:13:51.623181 | 2021-09-23T08:42:10 | 2021-09-23T08:42:10 | 397,529,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,212 | py | import pygame
from pygame.constants import K_LEFT, K_RIGHT
pygame.init() # 초가화작업 (무조건 필수) (클래스 정의할떄도 self.init 하는것처럼 그런듯)
# 화면 크기 설정
screen_width = 480 ## 가로크기
screen_height = 640 ## 세로크기
screen = pygame.display.set_mode((screen_width,screen_height)) ## 게임 화면 크기 설정
# 화면 타이틀 설정 (2. background 시작)
pygame.display.set_caption("Test Game") # 게임 타이틀 제작
# 배경 이미지 불러오기
background = pygame.image.load("C:\\Users\\kang\\Desktop\\Pythonworkspace\\pygame_basic\\background.png")
# 캐릭터(스프라이트) 불러오기 (3.man sprite 시작)
character = pygame.image.load("C:\\Users\\kang\\Desktop\\Pythonworkspace\\pygame_basic\\character.png")
character_size = character.get_rect().size ## 캐릭터의 이미지의 가로 및 세로 크기값을 알수있음.
character_width = character_size[0] # 캐릭터의 가로 크기
character_heigth = character_size[1] # 캐릭터의 세로 크기
# 캐릭터 움직임의 관한 좌표를 설정
# 좌표는 11시 꼭짓점 기준으로 0,0을 잡고 우측 밑으로 증가한다.
# y좌표를 바로 밑에껏처럼 하면 캐릭터가 안보인다. 왜냐하면 캐릭터도 마찬가지로 좌표는 11시 꼭짓점을 기준으로 잡아주기에
# 캐릭터의 크기를 생각하고 그만큼 위로 올려서 보이게 해야한다. 또한 중앙으로 캐릭터를 옮기고 싶으면 그냥 화면 가로/2가 아닌 캐릭터 크기의 절반만큼 더 왼쪽으로 옮겨야한다.
character_x_pos = (screen_width/2) -(character_width/2)# x위치를 설정
character_y_pos = screen_height - character_heigth# Y위치를 설정
# 이동 할 좌표
to_x = 0
to_y = 0
# 이벤트 루프
# 키보드 입력에 따른 이동 여부 설정(4. keyboard_event 시작)
running = True # 게임이 계속 진행중인지? 파악
while running:
for event in pygame.event.get(): # 키보드 및 마우스 입력이 들어올경우 그 값에 대응으로 처리 (이벤트 발생 여부)
if event.type == pygame.QUIT: ## 1시 방향 X 표시의 창끄기 표시 명령어
running = False ## 내가 실수로 = 한개만 할걸 두개로 해서 확정이 아닌 조건으로 되서 무한루프로 빠져나오지 못했음.
if event.type == pygame.KEYDOWN: #키가 눌려졌는지 확인
if event.key == pygame.K_LEFT: # 캐릭터를 좌측으로 이동
to_x -= 2
elif event.key == pygame.K_RIGHT: # 캐릭터를 우측으로 이동
to_x += 2
elif event.key == pygame.K_UP: # 캐릭터를 위로 이동
to_y -= 2
elif event.key == pygame.K_DOWN: # 캐릭터를 밑으로 이동
to_y += 2
if event.type == pygame.KEYUP: # 키보드를 때면 멈추기.
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
to_x = 0
elif event.key == pygame.K_UP or event.key == pygame.K_DOWN:
to_y = 0
character_x_pos += to_x
character_y_pos += to_y
# 화면 밖으로 넘어가는걸 방지
if character_x_pos < 0:
character_x_pos = 0
elif character_x_pos > screen_width - character_width: # 우측 끝 - 캐릭터 넓이만큼
character_x_pos = screen_width - character_width
if character_y_pos < 0:
character_y_pos = 0
elif character_y_pos > screen_height - character_heigth: # 스크린 맨밑 - 캐릭터 높이만큼
character_y_pos = screen_height - character_heigth
screen.blit(background, (0,0)) #배경 그리기 ## 여기까지만 하면 반영을 하지 않는다.
#rgb값을 이용하여 배경을 넣을수 있다.
#screen.fill((0,0,255)
screen.blit(character, (character_x_pos,character_y_pos)) # 캐릭터 그리기 및 위치 설정한 값으로 지정
pygame.display.update() # 매 프레임마다 배경을 그려줘야 하기에 설정함
# 게임 종료
pygame.quit()
| [
"noreply@github.com"
] | Online-abayss.noreply@github.com |
7a1df63cd632b5b6f4ccaeaeee6eff6164e582d7 | bffcfa6103ee72d7ac394c14aa861e60616c7ab8 | /pytorch3d/datasets/__init__.py | 1687213018a29e5d75a4c5490368d52e5f4d893a | [
"BSD-3-Clause"
] | permissive | Amit2016-17/pytorch3d | ccac686bc1a3caeb4bd0f38519fbcb83f816501d | 7944d24d4872bdb01b821450840049e28d0ce12b | refs/heads/master | 2022-11-25T10:40:14.409087 | 2020-08-05T13:58:53 | 2020-08-05T14:00:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .r2n2 import R2N2, BlenderCamera
from .shapenet import ShapeNetCore
from .utils import collate_batched_meshes
__all__ = [k for k in globals().keys() if not k.startswith("_")]
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
1c125d93dacd44efb23a7c8db2107c3a5838f0bb | 25ac403b85c141bde644f92785d086b2049ccbff | /数据/utf8csv2excel.py | 5f12f35818e9db1abf5c1b20b9a128038025af4c | [] | no_license | void126rlz/SHUJRA | 62ac47313a2c42dc439cb027a73376bfbe116ec4 | b7451af92df4bfdd80e71a2e7cbab7eecfb28f23 | refs/heads/main | 2023-07-17T06:26:01.119303 | 2021-09-07T13:39:25 | 2021-09-07T13:39:25 | 379,532,808 | 1 | 1 | null | 2021-07-11T08:42:34 | 2021-06-23T08:28:06 | Jupyter Notebook | UTF-8 | Python | false | false | 1,797 | py | import os # sys
import pandas as pd
home = r'D:\tfp\project\新致软件\3岗位推荐算法\from新致\数据'
fileNamesCSV = os.listdir(home + r'\历史18天的数据') # \*.csv
print(fileNamesCSV)
# !dir D:\tfp\project\新致软件\3岗位推荐算法\from新致\数据\utf8
fileNames = [s[0:-4] for s in fileNamesCSV]
print(fileNames)
# InvalidWorksheetName: Excel worksheet name 'ttyc_personel_educational_experience' must be <= 31 chars.
# print(len('ttyc_personel_educational_experience'))
# [len(f) for f in fileNamesCSV] # <34-3=31
# [18, 14, 18, 41, 24, 37, 34, 17, 23]
[len(f) for f in fileNames]
fileNames=[
# 'ttyc_candidate',
# 'ttyc_label',
# 'ttyc_personnel',
'ttyc_personnel_educational_experience',
'ttyc_personnel_label',
'ttyc_personnel_project_experience',
'ttyc_personnel_work_experience',
'ttyc_position',
'ttyc_position_label'
]
for f in fileNames:
print(f)
df= pd.read_csv(home + '\\历史18天的数据\\' + f + '.csv', error_bad_lines=False )
df.to_excel(home + '\\excel18\\' + f + '.xlsx', sheet_name=f[5:36],index=False)
# f = 'ttyc_personnel'
df= pd.read_csv(home + '\\历史18天的数据\\ttyc_personnel-tfp.csv', error_bad_lines=False )
df.to_excel(home + '\\excel18\\ttyc_personnel-tfp.xlsx', sheet_name='ttyc_personnel',index=False)
# ;号分隔的cvs文件
fileNames2 = ['ttyc_position', 'ttyc_position_label']
for f in fileNames2:
print(f)
df= pd.read_csv(home + '\\utf8\\' + f + '.csv', sep=';') # quotechar='"',
df.to_excel(home + '\\excel\\' + f + '.xlsx', sheet_name=f,index=False)
f = 'ttyc_personnel_project_experienceT'
df= pd.read_csv(home + '\\utf8\\' + f + '.csv')
df.to_excel(home + '\\excel\\' + f + '.xlsx', sheet_name=f[5:36],index=False)
| [
"noreply@github.com"
] | void126rlz.noreply@github.com |
da731a7f36e0c4abfe56e401d84517f945860e52 | 14d5bccb090070fc212651d017b71a1f2c6fadce | /mysite/myapi/models.py | d29e391516d08b76c141036c24658c027c6e2666 | [] | no_license | ryyvntong/SuperheroAPI | 223c78abd0a46a645ce36a871a7df5008dac6d02 | 6dd2e63a44bf7fc251abacd90cf3782fafb24e9d | refs/heads/master | 2020-12-31T21:32:41.788871 | 2020-02-07T22:02:36 | 2020-02-07T22:02:36 | 239,028,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | from django.db import models
# Create your models here.
class Hero(models.Model):
name = models.CharField(max_length=60)
alias = models.CharField(max_length=60)
def __str__(self):
return self.name
| [
"56896048+ryyvntong@users.noreply.github.com"
] | 56896048+ryyvntong@users.noreply.github.com |
7aaee542c16692bd7e6bafbc73a9debf04054e3d | 504dbb060f00d373278f2f210af39fb89a27916b | /karyawan.py | 5dad1d91ebbe692dd9127b5753080694a65460bd | [] | no_license | Aldidwi53/projek-PBO | e56f1b764c632f6d59a550feadac3583b35e1345 | 7232feb29e5e81e4e82241105bfd41b85ec6d21b | refs/heads/main | 2023-02-06T02:01:53.925205 | 2021-01-02T09:09:28 | 2021-01-02T09:09:28 | 324,373,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | import user
import sqlite3
class Karyawan(user.User):
def __init__(self, email, password, nama, gender, alamat, telepon):
super().__init__(email, password, nama, gender, alamat, telepon) | [
"noreply@github.com"
] | Aldidwi53.noreply@github.com |
feee22f88d28bb23ee6a97c99f3208c769d58972 | dc569d08a025447e5386abf0388c99d45bdbfcf1 | /utils.py | 6cc2d12ab1651734c79094fcbdcdd4f3dd2f23ad | [] | no_license | samuelBB/Translating-OOV-Words-Via-Images | cf992d1f27f10352fee61bc89a60ac0bc2d91a9f | 5ae8f3b89786a0813e93b619189e181de8a955dd | refs/heads/master | 2020-05-30T10:08:45.056985 | 2019-05-31T22:37:25 | 2019-05-31T22:37:25 | 189,665,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,088 | py | """
misc utility functions
"""
import re
import os
import errno
import pickle
import random
import logging
import argparse
import datetime as dt
RANDOM_SEED = 2018
### logging
class UpToLevel(object):
def __init__(self, lvl=logging.FATAL):
self.lvl = lvl
def filter(self, record):
return record.levelno <= self.lvl
ROOT = '*'
def init_logging(file=None, stdout=False, stderr=False,
lo_lvl=logging.DEBUG, hi_lvl=logging.FATAL,
file_lo_lvl=None, stdout_lo_lvl=None, stderr_lo_lvl=None,
file_hi_lvl=None, stdout_hi_lvl=None, stderr_hi_lvl=None,
fmt='[%(asctime)s|%(levelname)s|%(module)s'
'.%(funcName)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d_%H:%M:%S',
mode='w'):
logger = logging.getLogger(ROOT)
if is_(lo_lvl):
logger.setLevel(lo_lvl)
if is_(hi_lvl):
logger.addFilter(UpToLevel(hi_lvl))
for name, obj, args, prefix in [
('stdout', stdout, [logging.sys.stdout], 'Stream'),
('stderr', stderr, (), 'Stream'),
( 'file', file, (file, mode), 'File')
]:
if obj:
handler = getattr(logging, prefix + 'Handler')(*args)
handler.setFormatter(logging.Formatter(fmt, datefmt))
lo, hi = locals()[name+'_lo_lvl'], locals()[name+'_hi_lvl']
if is_(lo):
handler.setLevel(lo)
if is_(hi):
handler.addFilter(UpToLevel(hi))
logger.addHandler(handler)
if name == 'file':
return handler
def main_module_name(name, ext=True):
if name == '__main__':
try:
main_file = __import__(name).__file__
name_and_ext = main_file[main_file.rfind('/')+1:]
if ext:
return name_and_ext[:name_and_ext.rfind('.')]
return name_and_ext
except:
pass
return name
def get_logger(name, main=False):
name = main_module_name(name) if main else name
return logging.getLogger(ROOT + '.' + name)
### timing
def time_stamp(fmt='%Y-%-m-%-d_%-H-%-M-%-S'):
return dt.datetime.now().strftime(fmt)
### io
def write_lines(iterable, path):
with open(path, 'w') as io:
for item in iterable:
print(item, file=io)
def read_lines(path):
with open(path) as io:
return [line.strip() for line in io]
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def get_path_elems_unix(path, i, j='', delim='_'):
elems = re.sub('//+', '/', path).strip('/').split('/')
return elems[i] if j == '' or i == j else delim.join(elems[i:j])
def load(path, method=pickle):
with open(path, 'rb') as f:
return method.load(f)
def dump(obj, path, method=pickle, **kw):
if not kw and method.__name__ in ('pickle', 'dill'):
kw = dict(protocol=-1)
with open(path, 'wb') as f:
method.dump(obj, f, **kw)
def scandir_r(path):
for entry in os.scandir(path):
if entry.is_dir(follow_symlinks=False):
yield from scandir_r(entry.path)
else: yield entry
def mapread(path, f):
with open(path) as io:
yield from map(f, io) if f else io
### convenience
def is_(x):
return x is not None
def dedupe(it):
s = set()
for el in it:
if el not in s:
s.add(el)
yield el
### parsing
def arg(*ar, **kw):
return ar, kw
def strip_attrs(opts, *attrs):
for attr in attrs:
yield getattr(opts, attr)
delattr(opts, attr)
def parse_args(*args, strip=None):
parser = argparse.ArgumentParser()
for ar, kw in args:
parser.add_argument(*ar, **kw)
opts = parser.parse_args()
if is_(strip):
return (opts, *strip_attrs(opts, *strip))
return opts
### sampling
_RNG = random.Random(RANDOM_SEED)
def sample(lst, n=None):
return _RNG.sample(lst, n or len(lst)) | [
"baldsammy@gmail.com"
] | baldsammy@gmail.com |
3010b06f90027e9d8277db75879d2a23679e061c | 934107eaba17b352bf7bf3a9c0a45af4f263fd54 | /favorites/manager.py | 5106bb9da8161cbc63e045832e3288fd4baca22e | [] | no_license | quentin338/Purebeurre-p8 | d0324f0fbc7e96a1418b367ea41ef3f4f51cc437 | 15bb4192df331d790ef28140e65213ac604cf96e | refs/heads/master | 2023-08-07T22:25:10.975870 | 2023-07-26T10:21:04 | 2023-07-26T10:21:04 | 207,123,046 | 0 | 0 | null | 2023-07-26T10:21:06 | 2019-09-08T14:17:16 | CSS | UTF-8 | Python | false | false | 310 | py | from django.db import models
from django.db.utils import IntegrityError
class FavoriteManager(models.Manager):
def is_favorite(self, user, ancient_product, new_product):
return bool(self.filter(user=user, ancient_product=ancient_product,
new_product=new_product))
| [
"quentin.bertrand@yahoo.fr"
] | quentin.bertrand@yahoo.fr |
670f081b1ef8d14da851e983a53c42b304f46728 | ace2dc6096eb0b7a540f28e57df8459adafad6ed | /Advanced Algorithms and Complexity/Programming-Assignment-4/circuit_design/circuit_design.py | e2ec8b4f2aabddcc3f0a3899603eacd626bc5794 | [] | no_license | tdslivensky/AlgorithmsAndDataStructures | 6ad2c28204600b1f8f72228c13d29d2c3c9437c9 | e8b1011ab5210bc52854f911e2a7e41a83b36740 | refs/heads/master | 2023-01-11T16:32:49.399654 | 2020-11-13T13:49:18 | 2020-11-13T13:49:18 | 289,050,279 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | # python3
n, m = map(int, input().split())
clauses = [ list(map(int, input().split())) for i in range(m) ]
# This solution tries all possible 2^n variable assignments.
# It is too slow to pass the problem.
# Implement a more efficient algorithm here.
def isSatisfiable():
for mask in range(1<<n):
result = [ (mask >> i) & 1 for i in range(n) ]
formulaIsSatisfied = True
for clause in clauses:
clauseIsSatisfied = False
if result[abs(clause[0]) - 1] == (clause[0] < 0):
clauseIsSatisfied = True
if result[abs(clause[1]) - 1] == (clause[1] < 0):
clauseIsSatisfied = True
if not clauseIsSatisfied:
formulaIsSatisfied = False
break
if formulaIsSatisfied:
return result
return None
result = isSatisfiable()
if result is None:
print("UNSATISFIABLE")
else:
print("SATISFIABLE")
print(" ".join(str(-i-1 if result[i] else i+1) for i in range(n)))
| [
"tdslivensky@gmail.com"
] | tdslivensky@gmail.com |
6ec1b92adbf29c397050d278e8c4ddd379f0e719 | 874c45e64e28ec63829b22738c3e7744dac1aeb7 | /test/rtt/utils.py | 8edbf72b300db2b646aba48fc6af9dc0080c5cc6 | [] | no_license | zenokoller/rtt-timestamp-vs-spinbit | 75372d04a29dc93c6161d61516d0cdb72e684bfc | a2aedc47dd8c48cdf9771e8c747dff422f97fa31 | refs/heads/master | 2020-03-20T18:32:38.876614 | 2018-07-06T15:39:14 | 2018-07-06T15:39:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | import pandas as pd
def load_dataframe(path: str, key: str) -> pd.DataFrame:
    """Return the DataFrame stored under ``key`` in the HDF5 file at ``path``."""
    store = pd.HDFStore(path)
    try:
        return store[key]
    finally:
        # Equivalent to the context-manager form: always release the file handle.
        store.close()
| [
"zeno.koller@gmail.com"
] | zeno.koller@gmail.com |
6128440a21b6bad0591564200e67f0c7ab7f0018 | a219c9b0f3ccd1b35c3bb7bb3c7b50e1d9d8ef93 | /d002_1_for_dongusu.py | 9910b109c6ddd3f3d306143754c0ebd1ef137785 | [] | no_license | f0xmulder/python_ornekleri | 3293541b5d4e594dc39e6df623e47ecd4e5e94c2 | d1ebbcefdd7390a4e20a61864b150097f9919e29 | refs/heads/master | 2022-11-04T07:12:20.766931 | 2017-06-22T13:30:45 | 2017-06-22T13:30:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
For Döngüsü
cok satirli
aciklama
Uşak Universitesi
"""
# tek satır aciklama
import numpy
print "Program Baslangıcı"
print "0-10 arası 1.7 artarak"
elemanlar=numpy.arange(0,10,1.7)
print "Sayı\t(3)\t(5)\t(7)"
for e in elemanlar:
print e,"\t", e%3,"\t", e%5,"\t", e%7
print "Herhangi bir liste"
elemanlar=[3,8,7.2,85]
print "Sayı\t(3)\t(5)\t(7)"
for e in elemanlar:
print e,"\t", e%3,"\t", e%5,"\t", e%7
print "Program Sonu"
| [
"noreply@github.com"
] | f0xmulder.noreply@github.com |
0b8e26a5b14106ea4c7a0fcf55baed32e47f43de | 06677c398ea51e3bf78dc45db8cdf97a2b2a296b | /table-cards/cardgen.py | 773492e4f3acd63bb1b3f1079022250713136a9b | [] | no_license | DoESLiverpool/Liverpool-Makefest-2017 | ac0e9df9a8b7fdb68d70d81985690ce25192d856 | 5590cb5f58a3aa403d49c54a6e25ec5e85358b10 | refs/heads/master | 2021-01-21T19:51:51.046353 | 2019-06-28T14:48:22 | 2019-06-28T14:48:22 | 92,169,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,995 | py | #!/usr/bin/env python3
import csv, re, os, shutil, argparse, urllib
#import lxml.etree.ElementTree as ET
from lxml import etree
# Working directories for intermediate artifacts; created on demand and
# (optionally) removed again during cleanup.
SVG_FOLDER = 'svgs'
PDF_FOLDER = 'pdfs'
def main():
    """Generate one PDF table card per spreadsheet row, then concatenate them.

    Pipeline: spreadsheet -> CSV (libreoffice) -> one SVG per row (template
    placeholder substitution via lxml) -> one PDF per SVG (inkscape) ->
    single combined PDF (pdftk).  Temporary files are deleted unless the
    corresponding --keep* flag is set.
    """
    # parse some flags here
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--infile", help="ods or xlsx input file", default='makers.ods')
    parser.add_argument("-t", "--template", help="SVG template filename", default='template.svg')
    parser.add_argument("-o", "--outfile", help="PDF outfile name", default='makers.pdf')
    parser.add_argument("-c", "--keepcsv", help="Keep the temporary CSV file", default=False, action='store_true')
    parser.add_argument("-s", "--keepsvgs", help="Keep temporary SVG files", default=False, action='store_true')
    parser.add_argument("-p", "--keeppdfs", help="Keep temporary PDF files", default=False, action='store_true')
    parser.add_argument("-x", "--debugflag", help="process one entry and stop", default=False, action='store_true')
    args = parser.parse_args()
    infile = args.infile
    template = args.template
    outfile = args.outfile
    keep_pdfs = args.keeppdfs
    keep_svgs = args.keepsvgs
    keep_csv = args.keepcsv
    debugflag = args.debugflag
    # generate the csv file (comma separated, double quoted, utf-8)
    # TODO: check if libreoffice is running, otherwise this generation fails silently
    # because the lockfile exists
    # NOTE(review): os.system() with string-concatenated, user-supplied paths
    # here (and for inkscape/pdftk below) is shell-injection prone; prefer
    # subprocess.run([...]) with an argument list.
    os.system('libreoffice --headless --convert-to csv:"Text - txt - csv (StarCalc)":44,34,76,1,1 --outdir . ' + infile)
    csvfilename = re.sub(r'\.[a-zA-Z0-9]+$', '', infile) + '.csv'
    # check the required dirs exist
    if not os.path.exists(SVG_FOLDER):
        os.makedirs(SVG_FOLDER)
    if not os.path.exists(PDF_FOLDER):
        os.makedirs(PDF_FOLDER)
    # create each file from line of csv file
    with open(csvfilename) as csvfile:
        reader = csv.DictReader(csvfile)
        i = 1
        for row in reader:
            #print(row.keys())
            # generate the required variables to substitute into the SVG;
            # the filename is the zero-padded row number plus the title with
            # non-word characters stripped, truncated to 14 characters.
            filesafe_name = re.sub(r"[^\w\s]", '', row['{title}'])
            filesafe_name = re.sub(r"\s+", '-', filesafe_name)
            filesafe_name = str(i).zfill(2) + '-' + filesafe_name.strip()
            filesafe_name = (filesafe_name[:14]) if len(filesafe_name) > 14 else filesafe_name
            title = row['{title}'].strip()
            name = row['{name}'].strip()
            # '_x000D_' is the spreadsheet export's encoding of carriage returns.
            description = row['{description}'].replace('_x000D_','')
            # standardise *some* of the possible twitter and web inputs
            twitter = '@' + row['{twitter}'].strip().replace('http://','').replace('https://','').replace('twitter.com/','').lstrip('@').strip()
            website = row['{website}'].strip().replace('http://','').replace('https://','').replace('www.','').strip()
            # parse vars to standardise text input
            # replace the placeholders in the new file
            svg_file = SVG_FOLDER + '/' + filesafe_name + '.svg'
            # read the svg template file in
            #tree = ET.parse(template)
            #root = tree.getroot()
            tree = etree.parse(template)
            root = tree.getroot()
            # Each flowPara whose text is a literal placeholder token gets the
            # corresponding row value; long titles/descriptions also shrink the
            # parent element's font-size so the text fits the card.
            for para in root.findall('.//{http://www.w3.org/2000/svg}flowPara'):
                if para.text == '{title}':
                    para.text = title
                    if len(title) >= 34:
                        # reduce the text size
                        parent = para.find('..')
                        style_tag = parent.attrib['style']
                        # find the current font size
                        font_size_tag = re.search('font-size:[0-9.]+px;', style_tag).group()
                        font_size = float(re.search(r'[0-9.]+', font_size_tag).group())
                        if len(title) >= 50:
                            font_size = font_size*0.75
                        else:
                            font_size = font_size*0.85
                        style_tag = re.sub(r'font-size:[0-9.]+px;', 'font-size:' + str(font_size) + 'px;', style_tag)
                        parent.attrib['style'] = style_tag
                        print('title font-size: ' + str(font_size) + ' px;')
                        #print(parent.attrib['style'])
                elif para.text == '{name}':
                    para.text = name
                elif para.text == '{description}':
                    para.text = description
                    if len(description) >= 512:
                        # reduce the text size
                        parent = para.find('..')
                        style_tag = parent.attrib['style']
                        # find the current font size
                        font_size_tag = re.search('font-size:[0-9.]+px;', style_tag).group()
                        font_size = float(re.search(r'[0-9.]+', font_size_tag).group())
                        if len(description) > 1200:
                            font_size = font_size*0.65
                        elif len(description) > 800:
                            font_size = font_size*0.75
                        else:
                            font_size = font_size*0.85
                        style_tag = re.sub(r'font-size:[0-9.]+px', 'font-size:' + str(font_size) + 'px', style_tag)
                        parent.attrib['style'] = style_tag
                        print('description font-size: ' + str(font_size) + ' px;')
                elif para.text == '{twitter}':
                    if twitter != '@': # is empty
                        para.text = twitter
                    else:
                        para.text = ''
                elif para.text == '{website}':
                    if website[-1:] == '/':
                        para.text = website[:-1]
                    else:
                        para.text = website
            # write the adjusted svg
            tree.write(svg_file)
            pdf_file = PDF_FOLDER + '/' + filesafe_name + '.pdf'
            os.system('inkscape --without-gui --file ' + svg_file + ' --export-text-to-path --export-area-page --export-pdf ' + pdf_file)
            print('Created: ' + title)
            i+=1
            # Debug mode: dump the derived fields for the first row and stop.
            if debugflag == True:
                print ('Filename: ' + filesafe_name)
                print ('Name: ' + name)
                print ('Title: ' + title + ' [' + str(len(title)) + ']')
                print ('Description: ' + description + ' [' + str(len(description)) + ']')
                print ('Twitter: ' + twitter)
                print ('Website: ' + website)
                quit()
    # concatenate all the pdf pages
    os.chdir(PDF_FOLDER)
    os.system('pdftk *.pdf output ../' + outfile)
    os.chdir('..')
    # cleanup temporary files
    if keep_csv == False:
        os.remove(csvfilename)
    if keep_svgs == False:
        shutil.rmtree(SVG_FOLDER)
    if keep_pdfs == False:
        shutil.rmtree(PDF_FOLDER)
if __name__ == "__main__":
    main()
| [
"patrick@defproc.co.uk"
] | patrick@defproc.co.uk |
7875cbdde6257275a92695eeecc06813145d79d1 | 1ad64831a79a6b163f6a31a9255994c1e4d8a322 | /settings.py | a2fe5dbf18ed9edbb73469731b1b139a097fe630 | [
"Apache-2.0"
] | permissive | benhosmer/brzr-desktop | 1f17b767a9d1b7d6f4bbb3787e5deb9df92f9165 | 1e5e88e0b33325d35002636192019a40d080224e | refs/heads/master | 2020-03-30T07:49:24.701545 | 2013-07-11T16:29:12 | 2013-07-11T16:29:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | import ConfigParser
config = ConfigParser.RawConfigParser()
config.read('brzr.cfg')
database_name = config.get('main', 'database_name')
event_name = config.get('main', 'event_name')
| [
"ben.hosmer@gmail.com"
] | ben.hosmer@gmail.com |
d95df558fb04770dfcce9d62774614c18e3bfa25 | 401e455157f74b28b0ad12df6bdae3c159888db6 | /tiviapp/apis/twitter_rest.py | 2711d64b25094018a1871401c9eeafc7e1407e66 | [] | no_license | AnilSener/tivi | 0624e356524ae03fddd771a43a174211b8e1909d | 701e1ab2948d898fe12999bc5be954f61a3f4e99 | refs/heads/master | 2021-01-01T06:04:31.707925 | 2015-07-03T15:34:36 | 2015-07-03T15:34:36 | 38,221,946 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,230 | py | __author__ = 'anil'
from twython import TwythonStreamer,Twython,TwythonError
from tivi.celery import app
from tiviapp.models import *
import time
####################################################################
consumer_key="Vs7V2k4vPWMMyTFqLzqPkM6wE"
consumer_secret="aWNRzh74LUT1fuW35y6VzRDtvuimQ4LjFGMnMMkEXI0Y9LSpkf"
access_token="258113369-63Y2Cqr9q0Bo02WU4AS8Bjiv3JnHP2Us7HimK26G"
access_token_secret="Z4Sf9EyLbOJ4jPI5WlZPZUyv3OwluuZXiKXn0pamk8Dly"
###################################################################
twitter = Twython(consumer_key, consumer_secret,access_token,access_token_secret)
@app.task()
def exec_User_Follows():
    """Celery task (Python 2): print the followers of every stored TwitterUser.

    Side effects only (stdout).  API errors are printed and swallowed per
    user, so one failure does not abort the remaining users.
    """
    twitter_users=TwitterUser.objects.all()
    if len(twitter_users)==0:
        # NOTE(review): sleeping 5 minutes inside a task blocks the worker;
        # a retry with a countdown would be the usual Celery pattern.
        print "No users available Wait 5 minutes for the next API call"
        time.sleep(300)
    else:
        for i,user in enumerate(twitter_users):
            print user.userName,"!!!"
            try:
                print "!!!TIME FOR FOLLOWERS!!!"
                # Fetch up to 200 followers; no cursoring past the first page.
                followers=twitter.get_followers_list(screen_name=user.userName,include_user_entities=True,count=200)
                # NOTE(review): get_followers_list returns the full API response;
                # if that is a dict, iterating it yields its top-level keys --
                # confirm whether followers['users'] was intended here.
                for f in followers:
                    print f
            except TwythonError as e:
                print e.message
"anil_sener@yahoo.com"
] | anil_sener@yahoo.com |
81adc9b89c325fae8eb969a4530b965c9f2ee337 | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-ice/aliyunsdkice/request/v20201109/DescribeQueryConfigsRequest.py | d4de87a4f55304fb57408fb816c6f72a3b5b2c81 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 1,437 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkice.endpoint import endpoint_data
class DescribeQueryConfigsRequest(RpcRequest):
	"""RPC request for the ICE (API version 2020-11-09) ``DescribeQueryConfigs`` action.

	Machine-generated SDK boilerplate; code intentionally left unchanged.
	The ``Type`` query parameter selects which query configs to describe.
	"""
	def __init__(self):
		RpcRequest.__init__(self, 'ICE', '2020-11-09', 'DescribeQueryConfigs','ice')
		self.set_method('POST')
		# Endpoint resolution data is attached only when the base class
		# exposes the corresponding attributes.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
	def get_Type(self): # String
		return self.get_query_params().get('Type')
	def set_Type(self, Type): # String
		self.add_query_param('Type', Type)
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
102f8aafd37eaff350427f12ac5e476d7dda9c04 | 992816021cb79580e9c739ca2650fe9bc83829bd | /medapp/profile/admin.py | b838f95fde2b89db4d347aec60b042db9c24c2e0 | [] | no_license | etiennekruger/medapp-api | 096de56dc523425981b48c6d80930bb3df799639 | af9232f548db28e6716c0a14e38b44dcb9b57690 | refs/heads/master | 2020-04-18T22:58:35.859694 | 2012-07-16T06:17:17 | 2012-07-16T06:17:17 | 5,150,748 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | from django.contrib import admin
from profile.models import Profile
class ProfileAdmin(admin.ModelAdmin):
    """Admin options for Profile: object label plus audit timestamps."""

    list_display = ('__unicode__', 'created', 'updated')


admin.site.register(Profile, ProfileAdmin)
| [
"makhonin@steelkiwi.com"
] | makhonin@steelkiwi.com |
14450e65ad686acdab9fb6fecaa1b50a8a7d5106 | 84fda250fd32b37d74f07f6d00106226881e70ee | /shop/cart/admin.py | 85a9d7c503ea1cc245e73f3574a659d5c5459871 | [] | no_license | NovosadVictor/OnlineShop | 8b80b1122f9d8019fb2d1641e08ea9eb64cc6ed6 | afe6826bf0494be6af3a033cf8c2e3a205ffb571 | refs/heads/master | 2020-12-02T16:13:26.312803 | 2017-08-22T16:29:55 | 2017-08-22T16:29:55 | 96,091,899 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | from django.contrib import admin
from .models import ProductInCart
class ProductInCartAdmin(admin.ModelAdmin):
    """Admin options for cart items; quantity is editable in the list view."""

    list_display = ('owner', 'product', 'quantity')
    list_editable = ('quantity',)


admin.site.register(ProductInCart, ProductInCartAdmin)
| [
"novosad_msu@mail.ru"
] | novosad_msu@mail.ru |
52ba6a2814b715ec92f01331d0712c169ef9ebfa | 009eddcbb8e4917710ad3c8661ee5bb04b6fc512 | /tf-transliteration/transliterate.py | 2a032f79be925ab091ef9b896e4cc7c30e5a4c45 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | SRSTLCI/tf-transliteration-riggs2slo-toolkit | 873ea0ccb4c7f270bf16b137ea59ad324ea176ce | e6224627408ea23f830e2b3cd4f6e3a723d70d9d | refs/heads/main | 2023-04-20T18:47:30.731897 | 2021-05-17T16:08:55 | 2021-05-17T16:08:55 | 366,071,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,962 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import os, sys, io, re
import six
from data import create_vocab, load_vocab
from data import split_text_file, SPECIALS
from data import create_dataset, make_data_iter_fn
# Command-line flags controlling training, evaluation and decoding.
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer("train_steps", 0,
                     "The number of steps to run training for.")
flags.DEFINE_integer("eval_steps", 100, "Number of steps in evaluation.")
flags.DEFINE_integer("min_eval_frequency", 101, "Minimum steps between evals")
flags.DEFINE_string("hparams", "", "Comma separated list of hyperparameters")
flags.DEFINE_string("model_name", "ei", "Name of model")
flags.DEFINE_string("data_file", None, "TSV Data filename")
flags.DEFINE_float("eval_fraction", 0.05, "Fraction dataset used for evaluation")
flags.DEFINE_string("decode_input_file", None, "File to decode")
flags.DEFINE_string("vocab_file", "chars.vocab", "Character vocabulary file")
tf.logging.set_verbosity(tf.logging.INFO)
def decode_hparams(vocab_size, overrides=""):
    """Return default hyperparameters with ``overrides`` parsed on top.

    ``char_vocab_size`` is one larger than the vocabulary to reserve the
    blank label required by CTC.
    """
    hparams = tf.contrib.training.HParams(
        batch_size=32,
        embedding_size=64,
        char_vocab_size=vocab_size + 1,
        hidden_size=128,
        learn_rate=0.0008,
    )
    hparams.parse(overrides)
    return hparams
def get_model_dir(model_name):
    """Return (and create on first use) the model directory under the cwd.

    The creation is EAFP rather than exists()+mkdir() so two concurrent
    runs cannot race between the check and the creation.
    """
    model_dir = os.path.join(os.getcwd(), model_name)
    try:
        os.mkdir(model_dir)
    except OSError:
        # Already existing (created by us or a concurrent run) is fine;
        # any other failure (permissions, missing parent) still surfaces.
        if not os.path.isdir(model_dir):
            raise
    return model_dir
def cer(labels, predictions):
    """Character error rate metric: mean edit distance between the sparse
    prediction and label tensors, aggregated with tf.metrics.mean."""
    return tf.metrics.mean(tf.edit_distance(predictions, labels))
def create_model():
    """
    Return an Estimator ``model_fn``: char embeddings -> BiLSTM -> CTC.
    Refer https://arxiv.org/abs/1610.09565
    """
    def model_fn(features, labels, mode, params):
        # ``labels`` is fed directly to tf.nn.ctc_loss / tf.edit_distance,
        # which expect a tf.SparseTensor of target character ids.
        hparams = params
        inputs = features['input']
        input_lengths = features['input_length']
        # NOTE(review): ``targets`` and ``target_lengths`` are never used below.
        targets = labels
        target_lengths = features['target_length']
        # Flatten input lengths
        input_lengths = tf.reshape(input_lengths, [-1])
        # Embedding table kept on CPU.
        with tf.device('/cpu:0'):
            embeddings = tf.Variable(
                tf.truncated_normal(
                    [hparams.char_vocab_size, hparams.embedding_size],
                    stddev=(1/np.sqrt(hparams.embedding_size))),
                name='embeddings')
            input_emb = tf.nn.embedding_lookup(embeddings, inputs)
        # Forward/backward halves together give hidden_size features.
        cell_fw = tf.nn.rnn_cell.BasicLSTMCell(hparams.hidden_size//2)
        cell_bw = tf.nn.rnn_cell.BasicLSTMCell(hparams.hidden_size//2)
        with tf.variable_scope('encoder'):
            # BiLSTM
            enc_outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, input_emb,
                input_lengths, dtype=tf.float32)
            enc_outputs = tf.concat(enc_outputs, axis=-1)
        with tf.variable_scope('decoder'):
            # Project to vocab size
            logits = tf.layers.dense(enc_outputs, hparams.char_vocab_size)
        # CTC loss and decoder requires Time major
        logits = tf.transpose(logits, perm=[1, 0, 2])
        loss = None
        eval_metric_ops = None
        train_op = None
        predictions = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            loss = tf.nn.ctc_loss(labels, logits, input_lengths, ignore_longer_outputs_than_inputs=True)
            loss = tf.reduce_mean(loss)
            optimizer = tf.contrib.opt.LazyAdamOptimizer(learning_rate=hparams.learn_rate)
            train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
        elif mode == tf.estimator.ModeKeys.EVAL:
            loss = tf.nn.ctc_loss(labels, logits, input_lengths,
                ignore_longer_outputs_than_inputs=True)
            loss = tf.reduce_mean(loss)
            eval_predictions, _ = tf.nn.ctc_greedy_decoder(logits, input_lengths)
            eval_metric_ops = {
                'CER': cer(labels, tf.cast(eval_predictions[0], tf.int32))
            }
        elif mode == tf.estimator.ModeKeys.PREDICT:
            # Greedy CTC decode; densify the sparse result for serving.
            predictions, _ = tf.nn.ctc_greedy_decoder(logits, input_lengths)
            predictions = tf.sparse_tensor_to_dense(tf.cast(predictions[0], tf.int32))
            predictions = {'decoded': predictions}
        return tf.estimator.EstimatorSpec(
            mode,
            predictions=predictions,
            loss=loss,
            train_op=train_op,
            eval_metric_ops=eval_metric_ops
        )
    return model_fn
def train():
    """
    Train the model:
    1. Create vocab file from dataset if not created
    2. Split dataset into test/eval if not available
    3. Create TFRecord files if not available
    4. Load TFRecord files using tf.data pipeline
    5. Train model using tf.Estimator
    """
    model_dir = get_model_dir(FLAGS.model_name)
    vocab_file = os.path.join(model_dir, FLAGS.vocab_file)
    if not os.path.exists(vocab_file):
        create_vocab([FLAGS.data_file], vocab_file)
    # NOTE(review): ``characters`` (the reverse vocab) is unused in train().
    vocab, characters = load_vocab(vocab_file)
    train_file, eval_file = split_text_file(FLAGS.data_file, model_dir, FLAGS.eval_fraction)
    train_tfr = create_dataset(train_file, vocab)
    eval_tfr = create_dataset(eval_file, vocab)
    hparams = decode_hparams(len(vocab), FLAGS.hparams)
    tf.logging.info('params: %s', str(hparams))
    # Shuffle only the training input.
    train_input_fn = make_data_iter_fn(train_tfr, hparams.batch_size, True)
    eval_input_fn = make_data_iter_fn(eval_tfr, hparams.batch_size, False)
    estimator = tf.estimator.Estimator(
        model_fn=create_model(),
        model_dir=model_dir,
        params=hparams,
        config=tf.contrib.learn.RunConfig()
    )
    experiment = tf.contrib.learn.Experiment(
        estimator=estimator,
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        train_steps=FLAGS.train_steps,
        eval_steps=FLAGS.eval_steps,
        min_eval_frequency=FLAGS.min_eval_frequency
    )
    experiment.train_and_evaluate()
def predict():
    """
    Perform transliteration using trained model. Input must be a text
    file. Converts to a TFRecord first.  Output goes to ``<input>.out.txt``,
    one decoded line per input line, with special/padding ids stripped.
    """
    model_dir = get_model_dir(FLAGS.model_name)
    vocab_file = os.path.join(model_dir, FLAGS.vocab_file)
    if not os.path.exists(vocab_file):
        raise IOError("Could not find vocabulary file")
    vocab, rev_vocab = load_vocab(vocab_file)
    hparams = decode_hparams(len(vocab), FLAGS.hparams)
    tf.logging.info('params: %s', str(hparams))
    if FLAGS.decode_input_file is None:
        raise ValueError("Must provide input field to decode")
    tfr_file = create_dataset(FLAGS.decode_input_file, vocab)
    infer_input_fn = make_data_iter_fn(tfr_file, hparams.batch_size, False)
    estimator = tf.estimator.Estimator(
        model_fn=create_model(),
        model_dir=model_dir,
        params=hparams,
        config=tf.contrib.learn.RunConfig()
    )
    y = estimator.predict(input_fn=infer_input_fn, predict_keys=['decoded'])
    # Ids to drop from the output: the special marker characters plus 0 (padding).
    ignore_ids = set([vocab[c] for c in SPECIALS] + [0])
    # NOTE(review): r'\..+' replaces everything from the FIRST dot onward,
    # which mangles paths containing dots in directory names.
    decode_output_file = re.sub(r'\..+', '.out.txt', FLAGS.decode_input_file)
    count = 0
    with io.open(decode_output_file, 'w', encoding='utf-8') as fp:
        for pred in y:
            decoded = pred['decoded']
            # A single prediction may come back rank-1; normalise to a batch.
            if len(decoded.shape) == 1:
                decoded = decoded.reshape(1, -1)
            for r in range(decoded.shape[0]):
                fp.write(''.join([rev_vocab[i] for i in decoded[r, :] if i not in ignore_ids]) + '\n')
                count += 1
                if count % 10000 == 0:
                    tf.logging.info('Decoded %d lines', count)
def main(unused_argv):
    """Entry point: decode when an input file is given, otherwise train."""
    if FLAGS.decode_input_file:
        predict()
        return
    if FLAGS.train_steps > 0:
        train()


tf.app.run()
| [
"noreply@github.com"
] | SRSTLCI.noreply@github.com |
0192859b8b887bddad03294ac6c0618bf2e5e2a9 | 0ce9fbb56b1bb142eed50491c00adeb5eed52d3f | /chatbot.py | 01cda0956f43c0b5dbe7a8189c09280cd267198e | [] | no_license | lucasB97/ARTUR | 0336a7c1bc0e6fc9504dbd46a1a57e2d7c4dd299 | 287625fa5f30923abb676b6ce3fb326ff8ceda81 | refs/heads/main | 2023-04-27T12:01:47.335612 | 2021-04-17T17:31:16 | 2021-04-17T17:31:16 | 316,075,171 | 0 | 0 | null | 2021-04-17T02:59:21 | 2020-11-25T23:21:38 | HTML | UTF-8 | Python | false | false | 1,095 | py | from chatterbot import ChatBot
from chatterbot.trainers import ListTrainer
from chatterbot.trainers import ChatterBotCorpusTrainer
# Creating ChatBot Instance
chatbot = ChatBot(
    'A.R.T.U.R.',
    storage_adapter='chatterbot.storage.SQLStorageAdapter',
    logic_adapters=[
        {
            'import_path': 'chatterbot.logic.BestMatch',
            'default_response': 'Me desculpe, mas eu não entendi. Ainda estou aprendendo :(',
            'maximum_similarity_threshold': 0.90
        }
    ],
    filters=[
        'chatterbot.filters.RepetitiveResponseFilter'
    ],
    database_uri='sqlite:///database.sqlite3'
)

# Training with Personal Ques & Ans.
# Fixed: the file handle was opened without ever being closed; use a
# context manager and an explicit encoding (the corpus is Portuguese).
with open('training/ques_ans.txt', encoding='utf-8') as ques_ans_file:
    training_data = ques_ans_file.read().splitlines()
trainer = ListTrainer(chatbot)
trainer.train(training_data)

# Training with Portugues Corpus Data
trainer_corpus = ChatterBotCorpusTrainer(chatbot)
trainer_corpus.train(
    "chatterbot.corpus.portuguese",
    "chatterbot.corpus.portuguese.greetings",
    "chatterbot.corpus.portuguese.conversations",
    "chatterbot.corpus.portuguese.linguistic_knowledge"
)
| [
"lucasbessa708@gmail.com"
] | lucasbessa708@gmail.com |
a9c2ba7de5529d4ee631d9a51afa34de0f801869 | 78f3ffc90eec06e3ea638b0a87b73562dc311984 | /damsht.py | 9665480edef3cb1d8c6ffd418a561d9b1c8e1296 | [] | no_license | ashavt/dsh | a19647db44ce8112ce80184c6b5351cd16b649ec | deef4281ddd71d472b4b1d0b0b633634da3bf6a5 | refs/heads/master | 2022-12-22T07:05:45.672516 | 2018-02-18T09:13:54 | 2018-02-18T09:13:54 | 121,934,464 | 0 | 0 | null | 2022-12-08T00:54:53 | 2018-02-18T08:49:08 | Python | UTF-8 | Python | false | false | 2,224 | py | import requests
import datetime
class BotHandler:
    """Minimal Telegram Bot API client used by the greeting loop below."""

    def __init__(self, token):
        self.token = token
        self.api_url = "https://api.telegram.org/bot{}/".format(token)

    def get_updates(self, offset=None, timeout=30):
        """Long-poll getUpdates and return the raw list of update objects."""
        method = 'getUpdates'
        params = {'timeout': timeout, 'offset': offset}
        resp = requests.get(self.api_url + method, params)
        result_json = resp.json()['result']
        return result_json

    def send_message(self, chat_id, text):
        """Send ``text`` to ``chat_id`` via sendMessage; return the HTTP response."""
        params = {'chat_id': chat_id, 'text': text}
        method = 'sendMessage'
        resp = requests.post(self.api_url + method, params)
        return resp

    def get_last_update(self):
        """Return the most recent update, or None when there are none yet.

        Bug fix: the original fell through to ``result[len(result)]`` for an
        empty list, which unconditionally raised IndexError.
        """
        updates = self.get_updates()
        return updates[-1] if updates else None
# NOTE(review): ``token`` is never defined in this module (presumably removed
# with the credentials); define it or load it from the environment before use,
# otherwise this line raises NameError at import time.
greet_bot = BotHandler(token)
greetings = ('hello', 'hi', 'greetings', 'sup')
# Captured once at import; see main() for the staleness consequences.
now = datetime.datetime.now()
def main():
    """Poll Telegram forever and reply to greetings with a Russian salutation.

    ``today`` is incremented after each reply so at most one greeting is
    answered per calendar day.  Assumes at least one update exists.
    """
    new_offset = None
    # NOTE(review): ``now``/``hour`` are captured once at startup and never
    # refreshed, so the morning/day/evening window (and the day rollover)
    # goes stale after the process has been running past the hour it started.
    today = now.day
    hour = now.hour
    while True:
        # NOTE(review): get_updates() is called twice per iteration (here and
        # inside get_last_update()), doubling the API traffic.
        greet_bot.get_updates(new_offset)
        last_update = greet_bot.get_last_update()
        last_update_id = last_update['update_id']
        last_chat_text = last_update['message']['text']
        last_chat_id = last_update['message']['chat']['id']
        last_chat_name = last_update['message']['chat']['first_name']
        if last_chat_text.lower() in greetings and today == now.day and 6 <= hour < 12:
            greet_bot.send_message(last_chat_id, 'Доброе утро {}'.format(last_chat_name))
            today += 1
        elif last_chat_text.lower() in greetings and today == now.day and 12 <= hour < 17:
            greet_bot.send_message(last_chat_id, 'Добрый день {}'.format(last_chat_name))
            today += 1
        elif last_chat_text.lower() in greetings and today == now.day and 17 <= hour < 23:
            greet_bot.send_message(last_chat_id, 'Добрый вечер {}'.format(last_chat_name))
            today += 1
        # Acknowledge the processed update so it is not delivered again.
        new_offset = last_update_id + 1
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        exit()
| [
"ashavt@yandex.ru"
] | ashavt@yandex.ru |
1a7e98eac3530ccd63f562cdfb6de3ad851647f7 | 5223229cbdbe883c6c1c09980c60d845e8255dd3 | /laliga_sb_analysis/Scripts/Barca_Manager_tenure_graph.py | e6efd1c05d1abfb8fc9f6185443ca62b15abc13d | [] | no_license | derrik-hanson/Python_Analysis_xG_Barca_plus | 39b112c47672b5f3133a1b439c942e4cee35791e | 65af5acb02deadb610b182a4b74b444114eb3ec6 | refs/heads/main | 2023-08-26T18:18:17.546730 | 2021-11-13T06:26:36 | 2021-11-13T06:26:36 | 422,980,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,627 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 12 19:39:17 2021
@author: Derrik Hanson
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 12 17:03:57 2021
@author: Derrik Hanson
"""
import plotly.express as px
import pandas as pd
# One record per managerial spell; Tito Vilanova appears twice because his
# tenure was interrupted (Jordi Roura covered 2013-01 to 2013-03).
manager_tenures = [
    {'manager_name': 'Frank Rijkaard', 'start':'2003-06' ,'end':'2008-06'},
    {'manager_name': 'Pep Guardiola', 'start':'2008-06' ,'end':'2012-06'},
    {'manager_name': 'Tito Vilanova', 'start':'2012-07' ,'end':'2013-01'},
    {'manager_name': 'Jordi Roura', 'start':'2013-01' ,'end':'2013-03'},
    {'manager_name': 'Tito Vilanova', 'start':'2013-03' ,'end':'2013-07'},
    {'manager_name': 'Gerard Martino', 'start':'2013-07' ,'end':'2014-05'},
    {'manager_name': 'Luis Enrique', 'start':'2014-05' ,'end':'2017-05'},
    {'manager_name': 'Ernesto Valverde', 'start':'2017-05' ,'end':'2020-01'},
    {'manager_name': 'Quique Setien', 'start':'2020-01' ,'end':'2020-08'},
    {'manager_name': 'Ronald Koeman', 'start':'2020-08' ,'end':'2021-10'},
]
# load DataFrame
df = pd.DataFrame(manager_tenures)
# Create Gantt Plot
fig = px.timeline(df, x_start="start", x_end="end", y="manager_name",
                  labels = {
                      'manager_name': 'Manager Name'}
                  )
fig.update_layout(
    title={
        'text': "Barcelona Manager Tenures",
        'y':0.95,
        'x':0.5,
        'xanchor': 'center',
        'yanchor': 'top'})
fig.update_yaxes(autorange="reversed") # otherwise entries are listed from the bottom up
fig.show()
fig.write_image("figures/barca_manager_tenure.pdf")
"hanson.derrik@gmail.com"
] | hanson.derrik@gmail.com |
73140cdc70ade106181a0a7092b94bcbb63b6c41 | 4b0ac126af3d635be9d248ed5b2642dfe32b56d0 | /philips_app_engine/main.py | 040715b066a101a64a4565b120c2e2f6f29d8966 | [] | no_license | CosmaTrix/hackathon-git | b4d31a91531818fa943796a81def0a9626283e83 | cfbdf81aa20ff74ce3af3424018f8b90201f5fe4 | refs/heads/master | 2020-12-24T17:08:42.627668 | 2015-01-31T23:08:04 | 2015-01-31T23:08:04 | 30,111,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,087 | py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from urlparse import urljoin
import webapp2
import json
import requests
import settings
# Hue values sent to the lights; impressions are scaled into [RED, GREEN]
# and volume into [0, BRIGHTNESS_MAX] in post() below.
# (21845 is presumably green on the Hue hue scale -- TODO confirm against
# the Philips Hue API docs.)
GREEN = 21845.
RED = 0
BRIGHTNESS_MAX = 255.
class MainHandler(webapp2.RequestHandler):
    """GAE handler driving three Philips Hue lights from posted metrics.

    GET serves index.html; POST maps each posted value to a light state
    (impressions -> hue, volume -> brightness) and shifts states down the
    light chain one step per value.
    """
    def __init__(self, *args, **kwargs):
        super(MainHandler, self).__init__(*args, **kwargs)
        # Logical light index -> Hue REST resource URL (note the physical
        # light numbers are deliberately 1, 3, 2).
        self.lights = {
            0: "http://{0}/api/newdeveloper/lights/1/".format(
                settings.PHILIPS_HUE_IP),
            1: "http://{0}/api/newdeveloper/lights/3/".format(
                settings.PHILIPS_HUE_IP),
            2: "http://{0}/api/newdeveloper/lights/2/".format(
                settings.PHILIPS_HUE_IP),
        }
        # NOTE(review): last_light is never read in this class.
        self.last_light = 2
    def __dict_for(self, hue_color, bright):
        # Light-state payload: full saturation, given brightness and hue.
        return {
            "on": True,
            "sat": 255,
            "bri": bright,
            "hue": hue_color
        }
    def __request_dict_from_resp(self, data):
        # Extract a re-sendable state payload from a light's GET response.
        # NOTE(review): the "br" key here differs from the "bri" used in
        # __dict_for -- verify against the Hue API; this looks like a typo.
        return {
            "on": data["state"]["on"],
            "sat": data["state"]["sat"],
            "br": data["state"]["bri"],
            "hue": data["state"]["hue"],
        }
    def __sequence_lights(self, data):
        # Shift states down the chain: light1's state -> light2,
        # light0's state -> light1, then apply the new state to light0.
        resp = requests.get(self.lights[1])
        data_1 = json.loads(resp.text)
        requests.put(urljoin(self.lights[2], 'state'), json.dumps(
            self.__request_dict_from_resp(data_1)))
        resp = requests.get(self.lights[0])
        data_0 = json.loads(resp.text)
        requests.put(urljoin(self.lights[1], 'state'), json.dumps(
            self.__request_dict_from_resp(data_0)))
        requests.put(urljoin(self.lights[0], 'state'), json.dumps(data))
    def __turn_lights_off(self):
        # Switch all three lights off, pausing briefly between requests.
        off_data = json.dumps({"on": False})
        requests.put(urljoin(self.lights[0], 'state'), off_data)
        time.sleep(0.1)
        requests.put(urljoin(self.lights[1], 'state'), off_data)
        time.sleep(0.1)
        requests.put(urljoin(self.lights[2], 'state'), off_data)
        time.sleep(0.1)
    def get(self):
        # NOTE(review): the file handle is never closed; prefer a with-block.
        fh = open("index.html", "r")
        self.response.headers['Content-Type'] = 'text/html'
        self.response.out.write(fh.read())
    def post(self):
        # Body: {"values": [{"impressions": n, "volume": n}, ...],
        #        "interval": seconds} (interval defaults to 0.5).
        jsonstring = self.request.body
        data = json.loads(jsonstring)
        values = data.get("values", [])
        # Maxima start at 1 to avoid division by zero on empty/zero input.
        max_impr = 1
        max_vol = 1
        tmp_impr = []
        tmp_vol = []
        for value in values:
            impressions = value.get("impressions")
            tmp_impr.append(impressions)
            volume = value.get("volume")
            tmp_vol.append(volume)
            max_impr = max(max_impr, impressions)
            max_vol = max(max_vol, volume)
        # Normalise: impressions -> [0, GREEN] hue, volume -> [0, 255] brightness.
        rate_impr = GREEN / max_impr
        list_impr = [int(impr * rate_impr) for impr in tmp_impr]
        rate_vol = BRIGHTNESS_MAX / max_vol
        list_vol = [int(vol * rate_vol) for vol in tmp_vol]
        response = {}
        for i in range(len(list_impr)):
            json_dict = self.__dict_for(list_impr[i], list_vol[i])
            self.__sequence_lights(json_dict)
            json_dict["count"] = i
            generated = response.get("generated", [])
            generated.append(json_dict)
            response["generated"] = generated
            time.sleep(data.get("interval", 0.5))
        self.__turn_lights_off()
        self.response.headers['Content-Type'] = 'application/json'
        response["status"] = "OK"
        self.response.out.write(json.dumps(response))
app = webapp2.WSGIApplication([('/', MainHandler)], debug=True)
| [
"marco@travelbird.nl"
] | marco@travelbird.nl |
1da98ce1969f888ec8962c9239a84d4f7a580f78 | b72dbc51279d3e59cb6410367b671f8a956314c1 | /leet_code/leet_372.py | 5c1d0057a5ac67543ab059922519a69fe52287d6 | [] | no_license | ddobokki/coding-test-practice | 7b16d20403bb1714d97adfd1f47aa7d3ccd7ea4b | c88d981a1d43b986169f7884ff3ef1498e768fc8 | refs/heads/main | 2023-07-08T15:09:32.269059 | 2021-08-08T12:19:44 | 2021-08-08T12:19:44 | 344,116,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | from typing import List
class Solution:
    def superPow(self, a: int, b: List[int]) -> int:
        """Compute a**b mod 1337 where b is given as a list of decimal digits.

        Fixed: the original materialised the whole exponent as one huge
        integer (int("".join(...))), which is needlessly expensive for long
        digit lists.  Process the digits left to right instead, using
        a^(10x + d) = (a^x)^10 * a^d, keeping every value below 1337.
        """
        result = 1
        base = a % 1337
        for digit in b:
            result = pow(result, 10, 1337) * pow(base, digit, 1337) % 1337
        return result
| [
"44228269+ddobokki@users.noreply.github.com"
] | 44228269+ddobokki@users.noreply.github.com |
7c3197dc1c975ece63e9829ab29ce34714bbbbbf | 07d5dbbaecddb7ddb3341861fddd5490d54840af | /Computational_Physics/plot2.py | 4da8f397c3aca7afa346f8b38707c1ec770873ec | [] | no_license | julpotter/College-Projects-and-code | 49382e5df82a80c3f2c8162c610c7f465860782e | b1a0508f88a4a6ef1b04de66c9653c68ea03a3cc | refs/heads/main | 2023-03-01T15:25:51.618680 | 2021-02-05T00:54:02 | 2021-02-05T00:54:02 | 326,104,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 640 | py |
# Fixed: "print" is not a valid __future__ feature name (the file would not
# even compile); the correct feature is print_function.
from __future__ import print_function

import matplotlib.pyplot as plt

# Drag-free projectile motion: y(t) = y0 + v0*t - g*t^2/2,
# sampled every 0.05 s until the projectile reaches the ground.
g = 9.81  # gravitational acceleration (m/s^2)
t0 = 0
y0 = 1.5  # initial height
v0 = float(input("Enter the initial velocity v0 :"))
# use lists to save position and time
y = []
t = []
ycalc = 0
tcalc = 0
# create a loop to populate our lists with values
while ycalc >= 0:
    # Fixed: the exponent was missing ("tcalc**"), a syntax error; the
    # kinematics formula needs tcalc squared.
    ycalc = y0 + v0 * tcalc - 1 / 2. * g * tcalc ** 2
    y.append(ycalc)
    t.append(tcalc)
    print("Height is {0:5.2f} and time is {1:5.2f}".format(ycalc, tcalc))
    tcalc = tcalc + 0.05  # increment
print("The maximum height reach is ", max(y))
# graph our results
plt.plot(t, y, 'r^')
plt.xlabel("Time t (s)")
plt.ylabel('Height y (m)')
plt.show()
| [
"jpotter@udallas.edu"
] | jpotter@udallas.edu |
d2db1ee6174ddceb75d614d139e0bf52ec6cc9c6 | 65c8e86e276fb9ff8159af867d3793063e480d34 | /config.py | 037c7406057066de211e539d476d582edc9ada7d | [] | no_license | prosass83/ucbotplot | 504d0d16f1625c839d624f95017ca20597b1bf59 | 25b4bb10b4c637d4349ecab56f29a8f9720f7012 | refs/heads/master | 2020-03-10T03:36:40.328166 | 2018-04-12T00:34:43 | 2018-04-12T00:34:43 | 129,169,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | # Twitter API Keys
consumer_key = "Uu04Kdr6N3SFmbEi334mhy9HQ"
consumer_secret = "d9WJsr5GCCdBI6wbyQopug6HeHpB3T2mKoez8DEHeJFy8D9ko0"
access_token = "2518417008-SC6yochSVIeAYERFWe3BClzJ2pit2iH6YWOUwcA"
access_token_secret = "FBkOuixi1SSXC9xP2k8PpxY07GWsoDZ3MvtC7lj1K59Xo" | [
"prosass@gmail.com"
] | prosass@gmail.com |
c8df0b5f2035c2386b9918776f917c3effb9da50 | 02fa1542bc428b64da276afdb46f2f2f7199f7a6 | /DataManager.py | ff0a2123a51d8a90d13e0f767a8e98c9f0b921d1 | [] | no_license | ulissesbcorrea/atae-lstm-theano-modified | bb4ba7b9786d00d5bf6da16cbbdd5067e8118e91 | e3bfa6a74df878ee7eec115c1a6e8fd1c4fcfa46 | refs/heads/master | 2022-12-13T16:36:10.256232 | 2020-09-14T09:21:14 | 2020-09-14T09:21:14 | 287,218,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,457 | py | # -*- encoding: utf-8 -*-
import numpy as np
import theano
class Sentence(object):
"""docstring for sentence"""
def __init__(self, content, target, rating, grained):
self.content, self.target = content.lower(), target
self.solution = np.zeros(grained, dtype=theano.config.floatX)
self.senlength = len(self.content.split(' '))
try:
self.solution[int(rating)+1] = 1
except Exception as e:
print 'erro no contrutor de Sentence:'+ str(e)
print 'rating:' + rating
exit()
def stat(self, target_dict, wordlist, grained=3):
data, data_target, i = [], [], 0
solution = np.zeros((self.senlength, grained), dtype=theano.config.floatX)
for word in self.content.split(' '):
data.append(wordlist[word])
# try:
# pol = Lexicons_dict[word]
# solution[i][pol+1] = 1
# except Exception as e:
# print 'error in stat:' + str(e)
# pass
i = i+1
for word in self.target.split(' '):
data_target.append(wordlist[word])
return {'seqs': data, 'target': data_target, 'solution': np.array([self.solution]), 'target_index': self.get_target(target_dict), 'original_text':self.content, 'aspect': self.target}
def get_target(self, dict_target):
return dict_target[self.target]
class DataManager(object):
def __init__(self, dataset, seed, grained=3):
self.fileList = ['train', 'test', 'dev']
self.origin = {}
for fname in self.fileList:
data = []
with open('%s/%s.cor' % (dataset, fname)) as f:
sentences = f.readlines()
for i in xrange(len(sentences)/3):
content, target, rating = sentences[i*3].strip(), sentences[i*3+1].strip(), sentences[i*3+2].strip()
sentence = Sentence(content, target, rating, grained)
data.append(sentence)
self.origin[fname] = data
self.gen_target()
def gen_word(self):
wordcount = {}
def sta(sentence):
for word in sentence.content.split(' '):
try:
wordcount[word] = wordcount.get(word, 0) + 1
except:
wordcount[word] = 1
for word in sentence.target.split(' '):
try:
wordcount[word] = wordcount.get(word, 0) + 1
except:
wordcount[word] = 1
for fname in self.fileList:
for sent in self.origin[fname]:
sta(sent)
words = wordcount.items()
words.sort(key=lambda x:x[1], reverse=True)
self.wordlist = {item[0]:index+1 for index, item in enumerate(words)}
return self.wordlist
def gen_target(self, threshold=5):
self.dict_target = {}
for fname in self.fileList:
for sent in self.origin[fname]:
if self.dict_target.has_key(sent.target):
self.dict_target[sent.target] = self.dict_target[sent.target] + 1
else:
self.dict_target[sent.target] = 1
i = 0
for (key,val) in self.dict_target.items():
if val < threshold:
self.dict_target[key] = 0
else:
self.dict_target[key] = i
i = i + 1
return self.dict_target
def gen_data(self, grained=3):
self.data = {}
for fname in self.fileList:
self.data[fname] = []
for sent in self.origin[fname]:
self.data[fname].append(sent.stat(self.dict_target, self.wordlist))
return self.data['train'], self.data['dev'], self.data['test']
def word2vec_pre_select(self, mdict, word2vec_file_path, save_vec_file_path):
list_seledted = ['']
line = ''
with open(word2vec_file_path) as f:
for line in f:
tmp = line.strip().split(' ', 1)
if mdict.has_key(tmp[0]):
list_seledted.append(line.strip())
list_seledted[0] = str(len(list_seledted)-1) + ' ' + str(len(line.strip().split())-1)
open(save_vec_file_path, 'w').write('\n'.join(list_seledted))
| [
"ulissesbcorrea@gmail.com"
] | ulissesbcorrea@gmail.com |
f9d8cfefda12f3f541879b5d40b545e5a08c6842 | 0751fa2615079decfe8c1446f6dcbd7d1048bc31 | /HW3/Code.py | 10d368b3246cb1e5023cca87882b4c37397c8493 | [] | no_license | fatihselimyakar/AlgorithmAndDesign | 0fa4ca8454dc641a57161657ceca29952f45054e | 519107789c77f60dfc03036f7fda962f49203720 | refs/heads/master | 2021-01-06T01:50:07.846213 | 2020-02-17T19:55:35 | 2020-02-17T19:55:35 | 241,194,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,280 | py | import itertools
#############QUESTION1###############
def createBoxList( size ):
boxes = []
for i in range(0,(int)(size/2)):
boxes.append("black")
for i in range((int)(size/2),size):
boxes.append("white")
return boxes
def boxTail(boxList,low,middle):
if(middle>=len(boxList)):
return boxList
boxList[low],boxList[middle]=boxList[middle],boxList[low]
boxTail(boxList,low+2,middle+2)
def boxRec(boxList):
return boxTail(boxList,1,(int)(len(boxList)/2))
#############QUESTION2###############
def findFakeCoin( arr ):
if(len(arr)==1):
return arr[0]
if(len(arr)%2==0):
if(sum( arr[0:(int)(len(arr)/2)] ) < sum( arr[(int)(len(arr)/2):len(arr)] )):
return findFakeCoin(arr[0:(int)(len(arr)/2)])
else:
return findFakeCoin(arr[(int)(len(arr)/2):len(arr)])
elif(len(arr)%2==1):
if(sum( arr[0:(int)(len(arr)/2)] ) == sum( arr[(int)(len(arr)/2):len(arr)-1] )):
return arr[len(arr)-1]
elif(sum( arr[0:(int)(len(arr)/2)] ) < sum( arr[(int)(len(arr)/2):len(arr)-1] )):
return findFakeCoin(arr[0:(int)(len(arr)/2)])
else:
return findFakeCoin(arr[(int)(len(arr)/2):len(arr)-1])
#############QUESTION3###############
quicksortSwapNum = 0
insertionSortSwapNum = 0
def insertionSort(arr): #decrease and conquer
global insertionSortSwapNum
for i in range(1,len(arr)):
current=arr[i]
position=i-1
while position>=0 and current<arr[position]:
arr[position+1]=arr[position]
insertionSortSwapNum+=1
position-=1
arr[position+1]=current
return insertionSortSwapNum
def rearrange(arr,low,high):
global quicksortSwapNum
i = ( low-1 )
pivot = arr[high]
for j in range(low , high):
if arr[j] < pivot:
i = i+1
arr[i],arr[j] = arr[j],arr[i]
quicksortSwapNum+=1
arr[i+1],arr[high] = arr[high],arr[i+1]
quicksortSwapNum+=1
return ( i+1 )
def quickSort(arr,low,high):#divide and conquer
if high > low:
index = rearrange(arr,low,high)
quickSort(arr, low, index-1)
quickSort(arr, index+1, high)
return quicksortSwapNum
#############QUESTION4###############
def findMedian(arr):
insertionSort(arr)
if(len(arr)%2==0):
return (arr[(int)(len(arr)/2)]+arr[(int)(len(arr)/2-1)])/2
else:
return arr[(int)(len(arr)/2)]
#############QUESTION5###############
def multiply(numbers):
total = 1
for x in numbers:
total *= x
return total
def optimalSubArray(arr):
value=(max(arr)+min(arr))*(len(arr)/4)
minMult=None
minList=None
for i in range(1,len(arr)+1):
combs=itertools.combinations(arr,i)
minList,minMult = (recSub((list)(combs),value,minList,minMult))
return minList
def recSub(combs,value,minList,minMult):
if(len(combs)==0):
return minList,minMult
elif(sum(combs[0])>=value):
if( (minMult==None) or (multiply(combs[0])<minMult) ):
minMult=multiply(combs[0])
minList=combs[0]
return recSub(combs[1:len(combs)],value,minList,minMult)
def main():
print ("TEST FUNCTION")
print ("\n**Box Test**")
boxList=createBoxList(8)
print ("Unchanged list:",boxList)
boxRec(boxList)
print ("Changed list:",boxList)
print ("\n**Fake Coin Test**")
coins=[2,2,1,2,2,2,2]
print ("Coin list:",coins)
print ("Fake coin:",findFakeCoin(coins))
print ("\n**Insertion and quicksort test**")
arr=[10,9,8,7,6,5,4,3,2,1]
print ("Unsorted array:",arr)
print ("Quicksort number of swap:",quickSort(arr,0,len(arr)-1))
print ("Quicksorted array:",arr)
arr2=[10,9,8,7,6,5,4,3,2,1]
print ("Unsorted array:",arr2)
print ("Insertion sort number of swap:",insertionSort(arr2))
print ("Insertion sorted array:",arr2)
print ("\n**Find median test**")
arr3=[10,20,12,13,19,1]
print ("Median array:",arr3)
print ("Median is:",findMedian(arr3))
print ("\n**Find optimal sub array**")
arr4=[2,4,7,5,22,11]
print ("Array is:",arr4)
print ("Optimal sub array is:",optimalSubArray(arr4))
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | fatihselimyakar.noreply@github.com |
dcb1ace6bff0ebe626977fad8beb5902ed036ab4 | 8a742d701f6e3106f138fb5bb67a90de64c99abd | /lesson1/task2.py | 6dfca1d43eda210d4e9c6cd878da0521bc1d1796 | [] | no_license | alexabolot1/python-homework | ac51f20ad31b866d56783aaffc44d26582f6b113 | 5d719b5f5e3e67ef7e85a8f347d858255cb31ac7 | refs/heads/main | 2023-03-10T16:11:43.824269 | 2021-01-22T06:23:16 | 2021-01-22T06:23:16 | 330,006,488 | 0 | 0 | null | 2021-02-16T12:52:04 | 2021-01-15T19:43:41 | Python | UTF-8 | Python | false | false | 123 | py | a = input('Введите число')
print(f"Сумма вашего числа {int(a) + int(a + a) + int(a + a + a)}")
| [
"alexabolot@gmail.com"
] | alexabolot@gmail.com |
b04bcc13154c9db03c343251a42fa7662c29b486 | 52c737c2f062f7b07e35ab88e9ea042337463253 | /Weibo_v3/weibo_auto_handle.py | b1c5aade7198eb39ae4667657b948245d1f5a156 | [] | no_license | LichMscy/Weibo | b704779cdfe8c52063bb3c8e2d4e611ffd47bb37 | 6d3ed1e9709ef48c4627dfc69fd9ee9cbe7e628b | refs/heads/master | 2021-01-18T17:26:20.351718 | 2017-04-01T02:47:49 | 2017-04-01T02:47:49 | 86,798,194 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,101 | py | # -*-coding: utf-8 -*-
import re
import time
import datetime
import logging
import json
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.keys import Keys
import requests
from bs4 import BeautifulSoup
from util import parsedate
import persist_iics
logging.basicConfig(level=logging.INFO, format="[%(asctime)s]%(name)s:%(levelname)s:%(message)s")
logger = logging.getLogger(__name__)
logging.getLogger("selenium").setLevel(logging.WARNING)
def init_phantomjs_driver():
headers = {
'Cookie': 'YF-Ugrow-G0=b02489d329584fca03ad6347fc915997; SUB=_2AkMvgPj2dcPxrAFYnPgWyGvkZYpH-jycVZEAAn7uJhMyOhgv7nBSqSVOKynW2PbhU4768kfRGZgNPwXeRA..; SUBP=0033WrSXqPxfM72wWs9jqgMF55529P9D9WWEFXHsNpvgJdQjr1GM.e765JpVF020SKM7e0571hMc',
}
for key, value in headers.items():
webdriver.DesiredCapabilities.PHANTOMJS['phantomjs.page.customHeaders.{}'.format(key)] = value
useragent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36'
webdriver.DesiredCapabilities.PHANTOMJS['phantomjs.page.settings.userAgent'] = useragent
# local path refer phantomjs
driver = webdriver.PhantomJS(executable_path='xxxxx')
driver.set_window_size(1366, 768)
return driver
def update_cookies():
p1 = persist_iics.Persist()
accounts = p1.query_account()
cookie = json.loads(accounts[0][3])
req = requests.Session().get('http://weibo.cn/', cookies=cookie)
if re.findall('登录|注册', req.text, re.S):
logging.error('Account cookies out of date! (Account_id: %s)' % accounts[0][0])
browser = init_phantomjs_driver()
try:
browser.get("http://weibo.com")
time.sleep(3)
failure = 0
while "微博-随时随地发现新鲜事" == browser.title and failure < 5:
failure += 1
username = browser.find_element_by_name("username")
pwd = browser.find_element_by_name("password")
login_submit = browser.find_element_by_class_name('W_btn_a')
username.clear()
username.send_keys(accounts[0][1])
pwd.clear()
pwd.send_keys(accounts[0][2])
login_submit.click()
time.sleep(5)
# if browser.find_element_by_class_name('verify').is_displayed():
# logger.error("Verify code is needed! (Account: %s)" % account)
if "我的首页 微博-随时随地发现新鲜事" in browser.title:
browser.get('http://weibo.cn/')
cookies = dict()
if "我的首页" in browser.title:
for elem in browser.get_cookies():
cookies[elem["name"]] = elem["value"]
p2 = persist_iics.Persist()
p2.save_account_cookies(accounts[0][0], cookies, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
logging.error('Account cookies updated! (Account_id: %s)' % accounts[0][0])
return cookies
except:
logger.error("Weibo Login Unknown exception!")
raise
else:
return cookie
def snatch_news_info(cookies):
p1 = persist_iics.Persist()
p1_result = p1.query_task()
ids = re.findall('weibo.com/(.*?)/(.*?)[?]', p1_result[0][1], re.S)[0]
if ids and ids[0] and ids[1]:
url = 'http://weibo.cn/comment/{}?uid={}'.format(ids[1], ids[0])
req = requests.get(url, cookies=cookies)
while req.status_code != 200:
logging.error('Snatch (Task_id: %s) failed!' % p1_result[0][0])
exit()
soup = BeautifulSoup(req.text, 'lxml')
item = soup.select('span.ctt')[0]
dic = dict()
dic['platform_id'] = 2
dic['media_name'] = '新浪微博'
dic['title'] = item.get_text()[1:23] + '...'
dic['summary'] = item.get_text()[1:290]
dic['src_url'] = result[0][1]
dic['task_id'] = result[0][0]
dic['comment_num'] = ''.join(re.findall('\">\s评论\[(.*?)\]\s<', soup.extract().decode(), re.S))
like = re.findall('\">赞\[(.*?)\]<', soup.extract().decode(), re.S)
if like:
dic['like_num'] = like[0]
dic['forward_num'] = ''.join(re.findall('\">转发\[(.*?)\]<', soup.extract().decode(), re.S))
create_time = soup.select('span.ct')[0].get_text().split('\xa0来自')[0]
dic['create_time'] = parsedate.parse_date(create_time)
p2 = persist_iics.Persist()
p2.insert_news(dic)
logging.info('Snatch wb news success! (Task_id: %s)' % p1_result[0][0])
p3 = persist_iics.Persist()
p3.update_task_status(p1_result[0][0])
logging.error('Snatch (Task_id: %s) failed! Updated status!' % p1_result[0][0])
def comment_prepare():
# TODO: query comment list from db.
comment_list = tuple()
p1 = persist_iics.Persist()
result = p1.query_task()
ids = re.findall('weibo.com/(.*?)/(.*?)[?]', result[0][1], re.S)[0]
url = 'http://weibo.cn/comment/{}?uid={}'.format(ids[1], ids[0])
result = dict()
result['comment'] = comment_list
result['url'] = url
return result
def comment(weibo, wb_content, wb_comment_url):
code = 1
account = weibo['usn']
password = weibo['pwd']
# service_args = [
# '--proxy=127.0.0.1:9999',
# '--proxy-type=http',
# '--ignore-ssl-errors=true'
# ]
browser = init_phantomjs_driver()
try:
browser.get("http://weibo.com")
time.sleep(3)
# browser.save_screenshot("weibocom.png")
failure = 0
while "微博-随时随地发现新鲜事" == browser.title and failure < 5:
failure += 1
username = browser.find_element_by_name("username")
pwd = browser.find_element_by_name("password")
login_submit = browser.find_element_by_class_name('W_btn_a')
username.clear()
username.send_keys(account)
pwd.clear()
pwd.send_keys(password)
login_submit.click()
time.sleep(5)
# if browser.find_element_by_class_name('verify').is_displayed():
# logger.error("Verify code is needed! (Account: %s)" % account)
if "我的首页 微博-随时随地发现新鲜事" in browser.title:
browser.get(wb_comment_url)
comment_avatar = browser.find_element_by_xpath("//div/a[@href='http://weibo.com/']")
comment_avatar.send_keys(Keys.TAB, wb_content)
time.sleep(5)
comment_submit = browser.find_element_by_xpath("//a[@class='W_btn_a']")
comment_submit.click()
time.sleep(5)
code = 0
except:
logger.error("weibo comment Unknown exception!")
raise
return code
# if __name__ == '__main__':
# print(comment({'usn': 'xxxxx', 'pwd': 'xxxxx'}, '死...死...死狗一', 'http://weibo.com/xxxxx/xxxxx'))
| [
"jraqi1994@gmail.com"
] | jraqi1994@gmail.com |
d50868da0f47f63d568213860fafad9a262e1a9d | 926651d078851f96e71e2cc419fe87348b333ba3 | /mysite/mysite/urls.py | 1e5ccbb2f5ede005dbd394a42c6f06242f90e827 | [] | no_license | Kitabo258/secretary2 | edc9c55133aed40b2d66a89a3d32c90afd923297 | d89e74309abe1577aa4248c77ecc73412e27eba3 | refs/heads/master | 2021-08-14T15:11:43.836979 | 2017-11-16T02:32:12 | 2017-11-16T02:32:12 | 110,913,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 907 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^questions/create/$','question.views.question_create'),
url(r'^admin/', admin.site.urls),
url(r'^$', 'show.views.index'),
url(r'^tama/$', 'show.views.tama'),
]
| [
"kiki901028@gmail.com"
] | kiki901028@gmail.com |
4f02cd88aa3d26c3be1bbb4b45c2049a6e8a6317 | 9ab9d9a3883471763edbceea59a0e83170581b5f | /eggs/bx_python-0.7.1_7b95ff194725-py2.7-linux-i686-ucs4.egg/EGG-INFO/scripts/bed_extend_to.py | 2985cc3497acf222c69151a76b253624baa01752 | [
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | asmmhossain/phyG | 24dc211dad5b3e89c87ff384e841f2e98bbd52db | 023f505b705ab953f502cbc55e90612047867583 | refs/heads/master | 2022-11-21T12:43:46.172725 | 2014-02-14T12:33:08 | 2014-02-14T12:33:08 | 13,800,552 | 0 | 1 | NOASSERTION | 2020-07-25T21:05:41 | 2013-10-23T11:04:25 | Python | UTF-8 | Python | false | false | 1,132 | py | #!/afs/bx.psu.edu/project/pythons/linux-i686-ucs4/bin/python2.7
"""
Read BED file and extend each record to the specified minimum length. If chromosome
size information is provided trim extended intervals.
usage: %prog amount [ chrom_file ] < bed_file
"""
import sys
from bx.intervals.io import GenomicIntervalReader
length = int( sys.argv[1] )
chrom_len = None
if len( sys.argv ) > 2:
chrom_len = dict( ( fields[0], int( fields[1] ) ) for fields in map( str.split, open( sys.argv[2] ) ) )
for interval in GenomicIntervalReader( sys.stdin ):
if interval.end - interval.start < length:
start = interval.start
end = interval.end
# Extend in positive direction on strand
if interval.strand == "+":
end = start + length
else:
start = end - length
# Trim
if start < 0:
start = 0
if chrom_len and end > chrom_len[interval.chrom]:
end = chrom_len[interval.chrom]
# Set new start and end
interval.start = start
interval.end = end
# Output possibly adjusted interval
print interval
| [
"mukarram819@gmail.com"
] | mukarram819@gmail.com |
916795017de2b6b98ecb83425e776e9a8975c090 | 8212c7b8c532681107f735643ab47bb20da177ac | /content_indexer/content_indexer.py | cb4003e22f16b032b2dee9eb2467f20f7d22044f | [] | no_license | jjenner689/Python | 31458c2ca0b612776387fce3bfd3ad56fd07f031 | 665c25077b86b7c802312d5ef4b1ec740b752f8b | refs/heads/master | 2016-08-10T17:06:34.721622 | 2015-10-05T15:00:24 | 2015-10-05T15:00:24 | 43,438,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,050 | py | import string
import os
"""
Content Indexing Engine Capstone Project
You have a bunch of files in the file system! How can we index these files to make them easily searchable by keyword?
Indexing is a way of moving work 'upfront' so that when the user searches, less work is needed to get them the right search results.
Tips:
Look into array .extend() method
Look into string module , and .punctuation
Look into the set() builtin data type
Example index:
index = {'cat':['filename1','filename2','filename3'],'dog':['filename2',filename3]}
"""
#Tip: upgrade your recursive find code from a previous exercise to return a list of files
def recursive_find(name, index = {}):
array = os.listdir(name)
for i in array:
path = os.path.join(name, i)
if os.path.isdir(path):
recursive_find(path, index)
else:
data_string = read_data(path)
data_string = strip_punctuation(data_string)
data = split_data_string(data_string)
index = add_to_index(data, i, index)
return index
stop_words = ['a','an','and','i']
def read_data(filename):
with open(filename,"r") as f:
return f.read()
def strip_punctuation(data_string):
punctuation = ["\n",",","'","/","\"","?","+","*","(",")","#","!", "-"]
for i in punctuation:
data_string = data_string.replace(i, ' ')
return data_string
def split_data_string(data_string):
data = data_string.split(" ")
data = list(set(data))
data = map(lambda x: x.lower(), data)
if '' in data:
data.remove('')
return data
def add_to_index(words,filename,index):
for i in words:
if i in index:
index[i].append(filename)
else:
index[i] = [filename]
return index
def handle_words(response, index):
words = response.split(' ')
both = ''
if set(words).issubset(set(index)):
for i in range(len(words)-1):
print index[words[i]], '/', index[words[i+1]]
both = list(set(index[words[i]]) & set(index[words[i+1]]))
if both == '':
both = index[words[0]]
print '\n%s found in files %s' % (words, both)
else:
print '\n%s not found....' % list(set(words) - set(index))
def run_interactive():
print '''\n***Welcome to Josh's content index!***\n'''
#index = recursive_find('/home/josh/Desktop/text_files')
response_1 = ''
response_2 = ''
while not os.path.isdir(response_1):
if response_1 == 'q':
return
response_1 = raw_input('Please enter a valid directory to index or press q to quit > ')
index = recursive_find(response_1)
while response_2 != 'q':
response_2 = raw_input('\nEnter the item/s (separated by spaces) you would like to search or press q to quit > ')
if response_2 == 'q':
break
handle_words(response_2, index)
print "\nThankyou for using Josh's content index.......\n"
run_interactive()
| [
"jjenner689@gmail.com"
] | jjenner689@gmail.com |
820e15a0e02606bf4165dcedc96fab3b641663ef | da8a2b9404e6bb9f3d6ca5a786fd01eddf440ec4 | /lyric/apps.py | 618476d0dd10601771f2dbee5c71aa66f38b89f0 | [] | no_license | BrightHao/Django_Music | c099e22d4606a8101f1be3be3a9f4e7b2babcc25 | c31df1a0d3f34594c65bddcc88815e22bc2f0902 | refs/heads/master | 2023-03-17T20:01:33.915667 | 2021-03-13T05:37:49 | 2021-03-13T05:37:49 | 347,283,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | from django.apps import AppConfig
class LyricConfig(AppConfig):
name = 'lyric'
| [
"861759757@qq.com"
] | 861759757@qq.com |
f987964ac120b8b838a54bd03b1a6cd553eba48d | e4b52bd79c3a2fdf64f1927ddd04c87fa401fed8 | /myshop/migrations/0003_migrate_translatable_fields.py | cd9bbd763d5d51bc2a494ed80d982b7711caf5e0 | [] | no_license | ObsidianRock/shop | 8e1051efb298c65d6803fe76be07f222c19d60dd | 2a6613f28de3d577288653afc6b1a470fcde6a89 | refs/heads/master | 2021-01-19T10:01:26.797821 | 2017-04-13T16:02:34 | 2017-04-13T16:02:34 | 87,817,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
translatable_models = {
'Category': ['name', 'slug'],
'Product': ['name', 'slug', 'description'],
}
def forwards_func(apps, schema_editor):
for model, fields in translatable_models.items():
Model = apps.get_model('myshop', model)
ModelTranslation = apps.get_model('myshop', '{}Translation'.format(model))
for obj in Model.objects.all():
translation_fields = {field: getattr(obj, field) for field in fields}
translation = ModelTranslation.objects.create(
master_id=obj.pk,
language_code=settings.LANGUAGE_CODE,
**translation_fields)
def backwards_func(apps, schema_editor):
for model, fields in translatable_models.items():
Model = apps.get_model('myshop', model)
ModelTranslation = apps.get_model('myshop', '{}Translation'.format(model))
for obj in Model.objects.all():
translation = _get_translation(obj, ModelTranslation)
for field in fields:
setattr(obj, field, getattr(translation, field))
obj.save()
def _get_translation(obj, MyModelTranslation):
translations = MyModelTranslation.objects.filter(master_id=obj.pk)
try:
return translations.get(language_code=settings.LANGUAGE_CODE)
except ObjectDoesNotExist:
return translations.get()
class Migration(migrations.Migration):
dependencies = [
('myshop', '0002_add_translation_model'),
]
operations = [
migrations.RunPython(forwards_func, backwards_func),
]
| [
"ObsidianRock@users.noreply.github.com"
] | ObsidianRock@users.noreply.github.com |
cccee8c95ce17bb44043b1a20a899ac4161055be | ee22ec2076a79e8de3011377fe205bc87163ab9f | /src/basic-c3/func-let.py | 8c9c6ff3fea14adfbe60b86692ad4981a5710241 | [] | no_license | n18018/programming-term2 | 039a95c67372a38a34e2aa8c5975045a9fc731be | 86c455269eed312def529604e1ac3b00f476226c | refs/heads/master | 2020-03-22T08:59:29.545280 | 2018-08-29T07:57:37 | 2018-08-29T07:57:37 | 139,806,131 | 0 | 0 | null | 2018-07-05T06:42:11 | 2018-07-05T06:42:11 | null | UTF-8 | Python | false | false | 326 | py | # 関数を定義
def mul_func(a, b):
return a * b
def div_func(a, b):
return a / b
# mul_func関数を変数に代入
func = mul_func
# 代入した変数で関数を使う
result = func(2, 3)
print(result)
# div_func関数を変数に代入する場合
func2 = div_func
result = func2(10, 5)
print(result)
| [
"n18018@std.it-college.ac.jp"
] | n18018@std.it-college.ac.jp |
b477a9cee5d1d50b4effb6e86d254fa10629c6f1 | d63811f9944dead8a745a46e1382f64800c72c5e | /linuxYazKampı/sonuç/petimebak/adverts/views.py | ea08662d7d76a0d3d26ba3224ffcc6795ba666ad | [] | no_license | Arciles/Notes | 9dd77425209b9a10a6503dcd27a5c48c9666c35b | 095e361bdb11ca72c3bff801ed4a9b938827c84a | refs/heads/master | 2020-12-11T05:46:11.416975 | 2014-11-22T12:08:37 | 2014-11-22T12:08:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,924 | py | from datetime import datetime
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template import RequestContext
from adverts.forms import AdvertCreationForm, PhotoCreationForm
from adverts.models import Advert
from messages.forms import NewMessageForm
def home(request):
adverts = (Advert.objects
.filter(is_published=True, end_date__gte=datetime.now())
.order_by("-date_created"))
return render_to_response("index.html", {
"adverts": adverts
}, RequestContext(request))
@login_required
def new_advert(request):
form = AdvertCreationForm()
success = False
if request.method == "POST":
form = AdvertCreationForm(request.POST)
if form.is_valid():
form.instance.user = request.user
form.save()
success = True
return render_to_response("new_advert.html", {
"form": form,
"success": success,
}, RequestContext(request))
def detail_advert(request, pk):
advert = get_object_or_404(Advert, id=pk)
message_sent = request.GET.get("message_sent")
form = NewMessageForm()
return render_to_response("detail.html", {
"advert": advert,
"form": form,
"message_sent": message_sent
}, RequestContext(request))
def photo_add(request, pk):
advert = get_object_or_404(Advert, id=pk)
form = PhotoCreationForm()
if request.method == "POST":
form = PhotoCreationForm(request.POST, request.FILES)
if form.is_valid():
form.instance.advert = advert
form.save()
return redirect(reverse('detail_advert', args=[pk]))
return render_to_response("photo_add.html", {
"advert": advert,
"form": form,
}, RequestContext(request))
| [
"salihklc91@gmail.com"
] | salihklc91@gmail.com |
49d7b8bebbad46079eac386c638ee23415db1cdd | 0c8f747e59846ddd7d2479930d38db361ff59bd6 | /secrets.py | b6eb1026c3cc29b7a0e1dd2f39b9bbafa41a8228 | [] | no_license | PaveTranquil/Texity | 39289163e1c5b4ce4d3dea7d718916db8661890a | a4798bbf0cf89496ca12bdb1cd48a3ef9ce4566c | refs/heads/main | 2023-04-13T06:28:32.349938 | 2021-04-27T20:00:52 | 2021-04-27T20:00:52 | 354,387,657 | 3 | 1 | null | 2021-04-06T05:04:37 | 2021-04-03T20:26:48 | Python | UTF-8 | Python | false | false | 69 | py | with open('apikey.txt') as file:
API_KEY = file.readline().strip()
| [
"noreply@github.com"
] | PaveTranquil.noreply@github.com |
ae8bf909464124ce2e2e1c318f37dd319d3ef4ac | 5a99d1f7e0363878a5a94732598410a06008d2ed | /multimeter/_tasks.py | 204079b61c271657c9be403e1e4f4d875d9109f1 | [] | no_license | av-pavlov/multimeter | d8b93e4a6acec420e8593ea8864db9d67ab4a177 | d8132db1e5e0c3b153ab142599c93b302c18fbf5 | refs/heads/master | 2021-04-29T18:44:15.300083 | 2018-03-23T04:38:45 | 2018-03-23T04:38:45 | 121,699,767 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,491 | py | # -*- coding: utf-8 -*=
import sys
import subprocess
from collections import OrderedDict
from os import listdir, stat
from os.path import isdir, join, isfile
from collections import OrderedDict
from .helpers import load_json, save_json, validate_code, check_or_create_dir, load_tests
class Tasks:
""" Массив олимпиадных задач """
def __init__(self, settings, languages):
self._settings = settings
self._languages = languages
self.tasks = OrderedDict()
self.load()
def __len__(self):
return len(self.tasks)
def __setitem__(self, key, value):
self.tasks[key] = value
def __getitem__(self, item):
return self.tasks[item]
def __delitem__(self, key):
del self.tasks[key]
def __contains__(self, item):
return item in self.tasks
def __iter__(self):
return self.tasks.__iter__()
def items(self):
return self.tasks.items()
def keys(self):
return sorted(self.tasks.keys())
def load(self):
""" Загрузить олимпиадные задачи из подкаталогов рабочего каталога """
# Просмотрим подкаталоги рабочего каталога
dirs = sorted(listdir(self._settings.work_dir))
for name in dirs:
path = join(self._settings.work_dir, name)
if isdir(path) and '.' not in path:
try:
self.tasks[name] = Task(name, path)
self.tasks[name].load()
except (TypeError, FileNotFoundError, UnicodeDecodeError):
# Если не удалось прочитать описание задачи - игнорируем этот подкаталог
pass
def save(self):
""" Сохранить описания олимпиадных задач в их подкаталогах """
for task in self.tasks:
task.save()
def get_results(self, task_code, username, attempt=None):
""" Получить результаты проверки решений олимпиадной задачи определенным пользователем """
answer = []
# Начнем просмотр файлов результатов в каталоге .results
results_dir = join(self._settings.work_dir, '.results')
for filename in listdir(results_dir):
# Только только JSON-файлы
if filename[-5:] != '.json':
continue
name = filename[:-5]
(_task_code, _username, _attempt) = name.split('-')
# Только выбранная олимпиадная задача
if task_code != _task_code:
continue
# Только задачи определенного пользователя
if username != _username:
continue
# Если была выбрана попытка, то нужна только определенная попытка
if attempt is not None and attempt != _attempt:
continue
# Прочитаем результат проверки и добавим в номер попытки
res = load_json(filename, {}, results_dir)
res['attempt'] = int(_attempt)
answer.append(res)
return sorted(answer, key=lambda x: x['attempt'])
def validate_task(self, code, data, check_uniqueness):
""" Проверка задания
:param check_uniqueness:
:param data:
:param code:
"""
codes_list = self.tasks if check_uniqueness else []
errors = validate_code(code, codes_list, 'Код задания')
if not data.get('name'):
errors.append('Наименование не задано')
return errors
class TestSuite:
# Стратегия отображения результатов
BRIEF = 'brief'
FULL = 'full'
ERROR = 'error'
RESULTS = (
(BRIEF, 'Отображаются только баллы за подзадачу целом'),
(FULL, 'Отображаются баллы за каждый тест'),
(ERROR, 'Отображаются баллы за подзадачу в целом либо результат первой ошибки'),
)
# Стратегия начисления баллов
PARTIAL = 'partial'
ENTIRE = 'entire'
SCORING = (
(PARTIAL, 'Баллы начисляются пропорционально'),
(ENTIRE, 'Подзадача оценивается как единое целое'),
)
task = None
code = ''
ts_dir = ''
name = ''
results = FULL
scoring = PARTIAL
test_score = 0
total_score = 0
depends = []
def __init__(self, task, code, data):
self.task = task
self.code = code
self.ts_dir = join(task.test_suites_dir, code)
self.name = data['name']
self.scoring = data['scoring']
self.results = data['results']
self.test_score = data.get('test_score', 0)
self.total_score = data.get('total_score', 0)
self.tests = load_tests(self.ts_dir)
self.depends = data.get('depends', self.depends)
class Task:
# Важные атрибуты
code = '' # Код
task_dir = '' # Каталог
# Атрибуты из конфигурационного файла
name = '' # Имя
timeout = 2.0 # Предельное время выполнения в секундах, при превышении - работа программа будет завершена
time_limit = 1.0 # Лимит времени выполнения в секундах, при превышении - вердикт TL
memory_limit = 256 # Лимит по количеству памяти в Мб, при превышении - вердикт ML
input_file = 'input.txt' # Имя выходного файла
output_file = 'output.txt' # Имя выходного файла
# Атрибуты заполняемые из файлов
statement = '' # Условия задачи
preliminary = [] # Список примеров для предварительной проверки решения
test_suites = OrderedDict() # Словарь подзадач, подзадача - это список тестов
def __init__(self, code, task_dir):
"""
Создание задачи по каталогу
:param code: код задачи
:param task_dir: каталог задачи
"""
self.code = code
self.task_dir = task_dir
@property
def brief_name(self):
return '%s. %s' % (self.code, self.name)
@property
def full_name(self):
return 'Задача %s. %s' % (self.code, self.name)
@property
def config_file(self):
return join(self.task_dir, 'task.json')
@property
def statements_file(self):
return join(self.task_dir, 'task.html')
@property
def checker(self):
return join(self.task_dir, 'check.exe')
@property
def solutions_dir(self):
return join(self.task_dir, 'solutions')
@property
def preliminary_dir(self):
return join(self.task_dir, 'tests', 'samples')
@property
def test_suites_dir(self):
return join(self.task_dir, 'tests')
def load(self):
""" Читаем описание задачи из конфигурационных файлов """
# Загружаем атрибуты задачи из конфигурационного файла
config = load_json(self.config_file, {})
if 'name' in config:
self.name = str(config['name'])
if 'timeout' in config:
self.timeout = float(config['timeout'])
if 'time_limit' in config:
self.time_limit = float(config['time_limit'])
if 'memory_limit' in config:
self.memory_limit = float(config['memory_limit'])
if 'input_file' in config:
self.input_file = str(config['input_file'])
if 'output_file' in config:
self.output_file = str(config['output_file'])
if 'test_suites' in config:
tss_from_file = config['test_suites']
if isinstance(tss_from_file, OrderedDict):
self.test_suites = OrderedDict()
for code, ts in tss_from_file.items():
self.test_suites[code] = TestSuite(self, code, ts)
# Загружаем условия задачи
try:
statement = open(self.statements_file, encoding='utf-8')
self.statement = statement.read()
except FileNotFoundError:
# Если файла нет - молча ничего не делаем
pass
# Загружаем примеры
self.preliminary = load_tests(self.preliminary_dir)
def save(self):
""" Сохранение задачи в task.json в каталоге задачи """
keys = ['name', 'brief_name', 'timeout', 'input_file', 'output_file', 'test_suites']
config = dict(zip(keys, [self.__dict__[k] for k in keys]))
save_json(config, self.config_file)
with open(self.statements_file, mode='w', encoding='utf-8') as f:
f.write(self.statement)
f.close()
def verify(self):
if not isdir(self.task_dir):
raise Exception('Task {} folder not found: {}'.format(self.code, self.task_dir))
check_or_create_dir(self.solutions_dir)
check_or_create_dir(self.test_suites_dir)
for test in self.preliminary:
self.verify_test(test)
total_score = 0
for suite_code, suite in self.test_suites.items():
if suite.scoring == TestSuite.ENTIRE:
total_score += suite.total_score
elif suite.scoring == TestSuite.PARTIAL:
total_score += suite.test_score * len(suite.tests)
for test in suite.tests:
self.verify_test(test, suite_code)
if total_score != 100:
raise Exception('Sum of tests score of task {} not equal 100 !!!'.format(self.code))
def verify_test(self, test, suite_code=None):
""" Проверка теста
:param test: имя теста
:param suite_code:
"""
if suite_code is None:
test_name = "Preliminary test {}".format(test)
input_file = join(self.preliminary_dir, test)
else:
test_name = "Test {} in {}".format(test, suite_code)
input_file = join(self.test_suites_dir, suite_code, test)
answer_file = input_file + '.a'
if not isfile(input_file):
raise Exception('{} for task {} not found !!!'.format(test_name, self.code))
if not isfile(answer_file):
raise Exception('{} for task {} don\'t have answer !!!'.format(test_name, self.code))
try:
subprocess.check_call(
[self.checker, input_file, answer_file, answer_file],
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL)
except FileNotFoundError:
raise Exception('Checker for task {} is not found !!!'.format(self.code))
except subprocess.CalledProcessError:
raise Exception('Checker for task {} is not working !!!'.format(self.code))
def check(self):
""" Проверка ответа участника """
answer = '??'
try:
output_file = 'stdout'
if isfile(self.output_file) and stat(self.output_file).st_size > 0:
output_file = self.output_file
subprocess.check_call([
self.checker,
self.input_file,
output_file,
'answer.txt',
])
answer = 'OK'
except subprocess.CalledProcessError as error:
if error.returncode == 1:
answer = 'WA' # Wrong answer
elif error.returncode == 2:
answer = 'PE' # Presentation error
finally:
return answer
| [
"7911562+av-pavlov@users.noreply.github.com"
] | 7911562+av-pavlov@users.noreply.github.com |
4a460011144f616f403fdf0cd4870acf4bd66824 | 3bdc38b3ba7bcd87f10f24fdae3832cc8344ffba | /douban_spider/doubanspider/douban_image_pipelines.py | c34d5d2d875e4668c16b617c5a0411846c223f8e | [] | no_license | sheng-jie/Learning.Python | b1eee7fe53b0b11b8ca4e2e8716dd6ebb06948d6 | 61f0336c76b9a769021238664142cbc0e4a39d02 | refs/heads/master | 2020-03-20T23:01:44.507048 | 2018-06-19T01:46:21 | 2018-06-19T01:46:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | import os
import scrapy
from scrapy.exceptions import DropItem
from scrapy.pipelines.images import ImagesPipeline
class DoubanImagesPipeline(ImagesPipeline):
def get_media_requests(self, item, info):
for image_url in item['image_urls']:
yield scrapy.Request(image_url, meta={'item': item})
def item_completed(self, results, item, info):
image_paths = [x['path'] for ok, x in results if ok]
if not image_paths:
raise DropItem("Item contains no images")
# os.rename('books/' + image_paths[0], 'books/full/' + item['name'] + '.jpg')
return item
def file_path(self, request, response=None, info=None):
item = request.meta['item']
file_format = request.url.split('.')[-1]
filename = u'full/{0[name]}.{1}'.format(item, file_format)
return filename
| [
"ysjshengjie@live.cn"
] | ysjshengjie@live.cn |
3eeca3eaadcaf592565384f53f3160774272b0ca | 86f5ed0463be0b32508865889f5a77fdda549c22 | /raspberryPi/ultrasonic_client.py | 3441acce60eca5f0c6ba2d110d8c8b66f0352044 | [
"BSD-2-Clause"
] | permissive | pseudoyim/galvaneye | e6d7d210bacab5841e98cf275744dcc4ea6454aa | c7c1f1bb893fb8fb6f83c6765473d506979ec4b0 | refs/heads/master | 2023-01-16T00:43:27.283533 | 2020-11-22T03:09:58 | 2020-11-22T03:09:58 | 70,966,508 | 68 | 25 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | from socket import *
import time
import RPi.GPIO as GPIO
GPIO.setwarnings(False)
# create a socket and bind socket to the host
client_socket = socket(AF_INET, SOCK_STREAM)
client_socket.connect(('10.10.10.2', 8002))
def measure():
"""
measure distance
"""
GPIO.output(GPIO_TRIGGER, True)
time.sleep(0.00001)
GPIO.output(GPIO_TRIGGER, False)
start = time.time()
while GPIO.input(GPIO_ECHO)==0:
start = time.time()
while GPIO.input(GPIO_ECHO)==1:
stop = time.time()
elapsed = stop-start
distance = (elapsed * 34300)/2
return distance
# referring to the pins by GPIO numbers
GPIO.setmode(GPIO.BCM)
# define pi GPIO
GPIO_TRIGGER = 16
GPIO_ECHO = 18
# output pin: Trigger
GPIO.setup(GPIO_TRIGGER,GPIO.OUT)
# input pin: Echo
GPIO.setup(GPIO_ECHO,GPIO.IN)
# initialize trigger pin to low
GPIO.output(GPIO_TRIGGER, False)
try:
while True:
distance = measure()
print "Distance : %.1f cm" % distance
# send data to the host every 0.5 sec
client_socket.send(str(distance))
time.sleep(0.5)
finally:
client_socket.close()
GPIO.cleanup()
| [
"paul.j.yim@gmail.com"
] | paul.j.yim@gmail.com |
8c063266cbd431b2d5053fd81731c057bd7c9d32 | ca1da3b2d522566132ef48319bd2328813a0a0cc | /tests/callbacks/test_progress_bar.py | f621e7022801269b16d12ae7f3d73f8318b9fa95 | [
"Apache-2.0"
] | permissive | anthonytec2/pytorch-lightning | cc4724c3493e93c6c5b89982abe9caf78a969ee2 | 9759491940c4108ac8ef01e0b53b31f03a69b4d6 | refs/heads/master | 2022-11-18T15:48:40.110614 | 2020-07-17T09:54:24 | 2020-07-17T09:54:24 | 273,110,455 | 1 | 0 | Apache-2.0 | 2021-05-13T20:38:51 | 2020-06-18T01:06:34 | Python | UTF-8 | Python | false | false | 6,732 | py | import pytest
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ProgressBarBase, ProgressBar, ModelCheckpoint
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.base import EvalModelTemplate
@pytest.mark.parametrize('callbacks,refresh_rate', [
([], 1),
([], 2),
([ProgressBar(refresh_rate=1)], 0),
([ProgressBar(refresh_rate=2)], 0),
([ProgressBar(refresh_rate=2)], 1),
])
def test_progress_bar_on(tmpdir, callbacks, refresh_rate):
"""Test different ways the progress bar can be turned on."""
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=callbacks,
progress_bar_refresh_rate=refresh_rate,
max_epochs=1,
overfit_batches=5,
)
progress_bars = [c for c in trainer.callbacks if isinstance(c, ProgressBarBase)]
# Trainer supports only a single progress bar callback at the moment
assert len(progress_bars) == 1
assert progress_bars[0] is trainer.progress_bar_callback
@pytest.mark.parametrize('callbacks,refresh_rate', [
([], 0),
([], False),
([ModelCheckpoint('../trainer')], 0),
])
def test_progress_bar_off(tmpdir, callbacks, refresh_rate):
"""Test different ways the progress bar can be turned off."""
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=callbacks,
progress_bar_refresh_rate=refresh_rate,
)
progress_bars = [c for c in trainer.callbacks if isinstance(c, ProgressBar)]
assert 0 == len(progress_bars)
assert not trainer.progress_bar_callback
def test_progress_bar_misconfiguration():
"""Test that Trainer doesn't accept multiple progress bars."""
callbacks = [ProgressBar(), ProgressBar(), ModelCheckpoint('../trainer')]
with pytest.raises(MisconfigurationException, match=r'^You added multiple progress bar callbacks'):
Trainer(callbacks=callbacks)
def test_progress_bar_totals(tmpdir):
"""Test that the progress finishes with the correct total steps processed."""
model = EvalModelTemplate()
trainer = Trainer(
default_root_dir=tmpdir,
progress_bar_refresh_rate=1,
limit_val_batches=1.0,
max_epochs=1,
)
bar = trainer.progress_bar_callback
assert 0 == bar.total_train_batches
assert 0 == bar.total_val_batches
assert 0 == bar.total_test_batches
trainer.fit(model)
# check main progress bar total
n = bar.total_train_batches
m = bar.total_val_batches
assert len(trainer.train_dataloader) == n
assert bar.main_progress_bar.total == n + m
# check val progress bar total
assert sum(len(loader) for loader in trainer.val_dataloaders) == m
assert bar.val_progress_bar.total == m
# main progress bar should have reached the end (train batches + val batches)
assert bar.main_progress_bar.n == n + m
assert bar.train_batch_idx == n
# val progress bar should have reached the end
assert bar.val_progress_bar.n == m
assert bar.val_batch_idx == m
# check that the test progress bar is off
assert 0 == bar.total_test_batches
assert bar.test_progress_bar is None
trainer.test(model)
# check test progress bar total
k = bar.total_test_batches
assert sum(len(loader) for loader in trainer.test_dataloaders) == k
assert bar.test_progress_bar.total == k
# test progress bar should have reached the end
assert bar.test_progress_bar.n == k
assert bar.test_batch_idx == k
def test_progress_bar_fast_dev_run(tmpdir):
model = EvalModelTemplate()
trainer = Trainer(
default_root_dir=tmpdir,
fast_dev_run=True,
)
progress_bar = trainer.progress_bar_callback
assert 1 == progress_bar.total_train_batches
# total val batches are known only after val dataloaders have reloaded
trainer.fit(model)
assert 1 == progress_bar.total_val_batches
assert 1 == progress_bar.train_batch_idx
assert 1 == progress_bar.val_batch_idx
assert 0 == progress_bar.test_batch_idx
# the main progress bar should display 2 batches (1 train, 1 val)
assert 2 == progress_bar.main_progress_bar.total
assert 2 == progress_bar.main_progress_bar.n
trainer.test(model)
# the test progress bar should display 1 batch
assert 1 == progress_bar.test_batch_idx
assert 1 == progress_bar.test_progress_bar.total
assert 1 == progress_bar.test_progress_bar.n
@pytest.mark.parametrize('refresh_rate', [0, 1, 50])
def test_progress_bar_progress_refresh(tmpdir, refresh_rate):
"""Test that the three progress bars get correctly updated when using different refresh rates."""
model = EvalModelTemplate()
class CurrentProgressBar(ProgressBar):
train_batches_seen = 0
val_batches_seen = 0
test_batches_seen = 0
def on_batch_start(self, trainer, pl_module):
super().on_batch_start(trainer, pl_module)
assert self.train_batch_idx == trainer.batch_idx
def on_batch_end(self, trainer, pl_module):
super().on_batch_end(trainer, pl_module)
assert self.train_batch_idx == trainer.batch_idx + 1
if not self.is_disabled and self.train_batch_idx % self.refresh_rate == 0:
assert self.main_progress_bar.n == self.train_batch_idx
self.train_batches_seen += 1
def on_validation_batch_end(self, trainer, pl_module):
super().on_validation_batch_end(trainer, pl_module)
if not self.is_disabled and self.val_batch_idx % self.refresh_rate == 0:
assert self.val_progress_bar.n == self.val_batch_idx
self.val_batches_seen += 1
def on_test_batch_end(self, trainer, pl_module):
super().on_test_batch_end(trainer, pl_module)
if not self.is_disabled and self.test_batch_idx % self.refresh_rate == 0:
assert self.test_progress_bar.n == self.test_batch_idx
self.test_batches_seen += 1
progress_bar = CurrentProgressBar(refresh_rate=refresh_rate)
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=[progress_bar],
progress_bar_refresh_rate=101, # should not matter if custom callback provided
limit_train_batches=1.0,
num_sanity_val_steps=2,
max_epochs=3,
)
assert trainer.progress_bar_callback.refresh_rate == refresh_rate
trainer.fit(model)
assert progress_bar.train_batches_seen == 3 * progress_bar.total_train_batches
assert progress_bar.val_batches_seen == 3 * progress_bar.total_val_batches + trainer.num_sanity_val_steps
trainer.test(model)
assert progress_bar.test_batches_seen == progress_bar.total_test_batches
| [
"noreply@github.com"
] | anthonytec2.noreply@github.com |
1908dbef5523b4c7baf3492790e727cfa3424bd7 | 13a1fae2f825f6c16fa15e47556e4e0b33da15b2 | /stepik course/stepik_3_1_1.py | f1d88368217a5e569d6bd9ca3bbf9a90f41a9d1a | [] | no_license | kubenet/pyLearn | 98a4a2c3243f2c03d00af82451863d65a8a38350 | 3d850de58ba525040c1f72730dcd31368766af53 | refs/heads/master | 2020-07-20T21:13:14.266289 | 2020-03-10T15:53:55 | 2020-03-10T15:53:55 | 206,710,216 | 1 | 0 | null | 2019-09-06T04:05:23 | 2019-09-06T04:05:22 | null | UTF-8 | Python | false | false | 786 | py | # Напишите функцию f(x), которая возвращает значение следующей функции, определённой на всей числовой прямой:
# 1−(x+2)^2, при x≤−2
# -x/2, при −2<x≤2
# ((x−2)^2)+1, при 2<x
#
# Требуется реализовать только функцию, решение не должно осуществлять операций ввода-вывода.
# Sample Input 1:
# 4.5
# Sample Output 1:
# 7.25
# Sample Input 2:
# -4.5
# Sample Output 2:
# -5.25
# Sample Input 3:
# 1
# Sample Output 3:
# -0.5
def f(x):
if x > 2:
return ((x-2)**2)+1
elif x <= -2:
return 1-(x+2)**2
elif (x <= 2) or (-2 < x):
return -x/2
print(f(4.5))
print(f(-4.5))
print(f(1))
| [
"kubenet@gmail.com"
] | kubenet@gmail.com |
a03c242a1a98c722662d25054877d5f1d75f61b1 | 8b1ef7f1a7e11b8c39b0546f521a4533f82bb6ba | /PyQt5- Login Form/main.py | 57c90f7324b9bdcd010b805087c425faa78e58d7 | [] | no_license | muhammedzahit/QT5 | 4b6c8e1461f352d4143949486019071285e42d53 | b82283d517023d5860d37e421e5b9babad503469 | refs/heads/master | 2023-02-03T08:19:31.775039 | 2020-12-17T10:25:08 | 2020-12-17T10:25:08 | 289,100,610 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,098 | py | # -*- coding: utf-8 -*-
from PyQt5 import QtCore, QtGui, QtWidgets
import sqlite3
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(800, 600))
MainWindow.setMaximumSize(QtCore.QSize(800, 600))
self.centralwidget = QtWidgets.QWidget(MainWindow)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
self.centralwidget.setSizePolicy(sizePolicy)
self.centralwidget.setObjectName("centralwidget")
self.bg_photo = QtWidgets.QLabel(self.centralwidget)
self.bg_photo.setGeometry(QtCore.QRect(0, 0, 800, 600))
self.bg_photo.setAutoFillBackground(False)
self.bg_photo.setText("")
self.bg_photo.setPixmap(QtGui.QPixmap("imgs/bg.jpg"))
self.bg_photo.setScaledContents(True)
self.bg_photo.setObjectName("bg_photo")
self.stackedWidget = QtWidgets.QStackedWidget(self.centralwidget)
self.stackedWidget.setGeometry(QtCore.QRect(110, 50, 511, 411))
self.stackedWidget.setObjectName("stackedWidget")
self.page_register = QtWidgets.QWidget()
self.page_register.setStyleSheet("*{\n"
"font: italic 16pt \"Brush Script MT\";\n"
"color : brown;\n"
"background: transparent\n"
"}\n"
"\n"
"QPushButton\n"
"\n"
"{\n"
"background-color : rgb(85, 0, 0, 0.7);\n"
"}\n"
"\n"
"QLabel\n"
"{\n"
"color: yellow\n"
"}\n"
"\n"
"QLineEdit{\n"
"background-color : rgb(85, 0, 0, 0.7);\n"
"color:blue;\n"
"}")
self.page_register.setObjectName("page_register")
self.formLayoutWidget = QtWidgets.QWidget(self.page_register)
self.formLayoutWidget.setGeometry(QtCore.QRect(10, 150, 481, 191))
self.formLayoutWidget.setObjectName("formLayoutWidget")
self.formLayout = QtWidgets.QFormLayout(self.formLayoutWidget)
self.formLayout.setContentsMargins(0, 0, 0, 0)
self.formLayout.setObjectName("formLayout")
self.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.register_name = QtWidgets.QLineEdit(self.formLayoutWidget)
self.register_name.setObjectName("register_name")
self.verticalLayout_4.addWidget(self.register_name)
self.register_password = QtWidgets.QLineEdit(self.formLayoutWidget)
self.register_password.setObjectName("register_password")
self.register_password.setEchoMode(QtWidgets.QLineEdit.Password)
self.verticalLayout_4.addWidget(self.register_password)
self.register_confirm_password = QtWidgets.QLineEdit(self.formLayoutWidget)
self.register_confirm_password.setObjectName("register_confirm_password")
self.register_confirm_password.setEchoMode(QtWidgets.QLineEdit.Password)
self.verticalLayout_4.addWidget(self.register_confirm_password)
self.formLayout.setLayout(0, QtWidgets.QFormLayout.FieldRole, self.verticalLayout_4)
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.label_3 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_3.setObjectName("label_3")
self.verticalLayout_3.addWidget(self.label_3)
self.label_4 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_4.setObjectName("label_4")
self.verticalLayout_3.addWidget(self.label_4)
self.label_5 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_5.setObjectName("label_5")
self.verticalLayout_3.addWidget(self.label_5)
self.formLayout.setLayout(0, QtWidgets.QFormLayout.LabelRole, self.verticalLayout_3)
self.registerButton = QtWidgets.QPushButton(self.formLayoutWidget)
self.registerButton.setStyleSheet("")
self.registerButton.setObjectName("registerButton")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.registerButton)
self.stackedWidget.addWidget(self.page_register)
self.page_login = QtWidgets.QWidget()
self.page_login.setStyleSheet("*{\n"
"font: italic 16pt \"Brush Script MT\";\n"
"color : brown;\n"
"background: transparent\n"
"}\n"
"\n"
"QPushButton\n"
"\n"
"{\n"
"background-color : rgb(85, 0, 0, 0.7);\n"
"}\n"
"\n"
"QLabel\n"
"{\n"
"color: yellow\n"
"}\n"
"\n"
"QLineEdit{\n"
"background-color : rgb(85, 0, 0, 0.7);\n"
"color:blue;\n"
"}")
self.page_login.setObjectName("page_login")
self.frame = QtWidgets.QFrame(self.page_login)
self.frame.setGeometry(QtCore.QRect(10, 10, 491, 391))
self.frame.setStyleSheet("")
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.login_button = QtWidgets.QPushButton(self.frame)
self.login_button.setGeometry(QtCore.QRect(70, 290, 161, 51))
self.login_button.setStyleSheet("")
self.login_button.setObjectName("login_button")
self.to_register_button = QtWidgets.QPushButton(self.frame)
self.to_register_button.setGeometry(QtCore.QRect(260, 290, 161, 51))
self.to_register_button.setStyleSheet("")
self.to_register_button.setObjectName("to_register_button")
self.verticalLayoutWidget = QtWidgets.QWidget(self.frame)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(30, 70, 181, 141))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.label_2 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_2.setStyleSheet("")
self.label_2.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.label_2.setObjectName("label_2")
self.verticalLayout.addWidget(self.label_2)
self.label = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label.setStyleSheet("")
self.label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.verticalLayoutWidget_2 = QtWidgets.QWidget(self.frame)
self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(220, 70, 211, 151))
self.verticalLayoutWidget_2.setObjectName("verticalLayoutWidget_2")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.login_name = QtWidgets.QLineEdit(self.verticalLayoutWidget_2)
self.login_name.setStyleSheet("")
self.login_name.setObjectName("login_name")
self.verticalLayout_2.addWidget(self.login_name)
self.login_password = QtWidgets.QLineEdit(self.verticalLayoutWidget_2)
self.login_password.setStyleSheet("")
self.login_password.setObjectName("login_password")
self.login_password.setEchoMode(QtWidgets.QLineEdit.Password)
self.verticalLayout_2.addWidget(self.login_password)
self.stackedWidget.addWidget(self.page_login)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
self.stackedWidget.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
# sayfalar arası geçiş -->
self.to_register_button.clicked.connect(self.to_register_page)
self.registerButton.clicked.connect(self.to_home_page)
self.login_button.clicked.connect(self.check_info)
conn = sqlite3.connect("data.db")
cursor = conn.cursor()
cursor.execute(
"CREATE TABLE IF NOT EXISTS USERS(ID INTEGER NOT NULL PRIMARY KEY, USERNAME TEXT, PASSWORD TEXT)")
conn.close()
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label_3.setText(_translate("MainWindow", "Username: "))
self.label_4.setText(_translate("MainWindow", "Password:"))
self.label_5.setText(_translate("MainWindow", "Confirm Password:"))
self.registerButton.setText(_translate("MainWindow", "register"))
self.login_button.setText(_translate("MainWindow", "login"))
self.to_register_button.setText(_translate("MainWindow", "register"))
self.label_2.setText(_translate("MainWindow", "USERNAME :"))
self.label.setText(_translate("MainWindow", "PASSWORD :"))
def to_register_page(self):
self.stackedWidget.setCurrentIndex(0)
def check_username(self, username):
conn = sqlite3.connect("data.db")
curr = conn.cursor()
curr.execute("SELECT USERNAME FROM USERS")
usernames = curr.fetchall()
conn.close()
for name in usernames:
if username == name[0]:
return False
return True
def to_home_page(self):
_translate = QtCore.QCoreApplication.translate
if self.register_name.text() == "" or self.register_password.text() == "" or self.register_confirm_password.text() == "":
msg = QtWidgets.QMessageBox()
msg.setText(_translate("MainWindow", "You must fill all lines !!!"))
msg.setIcon(QtWidgets.QMessageBox.Warning)
x = msg.exec_()
elif self.register_password.text() != self.register_confirm_password.text():
msg = QtWidgets.QMessageBox()
msg.setText(_translate("MainWindow", "Passwords didn't match !!!"))
msg.setIcon(QtWidgets.QMessageBox.Warning)
x = msg.exec_()
elif not self.check_username(self.register_name.text()):
msg = QtWidgets.QMessageBox()
msg.setText(_translate("MainWindow", "Username already taken !!!"))
msg.setIcon(QtWidgets.QMessageBox.Warning)
x = msg.exec_()
else:
conn = sqlite3.connect("data.db")
conn.execute("INSERT INTO USERS(USERNAME,PASSWORD) values(?,?)",
(self.register_name.text(), self.register_password.text(),))
conn.commit()
conn.close()
self.stackedWidget.setCurrentIndex(1)
def check_info(self):
_translate = QtCore.QCoreApplication.translate
conn = sqlite3.connect("data.db")
curr = conn.cursor()
curr.execute("SELECT * FROM USERS")
user_list = curr.fetchall()
print(user_list)
conn.close()
flag = False
for item in user_list:
if item[1] == self.login_name.text() and item[2] == self.login_password.text():
msg = QtWidgets.QMessageBox()
msg.setWindowTitle(_translate("Login", "Logged in"))
msg.setText(_translate("Login", "Login succesfull :)"))
x = msg.exec_()
flag = True
break
if not flag:
msg = QtWidgets.QMessageBox()
msg.setText(_translate("Login", "Username and pasword didn't match !!!"))
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setWindowTitle(_translate("Login", "Login Error"))
x = msg.exec_()
else:
exit()
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
list = ["Türkçe", "English"]
t = QtCore.QTranslator()
lang = QtWidgets.QInputDialog.getItem(MainWindow, "Select Language", "Language:", list)
#print(lang)
if lang[0] == "Türkçe":
t.load("turkish.qm")
app.installTranslator(t)
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| [
"muhammetzahitaydin@gmail.com"
] | muhammetzahitaydin@gmail.com |
7ac445a7981cc09e31bfafce07f08ab38310efce | 2a1146bd74be4ae270bd2dc105e1917aa13a3bfe | /gotti/modules/modules.py | 346c86de735720927666368efd616105af0b2a46 | [
"MIT"
] | permissive | HellBringerReal/Telegram-Bot | 0a2721ed04667c6bb1347f7ccdf0e657a360a71a | c204de5e8212fd32aaae6afd92c2bc7999457d4f | refs/heads/master | 2023-06-28T20:34:50.909101 | 2021-08-11T20:22:27 | 2021-08-11T20:22:27 | 290,599,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,947 | py | import importlib
from telegram import Bot, Update, ParseMode
from telegram.ext import CommandHandler, run_async
from gotti import dispatcher
from gotti.__main__ import (IMPORTED, HELPABLE, MIGRATEABLE, STATS, USER_INFO, DATA_IMPORT, DATA_EXPORT, CHAT_SETTINGS,
USER_SETTINGS)
from gotti.modules.helper_funcs.chat_status import sudo_plus, dev_plus
@run_async
@dev_plus
def load(bot: Bot, update: Update):
message = update.effective_message
text = message.text.split(" ", 1)[1]
load_messasge = message.reply_text(f"Attempting to load module : <b>{text}</b>", parse_mode=ParseMode.HTML)
try:
imported_module = importlib.import_module("gotti.modules." + text)
except:
load_messasge.edit_text("Does that module even exist?")
return
if not hasattr(imported_module, "__mod_name__"):
imported_module.__mod_name__ = imported_module.__name__
if not imported_module.__mod_name__.lower() in IMPORTED:
IMPORTED[imported_module.__mod_name__.lower()] = imported_module
else:
load_messasge.edit_text("Module already loaded.")
return
if "__handlers__" in dir(imported_module):
handlers = imported_module.__handlers__
for handler in handlers:
if type(handler) != tuple:
dispatcher.add_handler(handler)
else:
handler_name, priority = handler
dispatcher.add_handler(handler_name, priority)
else:
IMPORTED.pop(imported_module.__mod_name__.lower())
load_messasge.edit_text("The module cannot be loaded.")
return
if hasattr(imported_module, "__help__") and imported_module.__help__:
HELPABLE[imported_module.__mod_name__.lower()] = imported_module
# Chats to migrate on chat_migrated events
if hasattr(imported_module, "__migrate__"):
MIGRATEABLE.append(imported_module)
if hasattr(imported_module, "__stats__"):
STATS.append(imported_module)
if hasattr(imported_module, "__user_info__"):
USER_INFO.append(imported_module)
if hasattr(imported_module, "__import_data__"):
DATA_IMPORT.append(imported_module)
if hasattr(imported_module, "__export_data__"):
DATA_EXPORT.append(imported_module)
if hasattr(imported_module, "__chat_settings__"):
CHAT_SETTINGS[imported_module.__mod_name__.lower()] = imported_module
if hasattr(imported_module, "__user_settings__"):
USER_SETTINGS[imported_module.__mod_name__.lower()] = imported_module
load_messasge.edit_text("Successfully loaded module : <b>{}</b>".format(text), parse_mode=ParseMode.HTML)
@run_async
@dev_plus
def unload(bot: Bot, update: Update):
message = update.effective_message
text = message.text.split(" ", 1)[1]
unload_messasge = message.reply_text(f"Attempting to unload module : <b>{text}</b>", parse_mode=ParseMode.HTML)
try:
imported_module = importlib.import_module("gotti.modules." + text)
except:
unload_messasge.edit_text("Does that module even exist?")
return
if not hasattr(imported_module, "__mod_name__"):
imported_module.__mod_name__ = imported_module.__name__
if imported_module.__mod_name__.lower() in IMPORTED:
IMPORTED.pop(imported_module.__mod_name__.lower())
else:
unload_messasge.edit_text("Can't unload something that isn't loaded.")
return
if "__handlers__" in dir(imported_module):
handlers = imported_module.__handlers__
for handler in handlers:
if type(handler) == bool:
unload_messasge.edit_text("This module can't be unloaded!")
return
elif type(handler) != tuple:
dispatcher.remove_handler(handler)
else:
handler_name, priority = handler
dispatcher.remove_handler(handler_name, priority)
else:
unload_messasge.edit_text("The module cannot be unloaded.")
return
if hasattr(imported_module, "__help__") and imported_module.__help__:
HELPABLE.pop(imported_module.__mod_name__.lower())
# Chats to migrate on chat_migrated events
if hasattr(imported_module, "__migrate__"):
MIGRATEABLE.remove(imported_module)
if hasattr(imported_module, "__stats__"):
STATS.remove(imported_module)
if hasattr(imported_module, "__user_info__"):
USER_INFO.remove(imported_module)
if hasattr(imported_module, "__import_data__"):
DATA_IMPORT.remove(imported_module)
if hasattr(imported_module, "__export_data__"):
DATA_EXPORT.remove(imported_module)
if hasattr(imported_module, "__chat_settings__"):
CHAT_SETTINGS.pop(imported_module.__mod_name__.lower())
if hasattr(imported_module, "__user_settings__"):
USER_SETTINGS.pop(imported_module.__mod_name__.lower())
unload_messasge.edit_text(f"Successfully unloaded module : <b>{text}</b>", parse_mode=ParseMode.HTML)
@run_async
@sudo_plus
def listmodules(bot: Bot, update: Update):
message = update.effective_message
module_list = []
for helpable_module in HELPABLE:
helpable_module_info = IMPORTED[helpable_module]
file_info = IMPORTED[helpable_module_info.__mod_name__.lower()]
file_name = file_info.__name__.rsplit("gotti.modules.", 1)[1]
mod_name = file_info.__mod_name__
module_list.append(f'- <code>{mod_name} ({file_name})</code>\n')
module_list = "Following modules are loaded : \n\n" + ''.join(module_list)
message.reply_text(module_list, parse_mode=ParseMode.HTML)
LOAD_HANDLER = CommandHandler("load", load)
UNLOAD_HANDLER = CommandHandler("unload", unload)
LISTMODULES_HANDLER = CommandHandler("listmodules", listmodules)
dispatcher.add_handler(LOAD_HANDLER)
dispatcher.add_handler(UNLOAD_HANDLER)
dispatcher.add_handler(LISTMODULES_HANDLER)
__mod_name__ = "MODULES"
| [
"noreply@github.com"
] | HellBringerReal.noreply@github.com |
795d213349a1ac367a0dcc6c7f13ed3a859b131d | c385ed950cd8512915f97a8bbca466349b647a56 | /code/model.py | c9fc26614280c37bf6aa54a2050dc001f870e95d | [] | no_license | livenb/Ultrasonic_Nerve | b5f2c9c8a0bd0e2e3e654ab247f1dca6fa65b847 | 39f5c036dbf3c63ebaae5c96d86ee6005ccfe3af | refs/heads/master | 2021-01-10T23:08:18.455410 | 2017-02-09T01:42:58 | 2017-02-09T01:42:58 | 70,640,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,683 | py | from __future__ import print_function
import cv2
import numpy as np
from keras.models import Model
from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as K
from data_prepare import load_train_data, load_test_data
data_path = '../data/'
# K.set_image_dim_ordering('th') # Theano dimension ordering in this code
img_rows = 64
img_cols = 80
smooth = 1.
def dice_coef(y_true, y_pred):
    """Soft Dice coefficient between ground-truth and predicted masks.

    Both tensors are flattened; `smooth` (module-level) avoids division by
    zero and softens the gradient for empty masks.
    """
    truth = K.flatten(y_true)
    pred = K.flatten(y_pred)
    overlap = K.sum(truth * pred)
    total = K.sum(truth) + K.sum(pred)
    return (2. * overlap + smooth) / (total + smooth)
def dice_coef_loss(y_true, y_pred):
    # Negated Dice coefficient: maximizing overlap == minimizing this loss.
    return -dice_coef(y_true, y_pred)
def get_unet():
    """Build and compile the U-Net with an auxiliary "mask exists" head.

    Returns a compiled Keras ``Model`` with two outputs:
      * ``main_output`` - per-pixel sigmoid mask (Dice loss)
      * ``aux_output``  - single sigmoid flag taken from the bottleneck
        (binary cross-entropy, weighted 0.5)
    """
    # Bug fix: Flatten and Dense are used below but were missing from the
    # top-level keras.layers import, causing a NameError when called.
    from keras.layers import Flatten, Dense

    inputs = Input((img_rows, img_cols, 1))
    # Contracting path: two 3x3 convs per level, then 2x2 max-pool.
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)
    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)
    # Auxiliary classification head hanging off the bottleneck.
    pre = Convolution2D(1, 1, 1, init='he_normal', activation='sigmoid')(conv5)
    pre = Flatten()(pre)
    aux_out = Dense(1, activation='sigmoid', name='aux_output')(pre)
    # Expanding path: upsample and concatenate the matching encoder feature map.
    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=3)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up6)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)
    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=3)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)
    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=3)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)
    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=3)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)
    conv10 = Convolution2D(1, 1, 1, activation='sigmoid',
                           name='main_output')(conv9)
    model = Model(input=inputs, output=[conv10, aux_out])
    model.compile(optimizer=Adam(lr=1e-5),
                  loss={'main_output': dice_coef_loss, 'aux_output': 'binary_crossentropy'},
                  metrics={'main_output': dice_coef, 'aux_output': 'acc'},
                  loss_weights={'main_output': 1., 'aux_output': 0.5})
    return model
def preprocess(imgs):
    """Resize a stack of images to (img_rows, img_cols) and add a channel axis.

    imgs: array of shape (N, H, W[, C]); returns uint8 array of shape
    (N, img_rows, img_cols, 1).
    """
    imgs_p = np.ndarray((imgs.shape[0], img_rows, img_cols, 1),
                        dtype=np.uint8)
    for i in range(imgs.shape[0]):
        # NOTE: cv2.resize takes dsize as (width, height), i.e. (cols, rows).
        img = cv2.resize(imgs[i], (img_cols, img_rows))
        imgs_p[i] = img.reshape((img.shape[0],img.shape[1],1))
    return imgs_p
def mask_exist(mask):
    """Return a 1/0 label per sample: 1 if that sample's mask has any foreground.

    Bug fix: the original summed only ``mask[i, 0]`` - the *first row* of each
    channels-last mask produced by preprocess() - so any patch whose vessels
    lie below row 0 was labelled 0.  ``xrange`` was also replaced by ``range``
    (works on both Python 2 and 3).
    """
    return np.array([int(np.sum(mask[i]) > 0) for i in range(len(mask))])
def train_and_predict():
    """End-to-end pipeline: load DRIVE data, train the two-headed U-Net,
    then predict masks for the test set and save them to disk.

    Side effects: writes 'unet.hdf5' (best checkpoint by training loss) and
    'imgs_mask_test.npy' under data_path.
    """
    print('-'*30)
    print('Loading and preprocessing train data...')
    print('-'*30)
    imgs_train, imgs_mask_train = load_train_data()
    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)
    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train) # mean for data centering
    std = np.std(imgs_train) # std for data normalization
    imgs_train -= mean
    imgs_train /= std
    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255. # scale masks to [0, 1]
    print('-'*30)
    print('Creating and compiling model...')
    print('-'*30)
    model = get_unet()
    # Checkpoint on *training* loss (no validation split is used here).
    model_checkpoint = ModelCheckpoint(data_path+'unet.hdf5', monitor='loss',
                                       save_best_only=True)
    print('-'*30)
    print('Fitting model...')
    print('-'*30)
    # Two targets: the full mask (main head) and a per-sample "has vessel"
    # flag (aux head), matching the two outputs of get_unet().
    model.fit(imgs_train, [imgs_mask_train, mask_exist(imgs_mask_train)],
              batch_size=32, nb_epoch=20,
              verbose=1, shuffle=True, callbacks=[model_checkpoint])
    print('-'*30)
    print('Loading and preprocessing test data...')
    print('-'*30)
    imgs_test, imgs_id_test = load_test_data()
    imgs_test = preprocess(imgs_test)
    imgs_test = imgs_test.astype('float32')
    # Normalize the test set with the *training* statistics.
    imgs_test -= mean
    imgs_test /= std
    print('-'*30)
    print('Loading saved weights...')
    print('-'*30)
    model.load_weights(data_path+'unet.hdf5')
    print('-'*30)
    print('Predicting masks on test data...')
    print('-'*30)
    # NOTE(review): with two model outputs predict() presumably returns a
    # list [masks, aux_flags]; np.save then stores that list as an object
    # array - confirm this is the intended file format.
    imgs_mask_test = model.predict(imgs_test, verbose=1)
    np.save(data_path+'imgs_mask_test.npy', imgs_mask_test)
if __name__ == '__main__':
train_and_predict()
| [
"livenb666@gmail.com"
] | livenb666@gmail.com |
59aed57ba62f13ab930efe93f2f9d078ea11c2d3 | f144f0f34227acf229a991d09db18281cd6b5ac6 | /.c9/metadata/environment/abscences/models.py | 269d869f41bb85e539bc1b9b841e06464feed82d | [] | no_license | KikiDow/TUD_HDP_PSE3S | 42dde541a6d65d0c764def842a1e2732fec740ac | cd21c27651e442e1ed84518c1d567f1d52f215c0 | refs/heads/main | 2023-04-22T23:25:25.966531 | 2021-05-09T14:10:01 | 2021-05-09T14:10:01 | 313,276,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | {"filter":false,"title":"models.py","tooltip":"/abscences/models.py","undoManager":{"mark":2,"position":2,"stack":[[{"start":{"row":8,"column":55},"end":{"row":8,"column":56},"action":"insert","lines":["/"],"id":2},{"start":{"row":8,"column":56},"end":{"row":8,"column":57},"action":"insert","lines":["/"]}],[{"start":{"row":8,"column":56},"end":{"row":8,"column":57},"action":"remove","lines":["/"],"id":3}],[{"start":{"row":8,"column":55},"end":{"row":8,"column":56},"action":"remove","lines":["/"],"id":4}]]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":8,"column":55},"end":{"row":8,"column":55},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1620153966673,"hash":"c83b2801de5e5f3fcb36b6f66f5a7f588f9bceb1"} | [
"ec2-user@ip-172-31-0-153.ec2.internal"
] | ec2-user@ip-172-31-0-153.ec2.internal |
614d20e490badf198728acb806f1b442ff8a43b7 | 9ab642dbc8b5409673e5b2f90e009aa4b5634c32 | /st_network_server.py | ea0366083d46d6693412626cfecd9193cb995a67 | [] | no_license | sunpu/stwhiteboard | 65c20aab6049acc0ca1b5b1924ae048671e119ee | 26cb02b593076e65496beca162be24404bee690a | refs/heads/master | 2021-09-07T12:40:49.138494 | 2018-02-23T01:44:37 | 2018-02-23T01:44:37 | 115,606,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,122 | py | #!/usr/bin/python2.7
# -*- coding:utf-8 -*-
import datetime
import json
import os
import pipes
import SocketServer

from time import ctime
courseDict = {}
class Course:
    """Per-course (room) state shared by all connected clients."""

    def __init__(self, courseID):
        # identifier of this course/room
        self.courseID = courseID
        # maps client address -> request socket of every joined client
        self.clientDict = dict()
        # ordered history of finalized messages, replayed to late joiners
        self.finishList = list()
HOST = ''
PORT = 10001
ADDR = (HOST, PORT)
ROOT_PATH = './file/'
class Client(SocketServer.BaseRequestHandler):
role = ''
courseID = 0
bigData = ''
def readDirectory(self, path):
result = []
paths = os.listdir(path)
for i, item in enumerate(paths):
sub_path = os.path.join(path, item)
data = {}
data['name'] = item
timestamp = os.path.getmtime(sub_path)
date = datetime.datetime.fromtimestamp(timestamp)
data['time'] = date.strftime('%Y-%m-%d %H:%M:%S')
if os.path.isdir(sub_path):
data['type'] = 'folder'
data['size'] = '-'
else:
data['type'] = 'file'
fsize = os.path.getsize(sub_path)
fsize = fsize / float(1024)
data['size'] = str(round(fsize,2)) + 'KB'
result.append(data)
json_res = json.dumps(result)
return json_res
def sendMessage(self, data):
self.request.sendall('#*#' + data + '@%@')
def sendHistoryMessage(self):
#print courseDict[self.courseID].finishList
finishList = courseDict[self.courseID].finishList
for index in range(0, len(finishList)):
self.request.sendall('#*#' + finishList[index] + '@%@')
def boardcastMessage(self, data):
#print courseDict
clientDict = courseDict[self.courseID].clientDict
#print clientDict
for k in clientDict.keys():
if k == self.client_address:
continue
#print k, clientDict[k]
#print '---', data
clientDict[k].sendall('#*#' + data + '@%@')
def processData(self, data):
#print '--------------', data
datas = json.loads(data)
if datas['type'] == 'createClient':
self.role = datas['data']['role']
elif datas['type'] == 'createCourse':
self.courseID = datas['data']['courseID']
if courseDict.has_key(self.courseID):
return
course = Course(self.courseID)
courseDict[self.courseID] = course
elif datas['type'] == 'joinCourse':
self.courseID = datas['data']['courseID']
course = courseDict[self.courseID]
course.clientDict[self.client_address] = self.request
self.sendHistoryMessage()
elif datas['type'] == 'setClientAuthority' or datas['type'] == 'finish':
self.boardcastMessage(data)
course = courseDict[self.courseID]
for index in range(0, len(course.finishList)):
#print '---', course.finishList[index]
historyDatas = json.loads(course.finishList[index])
#print 'historyDatas---', historyDatas
#print 'datas---', datas
if historyDatas.has_key('itemID') and datas.has_key('itemID') and historyDatas['itemID'] == datas['itemID'] and historyDatas['subtype'] == datas['subtype']:
course.finishList.remove(course.finishList[index])
break
course.finishList.append(data)
elif datas['type'] == 'realtime':
self.boardcastMessage(data)
elif datas['type'] == 'file':
path = ROOT_PATH
if datas['action'] == 'list':
path += datas['data']['path']
elif datas['action'] == 'new':
path += datas['data']['path']
name = datas['data']['name']
cmd = 'cd %s;mkdir %s;' % (path, name)
os.system(cmd)
elif datas['action'] == 'copy':
path += datas['data']['path']
name = datas['data']['name']
destPath += datas['data']['destPath']
cmd = 'cd %s;cp -rf %s %s;' % (path, name, destPath)
os.system(cmd)
elif datas['action'] == 'move':
path += datas['data']['path']
name = datas['data']['name']
destPath += datas['data']['destPath']
cmd = 'cd %s;mv -rf %s %s;' % (path, name, destPath)
os.system(cmd)
elif datas['action'] == 'del':
path += datas['data']['path']
name = datas['data']['name']
cmd = 'cd %s;rm -rf %s;' % (path, name)
os.system(cmd)
list = self.readDirectory(path)
self.sendMessage(list)
#{"type":"file","action":"list","data":{"path":"/1/2"}}
#{"type":"file","action":"new","data":{"path":"/1/2","name":"xxx"}}
#{"type":"file","action":"copy","data":{"path":"/2","name":"xxx","destPath":"/3"}}
#{"type":"file","action":"move","data":{"path":"/2","name":"xxx","destPath":"/3"}}
#{"type":"file","action":"del","data":{"path":"/2","name":"xxx"}}
def handle(self):
# 客户端登入后,记住
print '...connected from:', self.client_address
while True:
data = self.request.recv(1024 * 1024 * 10)
#print data, 'from', self.client_address
if len(data) == 0:
course = courseDict[self.courseID]
course.clientDict.pop(self.client_address)
break
if data.endswith('@%@'):
if len(self.bigData) > 0:
data = self.bigData + data
self.bigData = ''
data = data.replace('@%@', '')
dataList = data.split('#*#')
for index in range(0, len(dataList)):
if dataList[index]:
self.processData(dataList[index])
else:
self.bigData = self.bigData + data
# ThreadingTCPServer handles each client connection on its own thread.
tcpServ = SocketServer.ThreadingTCPServer(ADDR, Client)
print 'waiting for connection...'
tcpServ.serve_forever()
"sunpumsn@hotmail.com"
] | sunpumsn@hotmail.com |
b6d4b00e9ba7fd2e1ffb15551e74584d5f265b5d | ed61c386fbe2ab18a73e6c4ac4c581540638dba6 | /src/old verion of code/propagation.py | d0295ff5fd4ade51e1ce136c9a8ddc81f37f1650 | [] | no_license | allenqaq/Online-Social-Network | 5f244cbf3b9f53fe68a926847e485c3a5ab9782e | c6f863827521ed787cf0120c3e02ae0485004a6a | refs/heads/master | 2020-03-08T17:34:54.384051 | 2018-12-21T08:42:34 | 2018-12-21T08:42:34 | 128,272,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,024 | py | '''
Created on Mar 5, 2017
@author: allen
'''
from math import sqrt
import numpy as np
# def mean(lst):
# """calculates mean"""
# sum = 0
# for i in range(len(lst)):
# sum += lst[i]
# return (sum / len(lst))
# def stddev(lst):
# """calculates standard deviation"""
# sum = 0
# mn = mean(lst)
# for i in range(len(lst)) :
# sum += pow((lst[i]-mn),2)
# return sqrt(sum/len(lst)-1)
def eccenricity(items):
    """Eccentricity of a score list: (max1 - max2) / std of all values.

    Returns 0 for an empty list or when all values are equal (std == 0).
    Bug fix: the original called ``items.remove(max1)``, silently mutating
    the caller's list (and breaking on dict views under Python 3); this
    version leaves *items* untouched.  Name kept (typo and all) because
    callers reference it.
    """
    if not items:
        return 0
    sd = np.std(items)
    if sd == 0:
        return 0
    # Top two values without mutating the input; ties give max1 == max2.
    ranked = sorted(items, reverse=True)
    runner_up = ranked[1] if len(ranked) > 1 else 0
    return (ranked[0] - runner_up) / sd
def matchScores(lgraph, rgraph, mapping, lnode) :
    """Score candidate matches in rgraph for *lnode* of lgraph.

    For each already-mapped neighbour of lnode, every unmapped neighbour of
    its image in rgraph earns 1/sqrt(degree) credit.  mapping[0] is the
    lgraph-node -> rgraph-node dict.  Returns {rgraph node: score}.
    """
    # print('match scores start')
    scores = {}
    # for i in lgraph.nodes():
    #     scores[i] = 0
    # print(scores)
    # scores[lnode] = 0.1
    # Pass 1: follow lnode's in-neighbours through the mapping, then fan out
    # along the mapped node's out-edges in rgraph.
    listIn1 = lgraph.in_edges(lnode)
    for lnbr in listIn1 :
        #lnbr is like (1111, 1112)
        if lnbr[0] not in mapping[0].keys() :
            continue
        rnbr = mapping[0][lnbr[0]]
        listOut1 = rgraph.out_edges(rnbr)
        for rnode in listOut1 :
            #rnode is like (1111, 1112)
            # if rnode[1] in mapping[0].keys() or rgraph.in_degree(rnode[1]) > 100 or rgraph.in_degree(rnode[1]) == 1:
            if rnode[1] in mapping[0].keys() :
                continue
            else :
                # NOTE(review): lnode belongs to lgraph, yet its degree is
                # taken from rgraph here - confirm this is intentional.
                skip = rgraph.in_degree(rnode[1]) - rgraph.in_degree(lnode)
                # skip is a degree check supposing 2 mapping mode degree differential is less than 3
                if skip > 3 or skip < -3 :
                    continue
                elif rnode[1] in scores.keys() :
                    scores[rnode[1]] += 1 / sqrt(rgraph.in_degree(rnode[1]))
                else :
                    scores[rnode[1]] = 1 / sqrt(rgraph.in_degree(rnode[1]))
    # Pass 2: symmetric walk over lnode's out-neighbours / rgraph in-edges.
    listOut2 = lgraph.out_edges(lnode)
    for lnbr in listOut2 :
        #lnbr is like (1111, 1112)
        if lnbr[1] not in mapping[0].keys() :
            continue
        rnbr = mapping[0][lnbr[1]]
        listIn2 = rgraph.in_edges(rnbr)
        for rnode in listIn2 :
            #rnode is like (1111, 1112)
            # if rnode[0] in mapping[0].keys() or rgraph.out_degree(rnode[0]) > 100 or rgraph.out_degree(rnode[0]) == 1:
            if rnode[0] in mapping[0].keys() :
                continue
            else :
                # NOTE(review): same rgraph-degree-of-lnode question as above.
                skip = rgraph.out_degree(rnode[0]) - rgraph.out_degree(lnode)
                # skip is a degree check supposing 2 mapping mode degree differential is less than 3
                if skip > 3 or skip < -3 :
                    continue
                if rnode[0] in scores.keys() :
                    scores[rnode[0]] += 1 / sqrt(rgraph.out_degree(rnode[0]))
                else :
                    scores[rnode[0]] = 1 / sqrt(rgraph.out_degree(rnode[0]))
    # if lnode in scores.keys() and scores[lnode] != 0 :
    #     print("lnode :"),
    #     print(lnode)
    #     print("shoule be scores : ")
    #     print(scores[lnode])
    #     print("max id is :"),
    #     print(max(scores.items(), key=lambda x: x[1])[0])
    #     print(max(scores.values()))
    #     print(scores)
    #     # for (k,v) in scores.items() :
    #     #     if v == max(scores.values()) :
    #     #         print(k),
    #     # print
    #     print("--------------------------------------")
    return scores
def propagationStep(lgraph, rgraph, mapping) :
    """One propagation sweep of the de-anonymization algorithm.

    For every unmapped lgraph node, pick the best-scoring rgraph candidate;
    accept the pair only if the match is eccentric enough in *both*
    directions (forward and reverse) and the reverse match agrees.
    Accepted pairs are written into mapping[0] in place.
    """
    scores = {}
    node_acount = 0
    for lnode in lgraph.nodes() :
        # Progress indicator (percentage of nodes visited).
        node_acount = node_acount + 1
        data_len = len(lgraph.nodes())
        rate = node_acount * 100.0 / data_len
        print('-------'),
        print(rate),
        print(' %')
        if lnode in mapping[0].keys() :
            continue
        scores[lnode] = matchScores(lgraph, rgraph, mapping, lnode)
        # print(scores[lnode])
        if eccenricity(scores[lnode].values()) < 0.01 :
            # 0.01 is theta, a parameter that controls the tradeoff between the yield and the accuracy.
            continue
        rnode = max(scores[lnode].items(), key=lambda x: x[1])[0]
        # Reverse check: score lnode's candidate back against lgraph.
        scores[rnode] = matchScores(rgraph, lgraph, mapping, rnode)
        # no need to invert mapping
        if eccenricity(scores[rnode].values()) < 0.01 :
            # 0.01 is theta, a parameter that controls the tradeoff between the yield and the accuracy.
            continue
        reverse_match = max(scores[rnode].items(), key=lambda x: x[1])[0]
        print("reverse_match :"),
        print(reverse_match)
        print("lnode :"),
        print(lnode)
        # print(scores[rnode][reverse_match])
        # print(scores[rnode][lnode])
        print("======================================")
        # Accept only mutually-best pairs.
        if reverse_match != lnode :
            continue
        else :
            mapping[0][lnode] = rnode
| [
"allenqaq555@gmail.com"
] | allenqaq555@gmail.com |
c6c061e8c03a18c82ab3cd76a3c826f444b900f1 | 14c831041866edeed1fb3a5643405542fe736939 | /datasetGen.py | f1e20ec710d378afc9b9e20cff19a3c148fd8dba | [] | no_license | Rumee040/Retinal-Vessel-Segmentation-using-Convolutional-Network | d4a37d72d8fde637b53c5d2e279c9848d1dc1313 | 41333730fd2c27a99c0bbb149dfaa739c04ece50 | refs/heads/master | 2021-05-06T06:01:16.863817 | 2017-12-20T11:58:19 | 2017-12-20T11:58:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,468 | py | #import cv2
import numpy as np
from sys import exit
from PIL import Image
import matplotlib.pyplot as plt
import random

# Sample square patches from the DRIVE test images, recording pixel data,
# patch centre location and vessel/background label for a patch classifier.

## defining parameters
patchSize = 27 # rectangular patch with size patchSize*patchSize*channel
patchPerImg = 1000 # patches per image
numImage = 20 # number of images
totalPatch = patchPerImg * numImage
data = np.ones((totalPatch, patchSize, patchSize, 3)) # all of the patches will be stored here
dataLoc = np.ones((totalPatch, 2)) # location of the patches stores as (row, column)
dataLabel = np.ones((totalPatch)) # label of the patches 0 - neg, 1 - pos
balance = 0.5 # balance between positive and negative patches
# Fix: use the declared `balance` knob instead of a hard-coded 0.5 so
# changing `balance` actually changes the sampling ratio.
positive = int(patchPerImg * balance) # number of positive image in an image
negative = patchPerImg - positive # number of negative image in an image

## reading the image and mask
for i in range(1, numImage + 1):
    imgNum = str(i)
    if i < 10:
        imgNum = '0' + imgNum
    imgName = imgNum + '_test.tif'
    img = Image.open('E:\\library of EEE\\4-2\\eee 426\\data\\DRIVE\\DRIVE\\test\\images\\' + imgName)
    maskName = imgNum + '_test_mask.gif'
    mask = Image.open('E:\\library of EEE\\4-2\\eee 426\\data\\DRIVE\\DRIVE\\test\\mask\\' + maskName)
    gndTruthName = imgNum + '_manual1.gif'
    gndTruth = Image.open('E:\\library of EEE\\4-2\\eee 426\\data\\DRIVE\\DRIVE\\test\\1st_manual\\' + gndTruthName)
    ## converting them to numpy array (Image stores size as (width, height);
    ## np.array gives (row, column) which is what we index below)
    img = np.array(img)
    mask = mask.convert('RGB')
    mask = np.array(mask)
    gndTruth = gndTruth.convert('RGB')
    gndTruth = np.array(gndTruth)[:,:,0]
    ## cutting out patches from the image
    imgRow = img.shape[0]
    imgCol = img.shape[1]
    count = 0
    ind = (i - 1) * patchPerImg
    posCount = 0
    negCount = 0
    while count < patchPerImg:
        r = int(round(random.uniform(0, img.shape[0])))
        c = int(round(random.uniform(0, img.shape[1])))
        rStart = r - patchSize // 2
        rEnd = r + patchSize // 2 + 1
        cStart = c - patchSize // 2
        cEnd = c + patchSize // 2 + 1
        # Fix: check the cheap bounds first so the mask slice is never taken
        # with a negative start index (which would wrap around in numpy).
        if r > 13 and r < imgRow - 14 and c > 13 and c < imgCol - 14 and np.all(mask[rStart:rEnd, cStart:cEnd]):
            # NOTE(review): label is the raw ground-truth pixel (0 or 255 for
            # DRIVE gifs), not 0/1 - confirm downstream code expects that.
            label = gndTruth[r, c]
            if label == 0:
                if negCount == negative:
                    continue
                else:
                    negCount += 1
            else:
                if posCount == positive:
                    continue
                else:
                    posCount += 1
            data[ind + count, :, :, :] = img[rStart:rEnd, cStart:cEnd, :]
            dataLoc[ind + count] = np.array([r, c])
            dataLabel[ind + count] = label
            count += 1
    print(negCount, posCount)

print(np.count_nonzero(dataLabel))
## storing the images and data
np.save('E:\\library of EEE\\4-2\\eee 426\\data\\MSCprojectDataBase\\simpleClassifierDataBase\\DRIVEtestData', data)
np.save('E:\\library of EEE\\4-2\\eee 426\\data\\MSCprojectDataBase\\simpleClassifierDataBase\\DRIVEtestDataLcation', dataLoc)
np.save('E:\\library of EEE\\4-2\\eee 426\\data\\MSCprojectDataBase\\simpleClassifierDataBase\\DRIVEtestDataLabel', dataLabel)
"noreply@github.com"
] | Rumee040.noreply@github.com |
811cf7cfd7af8000864d8ca0048863f2d418d819 | a9756247d833412e64b18e741c293e7cab9c0b5b | /Command Line/enrollment.py | 3e6491fc442bd5f6d3ef46d8be2a7ca14fa00ac7 | [] | no_license | guennithegun/Command-Line | e44b2b5b7ceac9adc7894f37bf29396b5557dfff | 7c01985129415a56250fa679c9290c3395bf5158 | refs/heads/master | 2020-04-08T04:05:26.323362 | 2018-11-25T05:59:43 | 2018-11-25T05:59:43 | 159,002,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | import pandas as pd
if __name__ == "__main__":
data = pd.read_csv("data/CRDC2013_14.csv", encoding="Latin-1")
cols =["SCH_ENR_HI_M", "SCH_ENR_HI_F", "SCH_ENR_AM_M", "SCH_ENR_AM_F", "SCH_ENR_AS_M", "SCH_ENR_AS_F", "SCH_ENR_HP_M", "SCH_ENR_HP_F", "SCH_ENR_BL_M", "SCH_ENR_BL_F", "SCH_ENR_WH_M", "SCH_ENR_WH_F", "SCH_ENR_TR_M", "SCH_ENR_TR_F"]
data["total_enrollment"] = data["TOT_ENR_M"] + data["TOT_ENR_F"]
sums = {}
for col in cols:
sums[col] = data[col].sum()
all_enrollment = data["total_enrollment"].sum()
gender_race_perc = {}
for col in cols:
gender_race_perc[col] = (sums[col]*100) / all_enrollment
counter = int()
for keys,values in gender_race_perc.items():
print(keys)
print(values) | [
"noreply@github.com"
] | guennithegun.noreply@github.com |
9d58d840d5920137fe056298c28671343069f204 | ee8ee84343e5efd184e20cb474abdd04425aaf7b | /messenger/models.py | 2992db2413316e882741ef7602bad696b1bf617c | [] | no_license | Amrsaeed/codetouch | 978924b59bb68da05f2307fbc2deed31d10aa927 | a70c6d6241e57692501034a5bf6f5d5634738ef1 | refs/heads/master | 2021-01-22T08:18:00.354453 | 2019-01-14T17:05:07 | 2019-01-14T17:05:07 | 92,609,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Message(models.Model):
    """A direct message sent from one user to another."""
    # Message body, capped at 2000 characters.
    message_text = models.CharField(max_length=2000)
    # Send timestamp; verbose name 'Sent On'. Set by the caller (not auto_now).
    sentOn = models.DateTimeField('Sent On')
    # NOTE(review): sender/reciever are stored as plain CharFields rather than
    # ForeignKeys to User - presumably usernames; confirm against the views.
    sender = models.CharField(max_length=150, default='None')
    reciever = models.CharField(max_length=150, default='None')
    def __str__(self):
        return self.message_text
| [
"amrsaeed@aucegypt.edu"
] | amrsaeed@aucegypt.edu |
aea9bbdf52c15b1029619192896682d0da4103c2 | a4b46342bc37d2d08b19934c5230928575d9cc39 | /getEmailByRegex.py | 7ad1f201c2b92c1916a084f220d32e11906a7fd5 | [] | no_license | yenchenhuang/regex_practice | 5daec1e13903ad233b2d67dba145593e4a0e7966 | 59f138639f783198d813572f2487566955355d0b | refs/heads/master | 2020-03-26T05:34:41.882199 | 2016-06-22T09:13:52 | 2016-06-22T09:13:52 | 144,563,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | import re
def is_email(input):
    """Return True iff *input* as a whole is a simple user@domain address.

    Bug fix: the original used an unanchored ``re.match`` with a lazy domain
    quantifier, so any string that merely *started* like an address (e.g.
    ``'a@b plus junk'``) was accepted.  ``re.fullmatch`` anchors both ends.
    """
    pattern = r"([\w._]+)@([\w_\-.]+)"
    match = re.fullmatch(pattern, input)
    if match:
        return True
    else:
        return False
def get_emails(paragraph):
    """Collect every user@domain substring found in *paragraph*."""
    return re.findall(r"[\w._]+@[\w_\-.]+", paragraph)
def get_accounts(paragraph):
    """Return only the account (local) part of every address in *paragraph*."""
    account_pattern = r"([\w._]+)@[\w_\-.]+"
    return re.findall(account_pattern, paragraph)
| [
"yenchenhuang@kkbox.com"
] | yenchenhuang@kkbox.com |
69163c15593175ec5108d8614e170be4b086b0cc | d5a84ba1417d59d6b8eff26124a37ba7186d7e33 | /test_calculator.py | a303fcf1954dd8cadf456028cef326d495242bd5 | [] | no_license | jonathanzerox/tdd-python | 7c4ce49acb6eec562c9a382e9aab83cd1963aa23 | cd718ecdd646046dd0e2d437d050d75f16378280 | refs/heads/master | 2021-08-23T17:39:47.264755 | 2017-12-05T22:54:25 | 2017-12-05T22:54:25 | 113,240,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | import unittest
from calculator import Calculator
class TestCalculator(unittest.TestCase):
    """Unit tests for Calculator's basic arithmetic."""

    # Bug fix: these hooks were named 'SetUp'/'TearDown' and lacked 'self',
    # so unittest never invoked them; the correct names are setUp/tearDown.
    def setUp(self):
        print("Setting things up")

    def tearDown(self):
        print("Releasing allocated resources back")

    def test_addition(self):
        calc = Calculator()
        self.assertEqual(4, calc.add(2, 2))

    def test_multiplication(self):
        calc = Calculator()
        self.assertEqual(8, calc.mul(4, 2))
if __name__ == '__main__':
unittest.main()
| [
"jonathanzerox@hotmail.com"
] | jonathanzerox@hotmail.com |
c74aea42a9ebf71293d70dbcefafdea417e21e02 | f66b0fda3a16eb7ceb8a7ceb63dcbc373c4ece2d | /smart_QC/libs/json_field/fields.py | 7fcada3804ff579ad916c9cef24d6cb98f62d8c2 | [] | no_license | zcyuefan/smart_QC | 9c82dd7b8a25bfcd0231195b1a797e3f491f3272 | fc8aaec48203065f09ffde7f318c3e39680092ba | refs/heads/master | 2021-01-19T04:55:40.300511 | 2017-09-13T02:31:48 | 2017-09-13T02:31:48 | 81,196,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,430 | py | from __future__ import unicode_literals
from .utils import is_aware
from .forms import JSONFormField
try:
import json
except ImportError: # python < 2.6
from django.utils import simplejson as json
from django.db import models
from django.core import exceptions
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ImproperlyConfigured
import re
import decimal
import datetime
import six
try:
from dateutil import parser as date_parser
except ImportError:
raise ImproperlyConfigured('The "dateutil" library is required and was not found.')
try:
JSON_DECODE_ERROR = json.JSONDecodeError # simplejson
except AttributeError:
JSON_DECODE_ERROR = ValueError # other
TIME_FMT = r'\d{2}:\d{2}:\d{2}(\.\d+)?'
DATE_FMT = r'\d{4}-\d{2}-\d{2}'
TIMEZONE_FMT = r'(\+|\-)\d{2}:\d{2}'
TIME_RE = re.compile(r'^(%s)$' % TIME_FMT)
DATE_RE = re.compile(r'^(%s)$' % DATE_FMT)
DATETIME_RE = re.compile(r'^(%s)T(%s)(%s)?$' % (DATE_FMT, TIME_FMT, TIMEZONE_FMT))
class JSONEncoder(json.JSONEncoder):
    """
    JSONEncoder subclass that knows how to encode date/time and decimal types.
    """
    def default(self, o):
        # See "Date Time String Format" in the ECMA-262 specification.
        if isinstance(o, datetime.datetime):
            r = o.isoformat()
            if o.microsecond:
                # Truncate microseconds to milliseconds while keeping any
                # trailing timezone offset intact.
                r = r[:23] + r[26:]
            if r.endswith('+00:00'):
                # Normalize UTC offset to the 'Z' suffix.
                r = r[:-6] + 'Z'
            return r
        elif isinstance(o, datetime.date):
            return o.isoformat()
        elif isinstance(o, datetime.time):
            if is_aware(o):
                raise ValueError("JSON can't represent timezone-aware times.")
            r = o.isoformat()
            if o.microsecond:
                # Truncate microseconds to milliseconds (HH:MM:SS.mmm).
                r = r[:12]
            return r
        elif isinstance(o, decimal.Decimal):
            # Decimals are serialized as strings to avoid float precision loss.
            return str(o)
        else:
            return super(JSONEncoder, self).default(o)
class JSONDecoder(json.JSONDecoder):
    """ Recursive JSON to Python deserialization.

    After standard decoding, walks lists/dicts and converts any string that
    matches the ISO time/date/datetime patterns back into the corresponding
    datetime object (via dateutil).
    """
    # Container/string types that are descended into (unicode only on Py2).
    _recursable_types = ([str] if six.PY3 else [str, unicode]) + [list, dict]
    def _is_recursive(self, obj):
        return type(obj) in JSONDecoder._recursable_types
    def decode(self, obj, *args, **kwargs):
        # 'recurse' is an internal flag: on nested calls the value is already
        # decoded JSON, so the base-class decode is skipped.
        if not kwargs.get('recurse', False):
            obj = super(JSONDecoder, self).decode(obj, *args, **kwargs)
        if isinstance(obj, list):
            for i in six.moves.xrange(len(obj)):
                item = obj[i]
                if self._is_recursive(item):
                    obj[i] = self.decode(item, recurse=True)
        elif isinstance(obj, dict):
            for key, value in obj.items():
                if self._is_recursive(value):
                    obj[key] = self.decode(value, recurse=True)
        elif isinstance(obj, six.string_types):
            # Strings matching the module-level regexes are promoted to
            # time/date/datetime; on parse failure the string is kept as-is.
            if TIME_RE.match(obj):
                try:
                    return date_parser.parse(obj).time()
                except ValueError:
                    pass
            if DATE_RE.match(obj):
                try:
                    return date_parser.parse(obj).date()
                except ValueError:
                    pass
            if DATETIME_RE.match(obj):
                try:
                    return date_parser.parse(obj)
                except ValueError:
                    pass
        return obj
class Creator(object):
    """
    Taken from django.db.models.fields.subclassing.

    Descriptor installed on the model class for a JSONField attribute.
    In lazy mode the raw DB string is only deserialized on first access;
    a per-instance state dict remembers which fields were already decoded.
    """
    # Name of the per-instance dict tracking which fields are deserialized.
    _state_key = '_json_field_state'
    def __init__(self, field, lazy):
        self.field = field
        self.lazy = lazy
    def __get__(self, obj, type=None):
        if obj is None:
            # Accessed on the class itself: return the descriptor.
            return self
        if self.lazy:
            state = getattr(obj, self._state_key, None)
            if state is None:
                state = {}
                setattr(obj, self._state_key, state)
            if state.get(self.field.name, False):
                # Already deserialized once; return the cached Python value.
                return obj.__dict__[self.field.name]
            value = self.field.to_python(obj.__dict__[self.field.name])
            obj.__dict__[self.field.name] = value
            state[self.field.name] = True
        else:
            value = obj.__dict__[self.field.name]
        return value
    def __set__(self, obj, value):
        # In lazy mode the raw value is stored and decoded on first read;
        # otherwise it is converted to Python immediately.
        obj.__dict__[self.field.name] = value if self.lazy else self.field.to_python(value)
class JSONField(models.TextField):
    """Stores and loads valid JSON objects in a TEXT column.

    Options (all popped from kwargs): ``db_type`` overrides the column type;
    ``lazy`` defers deserialization to first attribute access;
    ``encoder``/``decoder`` and ``*_kwargs`` customize (de)serialization;
    ``ignore_error`` passes through strings that fail to decode.

    Cleanup vs. the original: removed two dead ``a = isinstance(...)``
    assignments and stale commented-out code.
    """
    description = 'JSON object'
    def __init__(self, *args, **kwargs):
        self.default_error_messages = {
            'invalid': _('Enter a valid JSON object')
        }
        self._db_type = kwargs.pop('db_type', None)
        self.evaluate_formfield = kwargs.pop('evaluate_formfield', False)
        self.lazy = kwargs.pop('lazy', True)
        encoder = kwargs.pop('encoder', JSONEncoder)
        decoder = kwargs.pop('decoder', JSONDecoder)
        encoder_kwargs = kwargs.pop('encoder_kwargs', {})
        decoder_kwargs = kwargs.pop('decoder_kwargs', {})
        # Explicit *_kwargs take precedence over the encoder/decoder classes.
        if not encoder_kwargs and encoder:
            encoder_kwargs.update({'cls':encoder})
        if not decoder_kwargs and decoder:
            decoder_kwargs.update({'cls':decoder, 'parse_float':decimal.Decimal})
        self.encoder_kwargs = encoder_kwargs
        self.decoder_kwargs = decoder_kwargs
        self.ignore_error = kwargs.pop('ignore_error', False)
        # 'null' (the JSON literal) is the stored default, not SQL NULL.
        kwargs['default'] = kwargs.get('default', 'null')
        kwargs['help_text'] = kwargs.get('help_text', self.default_error_messages['invalid'])
        super(JSONField, self).__init__(*args, **kwargs)
    def db_type(self, *args, **kwargs):
        # Allow a per-field column-type override (e.g. native json columns).
        if self._db_type:
            return self._db_type
        return super(JSONField, self).db_type(*args, **kwargs)
    def to_python(self, value):
        """Deserialize a DB string to Python; non-strings pass through."""
        if value is None: # allow blank objects
            return None
        if isinstance(value, six.string_types):
            try:
                value = json.loads(value, **self.decoder_kwargs)
            except JSON_DECODE_ERROR:
                # With ignore_error semantics elsewhere, an undecodable
                # string is simply kept as-is.
                pass
        return value
    def get_db_prep_value(self, value, *args, **kwargs):
        """Serialize a Python value for storage ('force' skips the NULL shortcut)."""
        if self.null and value is None and not kwargs.get('force'):
            return None
        if isinstance(value, six.string_types) and self.ignore_error:
            # Value never decoded successfully; store the raw string back.
            return value
        return json.dumps(value, **self.encoder_kwargs)
    def value_to_string(self, obj):
        return self.get_db_prep_value(self._get_val_from_obj(obj))
    def value_from_object(self, obj):
        """Return the field's value as a JSON string (used by forms)."""
        raw_value = super(JSONField, self).value_from_object(obj)
        if isinstance(raw_value, six.string_types) and self.ignore_error:
            return raw_value
        return json.dumps(raw_value, **self.encoder_kwargs)
    def formfield(self, **kwargs):
        defaults = {
            'form_class': kwargs.get('form_class', JSONFormField),
            'evaluate': self.evaluate_formfield,
            'encoder_kwargs': self.encoder_kwargs,
            'decoder_kwargs': self.decoder_kwargs,
            'ignore_error': self.ignore_error
        }
        defaults.update(kwargs)
        return super(JSONField, self).formfield(**defaults)
    def contribute_to_class(self, cls, name):
        super(JSONField, self).contribute_to_class(cls, name)
        # Convenience accessors: get_<field>_json() / set_<field>_json().
        def get_json(model_instance):
            return self.get_db_prep_value(getattr(model_instance, self.attname, None), force=True)
        setattr(cls, 'get_%s_json' % self.name, get_json)
        def set_json(model_instance, value):
            return setattr(model_instance, self.attname, self.to_python(value))
        setattr(cls, 'set_%s_json' % self.name, set_json)
        setattr(cls, name, Creator(self, lazy=self.lazy)) # deferred deserialization
try:
# add support for South migrations
from south.modelsinspector import add_introspection_rules
rules = [
(
(JSONField,),
[],
{
'db_type': ['_db_type', {'default': None}]
}
)
]
add_introspection_rules(rules, ['^json_field\.fields\.JSONField'])
except ImportError:
pass
| [
"zcyuefan@126.com"
] | zcyuefan@126.com |
4f8cdbd5a7b33f05ef45e53e15cec978c09c4e1f | a86acf28472700008261fb3709134483b4d4c7e2 | /myutils/seq2seq_peeky.py | 39147587ca4e866f28e07cc94f092969ca0bb185 | [] | no_license | s1len7/ai-rnn | 5a0ab40e7aa0371f41131e99371991c3aeecfa98 | e3f0a07f1a5e4681b2209276b5f3d6a5d4901f1d | refs/heads/master | 2022-11-30T13:55:41.267065 | 2020-07-29T11:19:18 | 2020-07-29T11:19:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,012 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 1 11:24:24 2020
@author: shkim
"""
"""
## seq2seq 성능 개선
* 1) 입력 데이터 반전(Reverse)
* 2) 엿보기(Peeky)
"""
"""
## seq2seq 성능 개선 : 엿보기(Peeky)
### base seq2seq 모델에서의 동작
* Encoder는 입력문장(문제문장)을 고정길이 벡터 h로 변환함
* 이때 h안에는 Decoder에 필요한 정보가 모두 담겨 있음
* 즉 h가 Decoder에 있어서 유일한 정보인 셈임
* 최초 시각의 LSTM 계층만이 벡터 h를 이용함 --> 이 중요한 h 정보를 더 활용할 수 없을까?
### 개선된 seq2seq 모델 : 엿보기(Peeky) 모델
* 중요한 정보가 담긴 Encoder의 출력 h를 Decoder의 다른 계층에도 전달해 주는 것
* Encoder의 출력 h를 모든 시각의 LSTM 계층과 Affine 계층에 전해줌 --> 집단 지성
* LSTM 계층과 Affine 계층에 입력되는 벡터가 2개씩 됨 --> concatenate 됨
"""
#%%
"""
## 개선된 seq2seq 모델 : 엿보기(Peeky) 모델 구현
"""
import numpy as np
import sys
sys.path.append('..')
from myutils.time_layers import TimeEmbedding, TimeLSTM, TimeAffine, TimeSoftmaxWithLoss
from myutils.seq2seq import Seq2seq, Encoder
#%%
class DecoderPeeky:
    """seq2seq decoder with "peeking" (Peeky).

    The encoder's fixed-length vector ``h`` is used not only as the initial
    LSTM state but is also concatenated onto the *input* of the LSTM and of
    the Affine layer at every time step, so all layers can exploit the
    encoded information rather than just the first LSTM step.
    """

    def __init__(self, vocab_size, wordvec_size, hidden_size):
        # NOTE: the third parameter was previously misspelled "hideen_size";
        # renamed for consistency with Seq2seqPeeky.__init__.  The in-file
        # caller passes it positionally, so the rename is backward-compatible.
        V, D, H = vocab_size, wordvec_size, hidden_size
        rn = np.random.randn

        embed_W = (rn(V, D) / 100).astype('f')
        # The LSTM input at each step is [h ; word embedding] -> H+D columns;
        # weights use Xavier-style 1/sqrt(fan_in) scaling.
        lstm_Wx = (rn(H+D, 4*H) / np.sqrt(H+D)).astype('f')
        lstm_Wh = (rn(H, 4*H) / np.sqrt(H)).astype('f')
        lstm_b = np.zeros(4*H).astype('f')
        # The Affine input at each step is [h ; LSTM output] -> H+H columns.
        affine_W = (rn(H+H, V) / np.sqrt(H+H)).astype('f')
        affine_b = np.zeros(V).astype('f')

        self.embed = TimeEmbedding(embed_W)
        self.lstm = TimeLSTM(lstm_Wx, lstm_Wh, lstm_b, stateful=True)
        self.affine = TimeAffine(affine_W, affine_b)

        # Collect parameters/gradients from all sub-layers in order.
        self.params, self.grads = [], []
        for layer in (self.embed, self.lstm, self.affine):
            self.params += layer.params
            self.grads += layer.grads
        self.cache = None

    def forward(self, xs, h):
        """Return vocabulary scores for target ids ``xs`` (N, T) given the
        encoder state ``h`` (N, H)."""
        N, T = xs.shape
        N, H = h.shape

        self.lstm.set_state(h)

        out = self.embed.forward(xs)
        # Broadcast h to every time step: (N, H) -> (N, T, H).
        hs = np.repeat(h, T, axis=0).reshape(N, T, H)
        # Peek: prepend h to the embedding at each step.
        out = np.concatenate((hs, out), axis=2)

        out = self.lstm.forward(out)
        # Peek again: prepend h to the LSTM output before the Affine layer.
        out = np.concatenate((hs, out), axis=2)

        score = self.affine.forward(out)
        self.cache = H  # remember H to split concatenated gradients later
        return score

    def backward(self, dscore):
        """Backpropagate; return ``dh``, the gradient w.r.t. the encoder
        vector ``h``."""
        H = self.cache

        dout = self.affine.backward(dscore)
        # Split the concatenated gradient back into its [hs ; lstm-out] parts.
        dout, dhs0 = dout[:, :, H:], dout[:, :, :H]
        dout = self.lstm.backward(dout)
        dembed, dhs1 = dout[:, :, H:], dout[:, :, :H]
        self.embed.backward(dembed)

        # h received gradient from every peeked copy (summed over time)
        # plus from the initial LSTM state.
        dhs = dhs0 + dhs1
        dh = self.lstm.dh + np.sum(dhs, axis=1)
        return dh

    def generate(self, h, start_id, sample_size):
        """Greedily generate ``sample_size`` ids from encoder state ``h``
        (batch of 1), starting with ``start_id``."""
        sampled = []
        char_id = start_id
        self.lstm.set_state(h)

        H = h.shape[1]
        peeky_h = h.reshape(1, 1, H)  # (1, 1, H) so it concatenates per step
        for _ in range(sample_size):
            x = np.array([char_id]).reshape((1, 1))

            out = self.embed.forward(x)
            out = np.concatenate((peeky_h, out), axis=2)
            out = self.lstm.forward(out)
            out = np.concatenate((peeky_h, out), axis=2)
            score = self.affine.forward(out)

            # Greedy decoding: always take the highest-scoring id.
            char_id = np.argmax(score.flatten())
            sampled.append(char_id)
        return sampled
#%%
class Seq2seqPeeky(Seq2seq):
    """seq2seq variant whose decoder "peeks" at the encoder vector at every
    time step (see ``DecoderPeeky``); all training and generation logic is
    inherited unchanged from ``Seq2seq``."""

    def __init__(self, vocab_size, wordvec_size, hidden_size):
        self.encoder = Encoder(vocab_size, wordvec_size, hidden_size)
        self.decoder = DecoderPeeky(vocab_size, wordvec_size, hidden_size)
        self.softmax = TimeSoftmaxWithLoss()
        # Aggregate trainable parameters/gradients, encoder first.
        self.params = [*self.encoder.params, *self.decoder.params]
        self.grads = [*self.encoder.grads, *self.decoder.grads]
#%%
| [
"shkim.hi@gmail.com"
] | shkim.hi@gmail.com |
80e5198c86ed27d6c8417c69bd3be133fb25d9a9 | 87738f6f80f4fe03ac15d6de02b9b51f4e340c4b | /Part 3 - Classification/Section 10 - Support Vector Machine (SVM)/svm.py | d94e5cd0a3aee556681f4db83424c2c51038813a | [
"MIT"
] | permissive | Nikronic/Machine-Learning-Models | b4209dcc5f5a428da9447bc40fc93b9f0e171880 | 9fb48463ee2211eec800b2436699508f55d5ee28 | refs/heads/master | 2021-11-10T14:50:56.425443 | 2021-11-07T15:42:48 | 2021-11-07T15:42:48 | 142,788,604 | 34 | 13 | null | null | null | null | UTF-8 | Python | false | false | 3,824 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 17 23:26:31 2018
@author: Mohammad Doosti Lakhani
"""
# Imporing libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
x = dataset.iloc[:,2:4].values
y = dataset.iloc[:,4].values
# Feature scaling
from sklearn.preprocessing import StandardScaler
standardscaler_x = StandardScaler()
standardscaler_x = standardscaler_x.fit(x)
x = standardscaler_x.transform(x)
# Splitting dataset into Train set and Test set
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test = train_test_split(x,y, train_size = 0.75 , random_state=0)
# Fitting the K-Nearest Neighbors model to the train set
from sklearn.svm import SVC
classifier = SVC(kernel='rbf', random_state=0)
classifier = classifier.fit(x_train,y_train)
""" Try to uncomment below code and see the visualization output (Try different kernels)"""
"""
classifier = SVC(kernel='poly', degree = 3, random_state=0)
classifier = classifier.fit(x_train,y_train)
classifier = SVC(kernel='linear', random_state=0) ## (Equals to SVR)
classifier = classifier.fit(x_train,y_train)
classifier = SVC(kernel='sigmoid', random_state=0)
classifier = classifier.fit(x_train,y_train)
"""
# Make the prediction on train set
y_train_pred = classifier.predict(x_train)
# Make the prediction on train set
y_test_pred = classifier.predict(x_test)
# Acurracy on test and train set
from sklearn.metrics import confusion_matrix
cm_train = confusion_matrix(y_train,y_train_pred)
cm_test = confusion_matrix(y_test,y_test_pred)
import os
import sys
scriptpath = "../../Tools" # functions of acc and CAP
# Add the directory containing your module to the Python path
sys.path.append(os.path.abspath(scriptpath))
import accuracy as ac
t_train,f_train,acc_train = ac.accuracy_on_cm(cm_train)
print('Train status = #{} True, #{} False, %{} Accuracy'.format(t_train,f_train,acc_train*100))
t_test,f_test,acc_test = ac.accuracy_on_cm(cm_test)
print('Test status = #{} True, #{} False, %{} Accuracy'.format(t_test,f_test,acc_test*100))
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = x_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('SVM - rbf kernel (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = x_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('SVM - rbf kernel (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
| [
"nikan.doosti@outlook.com"
] | nikan.doosti@outlook.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.