file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
client_enum_component_type.go | package bungo
import (
"errors"
"fmt"
)
const (
ComponentsNoneKey = 0
ComponentsProfilesKey = 100
ComponentsVendorReceiptsKey = 101
ComponentsProfileInventoriesKey = 102
ComponentsProfileCurrenciesKey = 103
ComponentsProfileProgressionKey = 104
ComponentsPlatformSilverKey = 105
ComponentsCharactersKey = 200
ComponentsCharacterInventoriesKey = 201
ComponentsCharacterProgressionsKey = 202
CharacterRenderDataKey = 203
ComponentsCharacterActivitiesKey = 204
ComponentsCharacterEquipmentKey = 205
ComponentsItemInstancesKey = 300
ComponentsItemObjectivesKey = 301
ComponentsItemPerksKey = 302
ComponentsItemRenderDataKey = 303
ComponentsItemStatsKey = 304
ComponentsItemSocketsKey = 305
ComponentsItemTalentGridsKey = 306
ComponentsItemCommonDataKey = 307
ComponentsItemPlugStatesKey = 308
ComponentsVendorsKey = 400
ComponentsVendorCategoriesKey = 401
ComponentsVendorSalesKey = 402
ComponentsKiosksKey = 500
ComponentsCurrencyLookupsKey = 600
ComponentsPresentationNodesKey = 700
ComponentsCollectiblesKey = 800
ComponentsRecordsKey = 900
ComponentsTransitoryKey = 1000
)
type ComponentType string
func ComponentTypesManyS(keys ...int) (out []string) {
for _, e := range ComponentTypesMany(keys...) {
out = append(out, string(e))
}
return out
}
func ComponentTypesMany(keys ...int) (out []ComponentType) {
for _, key := range keys {
eout, _, _ := ComponentTypesE(key)
out = append(out, eout)
}
return out
}
func ComponentTypes(key int) ComponentType {
out, _, _ := ComponentTypesE(key)
return out
}
func ComponentTypesE(key int) (ComponentType, string, error) {
switch key {
case ComponentsNoneKey:
return "None", "", nil
case ComponentsProfilesKey:
description := `
Profiles is the most basic component, only relevant when calling GetProfile.
This returns basic information about the profile, which is almost nothing:
- a list of characterIds,
- some information about the last time you logged in, and;
- that most sobering statistic: how long you've played.
`
return "Profiles", description, nil
case ComponentsVendorReceiptsKey:
description := `
Only applicable for GetProfile, this will return information about receipts for refundable vendor items.
`
return "VendorReceipts", description, nil
case ComponentsProfileInventoriesKey:
description := `
Asking for this will get you the profile-level inventories, such as your Vault buckets
(yeah, the Vault is really inventory buckets located on your Profile)
`
return "ProfileInventories", description, nil
case ComponentsProfileCurrenciesKey:
description := `
This will get you a summary of items on your Profile that we consider to be "currencies", such as Glimmer.
I mean, if there's Glimmer in Destiny 2. I didn't say there was Glimmer.
`
return "ProfileCurrencies", description, nil
case ComponentsProfileProgressionKey:
description := `
This will get you any progression-related information that exists on a Profile-wide level, across all characters.
`
return "ProfileProgression", description, nil
case ComponentsPlatformSilverKey:
description := `
This will get you information about the silver that this profile has on every platform on which it plays.
You may only request this component for the logged in user's Profile, and will not receive it if you request it for another Profile.
`
return "PlatformSilver", description, nil
case ComponentsCharactersKey:
description := `
This will get you summary info about each of the characters in the profile.
`
return "Characters", description, nil
case ComponentsCharacterInventoriesKey:
description := `
This will get you information about any non-equipped items on the character or character(s) in question,
if you're allowed to see it.
You have to either be authenticated as that user, or that user must allow anonymous viewing of their non-equipped items in Bungie.Net settings to actually get results.
`
return "CharacterInventories", description, nil
case ComponentsCharacterProgressionsKey:
description := `
This will get you information about the progression (faction, experience, etc... "levels") relevant to each character.
You have to either be authenticated as that user, or that user must allow anonymous viewing of their progression info in Bungie.Net settings to actually get results.
`
return "CharacterProgressions", description, nil
case CharacterRenderDataKey:
description := `
This will get you just enough information to be able to render the character in 3D if you have written a 3D rendering library for Destiny Characters, or "borrowed" ours.
It's okay, I won't tell anyone if you're using it.
I'm no snitch. (actually, we don't care if you use it - go to town)
`
return "RenderData", description, nil
case ComponentsCharacterActivitiesKey:
description := `
This will return info about activities that a user can see and gating on it, if you are the currently authenticated user or the user has elected to allow anonymous viewing of its progression info.
Note that the data returned by this can be unfortunately problematic and relatively unreliable in some cases.
We'll eventually work on making it more consistently reliable.
`
return "CharacterActivities", description, nil
case ComponentsCharacterEquipmentKey:
description := `
This will return info about the equipped items on the character(s). Everyone can see this.
`
return "CharacterEquipment", description, nil
case ComponentsItemInstancesKey:
description := `
This will return basic info about instanced items - whether they can be equipped, their tracked status, and some info commonly needed in many places (current damage type, primary stat value, etc)
`
return "ItemInstances", description, nil
case ComponentsItemObjectivesKey:
description := `
Items can have Objectives (DestinyObjectiveDefinition) bound to them.
If they do, this will return info for items that have such bound objectives.
`
return "ItemObjectives", description, nil
case ComponentsItemPerksKey:
description := `
Items can have perks (DestinyPerkDefinition).
If they do, this will return info for what perks are active on items.
`
return "ItemPerks", description, nil
case ComponentsItemRenderDataKey:
description := `
If you just want to render the weapon, this is just enough info to do that rendering.
`
return "ItemRenderData", description, nil
case ComponentsItemStatsKey:
description := `
Items can have stats, like rate of fire. Asking for this component will return requested item's stats if they have stats.
`
return "ItemStats", description, nil
case ComponentsItemSocketsKey:
description := `
Items can have sockets, where plugs can be inserted.
Asking for this component will return all info relevant to the sockets on items that have them.
`
return "ItemSockets", description, nil
case ComponentsItemTalentGridsKey:
description := `
Items can have talent grids, though that matters a lot less frequently than it used to.
Asking for this component will return all relevant info about activated Nodes and Steps on this talent grid, like the good ol' days.
`
return "ItemTalentGrids", description, nil
case ComponentsItemCommonDataKey:
description := `
Items that *aren't* instanced still have important information you need to know:
- how much of it you have,;
- the itemHash so you can look up their DestinyInventoryItemDefinition,;
- whether they're locked,;
- etc...
Both instanced and non-instanced items will have these properties.
You will get this automatically with Inventory components - you only need to pass this when calling GetItem on a specific item.
`
return "ItemCommonData", description, nil
case ComponentsItemPlugStatesKey:
description := `
Items that are "Plugs" can be inserted into sockets.
This returns statuses about those plugs and why they can/can't be inserted.
I hear you giggling, there's nothing funny about inserting plugs. Get your head out of the gutter and pay attention!
`
return "ItemPlugStates", description, nil
case ComponentsVendorsKey:
description := `
When obtaining vendor information, this will return summary information about the Vendor or Vendors being returned.
`
return "Vendors", description, nil
case ComponentsVendorCategoriesKey:
description := `
When obtaining vendor information, this will return information about the categories of items provided by the Vendor.
`
return "VendorCategories", description, nil
case ComponentsVendorSalesKey:
description := `
When obtaining vendor information, this will return the information about items being sold by the Vendor.
`
return "VendorSales", description, nil
case ComponentsKiosksKey:
description := `
Asking for this component will return you the account's Kiosk statuses: that is, what items have been filled out/acquired.
But only if you are the currently authenticated user or the user has elected to allow anonymous viewing of its progression info.
`
return "Kiosks", description, nil
case ComponentsCurrencyLookupsKey:
description := `
A "shortcut" component that will give you all of the item hashes/quantities of items that the requested character can use to determine if an action (purchasing, socket insertion) has the required currency.
(recall that all currencies are just items, and that some vendor purchases require items that you might not traditionally consider to be a "currency", like plugs/mods!)
`
return "CurrencyLookups", description, nil
case ComponentsPresentationNodesKey:
description := `
Returns summary status information about all "Presentation Nodes".
See DestinyPresentationNodeDefinition for more details, but the gist is that these are entities used by the game UI to bucket Collectibles and Records into a hierarchy of categories.
You may ask for and use this data if you want to perform similar bucketing in your own UI: or you can skip it and roll your own.
`
return "PresentationNodes", description, nil
case ComponentsCollectiblesKey:
description := `
Returns summary status information about all "Collectibles". | return "Collectibles", description, nil
case ComponentsRecordsKey:
description := `
Returns summary status information about all "Records" also known in the game as "Triumphs".
I know, it's confusing because there's also "Moments of Triumph" that will themselves be represented as "Triumphs."
`
return "Records", description, nil
case ComponentsTransitoryKey:
description := `
Returns information that Bungie considers to be "Transitory":
- data that may change too frequently; or
- come from a non-authoritative source
such that we don't consider the data to be fully trustworthy, but that might prove useful for some limited use cases.
We can provide no guarantee of timeliness nor consistency for this data: buyer beware with the Transitory component.
`
return "Transitory", description, nil
default:
return "", "", errors.New(fmt.Sprintf("unknown key: %d", key))
}
} | These are records of what items you've discovered while playing Destiny, and some other basic information.
For detailed information, you will have to call a separate endpoint devoted to the purpose.
` | random_line_split |
client_enum_component_type.go | package bungo
import (
"errors"
"fmt"
)
const (
ComponentsNoneKey = 0
ComponentsProfilesKey = 100
ComponentsVendorReceiptsKey = 101
ComponentsProfileInventoriesKey = 102
ComponentsProfileCurrenciesKey = 103
ComponentsProfileProgressionKey = 104
ComponentsPlatformSilverKey = 105
ComponentsCharactersKey = 200
ComponentsCharacterInventoriesKey = 201
ComponentsCharacterProgressionsKey = 202
CharacterRenderDataKey = 203
ComponentsCharacterActivitiesKey = 204
ComponentsCharacterEquipmentKey = 205
ComponentsItemInstancesKey = 300
ComponentsItemObjectivesKey = 301
ComponentsItemPerksKey = 302
ComponentsItemRenderDataKey = 303
ComponentsItemStatsKey = 304
ComponentsItemSocketsKey = 305
ComponentsItemTalentGridsKey = 306
ComponentsItemCommonDataKey = 307
ComponentsItemPlugStatesKey = 308
ComponentsVendorsKey = 400
ComponentsVendorCategoriesKey = 401
ComponentsVendorSalesKey = 402
ComponentsKiosksKey = 500
ComponentsCurrencyLookupsKey = 600
ComponentsPresentationNodesKey = 700
ComponentsCollectiblesKey = 800
ComponentsRecordsKey = 900
ComponentsTransitoryKey = 1000
)
type ComponentType string
func ComponentTypesManyS(keys ...int) (out []string) {
for _, e := range ComponentTypesMany(keys...) {
out = append(out, string(e))
}
return out
}
func ComponentTypesMany(keys ...int) (out []ComponentType) {
for _, key := range keys {
eout, _, _ := ComponentTypesE(key)
out = append(out, eout)
}
return out
}
func ComponentTypes(key int) ComponentType {
out, _, _ := ComponentTypesE(key)
return out
}
func | (key int) (ComponentType, string, error) {
switch key {
case ComponentsNoneKey:
return "None", "", nil
case ComponentsProfilesKey:
description := `
Profiles is the most basic component, only relevant when calling GetProfile.
This returns basic information about the profile, which is almost nothing:
- a list of characterIds,
- some information about the last time you logged in, and;
- that most sobering statistic: how long you've played.
`
return "Profiles", description, nil
case ComponentsVendorReceiptsKey:
description := `
Only applicable for GetProfile, this will return information about receipts for refundable vendor items.
`
return "VendorReceipts", description, nil
case ComponentsProfileInventoriesKey:
description := `
Asking for this will get you the profile-level inventories, such as your Vault buckets
(yeah, the Vault is really inventory buckets located on your Profile)
`
return "ProfileInventories", description, nil
case ComponentsProfileCurrenciesKey:
description := `
This will get you a summary of items on your Profile that we consider to be "currencies", such as Glimmer.
I mean, if there's Glimmer in Destiny 2. I didn't say there was Glimmer.
`
return "ProfileCurrencies", description, nil
case ComponentsProfileProgressionKey:
description := `
This will get you any progression-related information that exists on a Profile-wide level, across all characters.
`
return "ProfileProgression", description, nil
case ComponentsPlatformSilverKey:
description := `
This will get you information about the silver that this profile has on every platform on which it plays.
You may only request this component for the logged in user's Profile, and will not receive it if you request it for another Profile.
`
return "PlatformSilver", description, nil
case ComponentsCharactersKey:
description := `
This will get you summary info about each of the characters in the profile.
`
return "Characters", description, nil
case ComponentsCharacterInventoriesKey:
description := `
This will get you information about any non-equipped items on the character or character(s) in question,
if you're allowed to see it.
You have to either be authenticated as that user, or that user must allow anonymous viewing of their non-equipped items in Bungie.Net settings to actually get results.
`
return "CharacterInventories", description, nil
case ComponentsCharacterProgressionsKey:
description := `
This will get you information about the progression (faction, experience, etc... "levels") relevant to each character.
You have to either be authenticated as that user, or that user must allow anonymous viewing of their progression info in Bungie.Net settings to actually get results.
`
return "CharacterProgressions", description, nil
case CharacterRenderDataKey:
description := `
This will get you just enough information to be able to render the character in 3D if you have written a 3D rendering library for Destiny Characters, or "borrowed" ours.
It's okay, I won't tell anyone if you're using it.
I'm no snitch. (actually, we don't care if you use it - go to town)
`
return "RenderData", description, nil
case ComponentsCharacterActivitiesKey:
description := `
This will return info about activities that a user can see and gating on it, if you are the currently authenticated user or the user has elected to allow anonymous viewing of its progression info.
Note that the data returned by this can be unfortunately problematic and relatively unreliable in some cases.
We'll eventually work on making it more consistently reliable.
`
return "CharacterActivities", description, nil
case ComponentsCharacterEquipmentKey:
description := `
This will return info about the equipped items on the character(s). Everyone can see this.
`
return "CharacterEquipment", description, nil
case ComponentsItemInstancesKey:
description := `
This will return basic info about instanced items - whether they can be equipped, their tracked status, and some info commonly needed in many places (current damage type, primary stat value, etc)
`
return "ItemInstances", description, nil
case ComponentsItemObjectivesKey:
description := `
Items can have Objectives (DestinyObjectiveDefinition) bound to them.
If they do, this will return info for items that have such bound objectives.
`
return "ItemObjectives", description, nil
case ComponentsItemPerksKey:
description := `
Items can have perks (DestinyPerkDefinition).
If they do, this will return info for what perks are active on items.
`
return "ItemPerks", description, nil
case ComponentsItemRenderDataKey:
description := `
If you just want to render the weapon, this is just enough info to do that rendering.
`
return "ItemRenderData", description, nil
case ComponentsItemStatsKey:
description := `
Items can have stats, like rate of fire. Asking for this component will return requested item's stats if they have stats.
`
return "ItemStats", description, nil
case ComponentsItemSocketsKey:
description := `
Items can have sockets, where plugs can be inserted.
Asking for this component will return all info relevant to the sockets on items that have them.
`
return "ItemSockets", description, nil
case ComponentsItemTalentGridsKey:
description := `
Items can have talent grids, though that matters a lot less frequently than it used to.
Asking for this component will return all relevant info about activated Nodes and Steps on this talent grid, like the good ol' days.
`
return "ItemTalentGrids", description, nil
case ComponentsItemCommonDataKey:
description := `
Items that *aren't* instanced still have important information you need to know:
- how much of it you have,;
- the itemHash so you can look up their DestinyInventoryItemDefinition,;
- whether they're locked,;
- etc...
Both instanced and non-instanced items will have these properties.
You will get this automatically with Inventory components - you only need to pass this when calling GetItem on a specific item.
`
return "ItemCommonData", description, nil
case ComponentsItemPlugStatesKey:
description := `
Items that are "Plugs" can be inserted into sockets.
This returns statuses about those plugs and why they can/can't be inserted.
I hear you giggling, there's nothing funny about inserting plugs. Get your head out of the gutter and pay attention!
`
return "ItemPlugStates", description, nil
case ComponentsVendorsKey:
description := `
When obtaining vendor information, this will return summary information about the Vendor or Vendors being returned.
`
return "Vendors", description, nil
case ComponentsVendorCategoriesKey:
description := `
When obtaining vendor information, this will return information about the categories of items provided by the Vendor.
`
return "VendorCategories", description, nil
case ComponentsVendorSalesKey:
description := `
When obtaining vendor information, this will return the information about items being sold by the Vendor.
`
return "VendorSales", description, nil
case ComponentsKiosksKey:
description := `
Asking for this component will return you the account's Kiosk statuses: that is, what items have been filled out/acquired.
But only if you are the currently authenticated user or the user has elected to allow anonymous viewing of its progression info.
`
return "Kiosks", description, nil
case ComponentsCurrencyLookupsKey:
description := `
A "shortcut" component that will give you all of the item hashes/quantities of items that the requested character can use to determine if an action (purchasing, socket insertion) has the required currency.
(recall that all currencies are just items, and that some vendor purchases require items that you might not traditionally consider to be a "currency", like plugs/mods!)
`
return "CurrencyLookups", description, nil
case ComponentsPresentationNodesKey:
description := `
Returns summary status information about all "Presentation Nodes".
See DestinyPresentationNodeDefinition for more details, but the gist is that these are entities used by the game UI to bucket Collectibles and Records into a hierarchy of categories.
You may ask for and use this data if you want to perform similar bucketing in your own UI: or you can skip it and roll your own.
`
return "PresentationNodes", description, nil
case ComponentsCollectiblesKey:
description := `
Returns summary status information about all "Collectibles".
These are records of what items you've discovered while playing Destiny, and some other basic information.
For detailed information, you will have to call a separate endpoint devoted to the purpose.
`
return "Collectibles", description, nil
case ComponentsRecordsKey:
description := `
Returns summary status information about all "Records" also known in the game as "Triumphs".
I know, it's confusing because there's also "Moments of Triumph" that will themselves be represented as "Triumphs."
`
return "Records", description, nil
case ComponentsTransitoryKey:
description := `
Returns information that Bungie considers to be "Transitory":
- data that may change too frequently; or
- come from a non-authoritative source
such that we don't consider the data to be fully trustworthy, but that might prove useful for some limited use cases.
We can provide no guarantee of timeliness nor consistency for this data: buyer beware with the Transitory component.
`
return "Transitory", description, nil
default:
return "", "", errors.New(fmt.Sprintf("unknown key: %d", key))
}
}
| ComponentTypesE | identifier_name |
client_enum_component_type.go | package bungo
import (
"errors"
"fmt"
)
const (
ComponentsNoneKey = 0
ComponentsProfilesKey = 100
ComponentsVendorReceiptsKey = 101
ComponentsProfileInventoriesKey = 102
ComponentsProfileCurrenciesKey = 103
ComponentsProfileProgressionKey = 104
ComponentsPlatformSilverKey = 105
ComponentsCharactersKey = 200
ComponentsCharacterInventoriesKey = 201
ComponentsCharacterProgressionsKey = 202
CharacterRenderDataKey = 203
ComponentsCharacterActivitiesKey = 204
ComponentsCharacterEquipmentKey = 205
ComponentsItemInstancesKey = 300
ComponentsItemObjectivesKey = 301
ComponentsItemPerksKey = 302
ComponentsItemRenderDataKey = 303
ComponentsItemStatsKey = 304
ComponentsItemSocketsKey = 305
ComponentsItemTalentGridsKey = 306
ComponentsItemCommonDataKey = 307
ComponentsItemPlugStatesKey = 308
ComponentsVendorsKey = 400
ComponentsVendorCategoriesKey = 401
ComponentsVendorSalesKey = 402
ComponentsKiosksKey = 500
ComponentsCurrencyLookupsKey = 600
ComponentsPresentationNodesKey = 700
ComponentsCollectiblesKey = 800
ComponentsRecordsKey = 900
ComponentsTransitoryKey = 1000
)
type ComponentType string
func ComponentTypesManyS(keys ...int) (out []string) {
for _, e := range ComponentTypesMany(keys...) {
out = append(out, string(e))
}
return out
}
func ComponentTypesMany(keys ...int) (out []ComponentType) {
for _, key := range keys |
return out
}
func ComponentTypes(key int) ComponentType {
out, _, _ := ComponentTypesE(key)
return out
}
func ComponentTypesE(key int) (ComponentType, string, error) {
switch key {
case ComponentsNoneKey:
return "None", "", nil
case ComponentsProfilesKey:
description := `
Profiles is the most basic component, only relevant when calling GetProfile.
This returns basic information about the profile, which is almost nothing:
- a list of characterIds,
- some information about the last time you logged in, and;
- that most sobering statistic: how long you've played.
`
return "Profiles", description, nil
case ComponentsVendorReceiptsKey:
description := `
Only applicable for GetProfile, this will return information about receipts for refundable vendor items.
`
return "VendorReceipts", description, nil
case ComponentsProfileInventoriesKey:
description := `
Asking for this will get you the profile-level inventories, such as your Vault buckets
(yeah, the Vault is really inventory buckets located on your Profile)
`
return "ProfileInventories", description, nil
case ComponentsProfileCurrenciesKey:
description := `
This will get you a summary of items on your Profile that we consider to be "currencies", such as Glimmer.
I mean, if there's Glimmer in Destiny 2. I didn't say there was Glimmer.
`
return "ProfileCurrencies", description, nil
case ComponentsProfileProgressionKey:
description := `
This will get you any progression-related information that exists on a Profile-wide level, across all characters.
`
return "ProfileProgression", description, nil
case ComponentsPlatformSilverKey:
description := `
This will get you information about the silver that this profile has on every platform on which it plays.
You may only request this component for the logged in user's Profile, and will not receive it if you request it for another Profile.
`
return "PlatformSilver", description, nil
case ComponentsCharactersKey:
description := `
This will get you summary info about each of the characters in the profile.
`
return "Characters", description, nil
case ComponentsCharacterInventoriesKey:
description := `
This will get you information about any non-equipped items on the character or character(s) in question,
if you're allowed to see it.
You have to either be authenticated as that user, or that user must allow anonymous viewing of their non-equipped items in Bungie.Net settings to actually get results.
`
return "CharacterInventories", description, nil
case ComponentsCharacterProgressionsKey:
description := `
This will get you information about the progression (faction, experience, etc... "levels") relevant to each character.
You have to either be authenticated as that user, or that user must allow anonymous viewing of their progression info in Bungie.Net settings to actually get results.
`
return "CharacterProgressions", description, nil
case CharacterRenderDataKey:
description := `
This will get you just enough information to be able to render the character in 3D if you have written a 3D rendering library for Destiny Characters, or "borrowed" ours.
It's okay, I won't tell anyone if you're using it.
I'm no snitch. (actually, we don't care if you use it - go to town)
`
return "RenderData", description, nil
case ComponentsCharacterActivitiesKey:
description := `
This will return info about activities that a user can see and gating on it, if you are the currently authenticated user or the user has elected to allow anonymous viewing of its progression info.
Note that the data returned by this can be unfortunately problematic and relatively unreliable in some cases.
We'll eventually work on making it more consistently reliable.
`
return "CharacterActivities", description, nil
case ComponentsCharacterEquipmentKey:
description := `
This will return info about the equipped items on the character(s). Everyone can see this.
`
return "CharacterEquipment", description, nil
case ComponentsItemInstancesKey:
description := `
This will return basic info about instanced items - whether they can be equipped, their tracked status, and some info commonly needed in many places (current damage type, primary stat value, etc)
`
return "ItemInstances", description, nil
case ComponentsItemObjectivesKey:
description := `
Items can have Objectives (DestinyObjectiveDefinition) bound to them.
If they do, this will return info for items that have such bound objectives.
`
return "ItemObjectives", description, nil
case ComponentsItemPerksKey:
description := `
Items can have perks (DestinyPerkDefinition).
If they do, this will return info for what perks are active on items.
`
return "ItemPerks", description, nil
case ComponentsItemRenderDataKey:
description := `
If you just want to render the weapon, this is just enough info to do that rendering.
`
return "ItemRenderData", description, nil
case ComponentsItemStatsKey:
description := `
Items can have stats, like rate of fire. Asking for this component will return requested item's stats if they have stats.
`
return "ItemStats", description, nil
case ComponentsItemSocketsKey:
description := `
Items can have sockets, where plugs can be inserted.
Asking for this component will return all info relevant to the sockets on items that have them.
`
return "ItemSockets", description, nil
case ComponentsItemTalentGridsKey:
description := `
Items can have talent grids, though that matters a lot less frequently than it used to.
Asking for this component will return all relevant info about activated Nodes and Steps on this talent grid, like the good ol' days.
`
return "ItemTalentGrids", description, nil
case ComponentsItemCommonDataKey:
description := `
Items that *aren't* instanced still have important information you need to know:
- how much of it you have,;
- the itemHash so you can look up their DestinyInventoryItemDefinition,;
- whether they're locked,;
- etc...
Both instanced and non-instanced items will have these properties.
You will get this automatically with Inventory components - you only need to pass this when calling GetItem on a specific item.
`
return "ItemCommonData", description, nil
case ComponentsItemPlugStatesKey:
description := `
Items that are "Plugs" can be inserted into sockets.
This returns statuses about those plugs and why they can/can't be inserted.
I hear you giggling, there's nothing funny about inserting plugs. Get your head out of the gutter and pay attention!
`
return "ItemPlugStates", description, nil
case ComponentsVendorsKey:
description := `
When obtaining vendor information, this will return summary information about the Vendor or Vendors being returned.
`
return "Vendors", description, nil
case ComponentsVendorCategoriesKey:
description := `
When obtaining vendor information, this will return information about the categories of items provided by the Vendor.
`
return "VendorCategories", description, nil
case ComponentsVendorSalesKey:
description := `
When obtaining vendor information, this will return the information about items being sold by the Vendor.
`
return "VendorSales", description, nil
case ComponentsKiosksKey:
description := `
Asking for this component will return you the account's Kiosk statuses: that is, what items have been filled out/acquired.
But only if you are the currently authenticated user or the user has elected to allow anonymous viewing of its progression info.
`
return "Kiosks", description, nil
case ComponentsCurrencyLookupsKey:
description := `
A "shortcut" component that will give you all of the item hashes/quantities of items that the requested character can use to determine if an action (purchasing, socket insertion) has the required currency.
(recall that all currencies are just items, and that some vendor purchases require items that you might not traditionally consider to be a "currency", like plugs/mods!)
`
return "CurrencyLookups", description, nil
case ComponentsPresentationNodesKey:
description := `
Returns summary status information about all "Presentation Nodes".
See DestinyPresentationNodeDefinition for more details, but the gist is that these are entities used by the game UI to bucket Collectibles and Records into a hierarchy of categories.
You may ask for and use this data if you want to perform similar bucketing in your own UI: or you can skip it and roll your own.
`
return "PresentationNodes", description, nil
case ComponentsCollectiblesKey:
description := `
Returns summary status information about all "Collectibles".
These are records of what items you've discovered while playing Destiny, and some other basic information.
For detailed information, you will have to call a separate endpoint devoted to the purpose.
`
return "Collectibles", description, nil
case ComponentsRecordsKey:
description := `
Returns summary status information about all "Records" also known in the game as "Triumphs".
I know, it's confusing because there's also "Moments of Triumph" that will themselves be represented as "Triumphs."
`
return "Records", description, nil
case ComponentsTransitoryKey:
description := `
Returns information that Bungie considers to be "Transitory":
- data that may change too frequently; or
- come from a non-authoritative source
such that we don't consider the data to be fully trustworthy, but that might prove useful for some limited use cases.
We can provide no guarantee of timeliness nor consistency for this data: buyer beware with the Transitory component.
`
return "Transitory", description, nil
default:
return "", "", errors.New(fmt.Sprintf("unknown key: %d", key))
}
}
| {
eout, _, _ := ComponentTypesE(key)
out = append(out, eout)
} | conditional_block |
client_enum_component_type.go | package bungo
import (
"errors"
"fmt"
)
const (
ComponentsNoneKey = 0
ComponentsProfilesKey = 100
ComponentsVendorReceiptsKey = 101
ComponentsProfileInventoriesKey = 102
ComponentsProfileCurrenciesKey = 103
ComponentsProfileProgressionKey = 104
ComponentsPlatformSilverKey = 105
ComponentsCharactersKey = 200
ComponentsCharacterInventoriesKey = 201
ComponentsCharacterProgressionsKey = 202
CharacterRenderDataKey = 203
ComponentsCharacterActivitiesKey = 204
ComponentsCharacterEquipmentKey = 205
ComponentsItemInstancesKey = 300
ComponentsItemObjectivesKey = 301
ComponentsItemPerksKey = 302
ComponentsItemRenderDataKey = 303
ComponentsItemStatsKey = 304
ComponentsItemSocketsKey = 305
ComponentsItemTalentGridsKey = 306
ComponentsItemCommonDataKey = 307
ComponentsItemPlugStatesKey = 308
ComponentsVendorsKey = 400
ComponentsVendorCategoriesKey = 401
ComponentsVendorSalesKey = 402
ComponentsKiosksKey = 500
ComponentsCurrencyLookupsKey = 600
ComponentsPresentationNodesKey = 700
ComponentsCollectiblesKey = 800
ComponentsRecordsKey = 900
ComponentsTransitoryKey = 1000
)
type ComponentType string
func ComponentTypesManyS(keys ...int) (out []string) {
for _, e := range ComponentTypesMany(keys...) {
out = append(out, string(e))
}
return out
}
func ComponentTypesMany(keys ...int) (out []ComponentType) {
for _, key := range keys {
eout, _, _ := ComponentTypesE(key)
out = append(out, eout)
}
return out
}
func ComponentTypes(key int) ComponentType {
out, _, _ := ComponentTypesE(key)
return out
}
func ComponentTypesE(key int) (ComponentType, string, error) | {
switch key {
case ComponentsNoneKey:
return "None", "", nil
case ComponentsProfilesKey:
description := `
Profiles is the most basic component, only relevant when calling GetProfile.
This returns basic information about the profile, which is almost nothing:
- a list of characterIds,
- some information about the last time you logged in, and;
- that most sobering statistic: how long you've played.
`
return "Profiles", description, nil
case ComponentsVendorReceiptsKey:
description := `
Only applicable for GetProfile, this will return information about receipts for refundable vendor items.
`
return "VendorReceipts", description, nil
case ComponentsProfileInventoriesKey:
description := `
Asking for this will get you the profile-level inventories, such as your Vault buckets
(yeah, the Vault is really inventory buckets located on your Profile)
`
return "ProfileInventories", description, nil
case ComponentsProfileCurrenciesKey:
description := `
This will get you a summary of items on your Profile that we consider to be "currencies", such as Glimmer.
I mean, if there's Glimmer in Destiny 2. I didn't say there was Glimmer.
`
return "ProfileCurrencies", description, nil
case ComponentsProfileProgressionKey:
description := `
This will get you any progression-related information that exists on a Profile-wide level, across all characters.
`
return "ProfileProgression", description, nil
case ComponentsPlatformSilverKey:
description := `
This will get you information about the silver that this profile has on every platform on which it plays.
You may only request this component for the logged in user's Profile, and will not receive it if you request it for another Profile.
`
return "PlatformSilver", description, nil
case ComponentsCharactersKey:
description := `
This will get you summary info about each of the characters in the profile.
`
return "Characters", description, nil
case ComponentsCharacterInventoriesKey:
description := `
This will get you information about any non-equipped items on the character or character(s) in question,
if you're allowed to see it.
You have to either be authenticated as that user, or that user must allow anonymous viewing of their non-equipped items in Bungie.Net settings to actually get results.
`
return "CharacterInventories", description, nil
case ComponentsCharacterProgressionsKey:
description := `
This will get you information about the progression (faction, experience, etc... "levels") relevant to each character.
You have to either be authenticated as that user, or that user must allow anonymous viewing of their progression info in Bungie.Net settings to actually get results.
`
return "CharacterProgressions", description, nil
case CharacterRenderDataKey:
description := `
This will get you just enough information to be able to render the character in 3D if you have written a 3D rendering library for Destiny Characters, or "borrowed" ours.
It's okay, I won't tell anyone if you're using it.
I'm no snitch. (actually, we don't care if you use it - go to town)
`
return "RenderData", description, nil
case ComponentsCharacterActivitiesKey:
description := `
This will return info about activities that a user can see and gating on it, if you are the currently authenticated user or the user has elected to allow anonymous viewing of its progression info.
Note that the data returned by this can be unfortunately problematic and relatively unreliable in some cases.
We'll eventually work on making it more consistently reliable.
`
return "CharacterActivities", description, nil
case ComponentsCharacterEquipmentKey:
description := `
This will return info about the equipped items on the character(s). Everyone can see this.
`
return "CharacterEquipment", description, nil
case ComponentsItemInstancesKey:
description := `
This will return basic info about instanced items - whether they can be equipped, their tracked status, and some info commonly needed in many places (current damage type, primary stat value, etc)
`
return "ItemInstances", description, nil
case ComponentsItemObjectivesKey:
description := `
Items can have Objectives (DestinyObjectiveDefinition) bound to them.
If they do, this will return info for items that have such bound objectives.
`
return "ItemObjectives", description, nil
case ComponentsItemPerksKey:
description := `
Items can have perks (DestinyPerkDefinition).
If they do, this will return info for what perks are active on items.
`
return "ItemPerks", description, nil
case ComponentsItemRenderDataKey:
description := `
If you just want to render the weapon, this is just enough info to do that rendering.
`
return "ItemRenderData", description, nil
case ComponentsItemStatsKey:
description := `
Items can have stats, like rate of fire. Asking for this component will return requested item's stats if they have stats.
`
return "ItemStats", description, nil
case ComponentsItemSocketsKey:
description := `
Items can have sockets, where plugs can be inserted.
Asking for this component will return all info relevant to the sockets on items that have them.
`
return "ItemSockets", description, nil
case ComponentsItemTalentGridsKey:
description := `
Items can have talent grids, though that matters a lot less frequently than it used to.
Asking for this component will return all relevant info about activated Nodes and Steps on this talent grid, like the good ol' days.
`
return "ItemTalentGrids", description, nil
case ComponentsItemCommonDataKey:
description := `
Items that *aren't* instanced still have important information you need to know:
- how much of it you have,;
- the itemHash so you can look up their DestinyInventoryItemDefinition,;
- whether they're locked,;
- etc...
Both instanced and non-instanced items will have these properties.
You will get this automatically with Inventory components - you only need to pass this when calling GetItem on a specific item.
`
return "ItemCommonData", description, nil
case ComponentsItemPlugStatesKey:
description := `
Items that are "Plugs" can be inserted into sockets.
This returns statuses about those plugs and why they can/can't be inserted.
I hear you giggling, there's nothing funny about inserting plugs. Get your head out of the gutter and pay attention!
`
return "ItemPlugStates", description, nil
case ComponentsVendorsKey:
description := `
When obtaining vendor information, this will return summary information about the Vendor or Vendors being returned.
`
return "Vendors", description, nil
case ComponentsVendorCategoriesKey:
description := `
When obtaining vendor information, this will return information about the categories of items provided by the Vendor.
`
return "VendorCategories", description, nil
case ComponentsVendorSalesKey:
description := `
When obtaining vendor information, this will return the information about items being sold by the Vendor.
`
return "VendorSales", description, nil
case ComponentsKiosksKey:
description := `
Asking for this component will return you the account's Kiosk statuses: that is, what items have been filled out/acquired.
But only if you are the currently authenticated user or the user has elected to allow anonymous viewing of its progression info.
`
return "Kiosks", description, nil
case ComponentsCurrencyLookupsKey:
description := `
A "shortcut" component that will give you all of the item hashes/quantities of items that the requested character can use to determine if an action (purchasing, socket insertion) has the required currency.
(recall that all currencies are just items, and that some vendor purchases require items that you might not traditionally consider to be a "currency", like plugs/mods!)
`
return "CurrencyLookups", description, nil
case ComponentsPresentationNodesKey:
description := `
Returns summary status information about all "Presentation Nodes".
See DestinyPresentationNodeDefinition for more details, but the gist is that these are entities used by the game UI to bucket Collectibles and Records into a hierarchy of categories.
You may ask for and use this data if you want to perform similar bucketing in your own UI: or you can skip it and roll your own.
`
return "PresentationNodes", description, nil
case ComponentsCollectiblesKey:
description := `
Returns summary status information about all "Collectibles".
These are records of what items you've discovered while playing Destiny, and some other basic information.
For detailed information, you will have to call a separate endpoint devoted to the purpose.
`
return "Collectibles", description, nil
case ComponentsRecordsKey:
description := `
Returns summary status information about all "Records" also known in the game as "Triumphs".
I know, it's confusing because there's also "Moments of Triumph" that will themselves be represented as "Triumphs."
`
return "Records", description, nil
case ComponentsTransitoryKey:
description := `
Returns information that Bungie considers to be "Transitory":
- data that may change too frequently; or
- come from a non-authoritative source
such that we don't consider the data to be fully trustworthy, but that might prove useful for some limited use cases.
We can provide no guarantee of timeliness nor consistency for this data: buyer beware with the Transitory component.
`
return "Transitory", description, nil
default:
return "", "", errors.New(fmt.Sprintf("unknown key: %d", key))
}
} | identifier_body | |
mysql.rs | mod conversion;
mod error;
use mysql_async::{self as my, prelude::Queryable as _};
use percent_encoding::percent_decode;
use std::{borrow::Cow, future::Future, path::Path, time::Duration};
use tokio::time::timeout;
use url::Url;
use crate::{
ast::{ParameterizedValue, Query},
connector::{metrics, queryable::*, ResultSet, DBIO},
error::{Error, ErrorKind},
visitor::{self, Visitor},
};
/// A connector interface for the MySQL database.
#[derive(Debug)]
pub struct Mysql {
pub(crate) pool: my::Pool,
pub(crate) url: MysqlUrl,
socket_timeout: Option<Duration>,
connect_timeout: Duration,
}
/// Wraps a connection url and exposes the parsing logic used by quaint, including default values.
#[derive(Debug, Clone)]
pub struct MysqlUrl {
url: Url,
query_params: MysqlUrlQueryParams,
}
impl MysqlUrl {
/// Parse `Url` to `MysqlUrl`. Returns error for mistyped connection
/// parameters.
pub fn new(url: Url) -> Result<Self, Error> {
let query_params = Self::parse_query_params(&url)?;
Ok(Self { url, query_params })
}
/// The bare `Url` to the database.
pub fn url(&self) -> &Url {
&self.url
}
/// The percent-decoded database username.
pub fn username(&self) -> Cow<str> {
match percent_decode(self.url.username().as_bytes()).decode_utf8() {
Ok(username) => username,
Err(_) => {
#[cfg(not(feature = "tracing-log"))]
warn!("Couldn't decode username to UTF-8, using the non-decoded version.");
#[cfg(feature = "tracing-log")]
tracing::warn!("Couldn't decode username to UTF-8, using the non-decoded version.");
self.url.username().into()
}
}
}
/// The percent-decoded database password.
pub fn password(&self) -> Option<Cow<str>> {
match self
.url
.password()
.and_then(|pw| percent_decode(pw.as_bytes()).decode_utf8().ok())
{
Some(password) => Some(password),
None => self.url.password().map(|s| s.into()),
}
}
/// Name of the database connected. Defaults to `mysql`.
pub fn dbname(&self) -> &str {
match self.url.path_segments() {
Some(mut segments) => segments.next().unwrap_or("mysql"),
None => "mysql",
}
}
/// The database host. If `socket` and `host` are not set, defaults to `localhost`.
pub fn host(&self) -> &str {
self.url.host_str().unwrap_or("localhost")
}
/// If set, connected to the database through a Unix socket.
pub fn socket(&self) -> &Option<String> {
&self.query_params.socket
}
/// The database port, defaults to `3306`.
pub fn port(&self) -> u16 {
self.url.port().unwrap_or(3306)
}
fn default_connection_limit() -> usize {
num_cpus::get_physical() * 2 + 1
}
fn parse_query_params(url: &Url) -> Result<MysqlUrlQueryParams, Error> {
let mut connection_limit = Self::default_connection_limit();
let mut ssl_opts = my::SslOpts::default();
let mut use_ssl = false;
let mut socket = None;
let mut socket_timeout = None;
let mut connect_timeout = Duration::from_secs(5);
for (k, v) in url.query_pairs() {
match k.as_ref() {
"connection_limit" => {
let as_int: usize = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
connection_limit = as_int;
}
"sslcert" => {
use_ssl = true;
ssl_opts.set_root_cert_path(Some(Path::new(&*v).to_path_buf()));
}
"sslidentity" => {
use_ssl = true;
ssl_opts.set_pkcs12_path(Some(Path::new(&*v).to_path_buf()));
}
"sslpassword" => {
use_ssl = true;
ssl_opts.set_password(Some(v.to_string()));
}
"socket" => {
socket = Some(v.replace("(", "").replace(")", ""));
}
"socket_timeout" => {
let as_int = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
socket_timeout = Some(Duration::from_secs(as_int));
}
"connect_timeout" => {
let as_int = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
connect_timeout = Duration::from_secs(as_int);
}
"sslaccept" => {
match v.as_ref() {
"strict" => {}
"accept_invalid_certs" => {
ssl_opts.set_danger_accept_invalid_certs(true);
}
_ => {
#[cfg(not(feature = "tracing-log"))]
debug!("Unsupported SSL accept mode {}, defaulting to `strict`", v);
#[cfg(feature = "tracing-log")]
tracing::debug!(
message = "Unsupported SSL accept mode, defaulting to `strict`",
mode = &*v
);
}
};
}
_ => {
#[cfg(not(feature = "tracing-log"))]
trace!("Discarding connection string param: {}", k);
#[cfg(feature = "tracing-log")]
tracing::trace!(message = "Discarding connection string param", param = &*k);
}
};
}
Ok(MysqlUrlQueryParams {
ssl_opts,
connection_limit,
use_ssl,
socket,
connect_timeout,
socket_timeout,
})
}
#[cfg(feature = "pooled")]
pub(crate) fn connection_limit(&self) -> usize {
self.query_params.connection_limit
}
pub(crate) fn to_opts_builder(&self) -> my::OptsBuilder {
let mut config = my::OptsBuilder::new();
config.user(Some(self.username()));
config.pass(self.password());
config.db_name(Some(self.dbname()));
match self.socket() {
Some(ref socket) => {
config.socket(Some(socket));
}
None => {
config.ip_or_hostname(self.host());
config.tcp_port(self.port());
}
}
config.stmt_cache_size(Some(1000));
config.conn_ttl(Some(Duration::from_secs(5)));
if self.query_params.use_ssl {
config.ssl_opts(Some(self.query_params.ssl_opts.clone()));
}
config
}
}
#[derive(Debug, Clone)]
pub(crate) struct MysqlUrlQueryParams {
ssl_opts: my::SslOpts,
connection_limit: usize,
use_ssl: bool,
socket: Option<String>,
socket_timeout: Option<Duration>,
connect_timeout: Duration,
}
impl Mysql {
/// Create a new MySQL connection using `OptsBuilder` from the `mysql` crate.
pub fn new(url: MysqlUrl) -> crate::Result<Self> {
let mut opts = url.to_opts_builder();
let pool_opts = my::PoolOptions::with_constraints(my::PoolConstraints::new(1, 1).unwrap());
opts.pool_options(pool_opts);
Ok(Self {
socket_timeout: url.query_params.socket_timeout,
connect_timeout: url.query_params.connect_timeout,
pool: my::Pool::new(opts),
url,
})
}
async fn timeout<T, F, E>(&self, f: F) -> crate::Result<T>
where
F: Future<Output = std::result::Result<T, E>>,
E: Into<Error>,
|
}
impl TransactionCapable for Mysql {}
impl Queryable for Mysql {
fn query<'a>(&'a self, q: Query<'a>) -> DBIO<'a, ResultSet> {
let (sql, params) = visitor::Mysql::build(q);
DBIO::new(async move { self.query_raw(&sql, ¶ms).await })
}
fn execute<'a>(&'a self, q: Query<'a>) -> DBIO<'a, u64> {
let (sql, params) = visitor::Mysql::build(q);
DBIO::new(async move { self.execute_raw(&sql, ¶ms).await })
}
fn query_raw<'a>(&'a self, sql: &'a str, params: &'a [ParameterizedValue]) -> DBIO<'a, ResultSet> {
metrics::query("mysql.query_raw", sql, params, move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
let results = self
.timeout(conn.prep_exec(sql, conversion::conv_params(params)))
.await?;
let columns = results
.columns_ref()
.iter()
.map(|s| s.name_str().into_owned())
.collect();
let last_id = results.last_insert_id();
let mut result_set = ResultSet::new(columns, Vec::new());
let (_, rows) = self
.timeout(results.map_and_drop(|mut row| row.take_result_row()))
.await?;
for row in rows.into_iter() {
result_set.rows.push(row?);
}
if let Some(id) = last_id {
result_set.set_last_insert_id(id);
};
Ok(result_set)
})
}
fn execute_raw<'a>(&'a self, sql: &'a str, params: &'a [ParameterizedValue<'a>]) -> DBIO<'a, u64> {
metrics::query("mysql.execute_raw", sql, params, move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
let results = self
.timeout(conn.prep_exec(sql, conversion::conv_params(params)))
.await?;
Ok(results.affected_rows())
})
}
fn raw_cmd<'a>(&'a self, cmd: &'a str) -> DBIO<'a, ()> {
metrics::query("mysql.raw_cmd", cmd, &[], move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
self.timeout(conn.query(cmd)).await?;
Ok(())
})
}
}
#[cfg(test)]
mod tests {
use super::MysqlUrl;
use crate::{
ast::{Insert, ParameterizedValue, Select},
connector::Queryable,
error::*,
single::Quaint,
};
use once_cell::sync::Lazy;
use std::env;
use url::Url;
static CONN_STR: Lazy<String> = Lazy::new(|| env::var("TEST_MYSQL").expect("TEST_MYSQL env var"));
#[test]
fn should_parse_socket_url() {
let url = MysqlUrl::new(Url::parse("mysql://root@localhost/dbname?socket=(/tmp/mysql.sock)").unwrap()).unwrap();
assert_eq!("dbname", url.dbname());
assert_eq!(&Some(String::from("/tmp/mysql.sock")), url.socket());
}
#[tokio::test]
async fn should_provide_a_database_connection() {
let connection = Quaint::new(&CONN_STR).await.unwrap();
let res = connection
.query_raw(
"select * from information_schema.`COLUMNS` where COLUMN_NAME = 'unknown_123'",
&[],
)
.await
.unwrap();
assert!(res.is_empty());
}
const TABLE_DEF: &str = r#"
CREATE TABLE `user`(
id int4 PRIMARY KEY NOT NULL,
name text NOT NULL,
age int4 NOT NULL,
salary float4
);
"#;
const CREATE_USER: &str = r#"
INSERT INTO `user` (id, name, age, salary)
VALUES (1, 'Joe', 27, 20000.00 );
"#;
const DROP_TABLE: &str = "DROP TABLE IF EXISTS `user`;";
#[tokio::test]
async fn should_map_columns_correctly() {
let connection = Quaint::new(&CONN_STR).await.unwrap();
connection.query_raw(DROP_TABLE, &[]).await.unwrap();
connection.query_raw(TABLE_DEF, &[]).await.unwrap();
let ch_ch_ch_ch_changees = connection.execute_raw(CREATE_USER, &[]).await.unwrap();
assert_eq!(1, ch_ch_ch_ch_changees);
let rows = connection.query_raw("SELECT * FROM `user`", &[]).await.unwrap();
assert_eq!(rows.len(), 1);
let row = rows.get(0).unwrap();
assert_eq!(row["id"].as_i64(), Some(1));
assert_eq!(row["name"].as_str(), Some("Joe"));
assert!(row["name"].is_text());
assert_eq!(row["age"].as_i64(), Some(27));
assert_eq!(row["salary"].as_f64(), Some(20000.0));
}
#[tokio::test]
async fn blobs_roundtrip() {
let connection = Quaint::new(&CONN_STR).await.unwrap();
let blob: Vec<u8> = vec![4, 2, 0];
connection
.query_raw("DROP TABLE IF EXISTS mysql_blobs_roundtrip_test", &[])
.await
.unwrap();
connection
.query_raw(
"CREATE TABLE mysql_blobs_roundtrip_test (id int AUTO_INCREMENT PRIMARY KEY, bytes MEDIUMBLOB)",
&[],
)
.await
.unwrap();
let insert = Insert::single_into("mysql_blobs_roundtrip_test").value("bytes", blob.as_slice());
connection.query(insert.into()).await.unwrap();
let roundtripped = Select::from_table("mysql_blobs_roundtrip_test").column("bytes");
let roundtripped = connection.query(roundtripped.into()).await.unwrap();
assert_eq!(
roundtripped.into_single().unwrap().at(0).unwrap(),
&ParameterizedValue::Bytes(blob.as_slice().into())
);
}
#[tokio::test]
async fn should_map_nonexisting_database_error() {
let mut url = Url::parse(&CONN_STR).unwrap();
url.set_username("root").unwrap();
url.set_path("/this_does_not_exist");
let url = url.as_str().to_string();
let conn = Quaint::new(&url).await.unwrap();
let res = conn.query_raw("SELECT 1 + 1", &[]).await;
assert!(&res.is_err());
let err = res.unwrap_err();
match err.kind() {
ErrorKind::DatabaseDoesNotExist { db_name } => {
assert_eq!(Some("1049"), err.original_code());
assert_eq!(Some("Unknown database \'this_does_not_exist\'"), err.original_message());
assert_eq!("this_does_not_exist", db_name.as_str())
}
e => panic!("Expected `DatabaseDoesNotExist`, got {:?}", e),
}
}
#[tokio::test]
async fn test_uniq_constraint_violation() {
let conn = Quaint::new(&CONN_STR).await.unwrap();
let _ = conn.raw_cmd("DROP TABLE test_uniq_constraint_violation").await;
let _ = conn.raw_cmd("DROP INDEX idx_uniq_constraint_violation").await;
conn.raw_cmd("CREATE TABLE test_uniq_constraint_violation (id1 int, id2 int)")
.await
.unwrap();
conn.raw_cmd("CREATE UNIQUE INDEX idx_uniq_constraint_violation ON test_uniq_constraint_violation (id1, id2) USING btree").await.unwrap();
conn.query_raw(
"INSERT INTO test_uniq_constraint_violation (id1, id2) VALUES (1, 2)",
&[],
)
.await
.unwrap();
let res = conn
.query_raw(
"INSERT INTO test_uniq_constraint_violation (id1, id2) VALUES (1, 2)",
&[],
)
.await;
let err = res.unwrap_err();
match err.kind() {
ErrorKind::UniqueConstraintViolation { constraint } => {
assert_eq!(Some("1062"), err.original_code());
assert_eq!(
&DatabaseConstraint::Index(String::from("idx_uniq_constraint_violation")),
constraint,
)
}
_ => panic!(err),
}
}
#[tokio::test]
async fn test_null_constraint_violation() {
let conn = Quaint::new(&CONN_STR).await.unwrap();
let _ = conn.raw_cmd("DROP TABLE test_null_constraint_violation").await;
conn.raw_cmd("CREATE TABLE test_null_constraint_violation (id1 int not null, id2 int not null)")
.await
.unwrap();
// Error code 1364
{
let res = conn
.query_raw("INSERT INTO test_null_constraint_violation () VALUES ()", &[])
.await;
let err = res.unwrap_err();
match err.kind() {
ErrorKind::NullConstraintViolation { constraint } => {
assert_eq!(Some("1364"), err.original_code());
assert_eq!(
Some("Field \'id1\' doesn\'t have a default value"),
err.original_message()
);
assert_eq!(&DatabaseConstraint::Fields(vec![String::from("id1")]), constraint)
}
_ => panic!(err),
}
}
// Error code 1048
{
conn.query_raw(
"INSERT INTO test_null_constraint_violation (id1, id2) VALUES (50, 55)",
&[],
)
.await
.unwrap();
let err = conn
.query_raw("UPDATE test_null_constraint_violation SET id2 = NULL", &[])
.await
.unwrap_err();
match err.kind() {
ErrorKind::NullConstraintViolation { constraint } => {
assert_eq!(Some("1048"), err.original_code());
assert_eq!(&DatabaseConstraint::Fields(vec![String::from("id2")]), constraint);
}
_ => panic!("{:?}", err),
}
}
}
}
| {
match self.socket_timeout {
Some(duration) => match timeout(duration, f).await {
Ok(Ok(result)) => Ok(result),
Ok(Err(err)) => Err(err.into()),
Err(to) => Err(to.into()),
},
None => match f.await {
Ok(result) => Ok(result),
Err(err) => Err(err.into()),
},
}
} | identifier_body |
mysql.rs | mod conversion;
mod error;
use mysql_async::{self as my, prelude::Queryable as _};
use percent_encoding::percent_decode;
use std::{borrow::Cow, future::Future, path::Path, time::Duration};
use tokio::time::timeout;
use url::Url;
use crate::{
ast::{ParameterizedValue, Query},
connector::{metrics, queryable::*, ResultSet, DBIO},
error::{Error, ErrorKind},
visitor::{self, Visitor},
};
/// A connector interface for the MySQL database.
#[derive(Debug)]
pub struct Mysql {
pub(crate) pool: my::Pool,
pub(crate) url: MysqlUrl,
socket_timeout: Option<Duration>,
connect_timeout: Duration,
}
/// Wraps a connection url and exposes the parsing logic used by quaint, including default values.
#[derive(Debug, Clone)]
pub struct MysqlUrl {
url: Url,
query_params: MysqlUrlQueryParams,
}
impl MysqlUrl {
/// Parse `Url` to `MysqlUrl`. Returns error for mistyped connection
/// parameters.
pub fn new(url: Url) -> Result<Self, Error> {
let query_params = Self::parse_query_params(&url)?;
Ok(Self { url, query_params })
}
/// The bare `Url` to the database.
pub fn url(&self) -> &Url {
&self.url
}
/// The percent-decoded database username.
pub fn username(&self) -> Cow<str> {
match percent_decode(self.url.username().as_bytes()).decode_utf8() {
Ok(username) => username,
Err(_) => {
#[cfg(not(feature = "tracing-log"))]
warn!("Couldn't decode username to UTF-8, using the non-decoded version.");
#[cfg(feature = "tracing-log")]
tracing::warn!("Couldn't decode username to UTF-8, using the non-decoded version.");
self.url.username().into()
}
}
}
/// The percent-decoded database password.
pub fn password(&self) -> Option<Cow<str>> {
match self
.url
.password()
.and_then(|pw| percent_decode(pw.as_bytes()).decode_utf8().ok())
{
Some(password) => Some(password),
None => self.url.password().map(|s| s.into()),
}
}
/// Name of the database connected. Defaults to `mysql`.
pub fn dbname(&self) -> &str {
match self.url.path_segments() {
Some(mut segments) => segments.next().unwrap_or("mysql"),
None => "mysql",
}
}
/// The database host. If `socket` and `host` are not set, defaults to `localhost`.
pub fn host(&self) -> &str {
self.url.host_str().unwrap_or("localhost")
}
/// If set, connected to the database through a Unix socket.
pub fn socket(&self) -> &Option<String> {
&self.query_params.socket
}
/// The database port, defaults to `3306`.
pub fn port(&self) -> u16 {
self.url.port().unwrap_or(3306)
}
fn default_connection_limit() -> usize {
num_cpus::get_physical() * 2 + 1
}
fn parse_query_params(url: &Url) -> Result<MysqlUrlQueryParams, Error> {
let mut connection_limit = Self::default_connection_limit();
let mut ssl_opts = my::SslOpts::default();
let mut use_ssl = false;
let mut socket = None;
let mut socket_timeout = None;
let mut connect_timeout = Duration::from_secs(5);
for (k, v) in url.query_pairs() {
match k.as_ref() {
"connection_limit" => {
let as_int: usize = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
connection_limit = as_int;
}
"sslcert" => {
use_ssl = true;
ssl_opts.set_root_cert_path(Some(Path::new(&*v).to_path_buf()));
}
"sslidentity" => {
use_ssl = true;
ssl_opts.set_pkcs12_path(Some(Path::new(&*v).to_path_buf()));
}
"sslpassword" => {
use_ssl = true;
ssl_opts.set_password(Some(v.to_string()));
}
"socket" => {
socket = Some(v.replace("(", "").replace(")", ""));
}
"socket_timeout" => {
let as_int = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
socket_timeout = Some(Duration::from_secs(as_int));
}
"connect_timeout" => {
let as_int = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
connect_timeout = Duration::from_secs(as_int);
}
"sslaccept" => {
match v.as_ref() {
"strict" => {}
"accept_invalid_certs" => {
ssl_opts.set_danger_accept_invalid_certs(true);
}
_ => {
#[cfg(not(feature = "tracing-log"))]
debug!("Unsupported SSL accept mode {}, defaulting to `strict`", v);
#[cfg(feature = "tracing-log")]
tracing::debug!(
message = "Unsupported SSL accept mode, defaulting to `strict`",
mode = &*v
);
}
};
}
_ => {
#[cfg(not(feature = "tracing-log"))]
trace!("Discarding connection string param: {}", k);
#[cfg(feature = "tracing-log")]
tracing::trace!(message = "Discarding connection string param", param = &*k);
}
};
}
Ok(MysqlUrlQueryParams {
ssl_opts,
connection_limit,
use_ssl,
socket,
connect_timeout,
socket_timeout,
})
}
#[cfg(feature = "pooled")]
pub(crate) fn connection_limit(&self) -> usize {
self.query_params.connection_limit
}
pub(crate) fn to_opts_builder(&self) -> my::OptsBuilder {
let mut config = my::OptsBuilder::new();
config.user(Some(self.username()));
config.pass(self.password());
config.db_name(Some(self.dbname()));
match self.socket() {
Some(ref socket) => {
config.socket(Some(socket));
}
None => {
config.ip_or_hostname(self.host());
config.tcp_port(self.port());
}
}
config.stmt_cache_size(Some(1000));
config.conn_ttl(Some(Duration::from_secs(5)));
if self.query_params.use_ssl {
config.ssl_opts(Some(self.query_params.ssl_opts.clone()));
}
config
}
}
#[derive(Debug, Clone)]
pub(crate) struct MysqlUrlQueryParams {
ssl_opts: my::SslOpts,
connection_limit: usize,
use_ssl: bool,
socket: Option<String>,
socket_timeout: Option<Duration>,
connect_timeout: Duration,
}
impl Mysql {
/// Create a new MySQL connection using `OptsBuilder` from the `mysql` crate.
pub fn new(url: MysqlUrl) -> crate::Result<Self> {
let mut opts = url.to_opts_builder();
let pool_opts = my::PoolOptions::with_constraints(my::PoolConstraints::new(1, 1).unwrap());
opts.pool_options(pool_opts);
Ok(Self {
socket_timeout: url.query_params.socket_timeout,
connect_timeout: url.query_params.connect_timeout,
pool: my::Pool::new(opts),
url,
})
}
async fn timeout<T, F, E>(&self, f: F) -> crate::Result<T>
where
F: Future<Output = std::result::Result<T, E>>,
E: Into<Error>,
{
match self.socket_timeout {
Some(duration) => match timeout(duration, f).await {
Ok(Ok(result)) => Ok(result),
Ok(Err(err)) => Err(err.into()),
Err(to) => Err(to.into()),
},
None => match f.await {
Ok(result) => Ok(result),
Err(err) => Err(err.into()),
},
}
}
}
impl TransactionCapable for Mysql {}
impl Queryable for Mysql {
fn query<'a>(&'a self, q: Query<'a>) -> DBIO<'a, ResultSet> {
let (sql, params) = visitor::Mysql::build(q);
DBIO::new(async move { self.query_raw(&sql, ¶ms).await })
}
fn execute<'a>(&'a self, q: Query<'a>) -> DBIO<'a, u64> {
let (sql, params) = visitor::Mysql::build(q);
DBIO::new(async move { self.execute_raw(&sql, ¶ms).await })
}
fn query_raw<'a>(&'a self, sql: &'a str, params: &'a [ParameterizedValue]) -> DBIO<'a, ResultSet> {
metrics::query("mysql.query_raw", sql, params, move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
let results = self
.timeout(conn.prep_exec(sql, conversion::conv_params(params)))
.await?;
let columns = results
.columns_ref()
.iter()
.map(|s| s.name_str().into_owned())
.collect();
let last_id = results.last_insert_id();
let mut result_set = ResultSet::new(columns, Vec::new());
let (_, rows) = self
.timeout(results.map_and_drop(|mut row| row.take_result_row()))
.await?;
for row in rows.into_iter() {
result_set.rows.push(row?);
}
if let Some(id) = last_id {
result_set.set_last_insert_id(id);
};
Ok(result_set)
})
}
fn execute_raw<'a>(&'a self, sql: &'a str, params: &'a [ParameterizedValue<'a>]) -> DBIO<'a, u64> {
metrics::query("mysql.execute_raw", sql, params, move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
let results = self
.timeout(conn.prep_exec(sql, conversion::conv_params(params)))
.await?;
Ok(results.affected_rows())
})
}
fn raw_cmd<'a>(&'a self, cmd: &'a str) -> DBIO<'a, ()> {
metrics::query("mysql.raw_cmd", cmd, &[], move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
self.timeout(conn.query(cmd)).await?;
Ok(())
})
}
}
#[cfg(test)]
mod tests {
use super::MysqlUrl;
use crate::{
ast::{Insert, ParameterizedValue, Select},
connector::Queryable,
error::*,
single::Quaint,
};
use once_cell::sync::Lazy;
use std::env;
use url::Url;
static CONN_STR: Lazy<String> = Lazy::new(|| env::var("TEST_MYSQL").expect("TEST_MYSQL env var"));
#[test]
fn should_parse_socket_url() {
let url = MysqlUrl::new(Url::parse("mysql://root@localhost/dbname?socket=(/tmp/mysql.sock)").unwrap()).unwrap();
assert_eq!("dbname", url.dbname());
assert_eq!(&Some(String::from("/tmp/mysql.sock")), url.socket());
}
#[tokio::test]
async fn should_provide_a_database_connection() {
let connection = Quaint::new(&CONN_STR).await.unwrap();
let res = connection
.query_raw(
"select * from information_schema.`COLUMNS` where COLUMN_NAME = 'unknown_123'",
&[],
)
.await
.unwrap();
assert!(res.is_empty());
}
const TABLE_DEF: &str = r#"
CREATE TABLE `user`(
id int4 PRIMARY KEY NOT NULL,
name text NOT NULL,
age int4 NOT NULL,
salary float4
);
"#;
const CREATE_USER: &str = r#"
INSERT INTO `user` (id, name, age, salary)
VALUES (1, 'Joe', 27, 20000.00 );
"#;
const DROP_TABLE: &str = "DROP TABLE IF EXISTS `user`;";
#[tokio::test]
async fn should_map_columns_correctly() {
let connection = Quaint::new(&CONN_STR).await.unwrap();
connection.query_raw(DROP_TABLE, &[]).await.unwrap();
connection.query_raw(TABLE_DEF, &[]).await.unwrap();
let ch_ch_ch_ch_changees = connection.execute_raw(CREATE_USER, &[]).await.unwrap();
assert_eq!(1, ch_ch_ch_ch_changees);
let rows = connection.query_raw("SELECT * FROM `user`", &[]).await.unwrap();
assert_eq!(rows.len(), 1);
let row = rows.get(0).unwrap();
assert_eq!(row["id"].as_i64(), Some(1));
assert_eq!(row["name"].as_str(), Some("Joe"));
assert!(row["name"].is_text());
assert_eq!(row["age"].as_i64(), Some(27));
assert_eq!(row["salary"].as_f64(), Some(20000.0));
}
#[tokio::test]
async fn blobs_roundtrip() {
let connection = Quaint::new(&CONN_STR).await.unwrap();
let blob: Vec<u8> = vec![4, 2, 0];
connection
.query_raw("DROP TABLE IF EXISTS mysql_blobs_roundtrip_test", &[])
.await
.unwrap();
connection
.query_raw(
"CREATE TABLE mysql_blobs_roundtrip_test (id int AUTO_INCREMENT PRIMARY KEY, bytes MEDIUMBLOB)",
&[],
)
.await
.unwrap();
let insert = Insert::single_into("mysql_blobs_roundtrip_test").value("bytes", blob.as_slice());
connection.query(insert.into()).await.unwrap();
let roundtripped = Select::from_table("mysql_blobs_roundtrip_test").column("bytes");
let roundtripped = connection.query(roundtripped.into()).await.unwrap();
assert_eq!(
roundtripped.into_single().unwrap().at(0).unwrap(),
&ParameterizedValue::Bytes(blob.as_slice().into())
);
}
#[tokio::test]
async fn should_map_nonexisting_database_error() {
let mut url = Url::parse(&CONN_STR).unwrap();
url.set_username("root").unwrap();
url.set_path("/this_does_not_exist");
let url = url.as_str().to_string();
let conn = Quaint::new(&url).await.unwrap();
let res = conn.query_raw("SELECT 1 + 1", &[]).await;
assert!(&res.is_err());
let err = res.unwrap_err();
match err.kind() {
ErrorKind::DatabaseDoesNotExist { db_name } => {
assert_eq!(Some("1049"), err.original_code());
assert_eq!(Some("Unknown database \'this_does_not_exist\'"), err.original_message());
assert_eq!("this_does_not_exist", db_name.as_str())
}
e => panic!("Expected `DatabaseDoesNotExist`, got {:?}", e),
}
}
#[tokio::test]
async fn test_uniq_constraint_violation() {
let conn = Quaint::new(&CONN_STR).await.unwrap();
let _ = conn.raw_cmd("DROP TABLE test_uniq_constraint_violation").await;
let _ = conn.raw_cmd("DROP INDEX idx_uniq_constraint_violation").await;
conn.raw_cmd("CREATE TABLE test_uniq_constraint_violation (id1 int, id2 int)")
.await
.unwrap();
conn.raw_cmd("CREATE UNIQUE INDEX idx_uniq_constraint_violation ON test_uniq_constraint_violation (id1, id2) USING btree").await.unwrap();
conn.query_raw(
"INSERT INTO test_uniq_constraint_violation (id1, id2) VALUES (1, 2)",
&[],
)
.await
.unwrap();
let res = conn
.query_raw(
"INSERT INTO test_uniq_constraint_violation (id1, id2) VALUES (1, 2)",
&[],
)
.await;
let err = res.unwrap_err();
match err.kind() {
ErrorKind::UniqueConstraintViolation { constraint } => {
assert_eq!(Some("1062"), err.original_code());
assert_eq!(
&DatabaseConstraint::Index(String::from("idx_uniq_constraint_violation")),
constraint,
)
}
_ => panic!(err),
}
}
#[tokio::test]
async fn | () {
let conn = Quaint::new(&CONN_STR).await.unwrap();
let _ = conn.raw_cmd("DROP TABLE test_null_constraint_violation").await;
conn.raw_cmd("CREATE TABLE test_null_constraint_violation (id1 int not null, id2 int not null)")
.await
.unwrap();
// Error code 1364
{
let res = conn
.query_raw("INSERT INTO test_null_constraint_violation () VALUES ()", &[])
.await;
let err = res.unwrap_err();
match err.kind() {
ErrorKind::NullConstraintViolation { constraint } => {
assert_eq!(Some("1364"), err.original_code());
assert_eq!(
Some("Field \'id1\' doesn\'t have a default value"),
err.original_message()
);
assert_eq!(&DatabaseConstraint::Fields(vec![String::from("id1")]), constraint)
}
_ => panic!(err),
}
}
// Error code 1048
{
conn.query_raw(
"INSERT INTO test_null_constraint_violation (id1, id2) VALUES (50, 55)",
&[],
)
.await
.unwrap();
let err = conn
.query_raw("UPDATE test_null_constraint_violation SET id2 = NULL", &[])
.await
.unwrap_err();
match err.kind() {
ErrorKind::NullConstraintViolation { constraint } => {
assert_eq!(Some("1048"), err.original_code());
assert_eq!(&DatabaseConstraint::Fields(vec![String::from("id2")]), constraint);
}
_ => panic!("{:?}", err),
}
}
}
}
| test_null_constraint_violation | identifier_name |
mysql.rs | mod conversion;
mod error;
use mysql_async::{self as my, prelude::Queryable as _}; | use url::Url;
use crate::{
ast::{ParameterizedValue, Query},
connector::{metrics, queryable::*, ResultSet, DBIO},
error::{Error, ErrorKind},
visitor::{self, Visitor},
};
/// A connector interface for the MySQL database.
#[derive(Debug)]
pub struct Mysql {
pub(crate) pool: my::Pool,
pub(crate) url: MysqlUrl,
socket_timeout: Option<Duration>,
connect_timeout: Duration,
}
/// Wraps a connection url and exposes the parsing logic used by quaint, including default values.
#[derive(Debug, Clone)]
pub struct MysqlUrl {
url: Url,
query_params: MysqlUrlQueryParams,
}
impl MysqlUrl {
/// Parse `Url` to `MysqlUrl`. Returns error for mistyped connection
/// parameters.
pub fn new(url: Url) -> Result<Self, Error> {
let query_params = Self::parse_query_params(&url)?;
Ok(Self { url, query_params })
}
/// The bare `Url` to the database.
pub fn url(&self) -> &Url {
&self.url
}
/// The percent-decoded database username.
pub fn username(&self) -> Cow<str> {
match percent_decode(self.url.username().as_bytes()).decode_utf8() {
Ok(username) => username,
Err(_) => {
#[cfg(not(feature = "tracing-log"))]
warn!("Couldn't decode username to UTF-8, using the non-decoded version.");
#[cfg(feature = "tracing-log")]
tracing::warn!("Couldn't decode username to UTF-8, using the non-decoded version.");
self.url.username().into()
}
}
}
/// The percent-decoded database password.
pub fn password(&self) -> Option<Cow<str>> {
match self
.url
.password()
.and_then(|pw| percent_decode(pw.as_bytes()).decode_utf8().ok())
{
Some(password) => Some(password),
None => self.url.password().map(|s| s.into()),
}
}
/// Name of the database connected. Defaults to `mysql`.
pub fn dbname(&self) -> &str {
match self.url.path_segments() {
Some(mut segments) => segments.next().unwrap_or("mysql"),
None => "mysql",
}
}
/// The database host. If `socket` and `host` are not set, defaults to `localhost`.
pub fn host(&self) -> &str {
self.url.host_str().unwrap_or("localhost")
}
/// If set, connected to the database through a Unix socket.
pub fn socket(&self) -> &Option<String> {
&self.query_params.socket
}
/// The database port, defaults to `3306`.
pub fn port(&self) -> u16 {
self.url.port().unwrap_or(3306)
}
fn default_connection_limit() -> usize {
num_cpus::get_physical() * 2 + 1
}
fn parse_query_params(url: &Url) -> Result<MysqlUrlQueryParams, Error> {
let mut connection_limit = Self::default_connection_limit();
let mut ssl_opts = my::SslOpts::default();
let mut use_ssl = false;
let mut socket = None;
let mut socket_timeout = None;
let mut connect_timeout = Duration::from_secs(5);
for (k, v) in url.query_pairs() {
match k.as_ref() {
"connection_limit" => {
let as_int: usize = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
connection_limit = as_int;
}
"sslcert" => {
use_ssl = true;
ssl_opts.set_root_cert_path(Some(Path::new(&*v).to_path_buf()));
}
"sslidentity" => {
use_ssl = true;
ssl_opts.set_pkcs12_path(Some(Path::new(&*v).to_path_buf()));
}
"sslpassword" => {
use_ssl = true;
ssl_opts.set_password(Some(v.to_string()));
}
"socket" => {
socket = Some(v.replace("(", "").replace(")", ""));
}
"socket_timeout" => {
let as_int = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
socket_timeout = Some(Duration::from_secs(as_int));
}
"connect_timeout" => {
let as_int = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
connect_timeout = Duration::from_secs(as_int);
}
"sslaccept" => {
match v.as_ref() {
"strict" => {}
"accept_invalid_certs" => {
ssl_opts.set_danger_accept_invalid_certs(true);
}
_ => {
#[cfg(not(feature = "tracing-log"))]
debug!("Unsupported SSL accept mode {}, defaulting to `strict`", v);
#[cfg(feature = "tracing-log")]
tracing::debug!(
message = "Unsupported SSL accept mode, defaulting to `strict`",
mode = &*v
);
}
};
}
_ => {
#[cfg(not(feature = "tracing-log"))]
trace!("Discarding connection string param: {}", k);
#[cfg(feature = "tracing-log")]
tracing::trace!(message = "Discarding connection string param", param = &*k);
}
};
}
Ok(MysqlUrlQueryParams {
ssl_opts,
connection_limit,
use_ssl,
socket,
connect_timeout,
socket_timeout,
})
}
#[cfg(feature = "pooled")]
pub(crate) fn connection_limit(&self) -> usize {
self.query_params.connection_limit
}
pub(crate) fn to_opts_builder(&self) -> my::OptsBuilder {
let mut config = my::OptsBuilder::new();
config.user(Some(self.username()));
config.pass(self.password());
config.db_name(Some(self.dbname()));
match self.socket() {
Some(ref socket) => {
config.socket(Some(socket));
}
None => {
config.ip_or_hostname(self.host());
config.tcp_port(self.port());
}
}
config.stmt_cache_size(Some(1000));
config.conn_ttl(Some(Duration::from_secs(5)));
if self.query_params.use_ssl {
config.ssl_opts(Some(self.query_params.ssl_opts.clone()));
}
config
}
}
#[derive(Debug, Clone)]
pub(crate) struct MysqlUrlQueryParams {
ssl_opts: my::SslOpts,
connection_limit: usize,
use_ssl: bool,
socket: Option<String>,
socket_timeout: Option<Duration>,
connect_timeout: Duration,
}
impl Mysql {
/// Create a new MySQL connection using `OptsBuilder` from the `mysql` crate.
pub fn new(url: MysqlUrl) -> crate::Result<Self> {
let mut opts = url.to_opts_builder();
let pool_opts = my::PoolOptions::with_constraints(my::PoolConstraints::new(1, 1).unwrap());
opts.pool_options(pool_opts);
Ok(Self {
socket_timeout: url.query_params.socket_timeout,
connect_timeout: url.query_params.connect_timeout,
pool: my::Pool::new(opts),
url,
})
}
async fn timeout<T, F, E>(&self, f: F) -> crate::Result<T>
where
F: Future<Output = std::result::Result<T, E>>,
E: Into<Error>,
{
match self.socket_timeout {
Some(duration) => match timeout(duration, f).await {
Ok(Ok(result)) => Ok(result),
Ok(Err(err)) => Err(err.into()),
Err(to) => Err(to.into()),
},
None => match f.await {
Ok(result) => Ok(result),
Err(err) => Err(err.into()),
},
}
}
}
impl TransactionCapable for Mysql {}
impl Queryable for Mysql {
fn query<'a>(&'a self, q: Query<'a>) -> DBIO<'a, ResultSet> {
let (sql, params) = visitor::Mysql::build(q);
DBIO::new(async move { self.query_raw(&sql, ¶ms).await })
}
fn execute<'a>(&'a self, q: Query<'a>) -> DBIO<'a, u64> {
let (sql, params) = visitor::Mysql::build(q);
DBIO::new(async move { self.execute_raw(&sql, ¶ms).await })
}
fn query_raw<'a>(&'a self, sql: &'a str, params: &'a [ParameterizedValue]) -> DBIO<'a, ResultSet> {
metrics::query("mysql.query_raw", sql, params, move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
let results = self
.timeout(conn.prep_exec(sql, conversion::conv_params(params)))
.await?;
let columns = results
.columns_ref()
.iter()
.map(|s| s.name_str().into_owned())
.collect();
let last_id = results.last_insert_id();
let mut result_set = ResultSet::new(columns, Vec::new());
let (_, rows) = self
.timeout(results.map_and_drop(|mut row| row.take_result_row()))
.await?;
for row in rows.into_iter() {
result_set.rows.push(row?);
}
if let Some(id) = last_id {
result_set.set_last_insert_id(id);
};
Ok(result_set)
})
}
fn execute_raw<'a>(&'a self, sql: &'a str, params: &'a [ParameterizedValue<'a>]) -> DBIO<'a, u64> {
metrics::query("mysql.execute_raw", sql, params, move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
let results = self
.timeout(conn.prep_exec(sql, conversion::conv_params(params)))
.await?;
Ok(results.affected_rows())
})
}
fn raw_cmd<'a>(&'a self, cmd: &'a str) -> DBIO<'a, ()> {
metrics::query("mysql.raw_cmd", cmd, &[], move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
self.timeout(conn.query(cmd)).await?;
Ok(())
})
}
}
#[cfg(test)]
mod tests {
use super::MysqlUrl;
use crate::{
ast::{Insert, ParameterizedValue, Select},
connector::Queryable,
error::*,
single::Quaint,
};
use once_cell::sync::Lazy;
use std::env;
use url::Url;
static CONN_STR: Lazy<String> = Lazy::new(|| env::var("TEST_MYSQL").expect("TEST_MYSQL env var"));
#[test]
fn should_parse_socket_url() {
let url = MysqlUrl::new(Url::parse("mysql://root@localhost/dbname?socket=(/tmp/mysql.sock)").unwrap()).unwrap();
assert_eq!("dbname", url.dbname());
assert_eq!(&Some(String::from("/tmp/mysql.sock")), url.socket());
}
#[tokio::test]
async fn should_provide_a_database_connection() {
let connection = Quaint::new(&CONN_STR).await.unwrap();
let res = connection
.query_raw(
"select * from information_schema.`COLUMNS` where COLUMN_NAME = 'unknown_123'",
&[],
)
.await
.unwrap();
assert!(res.is_empty());
}
const TABLE_DEF: &str = r#"
CREATE TABLE `user`(
id int4 PRIMARY KEY NOT NULL,
name text NOT NULL,
age int4 NOT NULL,
salary float4
);
"#;
const CREATE_USER: &str = r#"
INSERT INTO `user` (id, name, age, salary)
VALUES (1, 'Joe', 27, 20000.00 );
"#;
const DROP_TABLE: &str = "DROP TABLE IF EXISTS `user`;";
#[tokio::test]
async fn should_map_columns_correctly() {
let connection = Quaint::new(&CONN_STR).await.unwrap();
connection.query_raw(DROP_TABLE, &[]).await.unwrap();
connection.query_raw(TABLE_DEF, &[]).await.unwrap();
let ch_ch_ch_ch_changees = connection.execute_raw(CREATE_USER, &[]).await.unwrap();
assert_eq!(1, ch_ch_ch_ch_changees);
let rows = connection.query_raw("SELECT * FROM `user`", &[]).await.unwrap();
assert_eq!(rows.len(), 1);
let row = rows.get(0).unwrap();
assert_eq!(row["id"].as_i64(), Some(1));
assert_eq!(row["name"].as_str(), Some("Joe"));
assert!(row["name"].is_text());
assert_eq!(row["age"].as_i64(), Some(27));
assert_eq!(row["salary"].as_f64(), Some(20000.0));
}
#[tokio::test]
async fn blobs_roundtrip() {
let connection = Quaint::new(&CONN_STR).await.unwrap();
let blob: Vec<u8> = vec![4, 2, 0];
connection
.query_raw("DROP TABLE IF EXISTS mysql_blobs_roundtrip_test", &[])
.await
.unwrap();
connection
.query_raw(
"CREATE TABLE mysql_blobs_roundtrip_test (id int AUTO_INCREMENT PRIMARY KEY, bytes MEDIUMBLOB)",
&[],
)
.await
.unwrap();
let insert = Insert::single_into("mysql_blobs_roundtrip_test").value("bytes", blob.as_slice());
connection.query(insert.into()).await.unwrap();
let roundtripped = Select::from_table("mysql_blobs_roundtrip_test").column("bytes");
let roundtripped = connection.query(roundtripped.into()).await.unwrap();
assert_eq!(
roundtripped.into_single().unwrap().at(0).unwrap(),
&ParameterizedValue::Bytes(blob.as_slice().into())
);
}
#[tokio::test]
async fn should_map_nonexisting_database_error() {
let mut url = Url::parse(&CONN_STR).unwrap();
url.set_username("root").unwrap();
url.set_path("/this_does_not_exist");
let url = url.as_str().to_string();
let conn = Quaint::new(&url).await.unwrap();
let res = conn.query_raw("SELECT 1 + 1", &[]).await;
assert!(&res.is_err());
let err = res.unwrap_err();
match err.kind() {
ErrorKind::DatabaseDoesNotExist { db_name } => {
assert_eq!(Some("1049"), err.original_code());
assert_eq!(Some("Unknown database \'this_does_not_exist\'"), err.original_message());
assert_eq!("this_does_not_exist", db_name.as_str())
}
e => panic!("Expected `DatabaseDoesNotExist`, got {:?}", e),
}
}
#[tokio::test]
async fn test_uniq_constraint_violation() {
let conn = Quaint::new(&CONN_STR).await.unwrap();
let _ = conn.raw_cmd("DROP TABLE test_uniq_constraint_violation").await;
let _ = conn.raw_cmd("DROP INDEX idx_uniq_constraint_violation").await;
conn.raw_cmd("CREATE TABLE test_uniq_constraint_violation (id1 int, id2 int)")
.await
.unwrap();
conn.raw_cmd("CREATE UNIQUE INDEX idx_uniq_constraint_violation ON test_uniq_constraint_violation (id1, id2) USING btree").await.unwrap();
conn.query_raw(
"INSERT INTO test_uniq_constraint_violation (id1, id2) VALUES (1, 2)",
&[],
)
.await
.unwrap();
let res = conn
.query_raw(
"INSERT INTO test_uniq_constraint_violation (id1, id2) VALUES (1, 2)",
&[],
)
.await;
let err = res.unwrap_err();
match err.kind() {
ErrorKind::UniqueConstraintViolation { constraint } => {
assert_eq!(Some("1062"), err.original_code());
assert_eq!(
&DatabaseConstraint::Index(String::from("idx_uniq_constraint_violation")),
constraint,
)
}
_ => panic!(err),
}
}
#[tokio::test]
async fn test_null_constraint_violation() {
let conn = Quaint::new(&CONN_STR).await.unwrap();
let _ = conn.raw_cmd("DROP TABLE test_null_constraint_violation").await;
conn.raw_cmd("CREATE TABLE test_null_constraint_violation (id1 int not null, id2 int not null)")
.await
.unwrap();
// Error code 1364
{
let res = conn
.query_raw("INSERT INTO test_null_constraint_violation () VALUES ()", &[])
.await;
let err = res.unwrap_err();
match err.kind() {
ErrorKind::NullConstraintViolation { constraint } => {
assert_eq!(Some("1364"), err.original_code());
assert_eq!(
Some("Field \'id1\' doesn\'t have a default value"),
err.original_message()
);
assert_eq!(&DatabaseConstraint::Fields(vec![String::from("id1")]), constraint)
}
_ => panic!(err),
}
}
// Error code 1048
{
conn.query_raw(
"INSERT INTO test_null_constraint_violation (id1, id2) VALUES (50, 55)",
&[],
)
.await
.unwrap();
let err = conn
.query_raw("UPDATE test_null_constraint_violation SET id2 = NULL", &[])
.await
.unwrap_err();
match err.kind() {
ErrorKind::NullConstraintViolation { constraint } => {
assert_eq!(Some("1048"), err.original_code());
assert_eq!(&DatabaseConstraint::Fields(vec![String::from("id2")]), constraint);
}
_ => panic!("{:?}", err),
}
}
}
} | use percent_encoding::percent_decode;
use std::{borrow::Cow, future::Future, path::Path, time::Duration};
use tokio::time::timeout; | random_line_split |
Bijlage_D.py | # -------------------------------------------
# MODULES
# -------------------------------------------
import sys
import platform
if(platform.system()== "Windows"):
dir_sep = "\\"
else:
dir_sep = "/"
import time
import os
import numpy as np
import subprocess
import math
from mathutils import Vector
try:
from CifFile import CifFile
pars_check = False
except:
print("PyCIFRW not installed, try: pip install PyCifRW")
pars_check = True
try:
import bpy
Blender_env = True
except:
print("Not in blender environment.")
# -------------------------------------------
# VARIABLES
# -------------------------------------------
# global variables
file_path = "Select a file" # path to CIF-file
draw_bonds = False # draws bonds between atoms
draw_style = "SPACE FILLING" # sets draw style
draw_quality = "MED" # sets key for qualitydic
draw_lattice = False # draws unit cell outline
atom_name = False # displays names of atoms
bond_distance = 2 # set the max distance between bound atoms
lattice_size = 0.03 # sets size of lattice borders
bond_radius = 0.05 # radius of bond
add_camera = True # render final image
atom_color = True # draw atoms in color
user_feedback = "" # feedback for the user
print_data = True
# dictionaries
# sets detail of spheres
styledic = {
"SPACE FILLING" : [1,0],
"BALL AND STICK" : [0.5,0],
"STICK" : [0,1]
}
# sets detail of spheres
qualitydic = {
"MIN" : 8,
"LOW" : 16,
"MED" : 32,
"HIGH" : 64,
"MAX" : 128
}
'''
Uncomment this when no external dictionaries are found
# dictionary which couples atoms to a color
colordic = {
"O" : [1,0,0],
"Si" : [0.25,0.25,1],
"Fe" : [1,0.2,0.2],
}
# dictionary which couples atoms to a specific size
sizedic = {
"O" : 0.3,
"Si" : 0.6,
"Fe" : 1.4,
}
'''
# Read in dictionaries from external files
path = os.path.dirname(os.path.realpath(__file__))
# dictionary which couples atoms to a color
# Color scheme, in RGB percentages, following the CPK convention was extracted from https://en.wikipedia.org/wiki/CPK_coloring#Typical_assignments
# data can be changed by modifying the values in colordic.txt
with open(path+dir_sep+'colordic.txt','r') as inf:
colordic = eval(inf.read())
# dictionary which couples atoms to a specific size
# Atom data, in Ångström, was extracted from https://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page)
# data can be changed by modifying the values in sizedic.txt
with open(path+dir_sep+'sizedic.txt','r') as inf:
sizedic = eval(inf.read())
# ----------------------------------------------
# BLENDER ADD-ON
# ----------------------------------------------
# add-on info
bl_info = {
"name": "Crystallographic Drawing Tool for Blender",
"description": "Add-on for drawing crystals from CIF-files.",
"author": "Jarrit Boons",
"blender": (2, 80,0),
"location": "View3D",
"category": "Crystallography in Blender"
}
# Operator to open the file browser and select a file
class ScanFileOperator(bpy.types.Operator):
bl_idname = "error.scan_file"
bl_label = "Scan file for return"
filepath = bpy.props.StringProperty(subtype="FILE_PATH")
def execute(self, context):
global file_path
global user_feedback
user_feedback = ""
file_path = self.filepath
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
def register():
bpy.types.Scene.path_to_file = bpy.props.StringProperty(
name="",
description="Path to CIF file",
default = "empty"
)
# Operator to hold CDTB-data and program execution
class Operator(bpy.types.Operator):
bl_idname = "object.cdtb_operator"
bl_label = "CDTB_operator"
bl_descriptor = "Operator for drawing crystal"
# Runs the whole program
def execute(self, context):
global pars_check
global user_feedback
if(pars_check):
user_feedback = "CiFFile module not installed"
return {'FINISHED'}
if(file_path == "Select a file"):
print("No file selected")
user_feedback = "No File selected"
else:
user_feedback = "Crystal drawn"
global draw_bonds
draw_bonds = context.scene.draw_bonds
global bond_distance
bond_distance = context.scene.bond_distance
global draw_lattice
draw_lattice = context.scene.draw_lattice
global atom_name
atom_name = context.scene.atom_name
global print_data
print_data = context.scene.print_data
global draw_style
global atom_color
draw_style = context.scene.style_selection_mode
if(draw_style=="STICK"):
draw_bonds = True
atom_color = False
else:
atom_color = True
global draw_quality
draw_quality = context.scene.quality_selection_mode
global add_camera
add_camera = context.scene.add_camera
drawCrystal(file_path)
return {'FINISHED'}
@classmethod
def register(cls):
print("Registered class: %s " % cls.bl_label)
bpy.types.Scene.draw_bonds = bpy.props.BoolProperty(
name="Draw bonds",
description="Draw bonds between elements"
)
bpy.types.Scene.bond_distance = bpy.props.FloatProperty(
name="Bond distance",
description="Set max distance for bonds to occur",
default=2,
min=0.0,
max=10.0,
precision=2
)
bpy.types.Scene.atom_name = bpy.props.BoolProperty(
name="Atom names",
description="Display the name of atoms"
)
bpy.types.Scene.draw_lattice = bpy.props.BoolProperty(
name="Draw lattice",
description="Draw unit cell outline"
)
bpy.types.Scene.print_data = bpy.props.BoolProperty(
name="Print data",
description="Print crystal data in terminal"
)
# Dropdown menu for drawing style
selection_style = [
("SPACE FILLING", "SPACE FILLING", "", 1),
("BALL AND STICK", "BALL AND STICK", "", 2),
("STICK", "STICK", "", 3),
]
bpy.types.Scene.style_selection_mode = bpy.props.EnumProperty(
items=selection_style,
name="Style"
)
# Dropdown menu for drawing quality
selection_qual = [
("MIN", "MIN", "", 1),
("LOW", "LOW", "", 2),
("MED", "MED", "", 3),
("HIGH", "HIGH", "", 4),
("MAX", "MAX", "", 5)
]
bpy.types.Scene.quality_selection_mode = bpy.props.EnumProperty(
items=selection_qual,
name="Quality",
default="MED"
)
bpy.types.Scene.add_camera = bpy.props.BoolProperty(
name="Place camera",
description="Place a camera and light to make rendering possible"
)
@classmethod
def unregister(cls):
pr | # Panel to display add-on in Blender environment
class Panel(bpy.types.Panel):
bl_idname = "CDTB_Panel"
bl_label = "CDTB_Panel"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_context = "objectmode"
bl_category = "CDTB"
def draw(self,context):
scn = context.scene
layout = self.layout
layout.label(text = 'Input file',icon_value=112)
'''
for i in range(100):
layout.label(text = str(i),icon_value =i)
'''
box = layout.box()
row = box.row()
splitrow = row.split(factor=0.075)
left_col = splitrow.column()
right_col = splitrow.column()
left_col.operator('error.scan_file',icon_value=108,text="")
right_col.label(text=file_path.rsplit('\\', 2)[-1])
layout.label(text = 'Settings',icon_value =117)
box = layout.box()
box.prop(scn,'draw_bonds')
box.prop(scn,'bond_distance')
box.prop(scn,'draw_lattice')
box.prop(scn, 'atom_name')
box.prop(scn,'print_data')
box.prop(scn, 'style_selection_mode')
box.prop(scn, 'quality_selection_mode')
box.prop(scn, 'add_camera')
layout.separator()
splitrow = layout.split(factor=0.3)
col = splitrow.column()
col.operator('object.cdtb_operator',text="Draw Crystal")
col = splitrow.column()
col.label(text=user_feedback)
layout.separator()
@classmethod
def register(cls):
print("Registered class: %s " % cls.bl_label)
@classmethod
def unregister(cls):
print("Unregistered class: %s " % cls.bl_label)
def register():
bpy.utils.register_class(Operator)
bpy.utils.register_class(ScanFileOperator)
bpy.utils.register_class(Panel)
def unregister():
bpy.utils.unregister_class(Operator)
bpy.utils.unregister_class(Panel)
bpy.utils.unregister_class(ScanFileOperator)
#----------------------------------------------
# MAIN PROGRAM
#----------------------------------------------
class Crysdata():
def __init__(self,F,cb):
self.start = time.time()
print("Draw timer started")
self.name = F
self.cell = Cell(cb)
self.atoms = readEl(cb)
self.pos = readPos(cb)
c = self.cell
self.ftoc = self.get_fractional_to_cartesian_matrix(c.alen,c.blen,c.clen,c.alpha,c.beta,c.gamma)
def printout(self):
print(self.name)
print()
self.cell.printout()
print()
for element in self.pos:
element.printout()
print()
for element in self.atoms:
element.printout()
print()
print("Fractional to cartesian matrix:")
print(self.ftoc)
def get_fractional_to_cartesian_matrix(self,a, b, c, alpha, beta, gamma):
"""
Original code found at: https://gist.github.com/Bismarrck/a68da01f19b39320f78a
!changed formula to resemble one found on: https://en.wikipedia.org/wiki/Fractional_coordinates
Return the transformation matrix that converts fractional coordinates to
cartesian coordinates.
Parameters
----------
a, b, c : float
The lengths of the edges.
alpha, gamma, beta : float
The angles between the sides.
angle_in_degrees : bool
True if alpha, beta and gamma are expressed in degrees.
Returns
-------
r : array_like
The 3x3 rotation matrix. ``V_cart = np.dot(r, V_frac)``.
"""
alpha = np.deg2rad(alpha)
beta = np.deg2rad(beta)
gamma = np.deg2rad(gamma)
cosa = np.cos(alpha)
sina = np.sin(alpha)
cosb = np.cos(beta)
sinb = np.sin(beta)
cosg = np.cos(gamma)
sing = np.sin(gamma)
volume = 1.0 - cosa**2.0 - cosb**2.0 - cosg**2.0 + 2.0 * cosa * cosb * cosg
volume = a*b*c*np.sqrt(volume)
r = np.zeros((3, 3))
r[0, 0] = float(a)
r[0, 1] = float(b * cosg)
r[0, 2] = float(c * cosb)
r[1, 0] = float(0)
r[1, 1] = float(b * sing)
r[1, 2] = float(c * (cosa - cosb * cosg) / sing)
r[2, 0] = float(0)
r[2, 1] = float(0)
r[2, 2] = float(volume / (a*b*sing))
return r
def drawCrystal(self):
if draw_lattice:
self.drawCell()
print("Lattice drawn after {:.3f} seconds".format((time.time()-self.start)))
self.drawAtoms()
print("Atoms drawn after {:.3f} seconds".format((time.time()-self.start)))
if(draw_bonds):
self.drawBonds()
print("Bonds drawn after {:.3f} seconds".format((time.time()-self.start)))
def drawAtoms(self):
for a in self.atoms:
a.drawObj(self.ftoc)
print("Atoms drawn:",len(self.atoms))
def drawCell(self):
cell_corners=[]
cell_edges=[]
# calculate and draw corners
for i in range(2):
for j in range(2):
for k in range(2):
bpy.ops.mesh.primitive_uv_sphere_add(size=lattice_size,location=toCarth(self.ftoc,[i,j,k]))
activeObject = bpy.context.active_object # Set active object to variable
cell_corners.append(activeObject)
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
bpy.context.object.active_material.diffuse_color = [0,0,0] # change color
# draw lines
for i,j in zip([0,0,0,1,1,2,2,3,4,4,5,6],[1,2,4,3,5,3,6,7,5,6,7,7]):
cell_edges.append(self.drawLine(cell_corners[i].location,cell_corners[j].location))
# select all line and corners
for i in cell_corners:
i.select_set(action="SELECT")
for i in cell_edges:
i.select_set(action="SELECT")
# set corner in origin as active and join meshes as one object
bpy.context.view_layer.objects.active = cell_corners[0]
bpy.ops.object.join()
print("Cell box drawn")
def drawLine(self,ac,tc):
dx = tc[0] - ac[0]
dy = tc[1] - ac[1]
dz = tc[2] - ac[2]
dist = np.sqrt(dx**2 + dy**2 + dz**2)
bpy.ops.mesh.primitive_cylinder_add(vertices=qualitydic[draw_quality],radius=lattice_size,depth = dist,location = (dx/2 + ac[0], dy/2 + ac[1], dz/2 + ac[2]))
activeObject = bpy.context.active_object
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
bpy.context.object.active_material.diffuse_color = [0,0,0] # change color
phi = math.atan2(dy, dx)
theta = math.acos(dz/dist)
bpy.context.object.rotation_euler[1] = theta
bpy.context.object.rotation_euler[2] = phi
return activeObject
def drawBonds(self):
cnt = 0
bpy.ops.curve.primitive_bezier_circle_add(location=(0,0,0),radius = bond_radius)
bpy.context.object.name = 'bez'
for atom in self.atoms:
for target in self.atoms:
if atom != target:
if("bond{}-{}".format(target.elid,atom.elid)in bpy.data.objects):
continue
if(atom.sym == 'H' and target.sym == 'H'):
continue
if calcDistance(self.ftoc,atom,target) <= bond_distance:
self.makeBond(atom,target)
cnt += 1
print("Atom bonds drawn:",cnt)
# This function hooks the bond to the atoms
def makeBond(self,atom,target):
if 'OBJECT'!=bpy.context.mode:
bpy.ops.object.mode_set(mode='OBJECT')
o1 = bpy.data.objects[atom.elid]
o2 = bpy.data.objects[target.elid]
bond = self.hookCurve(o1,o2, bpy.context.scene)
bpy.context.object.data.bevel_object = bpy.data.objects["bez"]
bpy.context.object.name = "bond{}-{}".format(atom.elid,target.elid)
activeObject = bpy.context.active_object # Set active object to variable
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
bpy.context.object.active_material.diffuse_color = [255,255,255] # change color
if 'OBJECT'!=bpy.context.mode:
bpy.ops.object.mode_set(mode='OBJECT')
def hookCurve(self,o1, o2, scn):
curve = bpy.data.curves.new("link", 'CURVE')
curve.dimensions = '3D'
spline = curve.splines.new('BEZIER')
spline.bezier_points.add(1)
p0 = spline.bezier_points[0]
p1 = spline.bezier_points[1]
# p0.co = o1.location
p0.handle_right_type = 'VECTOR'
# p1.co = o2.location
p1.handle_left_type = 'VECTOR'
obj = bpy.data.objects.new("link", curve)
m0 = obj.modifiers.new("alpha", 'HOOK')
m0.object = o1
m1 = obj.modifiers.new("beta", 'HOOK')
m1.object = o2
bpy.context.collection.objects.link(obj)
bpy.context.view_layer.objects.active = obj
bpy.ops.object.mode_set(mode='EDIT')
# Reassign the points
p0 = curve.splines[0].bezier_points[0]
p1 = curve.splines[0].bezier_points[1]
# Hook first control point to first atom
p0.select_control_point = True
p1.select_control_point = False
bpy.ops.object.hook_assign(modifier="alpha")
# Hook second control point to first atom
p0 = curve.splines[0].bezier_points[0]
p1 = curve.splines[0].bezier_points[1]
p1.select_control_point = True
p0.select_control_point = False
bpy.ops.object.hook_assign(modifier="beta")
return obj
class Cell():
def __init__(self,cb):
self.alen = float(cb["_cell_length_a"])
self.blen = float(cb["_cell_length_b"])
self.clen = float(cb["_cell_length_c"])
self.alpha = float(cb["_cell_angle_alpha"])
self.beta = float(cb["_cell_angle_beta"])
self.gamma = float(cb["_cell_angle_gamma"])
def printout(self):
print("alen:{:8} \nblen:{:8} \nclen:{:8} \nalpha:{:8} \nbeta: {:8} \ngamma:{:8}".format(self.alen,self.blen,self.clen,self.alpha,self.beta,self.gamma))
class Atom():
def __init__(self,elid,sym,xpos,ypos,zpos):
self.elid = elid
self.sym = sym
self.xpos = float(xpos)
self.ypos = float(ypos)
self.zpos = float(zpos)
def printout(self):
print("id:{:3} symbol:{:2} x:{:.4f} y:{:.4f} z:{:.4f}".format(self.elid,self.sym,self.xpos,self.ypos,self.zpos))
def drawObj(self,ftoc):
size = sizedic[self.sym]*styledic[draw_style][0]+bond_radius*styledic[draw_style][1]
bpy.ops.mesh.primitive_uv_sphere_add(segments=qualitydic[draw_quality],ring_count=qualitydic[draw_quality]/2,size=size,location=toCarth(ftoc,[self.xpos,self.ypos,self.zpos]))
bpy.context.object.name = self.elid
activeObject = bpy.context.active_object # Set active object to variable
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
if(atom_name):
bpy.context.object.show_name = True
if(atom_color):
bpy.context.object.active_material.diffuse_color = colordic[self.sym] # change color to dictionary color
else:
bpy.context.object.active_material.diffuse_color = [1,1,1] # change color to white
class sympos():
def __init__(self,string):
self.xsym = (string[0].split(','))[0]
self.ysym = (string[0].split(','))[1]
self.zsym = (string[0].split(','))[2]
def printout(self):
print("x:{:8} y:{:8} z:{:8}".format(self.xsym,self.ysym,self.zsym))
def readEl(cb):
elements = []
previd = []
idcnt = []
lb = cb.GetLoop("_atom_site_label")
for el in lb:
flag = False
for i in range(len(previd)):
if(el[0] == previd[i]):
flag = True
break
if(flag):
idcnt[i] += 1
else:
previd.append(el[0])
idcnt.append(0)
i = len(idcnt)-1
id_t = "{}.{}".format(el[0],idcnt[i])
elements.append(Atom(id_t,el[1],el[2],el[3],el[4]))
return elements
def readPos(cb):
positions = [];
lb = cb.GetLoop("_symmetry_equiv_pos_as_xyz")
for el in lb:
positions.append(sympos(el))
return positions
def obabel_fill_unit_cell(cif_file, p1_file):
# Convert symmetry to P1 using openbabel as subprocess
# Notation: obabel [-i<input-type>] <infilename> [-o<output-type>] -O<outfilename> [Options]
subprocess.run(['obabel', '-icif', cif_file, '-ocif', '-O', p1_file, '--fillUC', 'keepconnect'])
def calcDistance(ftoc,atom1,atom2):
ac = toCarth(ftoc,[atom1.xpos,atom1.ypos,atom1.zpos])
tc = toCarth(ftoc,[atom2.xpos,atom2.ypos,atom2.zpos])
dx = tc[0] - ac[0]
dy = tc[1] - ac[1]
dz = tc[2] - ac[2]
dist = np.sqrt(dx**2 + dy**2 + dz**2)
return dist
def toCarth(ftoc,V_frac):
return np.dot(ftoc, V_frac)
def look_at(obj_camera, point):
loc_camera = obj_camera.matrix_world.to_translation()
direction = point - loc_camera
# point the cameras '-Z' and use its 'Y' as up
rot_quat = direction.to_track_quat('-Z', 'Y')
# assume we're using euler rotation
obj_camera.rotation_euler = rot_quat.to_euler()
def addCamera(x,y,z):
bpy.ops.object.camera_add(view_align=True, enter_editmode=False, location=(5*x,5*y,5*z))
print("camera added")
bpy.ops.object.light_add(type='SUN', view_align=False, location=(0, 0, 0))
obj_camera = bpy.data.objects["Camera"]
look_at(obj_camera, Vector([0,0,z/4]))
obj_camera.data.type = 'ORTHO'
obj_camera.data.ortho_scale = ((x+y+z))
def clearWS():
if 'OBJECT'!=bpy.context.mode:
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.delete(use_global=False)
# remove all previous curves
for i in bpy.data.curves:
bpy.data.curves.remove(i)
# remove all previous materials
for m in bpy.data.materials:
bpy.data.materials.remove(m)
# remove all previous camera's
for c in bpy.data.cameras:
bpy.data.cameras.remove(c)
print("Workspace cleared.")
return
def drawCrystal(file):
# Check if file is file:
S = time.time()
global user_feedback
ext = file[len(file)-4:]
if(ext.lower() != ".cif"):
print("Only cif files can be visualised")
user_feedback = "Not a cif file"
return
# Check OpenBabel installation
try:
# Convert the cif file to its P1 symmetry notation as a temporary cif file
print('Converting %s to P1' %file)
obabel_fill_unit_cell(file, "temp.CIF")
cf = CifFile("temp.CIF")
except:
print("No OpenBabel installation found, install it from http://openbabel.org/wiki/Category:Installation")
user_feedback = "OpenBabel not installed"
#cf = CifFile(file) CifFile apparently can't read in long filepaths
return
# Open and parse our cif
f = file.rsplit(dir_sep, 1)[-1]
F = f[:3]
print(f)
cb = cf.first_block()
Crystal = Crysdata(F,cb)
# Print crystal data in terminal if checked
if(print_data):
Crystal.printout()
print("Crystal data read after "+ str(time.time() - S) + " seconds")
# Draw crystal if in Blender environment
if(Blender_env):
clearWS()
Crystal.drawCrystal()
bpy.ops.object.select_all(action='DESELECT')
if(add_camera):
addCamera(Crystal.cell.alen,Crystal.cell.blen,Crystal.cell.clen)
| int("Unregistered class: %s " % cls.bl_label)
| identifier_body |
Bijlage_D.py | # -------------------------------------------
# MODULES
# -------------------------------------------
import sys
import platform
if(platform.system()== "Windows"):
dir_sep = "\\"
else:
dir_sep = "/"
import time
import os
import numpy as np
import subprocess
import math
from mathutils import Vector
try:
from CifFile import CifFile
pars_check = False
except:
print("PyCIFRW not installed, try: pip install PyCifRW")
pars_check = True
try:
import bpy
Blender_env = True
except:
print("Not in blender environment.")
# -------------------------------------------
# VARIABLES
# -------------------------------------------
# global variables
file_path = "Select a file" # path to CIF-file
draw_bonds = False # draws bonds between atoms
draw_style = "SPACE FILLING" # sets draw style
draw_quality = "MED" # sets key for qualitydic
draw_lattice = False # draws unit cell outline
atom_name = False # displays names of atoms
bond_distance = 2 # set the max distance between bound atoms
lattice_size = 0.03 # sets size of lattice borders
bond_radius = 0.05 # radius of bond
add_camera = True # render final image
atom_color = True # draw atoms in color
user_feedback = "" # feedback for the user
print_data = True
# dictionaries
# sets detail of spheres
styledic = {
"SPACE FILLING" : [1,0],
"BALL AND STICK" : [0.5,0],
"STICK" : [0,1]
}
# sets detail of spheres
qualitydic = {
"MIN" : 8,
"LOW" : 16,
"MED" : 32,
"HIGH" : 64,
"MAX" : 128
}
'''
Uncomment this when no external dictionaries are found
# dictionary which couples atoms to a color
colordic = {
"O" : [1,0,0],
"Si" : [0.25,0.25,1],
"Fe" : [1,0.2,0.2],
}
# dictionary which couples atoms to a specific size
sizedic = {
"O" : 0.3,
"Si" : 0.6,
"Fe" : 1.4,
}
'''
# Read in dictionaries from external files
path = os.path.dirname(os.path.realpath(__file__))
# dictionary which couples atoms to a color
# Color scheme, in RGB percentages, following the CPK convention was extracted from https://en.wikipedia.org/wiki/CPK_coloring#Typical_assignments
# data can be changed by modifying the values in colordic.txt
with open(path+dir_sep+'colordic.txt','r') as inf:
colordic = eval(inf.read())
# dictionary which couples atoms to a specific size
# Atom data, in Ångström, was extracted from https://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page)
# data can be changed by modifying the values in sizedic.txt
with open(path+dir_sep+'sizedic.txt','r') as inf:
sizedic = eval(inf.read())
# ----------------------------------------------
# BLENDER ADD-ON
# ----------------------------------------------
# add-on info
bl_info = {
"name": "Crystallographic Drawing Tool for Blender",
"description": "Add-on for drawing crystals from CIF-files.",
"author": "Jarrit Boons",
"blender": (2, 80,0),
"location": "View3D",
"category": "Crystallography in Blender"
}
# Operator to open the file browser and select a file
class ScanFileOperator(bpy.types.Operator):
bl_idname = "error.scan_file"
bl_label = "Scan file for return"
filepath = bpy.props.StringProperty(subtype="FILE_PATH")
def execute(self, context):
global file_path
global user_feedback
user_feedback = ""
file_path = self.filepath
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
def register():
bpy.types.Scene.path_to_file = bpy.props.StringProperty(
name="",
description="Path to CIF file",
default = "empty"
)
# Operator to hold CDTB-data and program execution
class Operator(bpy.types.Operator):
bl_idname = "object.cdtb_operator"
bl_label = "CDTB_operator"
bl_descriptor = "Operator for drawing crystal"
# Runs the whole program
def execute(self, context):
global pars_check
global user_feedback
if(pars_check):
user_feedback = "CiFFile module not installed"
return {'FINISHED'}
if(file_path == "Select a file"):
print("No file selected")
user_feedback = "No File selected"
else:
user_feedback = "Crystal drawn"
global draw_bonds
draw_bonds = context.scene.draw_bonds
global bond_distance
bond_distance = context.scene.bond_distance
global draw_lattice
draw_lattice = context.scene.draw_lattice
global atom_name
atom_name = context.scene.atom_name
global print_data
print_data = context.scene.print_data
global draw_style
global atom_color
draw_style = context.scene.style_selection_mode
if(draw_style=="STICK"):
draw_bonds = True
atom_color = False
else:
atom_color = True
global draw_quality
draw_quality = context.scene.quality_selection_mode
global add_camera
add_camera = context.scene.add_camera
drawCrystal(file_path)
return {'FINISHED'}
@classmethod
def register(cls):
print("Registered class: %s " % cls.bl_label)
bpy.types.Scene.draw_bonds = bpy.props.BoolProperty(
name="Draw bonds",
description="Draw bonds between elements"
)
bpy.types.Scene.bond_distance = bpy.props.FloatProperty(
name="Bond distance",
description="Set max distance for bonds to occur",
default=2,
min=0.0,
max=10.0,
precision=2
)
bpy.types.Scene.atom_name = bpy.props.BoolProperty(
name="Atom names",
description="Display the name of atoms"
)
bpy.types.Scene.draw_lattice = bpy.props.BoolProperty(
name="Draw lattice",
description="Draw unit cell outline"
)
bpy.types.Scene.print_data = bpy.props.BoolProperty(
name="Print data",
description="Print crystal data in terminal"
)
# Dropdown menu for drawing style
selection_style = [
("SPACE FILLING", "SPACE FILLING", "", 1),
("BALL AND STICK", "BALL AND STICK", "", 2),
("STICK", "STICK", "", 3),
]
bpy.types.Scene.style_selection_mode = bpy.props.EnumProperty(
items=selection_style,
name="Style"
)
# Dropdown menu for drawing quality
selection_qual = [
("MIN", "MIN", "", 1),
("LOW", "LOW", "", 2),
("MED", "MED", "", 3),
("HIGH", "HIGH", "", 4),
("MAX", "MAX", "", 5)
]
bpy.types.Scene.quality_selection_mode = bpy.props.EnumProperty(
items=selection_qual,
name="Quality",
default="MED"
)
bpy.types.Scene.add_camera = bpy.props.BoolProperty(
name="Place camera",
description="Place a camera and light to make rendering possible"
)
@classmethod
def unregister(cls):
print("Unregistered class: %s " % cls.bl_label)
# Panel to display add-on in Blender environment
class Panel(bpy.types.Panel):
bl_idname = "CDTB_Panel"
bl_label = "CDTB_Panel"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_context = "objectmode"
bl_category = "CDTB"
def draw(self,context):
scn = context.scene
layout = self.layout
layout.label(text = 'Input file',icon_value=112)
'''
for i in range(100):
layout.label(text = str(i),icon_value =i)
'''
box = layout.box()
row = box.row()
splitrow = row.split(factor=0.075)
left_col = splitrow.column()
right_col = splitrow.column()
left_col.operator('error.scan_file',icon_value=108,text="")
right_col.label(text=file_path.rsplit('\\', 2)[-1])
layout.label(text = 'Settings',icon_value =117)
box = layout.box()
box.prop(scn,'draw_bonds')
box.prop(scn,'bond_distance')
box.prop(scn,'draw_lattice')
box.prop(scn, 'atom_name')
box.prop(scn,'print_data')
box.prop(scn, 'style_selection_mode')
box.prop(scn, 'quality_selection_mode')
box.prop(scn, 'add_camera')
layout.separator()
splitrow = layout.split(factor=0.3)
col = splitrow.column()
col.operator('object.cdtb_operator',text="Draw Crystal")
col = splitrow.column()
col.label(text=user_feedback)
layout.separator()
@classmethod
def register(cls):
print("Registered class: %s " % cls.bl_label)
@classmethod
def unregister(cls):
print("Unregistered class: %s " % cls.bl_label)
def register():
bpy.utils.register_class(Operator)
bpy.utils.register_class(ScanFileOperator)
bpy.utils.register_class(Panel)
def unregister():
bpy.utils.unregister_class(Operator)
bpy.utils.unregister_class(Panel)
bpy.utils.unregister_class(ScanFileOperator)
#----------------------------------------------
# MAIN PROGRAM
#----------------------------------------------
class Crysdata():
def __init__(self,F,cb):
self.start = time.time()
print("Draw timer started")
self.name = F
self.cell = Cell(cb)
self.atoms = readEl(cb)
self.pos = readPos(cb)
c = self.cell
self.ftoc = self.get_fractional_to_cartesian_matrix(c.alen,c.blen,c.clen,c.alpha,c.beta,c.gamma)
def printout(self):
print(self.name)
print()
self.cell.printout()
print()
for element in self.pos:
element.printout()
print()
for element in self.atoms:
element.printout()
print()
print("Fractional to cartesian matrix:")
print(self.ftoc)
def get_fractional_to_cartesian_matrix(self,a, b, c, alpha, beta, gamma):
"""
Original code found at: https://gist.github.com/Bismarrck/a68da01f19b39320f78a
!changed formula to resemble one found on: https://en.wikipedia.org/wiki/Fractional_coordinates
Return the transformation matrix that converts fractional coordinates to
cartesian coordinates.
Parameters
----------
a, b, c : float
The lengths of the edges.
alpha, gamma, beta : float
The angles between the sides.
angle_in_degrees : bool
True if alpha, beta and gamma are expressed in degrees.
Returns
-------
r : array_like
The 3x3 rotation matrix. ``V_cart = np.dot(r, V_frac)``.
"""
alpha = np.deg2rad(alpha)
beta = np.deg2rad(beta)
gamma = np.deg2rad(gamma)
cosa = np.cos(alpha)
sina = np.sin(alpha)
cosb = np.cos(beta)
sinb = np.sin(beta)
cosg = np.cos(gamma)
sing = np.sin(gamma)
volume = 1.0 - cosa**2.0 - cosb**2.0 - cosg**2.0 + 2.0 * cosa * cosb * cosg
volume = a*b*c*np.sqrt(volume)
r = np.zeros((3, 3))
r[0, 0] = float(a)
r[0, 1] = float(b * cosg)
r[0, 2] = float(c * cosb)
r[1, 0] = float(0)
r[1, 1] = float(b * sing)
r[1, 2] = float(c * (cosa - cosb * cosg) / sing)
r[2, 0] = float(0)
r[2, 1] = float(0)
r[2, 2] = float(volume / (a*b*sing))
return r
def drawCrystal(self):
if draw_lattice:
self.drawCell()
print("Lattice drawn after {:.3f} seconds".format((time.time()-self.start)))
self.drawAtoms()
print("Atoms drawn after {:.3f} seconds".format((time.time()-self.start)))
if(draw_bonds):
self.drawBonds()
print("Bonds drawn after {:.3f} seconds".format((time.time()-self.start)))
def drawAtoms(self):
for a in self.atoms:
a.drawObj(self.ftoc)
print("Atoms drawn:",len(self.atoms))
def drawCell(self):
cell_corners=[]
cell_edges=[]
# calculate and draw corners
for i in range(2):
for j in range(2):
for k in range(2):
bpy.ops.mesh.primitive_uv_sphere_add(size=lattice_size,location=toCarth(self.ftoc,[i,j,k]))
activeObject = bpy.context.active_object # Set active object to variable
cell_corners.append(activeObject)
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
bpy.context.object.active_material.diffuse_color = [0,0,0] # change color
# draw lines
for i,j in zip([0,0,0,1,1,2,2,3,4,4,5,6],[1,2,4,3,5,3,6,7,5,6,7,7]):
cell_edges.append(self.drawLine(cell_corners[i].location,cell_corners[j].location))
# select all line and corners
for i in cell_corners:
i.select_set(action="SELECT")
for i in cell_edges:
i.select_set(action="SELECT")
# set corner in origin as active and join meshes as one object
bpy.context.view_layer.objects.active = cell_corners[0]
bpy.ops.object.join()
print("Cell box drawn")
def drawLine(self,ac,tc):
dx = tc[0] - ac[0]
dy = tc[1] - ac[1]
dz = tc[2] - ac[2]
dist = np.sqrt(dx**2 + dy**2 + dz**2)
bpy.ops.mesh.primitive_cylinder_add(vertices=qualitydic[draw_quality],radius=lattice_size,depth = dist,location = (dx/2 + ac[0], dy/2 + ac[1], dz/2 + ac[2]))
activeObject = bpy.context.active_object
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
bpy.context.object.active_material.diffuse_color = [0,0,0] # change color
phi = math.atan2(dy, dx)
theta = math.acos(dz/dist)
bpy.context.object.rotation_euler[1] = theta
bpy.context.object.rotation_euler[2] = phi
return activeObject
def drawBonds(self):
cnt = 0
bpy.ops.curve.primitive_bezier_circle_add(location=(0,0,0),radius = bond_radius)
bpy.context.object.name = 'bez'
for atom in self.atoms:
for target in self.atoms:
if atom != target:
if("bond{}-{}".format(target.elid,atom.elid)in bpy.data.objects):
continue
if(atom.sym == 'H' and target.sym == 'H'):
continue
if calcDistance(self.ftoc,atom,target) <= bond_distance:
self.makeBond(atom,target)
cnt += 1
print("Atom bonds drawn:",cnt)
# This function hooks the bond to the atoms
def makeBond(self,atom,target):
if 'OBJECT'!=bpy.context.mode:
bpy.ops.object.mode_set(mode='OBJECT')
o1 = bpy.data.objects[atom.elid]
o2 = bpy.data.objects[target.elid]
bond = self.hookCurve(o1,o2, bpy.context.scene)
bpy.context.object.data.bevel_object = bpy.data.objects["bez"]
bpy.context.object.name = "bond{}-{}".format(atom.elid,target.elid)
activeObject = bpy.context.active_object # Set active object to variable
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
bpy.context.object.active_material.diffuse_color = [255,255,255] # change color
if 'OBJECT'!=bpy.context.mode:
bpy.ops.object.mode_set(mode='OBJECT')
def ho | elf,o1, o2, scn):
curve = bpy.data.curves.new("link", 'CURVE')
curve.dimensions = '3D'
spline = curve.splines.new('BEZIER')
spline.bezier_points.add(1)
p0 = spline.bezier_points[0]
p1 = spline.bezier_points[1]
# p0.co = o1.location
p0.handle_right_type = 'VECTOR'
# p1.co = o2.location
p1.handle_left_type = 'VECTOR'
obj = bpy.data.objects.new("link", curve)
m0 = obj.modifiers.new("alpha", 'HOOK')
m0.object = o1
m1 = obj.modifiers.new("beta", 'HOOK')
m1.object = o2
bpy.context.collection.objects.link(obj)
bpy.context.view_layer.objects.active = obj
bpy.ops.object.mode_set(mode='EDIT')
# Reassign the points
p0 = curve.splines[0].bezier_points[0]
p1 = curve.splines[0].bezier_points[1]
# Hook first control point to first atom
p0.select_control_point = True
p1.select_control_point = False
bpy.ops.object.hook_assign(modifier="alpha")
# Hook second control point to first atom
p0 = curve.splines[0].bezier_points[0]
p1 = curve.splines[0].bezier_points[1]
p1.select_control_point = True
p0.select_control_point = False
bpy.ops.object.hook_assign(modifier="beta")
return obj
class Cell():
def __init__(self,cb):
self.alen = float(cb["_cell_length_a"])
self.blen = float(cb["_cell_length_b"])
self.clen = float(cb["_cell_length_c"])
self.alpha = float(cb["_cell_angle_alpha"])
self.beta = float(cb["_cell_angle_beta"])
self.gamma = float(cb["_cell_angle_gamma"])
def printout(self):
print("alen:{:8} \nblen:{:8} \nclen:{:8} \nalpha:{:8} \nbeta: {:8} \ngamma:{:8}".format(self.alen,self.blen,self.clen,self.alpha,self.beta,self.gamma))
class Atom():
def __init__(self,elid,sym,xpos,ypos,zpos):
self.elid = elid
self.sym = sym
self.xpos = float(xpos)
self.ypos = float(ypos)
self.zpos = float(zpos)
def printout(self):
print("id:{:3} symbol:{:2} x:{:.4f} y:{:.4f} z:{:.4f}".format(self.elid,self.sym,self.xpos,self.ypos,self.zpos))
def drawObj(self,ftoc):
size = sizedic[self.sym]*styledic[draw_style][0]+bond_radius*styledic[draw_style][1]
bpy.ops.mesh.primitive_uv_sphere_add(segments=qualitydic[draw_quality],ring_count=qualitydic[draw_quality]/2,size=size,location=toCarth(ftoc,[self.xpos,self.ypos,self.zpos]))
bpy.context.object.name = self.elid
activeObject = bpy.context.active_object # Set active object to variable
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
if(atom_name):
bpy.context.object.show_name = True
if(atom_color):
bpy.context.object.active_material.diffuse_color = colordic[self.sym] # change color to dictionary color
else:
bpy.context.object.active_material.diffuse_color = [1,1,1] # change color to white
class sympos():
def __init__(self,string):
self.xsym = (string[0].split(','))[0]
self.ysym = (string[0].split(','))[1]
self.zsym = (string[0].split(','))[2]
def printout(self):
print("x:{:8} y:{:8} z:{:8}".format(self.xsym,self.ysym,self.zsym))
def readEl(cb):
elements = []
previd = []
idcnt = []
lb = cb.GetLoop("_atom_site_label")
for el in lb:
flag = False
for i in range(len(previd)):
if(el[0] == previd[i]):
flag = True
break
if(flag):
idcnt[i] += 1
else:
previd.append(el[0])
idcnt.append(0)
i = len(idcnt)-1
id_t = "{}.{}".format(el[0],idcnt[i])
elements.append(Atom(id_t,el[1],el[2],el[3],el[4]))
return elements
def readPos(cb):
positions = [];
lb = cb.GetLoop("_symmetry_equiv_pos_as_xyz")
for el in lb:
positions.append(sympos(el))
return positions
def obabel_fill_unit_cell(cif_file, p1_file):
# Convert symmetry to P1 using openbabel as subprocess
# Notation: obabel [-i<input-type>] <infilename> [-o<output-type>] -O<outfilename> [Options]
subprocess.run(['obabel', '-icif', cif_file, '-ocif', '-O', p1_file, '--fillUC', 'keepconnect'])
def calcDistance(ftoc,atom1,atom2):
ac = toCarth(ftoc,[atom1.xpos,atom1.ypos,atom1.zpos])
tc = toCarth(ftoc,[atom2.xpos,atom2.ypos,atom2.zpos])
dx = tc[0] - ac[0]
dy = tc[1] - ac[1]
dz = tc[2] - ac[2]
dist = np.sqrt(dx**2 + dy**2 + dz**2)
return dist
def toCarth(ftoc,V_frac):
return np.dot(ftoc, V_frac)
def look_at(obj_camera, point):
loc_camera = obj_camera.matrix_world.to_translation()
direction = point - loc_camera
# point the cameras '-Z' and use its 'Y' as up
rot_quat = direction.to_track_quat('-Z', 'Y')
# assume we're using euler rotation
obj_camera.rotation_euler = rot_quat.to_euler()
def addCamera(x,y,z):
bpy.ops.object.camera_add(view_align=True, enter_editmode=False, location=(5*x,5*y,5*z))
print("camera added")
bpy.ops.object.light_add(type='SUN', view_align=False, location=(0, 0, 0))
obj_camera = bpy.data.objects["Camera"]
look_at(obj_camera, Vector([0,0,z/4]))
obj_camera.data.type = 'ORTHO'
obj_camera.data.ortho_scale = ((x+y+z))
def clearWS():
if 'OBJECT'!=bpy.context.mode:
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.delete(use_global=False)
# remove all previous curves
for i in bpy.data.curves:
bpy.data.curves.remove(i)
# remove all previous materials
for m in bpy.data.materials:
bpy.data.materials.remove(m)
# remove all previous camera's
for c in bpy.data.cameras:
bpy.data.cameras.remove(c)
print("Workspace cleared.")
return
def drawCrystal(file):
# Check if file is file:
S = time.time()
global user_feedback
ext = file[len(file)-4:]
if(ext.lower() != ".cif"):
print("Only cif files can be visualised")
user_feedback = "Not a cif file"
return
# Check OpenBabel installation
try:
# Convert the cif file to its P1 symmetry notation as a temporary cif file
print('Converting %s to P1' %file)
obabel_fill_unit_cell(file, "temp.CIF")
cf = CifFile("temp.CIF")
except:
print("No OpenBabel installation found, install it from http://openbabel.org/wiki/Category:Installation")
user_feedback = "OpenBabel not installed"
#cf = CifFile(file) CifFile apparently can't read in long filepaths
return
# Open and parse our cif
f = file.rsplit(dir_sep, 1)[-1]
F = f[:3]
print(f)
cb = cf.first_block()
Crystal = Crysdata(F,cb)
# Print crystal data in terminal if checked
if(print_data):
Crystal.printout()
print("Crystal data read after "+ str(time.time() - S) + " seconds")
# Draw crystal if in Blender environment
if(Blender_env):
clearWS()
Crystal.drawCrystal()
bpy.ops.object.select_all(action='DESELECT')
if(add_camera):
addCamera(Crystal.cell.alen,Crystal.cell.blen,Crystal.cell.clen)
| okCurve(s | identifier_name |
Bijlage_D.py | # -------------------------------------------
# MODULES
# -------------------------------------------
import sys
import platform
if(platform.system()== "Windows"):
dir_sep = "\\"
else:
dir_sep = "/"
import time
import os
import numpy as np
import subprocess
import math
from mathutils import Vector
try:
from CifFile import CifFile
pars_check = False
except:
print("PyCIFRW not installed, try: pip install PyCifRW")
pars_check = True
try:
import bpy
Blender_env = True
except:
print("Not in blender environment.")
# -------------------------------------------
# VARIABLES
# -------------------------------------------
# global variables
file_path = "Select a file" # path to CIF-file
draw_bonds = False # draws bonds between atoms
draw_style = "SPACE FILLING" # sets draw style
draw_quality = "MED" # sets key for qualitydic
draw_lattice = False # draws unit cell outline
atom_name = False # displays names of atoms
bond_distance = 2 # set the max distance between bound atoms
lattice_size = 0.03 # sets size of lattice borders
bond_radius = 0.05 # radius of bond
add_camera = True # render final image
atom_color = True # draw atoms in color
user_feedback = "" # feedback for the user
print_data = True
# dictionaries
# sets detail of spheres
styledic = {
"SPACE FILLING" : [1,0],
"BALL AND STICK" : [0.5,0],
"STICK" : [0,1]
}
# sets detail of spheres
qualitydic = {
"MIN" : 8,
"LOW" : 16,
"MED" : 32,
"HIGH" : 64,
"MAX" : 128
}
'''
Uncomment this when no external dictionaries are found
# dictionary which couples atoms to a color
colordic = {
"O" : [1,0,0],
"Si" : [0.25,0.25,1],
"Fe" : [1,0.2,0.2],
}
# dictionary which couples atoms to a specific size
sizedic = {
"O" : 0.3,
"Si" : 0.6,
"Fe" : 1.4,
}
'''
# Read in dictionaries from external files
path = os.path.dirname(os.path.realpath(__file__))
# dictionary which couples atoms to a color
# Color scheme, in RGB percentages, following the CPK convention was extracted from https://en.wikipedia.org/wiki/CPK_coloring#Typical_assignments
# data can be changed by modifying the values in colordic.txt
with open(path+dir_sep+'colordic.txt','r') as inf:
colordic = eval(inf.read())
# dictionary which couples atoms to a specific size
# Atom data, in Ångström, was extracted from https://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page)
# data can be changed by modifying the values in sizedic.txt
with open(path+dir_sep+'sizedic.txt','r') as inf:
sizedic = eval(inf.read())
# ----------------------------------------------
# BLENDER ADD-ON
# ----------------------------------------------
# add-on info
bl_info = {
"name": "Crystallographic Drawing Tool for Blender",
"description": "Add-on for drawing crystals from CIF-files.",
"author": "Jarrit Boons",
"blender": (2, 80,0),
"location": "View3D",
"category": "Crystallography in Blender"
}
# Operator to open the file browser and select a file
class ScanFileOperator(bpy.types.Operator):
bl_idname = "error.scan_file"
bl_label = "Scan file for return"
filepath = bpy.props.StringProperty(subtype="FILE_PATH")
def execute(self, context):
global file_path
global user_feedback
user_feedback = ""
file_path = self.filepath
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
def register():
bpy.types.Scene.path_to_file = bpy.props.StringProperty(
name="",
description="Path to CIF file",
default = "empty"
)
# Operator to hold CDTB-data and program execution
class Operator(bpy.types.Operator):
bl_idname = "object.cdtb_operator"
bl_label = "CDTB_operator"
bl_descriptor = "Operator for drawing crystal"
# Runs the whole program
def execute(self, context):
global pars_check
global user_feedback
if(pars_check):
user_feedback = "CiFFile module not installed"
return {'FINISHED'}
if(file_path == "Select a file"):
print("No file selected")
user_feedback = "No File selected"
else:
user_feedback = "Crystal drawn"
global draw_bonds
draw_bonds = context.scene.draw_bonds
global bond_distance
bond_distance = context.scene.bond_distance
global draw_lattice
draw_lattice = context.scene.draw_lattice
global atom_name
atom_name = context.scene.atom_name
global print_data
print_data = context.scene.print_data
global draw_style
global atom_color
draw_style = context.scene.style_selection_mode
if(draw_style=="STICK"):
draw_bonds = True
atom_color = False
else:
atom_color = True
global draw_quality
draw_quality = context.scene.quality_selection_mode
global add_camera
add_camera = context.scene.add_camera
drawCrystal(file_path)
return {'FINISHED'}
@classmethod
def register(cls):
print("Registered class: %s " % cls.bl_label)
bpy.types.Scene.draw_bonds = bpy.props.BoolProperty(
name="Draw bonds",
description="Draw bonds between elements"
)
bpy.types.Scene.bond_distance = bpy.props.FloatProperty(
name="Bond distance",
description="Set max distance for bonds to occur",
default=2,
min=0.0,
max=10.0,
precision=2
)
bpy.types.Scene.atom_name = bpy.props.BoolProperty(
name="Atom names",
description="Display the name of atoms"
)
bpy.types.Scene.draw_lattice = bpy.props.BoolProperty(
name="Draw lattice",
description="Draw unit cell outline"
)
bpy.types.Scene.print_data = bpy.props.BoolProperty(
name="Print data",
description="Print crystal data in terminal"
)
# Dropdown menu for drawing style
selection_style = [
("SPACE FILLING", "SPACE FILLING", "", 1),
("BALL AND STICK", "BALL AND STICK", "", 2),
("STICK", "STICK", "", 3),
]
bpy.types.Scene.style_selection_mode = bpy.props.EnumProperty(
items=selection_style,
name="Style"
)
# Dropdown menu for drawing quality
selection_qual = [
("MIN", "MIN", "", 1),
("LOW", "LOW", "", 2),
("MED", "MED", "", 3),
("HIGH", "HIGH", "", 4),
("MAX", "MAX", "", 5)
]
bpy.types.Scene.quality_selection_mode = bpy.props.EnumProperty(
items=selection_qual,
name="Quality",
default="MED"
)
bpy.types.Scene.add_camera = bpy.props.BoolProperty(
name="Place camera",
description="Place a camera and light to make rendering possible"
)
@classmethod
def unregister(cls):
print("Unregistered class: %s " % cls.bl_label)
# Panel to display add-on in Blender environment
class Panel(bpy.types.Panel):
bl_idname = "CDTB_Panel"
bl_label = "CDTB_Panel"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_context = "objectmode"
bl_category = "CDTB"
def draw(self,context):
scn = context.scene
layout = self.layout
layout.label(text = 'Input file',icon_value=112)
'''
for i in range(100):
layout.label(text = str(i),icon_value =i)
'''
box = layout.box()
row = box.row()
splitrow = row.split(factor=0.075)
left_col = splitrow.column()
right_col = splitrow.column()
left_col.operator('error.scan_file',icon_value=108,text="")
right_col.label(text=file_path.rsplit('\\', 2)[-1])
layout.label(text = 'Settings',icon_value =117)
box = layout.box()
box.prop(scn,'draw_bonds')
box.prop(scn,'bond_distance')
box.prop(scn,'draw_lattice')
box.prop(scn, 'atom_name')
box.prop(scn,'print_data')
box.prop(scn, 'style_selection_mode')
box.prop(scn, 'quality_selection_mode')
box.prop(scn, 'add_camera')
layout.separator()
splitrow = layout.split(factor=0.3)
col = splitrow.column()
col.operator('object.cdtb_operator',text="Draw Crystal")
col = splitrow.column()
col.label(text=user_feedback)
layout.separator()
@classmethod
def register(cls):
print("Registered class: %s " % cls.bl_label)
@classmethod
def unregister(cls):
print("Unregistered class: %s " % cls.bl_label)
def register():
bpy.utils.register_class(Operator)
bpy.utils.register_class(ScanFileOperator)
bpy.utils.register_class(Panel)
def unregister():
bpy.utils.unregister_class(Operator)
bpy.utils.unregister_class(Panel)
bpy.utils.unregister_class(ScanFileOperator)
#----------------------------------------------
# MAIN PROGRAM
#----------------------------------------------
class Crysdata():
def __init__(self,F,cb):
self.start = time.time()
print("Draw timer started")
self.name = F
self.cell = Cell(cb)
self.atoms = readEl(cb)
self.pos = readPos(cb)
c = self.cell
self.ftoc = self.get_fractional_to_cartesian_matrix(c.alen,c.blen,c.clen,c.alpha,c.beta,c.gamma)
def printout(self):
print(self.name)
print()
self.cell.printout()
print()
for element in self.pos:
element.printout()
print()
for element in self.atoms:
element.printout()
print()
print("Fractional to cartesian matrix:")
print(self.ftoc)
def get_fractional_to_cartesian_matrix(self,a, b, c, alpha, beta, gamma):
"""
Original code found at: https://gist.github.com/Bismarrck/a68da01f19b39320f78a
!changed formula to resemble one found on: https://en.wikipedia.org/wiki/Fractional_coordinates
Return the transformation matrix that converts fractional coordinates to
cartesian coordinates.
Parameters
----------
a, b, c : float
The lengths of the edges.
alpha, gamma, beta : float
The angles between the sides.
angle_in_degrees : bool
True if alpha, beta and gamma are expressed in degrees.
Returns
-------
r : array_like
The 3x3 rotation matrix. ``V_cart = np.dot(r, V_frac)``.
"""
alpha = np.deg2rad(alpha)
beta = np.deg2rad(beta)
gamma = np.deg2rad(gamma)
cosa = np.cos(alpha)
sina = np.sin(alpha)
cosb = np.cos(beta)
sinb = np.sin(beta)
cosg = np.cos(gamma)
sing = np.sin(gamma)
volume = 1.0 - cosa**2.0 - cosb**2.0 - cosg**2.0 + 2.0 * cosa * cosb * cosg
volume = a*b*c*np.sqrt(volume)
r = np.zeros((3, 3))
r[0, 0] = float(a)
r[0, 1] = float(b * cosg)
r[0, 2] = float(c * cosb)
r[1, 0] = float(0)
r[1, 1] = float(b * sing)
r[1, 2] = float(c * (cosa - cosb * cosg) / sing)
r[2, 0] = float(0)
r[2, 1] = float(0)
r[2, 2] = float(volume / (a*b*sing))
return r
def drawCrystal(self):
if draw_lattice:
self.drawCell()
print("Lattice drawn after {:.3f} seconds".format((time.time()-self.start)))
self.drawAtoms()
print("Atoms drawn after {:.3f} seconds".format((time.time()-self.start)))
if(draw_bonds):
self.drawBonds()
print("Bonds drawn after {:.3f} seconds".format((time.time()-self.start)))
def drawAtoms(self):
for a in self.atoms:
a.drawObj(self.ftoc)
print("Atoms drawn:",len(self.atoms))
def drawCell(self):
cell_corners=[]
cell_edges=[]
# calculate and draw corners
for i in range(2):
for j in range(2):
fo | # draw lines
for i,j in zip([0,0,0,1,1,2,2,3,4,4,5,6],[1,2,4,3,5,3,6,7,5,6,7,7]):
cell_edges.append(self.drawLine(cell_corners[i].location,cell_corners[j].location))
# select all line and corners
for i in cell_corners:
i.select_set(action="SELECT")
for i in cell_edges:
i.select_set(action="SELECT")
# set corner in origin as active and join meshes as one object
bpy.context.view_layer.objects.active = cell_corners[0]
bpy.ops.object.join()
print("Cell box drawn")
def drawLine(self,ac,tc):
dx = tc[0] - ac[0]
dy = tc[1] - ac[1]
dz = tc[2] - ac[2]
dist = np.sqrt(dx**2 + dy**2 + dz**2)
bpy.ops.mesh.primitive_cylinder_add(vertices=qualitydic[draw_quality],radius=lattice_size,depth = dist,location = (dx/2 + ac[0], dy/2 + ac[1], dz/2 + ac[2]))
activeObject = bpy.context.active_object
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
bpy.context.object.active_material.diffuse_color = [0,0,0] # change color
phi = math.atan2(dy, dx)
theta = math.acos(dz/dist)
bpy.context.object.rotation_euler[1] = theta
bpy.context.object.rotation_euler[2] = phi
return activeObject
def drawBonds(self):
cnt = 0
bpy.ops.curve.primitive_bezier_circle_add(location=(0,0,0),radius = bond_radius)
bpy.context.object.name = 'bez'
for atom in self.atoms:
for target in self.atoms:
if atom != target:
if("bond{}-{}".format(target.elid,atom.elid)in bpy.data.objects):
continue
if(atom.sym == 'H' and target.sym == 'H'):
continue
if calcDistance(self.ftoc,atom,target) <= bond_distance:
self.makeBond(atom,target)
cnt += 1
print("Atom bonds drawn:",cnt)
# This function hooks the bond to the atoms
def makeBond(self,atom,target):
if 'OBJECT'!=bpy.context.mode:
bpy.ops.object.mode_set(mode='OBJECT')
o1 = bpy.data.objects[atom.elid]
o2 = bpy.data.objects[target.elid]
bond = self.hookCurve(o1,o2, bpy.context.scene)
bpy.context.object.data.bevel_object = bpy.data.objects["bez"]
bpy.context.object.name = "bond{}-{}".format(atom.elid,target.elid)
activeObject = bpy.context.active_object # Set active object to variable
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
bpy.context.object.active_material.diffuse_color = [255,255,255] # change color
if 'OBJECT'!=bpy.context.mode:
bpy.ops.object.mode_set(mode='OBJECT')
def hookCurve(self,o1, o2, scn):
curve = bpy.data.curves.new("link", 'CURVE')
curve.dimensions = '3D'
spline = curve.splines.new('BEZIER')
spline.bezier_points.add(1)
p0 = spline.bezier_points[0]
p1 = spline.bezier_points[1]
# p0.co = o1.location
p0.handle_right_type = 'VECTOR'
# p1.co = o2.location
p1.handle_left_type = 'VECTOR'
obj = bpy.data.objects.new("link", curve)
m0 = obj.modifiers.new("alpha", 'HOOK')
m0.object = o1
m1 = obj.modifiers.new("beta", 'HOOK')
m1.object = o2
bpy.context.collection.objects.link(obj)
bpy.context.view_layer.objects.active = obj
bpy.ops.object.mode_set(mode='EDIT')
# Reassign the points
p0 = curve.splines[0].bezier_points[0]
p1 = curve.splines[0].bezier_points[1]
# Hook first control point to first atom
p0.select_control_point = True
p1.select_control_point = False
bpy.ops.object.hook_assign(modifier="alpha")
# Hook second control point to first atom
p0 = curve.splines[0].bezier_points[0]
p1 = curve.splines[0].bezier_points[1]
p1.select_control_point = True
p0.select_control_point = False
bpy.ops.object.hook_assign(modifier="beta")
return obj
class Cell():
def __init__(self,cb):
self.alen = float(cb["_cell_length_a"])
self.blen = float(cb["_cell_length_b"])
self.clen = float(cb["_cell_length_c"])
self.alpha = float(cb["_cell_angle_alpha"])
self.beta = float(cb["_cell_angle_beta"])
self.gamma = float(cb["_cell_angle_gamma"])
def printout(self):
print("alen:{:8} \nblen:{:8} \nclen:{:8} \nalpha:{:8} \nbeta: {:8} \ngamma:{:8}".format(self.alen,self.blen,self.clen,self.alpha,self.beta,self.gamma))
class Atom():
def __init__(self,elid,sym,xpos,ypos,zpos):
self.elid = elid
self.sym = sym
self.xpos = float(xpos)
self.ypos = float(ypos)
self.zpos = float(zpos)
def printout(self):
print("id:{:3} symbol:{:2} x:{:.4f} y:{:.4f} z:{:.4f}".format(self.elid,self.sym,self.xpos,self.ypos,self.zpos))
def drawObj(self,ftoc):
size = sizedic[self.sym]*styledic[draw_style][0]+bond_radius*styledic[draw_style][1]
bpy.ops.mesh.primitive_uv_sphere_add(segments=qualitydic[draw_quality],ring_count=qualitydic[draw_quality]/2,size=size,location=toCarth(ftoc,[self.xpos,self.ypos,self.zpos]))
bpy.context.object.name = self.elid
activeObject = bpy.context.active_object # Set active object to variable
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
if(atom_name):
bpy.context.object.show_name = True
if(atom_color):
bpy.context.object.active_material.diffuse_color = colordic[self.sym] # change color to dictionary color
else:
bpy.context.object.active_material.diffuse_color = [1,1,1] # change color to white
class sympos():
def __init__(self,string):
self.xsym = (string[0].split(','))[0]
self.ysym = (string[0].split(','))[1]
self.zsym = (string[0].split(','))[2]
def printout(self):
print("x:{:8} y:{:8} z:{:8}".format(self.xsym,self.ysym,self.zsym))
def readEl(cb):
elements = []
previd = []
idcnt = []
lb = cb.GetLoop("_atom_site_label")
for el in lb:
flag = False
for i in range(len(previd)):
if(el[0] == previd[i]):
flag = True
break
if(flag):
idcnt[i] += 1
else:
previd.append(el[0])
idcnt.append(0)
i = len(idcnt)-1
id_t = "{}.{}".format(el[0],idcnt[i])
elements.append(Atom(id_t,el[1],el[2],el[3],el[4]))
return elements
def readPos(cb):
positions = [];
lb = cb.GetLoop("_symmetry_equiv_pos_as_xyz")
for el in lb:
positions.append(sympos(el))
return positions
def obabel_fill_unit_cell(cif_file, p1_file):
# Convert symmetry to P1 using openbabel as subprocess
# Notation: obabel [-i<input-type>] <infilename> [-o<output-type>] -O<outfilename> [Options]
subprocess.run(['obabel', '-icif', cif_file, '-ocif', '-O', p1_file, '--fillUC', 'keepconnect'])
def calcDistance(ftoc,atom1,atom2):
ac = toCarth(ftoc,[atom1.xpos,atom1.ypos,atom1.zpos])
tc = toCarth(ftoc,[atom2.xpos,atom2.ypos,atom2.zpos])
dx = tc[0] - ac[0]
dy = tc[1] - ac[1]
dz = tc[2] - ac[2]
dist = np.sqrt(dx**2 + dy**2 + dz**2)
return dist
def toCarth(ftoc,V_frac):
return np.dot(ftoc, V_frac)
def look_at(obj_camera, point):
loc_camera = obj_camera.matrix_world.to_translation()
direction = point - loc_camera
# point the cameras '-Z' and use its 'Y' as up
rot_quat = direction.to_track_quat('-Z', 'Y')
# assume we're using euler rotation
obj_camera.rotation_euler = rot_quat.to_euler()
def addCamera(x,y,z):
bpy.ops.object.camera_add(view_align=True, enter_editmode=False, location=(5*x,5*y,5*z))
print("camera added")
bpy.ops.object.light_add(type='SUN', view_align=False, location=(0, 0, 0))
obj_camera = bpy.data.objects["Camera"]
look_at(obj_camera, Vector([0,0,z/4]))
obj_camera.data.type = 'ORTHO'
obj_camera.data.ortho_scale = ((x+y+z))
def clearWS():
if 'OBJECT'!=bpy.context.mode:
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.delete(use_global=False)
# remove all previous curves
for i in bpy.data.curves:
bpy.data.curves.remove(i)
# remove all previous materials
for m in bpy.data.materials:
bpy.data.materials.remove(m)
# remove all previous camera's
for c in bpy.data.cameras:
bpy.data.cameras.remove(c)
print("Workspace cleared.")
return
def drawCrystal(file):
# Check if file is file:
S = time.time()
global user_feedback
ext = file[len(file)-4:]
if(ext.lower() != ".cif"):
print("Only cif files can be visualised")
user_feedback = "Not a cif file"
return
# Check OpenBabel installation
try:
# Convert the cif file to its P1 symmetry notation as a temporary cif file
print('Converting %s to P1' %file)
obabel_fill_unit_cell(file, "temp.CIF")
cf = CifFile("temp.CIF")
except:
print("No OpenBabel installation found, install it from http://openbabel.org/wiki/Category:Installation")
user_feedback = "OpenBabel not installed"
#cf = CifFile(file) CifFile apparently can't read in long filepaths
return
# Open and parse our cif
f = file.rsplit(dir_sep, 1)[-1]
F = f[:3]
print(f)
cb = cf.first_block()
Crystal = Crysdata(F,cb)
# Print crystal data in terminal if checked
if(print_data):
Crystal.printout()
print("Crystal data read after "+ str(time.time() - S) + " seconds")
# Draw crystal if in Blender environment
if(Blender_env):
clearWS()
Crystal.drawCrystal()
bpy.ops.object.select_all(action='DESELECT')
if(add_camera):
addCamera(Crystal.cell.alen,Crystal.cell.blen,Crystal.cell.clen)
| r k in range(2):
bpy.ops.mesh.primitive_uv_sphere_add(size=lattice_size,location=toCarth(self.ftoc,[i,j,k]))
activeObject = bpy.context.active_object # Set active object to variable
cell_corners.append(activeObject)
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
bpy.context.object.active_material.diffuse_color = [0,0,0] # change color
| conditional_block |
Bijlage_D.py | # -------------------------------------------
# MODULES
# -------------------------------------------
import sys
import platform
if(platform.system()== "Windows"):
dir_sep = "\\"
else:
dir_sep = "/"
import time
import os
import numpy as np
import subprocess
import math
from mathutils import Vector
try:
from CifFile import CifFile
pars_check = False
except:
print("PyCIFRW not installed, try: pip install PyCifRW")
pars_check = True
try:
import bpy
Blender_env = True
except:
print("Not in blender environment.")
# -------------------------------------------
# VARIABLES
# -------------------------------------------
# global variables
file_path = "Select a file" # path to CIF-file
draw_bonds = False # draws bonds between atoms
draw_style = "SPACE FILLING" # sets draw style
draw_quality = "MED" # sets key for qualitydic
draw_lattice = False # draws unit cell outline
atom_name = False # displays names of atoms
bond_distance = 2 # set the max distance between bound atoms
lattice_size = 0.03 # sets size of lattice borders
bond_radius = 0.05 # radius of bond
add_camera = True # render final image
atom_color = True # draw atoms in color
user_feedback = "" # feedback for the user
print_data = True
# dictionaries
# sets detail of spheres
styledic = {
"SPACE FILLING" : [1,0],
"BALL AND STICK" : [0.5,0],
"STICK" : [0,1]
}
# sets detail of spheres
qualitydic = {
"MIN" : 8,
"LOW" : 16,
"MED" : 32,
"HIGH" : 64,
"MAX" : 128
}
'''
Uncomment this when no external dictionaries are found
# dictionary which couples atoms to a color
colordic = {
"O" : [1,0,0],
"Si" : [0.25,0.25,1],
"Fe" : [1,0.2,0.2],
}
# dictionary which couples atoms to a specific size
sizedic = {
"O" : 0.3,
"Si" : 0.6,
"Fe" : 1.4,
}
'''
# Read in dictionaries from external files
path = os.path.dirname(os.path.realpath(__file__))
# dictionary which couples atoms to a color
# Color scheme, in RGB percentages, following the CPK convention was extracted from https://en.wikipedia.org/wiki/CPK_coloring#Typical_assignments
# data can be changed by modifying the values in colordic.txt
with open(path+dir_sep+'colordic.txt','r') as inf:
colordic = eval(inf.read())
# dictionary which couples atoms to a specific size
# Atom data, in Ångström, was extracted from https://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page)
# data can be changed by modifying the values in sizedic.txt
with open(path+dir_sep+'sizedic.txt','r') as inf:
sizedic = eval(inf.read())
# ----------------------------------------------
# BLENDER ADD-ON
# ----------------------------------------------
# add-on info
bl_info = {
"name": "Crystallographic Drawing Tool for Blender",
"description": "Add-on for drawing crystals from CIF-files.",
"author": "Jarrit Boons",
"blender": (2, 80,0),
"location": "View3D",
"category": "Crystallography in Blender"
}
# Operator to open the file browser and select a file
class ScanFileOperator(bpy.types.Operator):
bl_idname = "error.scan_file"
bl_label = "Scan file for return"
filepath = bpy.props.StringProperty(subtype="FILE_PATH")
def execute(self, context):
global file_path
global user_feedback
user_feedback = ""
file_path = self.filepath
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
def register():
bpy.types.Scene.path_to_file = bpy.props.StringProperty(
name="",
description="Path to CIF file",
default = "empty"
)
# Operator to hold CDTB-data and program execution
class Operator(bpy.types.Operator):
bl_idname = "object.cdtb_operator"
bl_label = "CDTB_operator"
bl_descriptor = "Operator for drawing crystal"
# Runs the whole program
def execute(self, context):
global pars_check
global user_feedback
if(pars_check):
user_feedback = "CiFFile module not installed"
return {'FINISHED'}
if(file_path == "Select a file"):
print("No file selected")
user_feedback = "No File selected"
else:
user_feedback = "Crystal drawn"
global draw_bonds
draw_bonds = context.scene.draw_bonds
global bond_distance
bond_distance = context.scene.bond_distance
global draw_lattice
draw_lattice = context.scene.draw_lattice
global atom_name
atom_name = context.scene.atom_name
global print_data
print_data = context.scene.print_data
global draw_style
global atom_color
draw_style = context.scene.style_selection_mode
if(draw_style=="STICK"):
draw_bonds = True
atom_color = False
else:
atom_color = True
global draw_quality
draw_quality = context.scene.quality_selection_mode
global add_camera
add_camera = context.scene.add_camera
drawCrystal(file_path)
return {'FINISHED'}
@classmethod
def register(cls):
print("Registered class: %s " % cls.bl_label)
bpy.types.Scene.draw_bonds = bpy.props.BoolProperty(
name="Draw bonds",
description="Draw bonds between elements"
)
bpy.types.Scene.bond_distance = bpy.props.FloatProperty(
name="Bond distance",
description="Set max distance for bonds to occur",
default=2,
min=0.0,
max=10.0,
precision=2
)
bpy.types.Scene.atom_name = bpy.props.BoolProperty(
name="Atom names",
description="Display the name of atoms"
)
bpy.types.Scene.draw_lattice = bpy.props.BoolProperty(
name="Draw lattice",
description="Draw unit cell outline"
)
bpy.types.Scene.print_data = bpy.props.BoolProperty(
name="Print data",
description="Print crystal data in terminal"
)
# Dropdown menu for drawing style
selection_style = [
("SPACE FILLING", "SPACE FILLING", "", 1),
("BALL AND STICK", "BALL AND STICK", "", 2),
("STICK", "STICK", "", 3),
]
bpy.types.Scene.style_selection_mode = bpy.props.EnumProperty(
items=selection_style,
name="Style"
)
# Dropdown menu for drawing quality
selection_qual = [
("MIN", "MIN", "", 1),
("LOW", "LOW", "", 2),
("MED", "MED", "", 3),
("HIGH", "HIGH", "", 4),
("MAX", "MAX", "", 5)
]
bpy.types.Scene.quality_selection_mode = bpy.props.EnumProperty(
items=selection_qual,
name="Quality",
default="MED"
)
bpy.types.Scene.add_camera = bpy.props.BoolProperty(
name="Place camera",
description="Place a camera and light to make rendering possible"
)
@classmethod
def unregister(cls):
print("Unregistered class: %s " % cls.bl_label)
# Panel to display add-on in Blender environment
class Panel(bpy.types.Panel):
bl_idname = "CDTB_Panel"
bl_label = "CDTB_Panel"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_context = "objectmode"
bl_category = "CDTB"
def draw(self,context):
scn = context.scene
layout = self.layout
layout.label(text = 'Input file',icon_value=112)
'''
for i in range(100):
layout.label(text = str(i),icon_value =i)
'''
box = layout.box()
row = box.row()
splitrow = row.split(factor=0.075)
left_col = splitrow.column()
right_col = splitrow.column()
left_col.operator('error.scan_file',icon_value=108,text="")
right_col.label(text=file_path.rsplit('\\', 2)[-1])
layout.label(text = 'Settings',icon_value =117)
box = layout.box()
box.prop(scn,'draw_bonds')
box.prop(scn,'bond_distance')
box.prop(scn,'draw_lattice')
box.prop(scn, 'atom_name')
box.prop(scn,'print_data')
box.prop(scn, 'style_selection_mode')
box.prop(scn, 'quality_selection_mode')
box.prop(scn, 'add_camera')
layout.separator()
splitrow = layout.split(factor=0.3)
col = splitrow.column()
col.operator('object.cdtb_operator',text="Draw Crystal")
col = splitrow.column()
col.label(text=user_feedback)
layout.separator()
@classmethod
def register(cls):
print("Registered class: %s " % cls.bl_label)
@classmethod
def unregister(cls):
print("Unregistered class: %s " % cls.bl_label)
def register():
bpy.utils.register_class(Operator)
bpy.utils.register_class(ScanFileOperator)
bpy.utils.register_class(Panel)
def unregister():
bpy.utils.unregister_class(Operator)
bpy.utils.unregister_class(Panel)
bpy.utils.unregister_class(ScanFileOperator)
#----------------------------------------------
# MAIN PROGRAM
#----------------------------------------------
class Crysdata():
def __init__(self,F,cb):
self.start = time.time()
print("Draw timer started")
self.name = F
self.cell = Cell(cb)
self.atoms = readEl(cb)
self.pos = readPos(cb)
c = self.cell
self.ftoc = self.get_fractional_to_cartesian_matrix(c.alen,c.blen,c.clen,c.alpha,c.beta,c.gamma)
def printout(self):
print(self.name)
print()
self.cell.printout()
print()
for element in self.pos:
element.printout()
print()
for element in self.atoms:
element.printout()
print()
print("Fractional to cartesian matrix:")
print(self.ftoc)
def get_fractional_to_cartesian_matrix(self,a, b, c, alpha, beta, gamma):
"""
Original code found at: https://gist.github.com/Bismarrck/a68da01f19b39320f78a
!changed formula to resemble one found on: https://en.wikipedia.org/wiki/Fractional_coordinates
Return the transformation matrix that converts fractional coordinates to
cartesian coordinates.
Parameters
----------
a, b, c : float
The lengths of the edges.
alpha, gamma, beta : float
The angles between the sides.
angle_in_degrees : bool
True if alpha, beta and gamma are expressed in degrees.
Returns
-------
r : array_like
The 3x3 rotation matrix. ``V_cart = np.dot(r, V_frac)``.
"""
alpha = np.deg2rad(alpha)
beta = np.deg2rad(beta)
gamma = np.deg2rad(gamma)
cosa = np.cos(alpha)
sina = np.sin(alpha)
cosb = np.cos(beta)
sinb = np.sin(beta)
cosg = np.cos(gamma)
sing = np.sin(gamma)
volume = 1.0 - cosa**2.0 - cosb**2.0 - cosg**2.0 + 2.0 * cosa * cosb * cosg
volume = a*b*c*np.sqrt(volume)
r = np.zeros((3, 3))
r[0, 0] = float(a)
r[0, 1] = float(b * cosg)
r[0, 2] = float(c * cosb)
r[1, 0] = float(0)
r[1, 1] = float(b * sing)
r[1, 2] = float(c * (cosa - cosb * cosg) / sing)
r[2, 0] = float(0) | r[2, 1] = float(0)
r[2, 2] = float(volume / (a*b*sing))
return r
def drawCrystal(self):
if draw_lattice:
self.drawCell()
print("Lattice drawn after {:.3f} seconds".format((time.time()-self.start)))
self.drawAtoms()
print("Atoms drawn after {:.3f} seconds".format((time.time()-self.start)))
if(draw_bonds):
self.drawBonds()
print("Bonds drawn after {:.3f} seconds".format((time.time()-self.start)))
def drawAtoms(self):
for a in self.atoms:
a.drawObj(self.ftoc)
print("Atoms drawn:",len(self.atoms))
def drawCell(self):
cell_corners=[]
cell_edges=[]
# calculate and draw corners
for i in range(2):
for j in range(2):
for k in range(2):
bpy.ops.mesh.primitive_uv_sphere_add(size=lattice_size,location=toCarth(self.ftoc,[i,j,k]))
activeObject = bpy.context.active_object # Set active object to variable
cell_corners.append(activeObject)
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
bpy.context.object.active_material.diffuse_color = [0,0,0] # change color
# draw lines
for i,j in zip([0,0,0,1,1,2,2,3,4,4,5,6],[1,2,4,3,5,3,6,7,5,6,7,7]):
cell_edges.append(self.drawLine(cell_corners[i].location,cell_corners[j].location))
# select all line and corners
for i in cell_corners:
i.select_set(action="SELECT")
for i in cell_edges:
i.select_set(action="SELECT")
# set corner in origin as active and join meshes as one object
bpy.context.view_layer.objects.active = cell_corners[0]
bpy.ops.object.join()
print("Cell box drawn")
def drawLine(self,ac,tc):
dx = tc[0] - ac[0]
dy = tc[1] - ac[1]
dz = tc[2] - ac[2]
dist = np.sqrt(dx**2 + dy**2 + dz**2)
bpy.ops.mesh.primitive_cylinder_add(vertices=qualitydic[draw_quality],radius=lattice_size,depth = dist,location = (dx/2 + ac[0], dy/2 + ac[1], dz/2 + ac[2]))
activeObject = bpy.context.active_object
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
bpy.context.object.active_material.diffuse_color = [0,0,0] # change color
phi = math.atan2(dy, dx)
theta = math.acos(dz/dist)
bpy.context.object.rotation_euler[1] = theta
bpy.context.object.rotation_euler[2] = phi
return activeObject
def drawBonds(self):
cnt = 0
bpy.ops.curve.primitive_bezier_circle_add(location=(0,0,0),radius = bond_radius)
bpy.context.object.name = 'bez'
for atom in self.atoms:
for target in self.atoms:
if atom != target:
if("bond{}-{}".format(target.elid,atom.elid)in bpy.data.objects):
continue
if(atom.sym == 'H' and target.sym == 'H'):
continue
if calcDistance(self.ftoc,atom,target) <= bond_distance:
self.makeBond(atom,target)
cnt += 1
print("Atom bonds drawn:",cnt)
# This function hooks the bond to the atoms
def makeBond(self,atom,target):
if 'OBJECT'!=bpy.context.mode:
bpy.ops.object.mode_set(mode='OBJECT')
o1 = bpy.data.objects[atom.elid]
o2 = bpy.data.objects[target.elid]
bond = self.hookCurve(o1,o2, bpy.context.scene)
bpy.context.object.data.bevel_object = bpy.data.objects["bez"]
bpy.context.object.name = "bond{}-{}".format(atom.elid,target.elid)
activeObject = bpy.context.active_object # Set active object to variable
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
bpy.context.object.active_material.diffuse_color = [255,255,255] # change color
if 'OBJECT'!=bpy.context.mode:
bpy.ops.object.mode_set(mode='OBJECT')
def hookCurve(self,o1, o2, scn):
curve = bpy.data.curves.new("link", 'CURVE')
curve.dimensions = '3D'
spline = curve.splines.new('BEZIER')
spline.bezier_points.add(1)
p0 = spline.bezier_points[0]
p1 = spline.bezier_points[1]
# p0.co = o1.location
p0.handle_right_type = 'VECTOR'
# p1.co = o2.location
p1.handle_left_type = 'VECTOR'
obj = bpy.data.objects.new("link", curve)
m0 = obj.modifiers.new("alpha", 'HOOK')
m0.object = o1
m1 = obj.modifiers.new("beta", 'HOOK')
m1.object = o2
bpy.context.collection.objects.link(obj)
bpy.context.view_layer.objects.active = obj
bpy.ops.object.mode_set(mode='EDIT')
# Reassign the points
p0 = curve.splines[0].bezier_points[0]
p1 = curve.splines[0].bezier_points[1]
# Hook first control point to first atom
p0.select_control_point = True
p1.select_control_point = False
bpy.ops.object.hook_assign(modifier="alpha")
# Hook second control point to first atom
p0 = curve.splines[0].bezier_points[0]
p1 = curve.splines[0].bezier_points[1]
p1.select_control_point = True
p0.select_control_point = False
bpy.ops.object.hook_assign(modifier="beta")
return obj
class Cell():
def __init__(self,cb):
self.alen = float(cb["_cell_length_a"])
self.blen = float(cb["_cell_length_b"])
self.clen = float(cb["_cell_length_c"])
self.alpha = float(cb["_cell_angle_alpha"])
self.beta = float(cb["_cell_angle_beta"])
self.gamma = float(cb["_cell_angle_gamma"])
def printout(self):
print("alen:{:8} \nblen:{:8} \nclen:{:8} \nalpha:{:8} \nbeta: {:8} \ngamma:{:8}".format(self.alen,self.blen,self.clen,self.alpha,self.beta,self.gamma))
class Atom():
def __init__(self,elid,sym,xpos,ypos,zpos):
self.elid = elid
self.sym = sym
self.xpos = float(xpos)
self.ypos = float(ypos)
self.zpos = float(zpos)
def printout(self):
print("id:{:3} symbol:{:2} x:{:.4f} y:{:.4f} z:{:.4f}".format(self.elid,self.sym,self.xpos,self.ypos,self.zpos))
def drawObj(self,ftoc):
size = sizedic[self.sym]*styledic[draw_style][0]+bond_radius*styledic[draw_style][1]
bpy.ops.mesh.primitive_uv_sphere_add(segments=qualitydic[draw_quality],ring_count=qualitydic[draw_quality]/2,size=size,location=toCarth(ftoc,[self.xpos,self.ypos,self.zpos]))
bpy.context.object.name = self.elid
activeObject = bpy.context.active_object # Set active object to variable
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
if(atom_name):
bpy.context.object.show_name = True
if(atom_color):
bpy.context.object.active_material.diffuse_color = colordic[self.sym] # change color to dictionary color
else:
bpy.context.object.active_material.diffuse_color = [1,1,1] # change color to white
class sympos():
def __init__(self,string):
self.xsym = (string[0].split(','))[0]
self.ysym = (string[0].split(','))[1]
self.zsym = (string[0].split(','))[2]
def printout(self):
print("x:{:8} y:{:8} z:{:8}".format(self.xsym,self.ysym,self.zsym))
def readEl(cb):
elements = []
previd = []
idcnt = []
lb = cb.GetLoop("_atom_site_label")
for el in lb:
flag = False
for i in range(len(previd)):
if(el[0] == previd[i]):
flag = True
break
if(flag):
idcnt[i] += 1
else:
previd.append(el[0])
idcnt.append(0)
i = len(idcnt)-1
id_t = "{}.{}".format(el[0],idcnt[i])
elements.append(Atom(id_t,el[1],el[2],el[3],el[4]))
return elements
def readPos(cb):
positions = [];
lb = cb.GetLoop("_symmetry_equiv_pos_as_xyz")
for el in lb:
positions.append(sympos(el))
return positions
def obabel_fill_unit_cell(cif_file, p1_file):
# Convert symmetry to P1 using openbabel as subprocess
# Notation: obabel [-i<input-type>] <infilename> [-o<output-type>] -O<outfilename> [Options]
subprocess.run(['obabel', '-icif', cif_file, '-ocif', '-O', p1_file, '--fillUC', 'keepconnect'])
def calcDistance(ftoc,atom1,atom2):
ac = toCarth(ftoc,[atom1.xpos,atom1.ypos,atom1.zpos])
tc = toCarth(ftoc,[atom2.xpos,atom2.ypos,atom2.zpos])
dx = tc[0] - ac[0]
dy = tc[1] - ac[1]
dz = tc[2] - ac[2]
dist = np.sqrt(dx**2 + dy**2 + dz**2)
return dist
def toCarth(ftoc,V_frac):
return np.dot(ftoc, V_frac)
def look_at(obj_camera, point):
loc_camera = obj_camera.matrix_world.to_translation()
direction = point - loc_camera
# point the cameras '-Z' and use its 'Y' as up
rot_quat = direction.to_track_quat('-Z', 'Y')
# assume we're using euler rotation
obj_camera.rotation_euler = rot_quat.to_euler()
def addCamera(x,y,z):
bpy.ops.object.camera_add(view_align=True, enter_editmode=False, location=(5*x,5*y,5*z))
print("camera added")
bpy.ops.object.light_add(type='SUN', view_align=False, location=(0, 0, 0))
obj_camera = bpy.data.objects["Camera"]
look_at(obj_camera, Vector([0,0,z/4]))
obj_camera.data.type = 'ORTHO'
obj_camera.data.ortho_scale = ((x+y+z))
def clearWS():
if 'OBJECT'!=bpy.context.mode:
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.delete(use_global=False)
# remove all previous curves
for i in bpy.data.curves:
bpy.data.curves.remove(i)
# remove all previous materials
for m in bpy.data.materials:
bpy.data.materials.remove(m)
# remove all previous camera's
for c in bpy.data.cameras:
bpy.data.cameras.remove(c)
print("Workspace cleared.")
return
def drawCrystal(file):
# Check if file is file:
S = time.time()
global user_feedback
ext = file[len(file)-4:]
if(ext.lower() != ".cif"):
print("Only cif files can be visualised")
user_feedback = "Not a cif file"
return
# Check OpenBabel installation
try:
# Convert the cif file to its P1 symmetry notation as a temporary cif file
print('Converting %s to P1' %file)
obabel_fill_unit_cell(file, "temp.CIF")
cf = CifFile("temp.CIF")
except:
print("No OpenBabel installation found, install it from http://openbabel.org/wiki/Category:Installation")
user_feedback = "OpenBabel not installed"
#cf = CifFile(file) CifFile apparently can't read in long filepaths
return
# Open and parse our cif
f = file.rsplit(dir_sep, 1)[-1]
F = f[:3]
print(f)
cb = cf.first_block()
Crystal = Crysdata(F,cb)
# Print crystal data in terminal if checked
if(print_data):
Crystal.printout()
print("Crystal data read after "+ str(time.time() - S) + " seconds")
# Draw crystal if in Blender environment
if(Blender_env):
clearWS()
Crystal.drawCrystal()
bpy.ops.object.select_all(action='DESELECT')
if(add_camera):
addCamera(Crystal.cell.alen,Crystal.cell.blen,Crystal.cell.clen) | random_line_split | |
gopro.py | import argparse
import csv
import json
from datetime import datetime, timedelta, date
import os
import subprocess
from functools import reduce
from json import JSONEncoder
from gpxpy.geo import Location
import pytz
from raw_instr_data import RawInstrData
GOPRO_GPMF_BIN = '../gopro_gpmf/cmake-build-debug/gopro_gpmf'
UTC_TZ = pytz.timezone("UTC")
def from_timestamp(time_stamp):
return UTC_TZ.localize(datetime.utcfromtimestamp(time_stamp)) if time_stamp is not None else None
def to_timestamp(start_utc):
return start_utc.timestamp() if start_utc is not None else None
class GoProCacheEncoder(JSONEncoder):
# Override the default method
def default(self, obj):
if isinstance(obj, (date, datetime)):
return obj.isoformat()
elif isinstance(obj, Location):
if obj.elevation is None:
return {'lat': obj.latitude, 'lon': obj.longitude}
else:
return {'lat': obj.latitude, 'lon': obj.longitude, 'alt': obj.elevation}
elif isinstance(obj, RawInstrData):
return obj.to_dict()
raise TypeError("Type %s not serializable" % type(obj))
def decode_gopro_cache(d):
for k in d:
if k == 'start_utc' or k == 'stop_utc':
if d[k] is None:
d[k] = None
elif 'utc' in k:
d[k] = datetime.fromisoformat(d[k])
elif 'instr_data' in k:
ii_list = []
for h in d[k]:
ii = RawInstrData()
ii.utc = h['utc']
ii.lat = h['lat']
ii.lon = h['lon']
ii.sog = h['sog']
ii.cog = h['cog']
ii.awa = h['awa']
ii.aws = h['aws']
ii.twa = h['twa']
ii.tws = h['tws']
ii.sow = h['sow']
ii.hdg = h['hdg']
ii.n2k_epoch = h['n2k_epoch']
ii_list.append(ii)
d[k] = ii_list
return d
class GoPro:
def | (self, sd_card_dir, work_dir):
cache_dir = work_dir + os.sep + 'gopro'
os.makedirs(cache_dir, exist_ok=True)
self.instr_data = []
# Build the list of clips
clips = []
print(f'Looking for GOPRO clips in {sd_card_dir} ...')
for root, dirs_list, files_list in os.walk(sd_card_dir):
for file_name in files_list:
if os.path.splitext(file_name)[-1] == ".MP4":
file_name_path = os.path.join(root, file_name)
if file_name[0] != '.':
clips.append({'name': file_name_path})
# Time stamp the clips
self.clips = [] # Only clips with valid UTC
for clip in clips:
clip_name = clip['name']
clip_cache_name = cache_dir + os.sep + os.path.basename(clip_name) + '.json'
clip_nmea_name = cache_dir + os.sep + os.path.basename(clip_name) + '.nmea'
clip['clip_nmea_name'] = clip_nmea_name
if os.path.isfile(clip_cache_name):
print(f'Reading GOPRO clip info from cache {clip_cache_name}')
with open(clip_cache_name, 'r') as f:
cache = json.load(f, object_hook=decode_gopro_cache)
start_utc = from_timestamp(cache['start_utc'])
stop_utc = from_timestamp(cache['stop_utc'])
instr_data = cache['instr_data']
else:
print(f'Scanning {clip_name}')
[start_utc, stop_utc, instr_data] = self.extract_sensor_data(clip_name, clip_nmea_name)
self.instr_data.append(instr_data)
cache = {
'start_utc': to_timestamp(start_utc),
'stop_utc': to_timestamp(stop_utc),
'instr_data': instr_data
}
with open(clip_cache_name, 'w') as f:
json.dump(cache, f, indent=4, cls=GoProCacheEncoder)
if start_utc is not None:
clip['start_utc'] = start_utc
clip['stop_utc'] = stop_utc
clip['instr_data'] = instr_data
self.clips.append(clip)
print(f'{clip["name"]} {start_utc} {stop_utc}')
else:
print(f'Warning: Clip {clip["name"]} contains no valid UTC')
# Sort clips by UTC start time
self.clips.sort(key=lambda x: x['start_utc'])
# Determine overall start and finish times
self.start_time_utc = None
self.finish_time_utc = None
if len(self.clips) > 0:
self.start_time_utc = self.clips[0]['start_utc']
self.finish_time_utc = self.clips[-1]['stop_utc']
# Create one NMEA file once clips are sorted
gopro_nmea_file = cache_dir + os.sep + 'gopro.nmea'
print(f'Creating GOPRO NMEA file {gopro_nmea_file}')
with open(gopro_nmea_file, 'w') as nmea_file:
for clip in clips:
self.instr_data += clip['instr_data']
with open(clip['clip_nmea_name'], 'r') as clip_nmea:
for line in clip_nmea:
nmea_file.write(line)
print(f'Done with GOPRO processing')
@staticmethod
def extract_sensor_data(mp4_name, clip_nmea_name):
instr_data = []
cmd = [GOPRO_GPMF_BIN, mp4_name]
result = subprocess.run(cmd, stdout=subprocess.PIPE)
start_utc = None
stop_utc = None
timezone = pytz.timezone("UTC")
if result.returncode == 0:
print(f'Creating NMEA file {clip_nmea_name}')
with open(clip_nmea_name, 'w') as nmea_file:
lines = result.stdout.decode('utf-8').split('\n')
reader = csv.DictReader(lines)
for row in reader:
utc = timezone.localize(datetime.fromisoformat(row['utc']))
if row['fix_valid'] == 'True':
signed_lat = float(row['lat'])
lat_sign = 'N' if signed_lat > 0 else 'S'
lat = abs(signed_lat)
lat_min = (lat - int(lat)) * 60
signed_lon = float(row['lon'])
lon_sign = 'E' if signed_lon > 0 else 'W'
lon = abs(signed_lon)
lon_min = (lon - int(lon)) * 60
sog = float(row['sog_ms']) * 3600. / 1852.
if 0 <= lat <= 90 and 0 <= lon <= 180:
rmc = f'$GPRMC,{utc.hour:02d}{utc.minute:02d}{utc.second:02d}.{int(utc.microsecond / 1000):03d},' \
f'A,{int(lat):02d}{lat_min:08.5f},{lat_sign},'\
f'{int(lon):03d}{lon_min:08.5f},{lon_sign},'\
f'{sog:.1f},,{utc.day:02d}{utc.month:02d}{utc.year % 100:02d},'
ii = RawInstrData(0, utc, signed_lat, signed_lon, sog)
instr_data.append(ii)
else:
print('GPRO GPMF bug')
if start_utc is None:
t_ms = int(float(row['t']) * 1000)
start_utc = utc - timedelta(milliseconds=t_ms)
stop_utc = utc
else:
rmc = f'$GPRMC,{utc.hour:02d}{utc.minute:02d}{utc.second:02d}.{int(utc.microsecond / 1000):03d},' \
f'V,,,'\
f',,'\
f',,{utc.day:02d}{utc.month:02d}{utc.year % 100:02d},'
body = rmc[1:] # string between $ and *
cc = reduce(lambda i, j: int(i) ^ int(j), [ord(x) for x in body])
nmea = f'{rmc}*{cc:02X}\r\n'
nmea_file.write(nmea)
return start_utc, stop_utc, instr_data
def get_clips_for_time_interval(self, start_utc, stop_utc):
# Find the clip containing the start of interval
clips = []
for start_idx in range(len(self.clips)):
clip = self.clips[start_idx]
if clip['start_utc'] <= start_utc <= clip['stop_utc']:
in_time = (start_utc - clip['start_utc']).seconds
# Now find the clip containing the stop time
for stop_idx in range(start_idx, len(self.clips)):
clip = self.clips[stop_idx]
if stop_utc <= clip['stop_utc']: # Last clip found
# Check for the corner case when the stop_utc falls inbetween start_utc of the previous clip
# and start_utc of this one
if stop_utc < clip['start_utc']:
return clips
out_time = (stop_utc - clip['start_utc']).seconds
clips.append({
'name': clip['name'],
'in_time': in_time,
'out_time': out_time,
})
return clips
else: # The time interval spans to subsequent clips
clips.append({
'name': clip['name'],
'in_time': in_time,
'out_time': None, # Till the end of clip
})
in_time = None # Start from the beginning of the next clip
return clips
if __name__ == '__main__':
parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
parser.add_argument("--work-dir", help="Working directory", default='/tmp')
parser.add_argument("--gopro-dir", help="GoPro SD card directory", default='/Volumes/GOPRO')
args = parser.parse_args()
gopro = GoPro(args.gopro_dir, args.work_dir)
start = datetime.fromisoformat('2021-11-19T18:04:54.825').astimezone(pytz.utc)
stop = datetime.fromisoformat('2021-11-19T19:23:06.190').astimezone(pytz.utc)
print(gopro.get_clips_for_time_interval(start, stop))
| __init__ | identifier_name |
gopro.py | import argparse
import csv
import json
from datetime import datetime, timedelta, date
import os
import subprocess
from functools import reduce
from json import JSONEncoder
from gpxpy.geo import Location
import pytz
from raw_instr_data import RawInstrData
GOPRO_GPMF_BIN = '../gopro_gpmf/cmake-build-debug/gopro_gpmf'
UTC_TZ = pytz.timezone("UTC")
def from_timestamp(time_stamp):
return UTC_TZ.localize(datetime.utcfromtimestamp(time_stamp)) if time_stamp is not None else None
def to_timestamp(start_utc):
return start_utc.timestamp() if start_utc is not None else None
class GoProCacheEncoder(JSONEncoder):
# Override the default method
def default(self, obj):
if isinstance(obj, (date, datetime)):
return obj.isoformat()
elif isinstance(obj, Location):
if obj.elevation is None:
return {'lat': obj.latitude, 'lon': obj.longitude}
else:
return {'lat': obj.latitude, 'lon': obj.longitude, 'alt': obj.elevation}
elif isinstance(obj, RawInstrData):
return obj.to_dict()
raise TypeError("Type %s not serializable" % type(obj))
def decode_gopro_cache(d):
for k in d:
if k == 'start_utc' or k == 'stop_utc':
if d[k] is None:
d[k] = None
elif 'utc' in k:
d[k] = datetime.fromisoformat(d[k])
elif 'instr_data' in k:
ii_list = []
for h in d[k]:
ii = RawInstrData()
ii.utc = h['utc']
ii.lat = h['lat']
ii.lon = h['lon']
ii.sog = h['sog']
ii.cog = h['cog']
ii.awa = h['awa']
ii.aws = h['aws']
ii.twa = h['twa']
ii.tws = h['tws']
ii.sow = h['sow']
ii.hdg = h['hdg']
ii.n2k_epoch = h['n2k_epoch']
ii_list.append(ii)
d[k] = ii_list
return d
class GoPro:
def __init__(self, sd_card_dir, work_dir):
cache_dir = work_dir + os.sep + 'gopro'
os.makedirs(cache_dir, exist_ok=True)
self.instr_data = []
# Build the list of clips
clips = []
print(f'Looking for GOPRO clips in {sd_card_dir} ...')
for root, dirs_list, files_list in os.walk(sd_card_dir):
for file_name in files_list:
if os.path.splitext(file_name)[-1] == ".MP4":
file_name_path = os.path.join(root, file_name)
if file_name[0] != '.':
clips.append({'name': file_name_path})
# Time stamp the clips
self.clips = [] # Only clips with valid UTC
for clip in clips:
clip_name = clip['name']
clip_cache_name = cache_dir + os.sep + os.path.basename(clip_name) + '.json'
clip_nmea_name = cache_dir + os.sep + os.path.basename(clip_name) + '.nmea'
clip['clip_nmea_name'] = clip_nmea_name
if os.path.isfile(clip_cache_name):
print(f'Reading GOPRO clip info from cache {clip_cache_name}')
with open(clip_cache_name, 'r') as f:
cache = json.load(f, object_hook=decode_gopro_cache)
start_utc = from_timestamp(cache['start_utc'])
stop_utc = from_timestamp(cache['stop_utc'])
instr_data = cache['instr_data']
else:
print(f'Scanning {clip_name}')
[start_utc, stop_utc, instr_data] = self.extract_sensor_data(clip_name, clip_nmea_name)
self.instr_data.append(instr_data)
cache = {
'start_utc': to_timestamp(start_utc),
'stop_utc': to_timestamp(stop_utc),
'instr_data': instr_data
}
with open(clip_cache_name, 'w') as f:
json.dump(cache, f, indent=4, cls=GoProCacheEncoder)
if start_utc is not None:
clip['start_utc'] = start_utc
clip['stop_utc'] = stop_utc
clip['instr_data'] = instr_data
self.clips.append(clip)
print(f'{clip["name"]} {start_utc} {stop_utc}')
else:
print(f'Warning: Clip {clip["name"]} contains no valid UTC')
# Sort clips by UTC start time
self.clips.sort(key=lambda x: x['start_utc'])
# Determine overall start and finish times
self.start_time_utc = None
self.finish_time_utc = None
if len(self.clips) > 0:
self.start_time_utc = self.clips[0]['start_utc']
self.finish_time_utc = self.clips[-1]['stop_utc']
# Create one NMEA file once clips are sorted
gopro_nmea_file = cache_dir + os.sep + 'gopro.nmea'
print(f'Creating GOPRO NMEA file {gopro_nmea_file}')
with open(gopro_nmea_file, 'w') as nmea_file:
for clip in clips:
self.instr_data += clip['instr_data']
with open(clip['clip_nmea_name'], 'r') as clip_nmea:
for line in clip_nmea:
nmea_file.write(line)
print(f'Done with GOPRO processing')
@staticmethod
def extract_sensor_data(mp4_name, clip_nmea_name):
instr_data = []
cmd = [GOPRO_GPMF_BIN, mp4_name]
result = subprocess.run(cmd, stdout=subprocess.PIPE)
start_utc = None
stop_utc = None
timezone = pytz.timezone("UTC")
if result.returncode == 0:
print(f'Creating NMEA file {clip_nmea_name}')
with open(clip_nmea_name, 'w') as nmea_file:
lines = result.stdout.decode('utf-8').split('\n')
reader = csv.DictReader(lines)
for row in reader:
utc = timezone.localize(datetime.fromisoformat(row['utc']))
if row['fix_valid'] == 'True':
signed_lat = float(row['lat'])
lat_sign = 'N' if signed_lat > 0 else 'S'
lat = abs(signed_lat)
lat_min = (lat - int(lat)) * 60
signed_lon = float(row['lon'])
lon_sign = 'E' if signed_lon > 0 else 'W'
lon = abs(signed_lon)
lon_min = (lon - int(lon)) * 60
sog = float(row['sog_ms']) * 3600. / 1852.
if 0 <= lat <= 90 and 0 <= lon <= 180:
rmc = f'$GPRMC,{utc.hour:02d}{utc.minute:02d}{utc.second:02d}.{int(utc.microsecond / 1000):03d},' \
f'A,{int(lat):02d}{lat_min:08.5f},{lat_sign},'\
f'{int(lon):03d}{lon_min:08.5f},{lon_sign},'\
f'{sog:.1f},,{utc.day:02d}{utc.month:02d}{utc.year % 100:02d},'
ii = RawInstrData(0, utc, signed_lat, signed_lon, sog)
instr_data.append(ii)
else:
print('GPRO GPMF bug')
if start_utc is None:
t_ms = int(float(row['t']) * 1000)
start_utc = utc - timedelta(milliseconds=t_ms)
stop_utc = utc
else:
rmc = f'$GPRMC,{utc.hour:02d}{utc.minute:02d}{utc.second:02d}.{int(utc.microsecond / 1000):03d},' \
f'V,,,'\
f',,'\
f',,{utc.day:02d}{utc.month:02d}{utc.year % 100:02d},'
body = rmc[1:] # string between $ and *
cc = reduce(lambda i, j: int(i) ^ int(j), [ord(x) for x in body])
nmea = f'{rmc}*{cc:02X}\r\n'
nmea_file.write(nmea)
return start_utc, stop_utc, instr_data
def get_clips_for_time_interval(self, start_utc, stop_utc):
# Find the clip containing the start of interval
|
if __name__ == '__main__':
parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
parser.add_argument("--work-dir", help="Working directory", default='/tmp')
parser.add_argument("--gopro-dir", help="GoPro SD card directory", default='/Volumes/GOPRO')
args = parser.parse_args()
gopro = GoPro(args.gopro_dir, args.work_dir)
start = datetime.fromisoformat('2021-11-19T18:04:54.825').astimezone(pytz.utc)
stop = datetime.fromisoformat('2021-11-19T19:23:06.190').astimezone(pytz.utc)
print(gopro.get_clips_for_time_interval(start, stop))
| clips = []
for start_idx in range(len(self.clips)):
clip = self.clips[start_idx]
if clip['start_utc'] <= start_utc <= clip['stop_utc']:
in_time = (start_utc - clip['start_utc']).seconds
# Now find the clip containing the stop time
for stop_idx in range(start_idx, len(self.clips)):
clip = self.clips[stop_idx]
if stop_utc <= clip['stop_utc']: # Last clip found
# Check for the corner case when the stop_utc falls inbetween start_utc of the previous clip
# and start_utc of this one
if stop_utc < clip['start_utc']:
return clips
out_time = (stop_utc - clip['start_utc']).seconds
clips.append({
'name': clip['name'],
'in_time': in_time,
'out_time': out_time,
})
return clips
else: # The time interval spans to subsequent clips
clips.append({
'name': clip['name'],
'in_time': in_time,
'out_time': None, # Till the end of clip
})
in_time = None # Start from the beginning of the next clip
return clips | identifier_body |
gopro.py | import argparse
import csv
import json
from datetime import datetime, timedelta, date
import os
import subprocess
from functools import reduce
from json import JSONEncoder
from gpxpy.geo import Location
import pytz
from raw_instr_data import RawInstrData
GOPRO_GPMF_BIN = '../gopro_gpmf/cmake-build-debug/gopro_gpmf'
UTC_TZ = pytz.timezone("UTC")
def from_timestamp(time_stamp):
return UTC_TZ.localize(datetime.utcfromtimestamp(time_stamp)) if time_stamp is not None else None
def to_timestamp(start_utc):
return start_utc.timestamp() if start_utc is not None else None
class GoProCacheEncoder(JSONEncoder):
# Override the default method
def default(self, obj):
if isinstance(obj, (date, datetime)):
return obj.isoformat()
elif isinstance(obj, Location):
if obj.elevation is None:
return {'lat': obj.latitude, 'lon': obj.longitude}
else:
return {'lat': obj.latitude, 'lon': obj.longitude, 'alt': obj.elevation}
elif isinstance(obj, RawInstrData):
return obj.to_dict()
raise TypeError("Type %s not serializable" % type(obj))
def decode_gopro_cache(d):
for k in d:
if k == 'start_utc' or k == 'stop_utc':
if d[k] is None:
d[k] = None
elif 'utc' in k:
d[k] = datetime.fromisoformat(d[k])
elif 'instr_data' in k:
ii_list = []
for h in d[k]:
ii = RawInstrData()
ii.utc = h['utc']
ii.lat = h['lat']
ii.lon = h['lon']
ii.sog = h['sog']
ii.cog = h['cog']
ii.awa = h['awa']
ii.aws = h['aws']
ii.twa = h['twa']
ii.tws = h['tws']
ii.sow = h['sow']
ii.hdg = h['hdg']
ii.n2k_epoch = h['n2k_epoch']
ii_list.append(ii)
d[k] = ii_list
return d
class GoPro:
def __init__(self, sd_card_dir, work_dir):
cache_dir = work_dir + os.sep + 'gopro'
os.makedirs(cache_dir, exist_ok=True)
self.instr_data = []
# Build the list of clips
clips = []
print(f'Looking for GOPRO clips in {sd_card_dir} ...')
for root, dirs_list, files_list in os.walk(sd_card_dir):
for file_name in files_list:
if os.path.splitext(file_name)[-1] == ".MP4":
file_name_path = os.path.join(root, file_name)
if file_name[0] != '.':
clips.append({'name': file_name_path})
# Time stamp the clips
self.clips = [] # Only clips with valid UTC
for clip in clips:
clip_name = clip['name']
clip_cache_name = cache_dir + os.sep + os.path.basename(clip_name) + '.json'
clip_nmea_name = cache_dir + os.sep + os.path.basename(clip_name) + '.nmea'
clip['clip_nmea_name'] = clip_nmea_name
if os.path.isfile(clip_cache_name):
print(f'Reading GOPRO clip info from cache {clip_cache_name}')
with open(clip_cache_name, 'r') as f:
cache = json.load(f, object_hook=decode_gopro_cache)
start_utc = from_timestamp(cache['start_utc'])
stop_utc = from_timestamp(cache['stop_utc'])
instr_data = cache['instr_data']
else:
print(f'Scanning {clip_name}')
[start_utc, stop_utc, instr_data] = self.extract_sensor_data(clip_name, clip_nmea_name)
self.instr_data.append(instr_data)
cache = {
'start_utc': to_timestamp(start_utc),
'stop_utc': to_timestamp(stop_utc),
'instr_data': instr_data
}
with open(clip_cache_name, 'w') as f:
json.dump(cache, f, indent=4, cls=GoProCacheEncoder)
if start_utc is not None:
clip['start_utc'] = start_utc
clip['stop_utc'] = stop_utc
clip['instr_data'] = instr_data
self.clips.append(clip)
print(f'{clip["name"]} {start_utc} {stop_utc}')
else:
print(f'Warning: Clip {clip["name"]} contains no valid UTC')
# Sort clips by UTC start time
self.clips.sort(key=lambda x: x['start_utc'])
# Determine overall start and finish times
self.start_time_utc = None
self.finish_time_utc = None
if len(self.clips) > 0:
self.start_time_utc = self.clips[0]['start_utc']
self.finish_time_utc = self.clips[-1]['stop_utc']
# Create one NMEA file once clips are sorted
gopro_nmea_file = cache_dir + os.sep + 'gopro.nmea'
print(f'Creating GOPRO NMEA file {gopro_nmea_file}')
with open(gopro_nmea_file, 'w') as nmea_file:
for clip in clips:
self.instr_data += clip['instr_data']
with open(clip['clip_nmea_name'], 'r') as clip_nmea:
for line in clip_nmea:
nmea_file.write(line)
print(f'Done with GOPRO processing')
@staticmethod
def extract_sensor_data(mp4_name, clip_nmea_name):
instr_data = []
cmd = [GOPRO_GPMF_BIN, mp4_name]
result = subprocess.run(cmd, stdout=subprocess.PIPE)
start_utc = None
stop_utc = None
timezone = pytz.timezone("UTC")
if result.returncode == 0:
print(f'Creating NMEA file {clip_nmea_name}')
with open(clip_nmea_name, 'w') as nmea_file:
lines = result.stdout.decode('utf-8').split('\n')
reader = csv.DictReader(lines)
for row in reader:
utc = timezone.localize(datetime.fromisoformat(row['utc']))
if row['fix_valid'] == 'True':
signed_lat = float(row['lat'])
lat_sign = 'N' if signed_lat > 0 else 'S'
lat = abs(signed_lat)
lat_min = (lat - int(lat)) * 60
signed_lon = float(row['lon'])
lon_sign = 'E' if signed_lon > 0 else 'W'
lon = abs(signed_lon)
lon_min = (lon - int(lon)) * 60
sog = float(row['sog_ms']) * 3600. / 1852.
if 0 <= lat <= 90 and 0 <= lon <= 180:
rmc = f'$GPRMC,{utc.hour:02d}{utc.minute:02d}{utc.second:02d}.{int(utc.microsecond / 1000):03d},' \
f'A,{int(lat):02d}{lat_min:08.5f},{lat_sign},'\
f'{int(lon):03d}{lon_min:08.5f},{lon_sign},'\
f'{sog:.1f},,{utc.day:02d}{utc.month:02d}{utc.year % 100:02d},'
ii = RawInstrData(0, utc, signed_lat, signed_lon, sog)
instr_data.append(ii)
else:
print('GPRO GPMF bug')
if start_utc is None:
t_ms = int(float(row['t']) * 1000)
start_utc = utc - timedelta(milliseconds=t_ms)
stop_utc = utc
else:
rmc = f'$GPRMC,{utc.hour:02d}{utc.minute:02d}{utc.second:02d}.{int(utc.microsecond / 1000):03d},' \
f'V,,,'\
f',,'\
f',,{utc.day:02d}{utc.month:02d}{utc.year % 100:02d},'
body = rmc[1:] # string between $ and *
cc = reduce(lambda i, j: int(i) ^ int(j), [ord(x) for x in body])
nmea = f'{rmc}*{cc:02X}\r\n'
nmea_file.write(nmea)
return start_utc, stop_utc, instr_data
def get_clips_for_time_interval(self, start_utc, stop_utc):
# Find the clip containing the start of interval
clips = []
for start_idx in range(len(self.clips)):
clip = self.clips[start_idx]
if clip['start_utc'] <= start_utc <= clip['stop_utc']:
|
return clips
if __name__ == '__main__':
parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
parser.add_argument("--work-dir", help="Working directory", default='/tmp')
parser.add_argument("--gopro-dir", help="GoPro SD card directory", default='/Volumes/GOPRO')
args = parser.parse_args()
gopro = GoPro(args.gopro_dir, args.work_dir)
start = datetime.fromisoformat('2021-11-19T18:04:54.825').astimezone(pytz.utc)
stop = datetime.fromisoformat('2021-11-19T19:23:06.190').astimezone(pytz.utc)
print(gopro.get_clips_for_time_interval(start, stop))
| in_time = (start_utc - clip['start_utc']).seconds
# Now find the clip containing the stop time
for stop_idx in range(start_idx, len(self.clips)):
clip = self.clips[stop_idx]
if stop_utc <= clip['stop_utc']: # Last clip found
# Check for the corner case when the stop_utc falls inbetween start_utc of the previous clip
# and start_utc of this one
if stop_utc < clip['start_utc']:
return clips
out_time = (stop_utc - clip['start_utc']).seconds
clips.append({
'name': clip['name'],
'in_time': in_time,
'out_time': out_time,
})
return clips
else: # The time interval spans to subsequent clips
clips.append({
'name': clip['name'],
'in_time': in_time,
'out_time': None, # Till the end of clip
})
in_time = None # Start from the beginning of the next clip | conditional_block |
gopro.py | import argparse
import csv
import json
from datetime import datetime, timedelta, date
import os
import subprocess
from functools import reduce
from json import JSONEncoder
from gpxpy.geo import Location
import pytz
from raw_instr_data import RawInstrData
GOPRO_GPMF_BIN = '../gopro_gpmf/cmake-build-debug/gopro_gpmf'
UTC_TZ = pytz.timezone("UTC")
def from_timestamp(time_stamp):
return UTC_TZ.localize(datetime.utcfromtimestamp(time_stamp)) if time_stamp is not None else None
def to_timestamp(start_utc):
return start_utc.timestamp() if start_utc is not None else None
class GoProCacheEncoder(JSONEncoder):
# Override the default method
def default(self, obj):
if isinstance(obj, (date, datetime)):
return obj.isoformat()
elif isinstance(obj, Location):
if obj.elevation is None:
return {'lat': obj.latitude, 'lon': obj.longitude}
else:
return {'lat': obj.latitude, 'lon': obj.longitude, 'alt': obj.elevation}
elif isinstance(obj, RawInstrData):
return obj.to_dict()
raise TypeError("Type %s not serializable" % type(obj))
def decode_gopro_cache(d):
for k in d:
if k == 'start_utc' or k == 'stop_utc':
if d[k] is None:
d[k] = None
elif 'utc' in k:
d[k] = datetime.fromisoformat(d[k])
elif 'instr_data' in k:
ii_list = []
for h in d[k]:
ii = RawInstrData()
ii.utc = h['utc']
ii.lat = h['lat']
ii.lon = h['lon']
ii.sog = h['sog']
ii.cog = h['cog']
ii.awa = h['awa']
ii.aws = h['aws'] | ii.sow = h['sow']
ii.hdg = h['hdg']
ii.n2k_epoch = h['n2k_epoch']
ii_list.append(ii)
d[k] = ii_list
return d
class GoPro:
def __init__(self, sd_card_dir, work_dir):
cache_dir = work_dir + os.sep + 'gopro'
os.makedirs(cache_dir, exist_ok=True)
self.instr_data = []
# Build the list of clips
clips = []
print(f'Looking for GOPRO clips in {sd_card_dir} ...')
for root, dirs_list, files_list in os.walk(sd_card_dir):
for file_name in files_list:
if os.path.splitext(file_name)[-1] == ".MP4":
file_name_path = os.path.join(root, file_name)
if file_name[0] != '.':
clips.append({'name': file_name_path})
# Time stamp the clips
self.clips = [] # Only clips with valid UTC
for clip in clips:
clip_name = clip['name']
clip_cache_name = cache_dir + os.sep + os.path.basename(clip_name) + '.json'
clip_nmea_name = cache_dir + os.sep + os.path.basename(clip_name) + '.nmea'
clip['clip_nmea_name'] = clip_nmea_name
if os.path.isfile(clip_cache_name):
print(f'Reading GOPRO clip info from cache {clip_cache_name}')
with open(clip_cache_name, 'r') as f:
cache = json.load(f, object_hook=decode_gopro_cache)
start_utc = from_timestamp(cache['start_utc'])
stop_utc = from_timestamp(cache['stop_utc'])
instr_data = cache['instr_data']
else:
print(f'Scanning {clip_name}')
[start_utc, stop_utc, instr_data] = self.extract_sensor_data(clip_name, clip_nmea_name)
self.instr_data.append(instr_data)
cache = {
'start_utc': to_timestamp(start_utc),
'stop_utc': to_timestamp(stop_utc),
'instr_data': instr_data
}
with open(clip_cache_name, 'w') as f:
json.dump(cache, f, indent=4, cls=GoProCacheEncoder)
if start_utc is not None:
clip['start_utc'] = start_utc
clip['stop_utc'] = stop_utc
clip['instr_data'] = instr_data
self.clips.append(clip)
print(f'{clip["name"]} {start_utc} {stop_utc}')
else:
print(f'Warning: Clip {clip["name"]} contains no valid UTC')
# Sort clips by UTC start time
self.clips.sort(key=lambda x: x['start_utc'])
# Determine overall start and finish times
self.start_time_utc = None
self.finish_time_utc = None
if len(self.clips) > 0:
self.start_time_utc = self.clips[0]['start_utc']
self.finish_time_utc = self.clips[-1]['stop_utc']
# Create one NMEA file once clips are sorted
gopro_nmea_file = cache_dir + os.sep + 'gopro.nmea'
print(f'Creating GOPRO NMEA file {gopro_nmea_file}')
with open(gopro_nmea_file, 'w') as nmea_file:
for clip in clips:
self.instr_data += clip['instr_data']
with open(clip['clip_nmea_name'], 'r') as clip_nmea:
for line in clip_nmea:
nmea_file.write(line)
print(f'Done with GOPRO processing')
@staticmethod
def extract_sensor_data(mp4_name, clip_nmea_name):
instr_data = []
cmd = [GOPRO_GPMF_BIN, mp4_name]
result = subprocess.run(cmd, stdout=subprocess.PIPE)
start_utc = None
stop_utc = None
timezone = pytz.timezone("UTC")
if result.returncode == 0:
print(f'Creating NMEA file {clip_nmea_name}')
with open(clip_nmea_name, 'w') as nmea_file:
lines = result.stdout.decode('utf-8').split('\n')
reader = csv.DictReader(lines)
for row in reader:
utc = timezone.localize(datetime.fromisoformat(row['utc']))
if row['fix_valid'] == 'True':
signed_lat = float(row['lat'])
lat_sign = 'N' if signed_lat > 0 else 'S'
lat = abs(signed_lat)
lat_min = (lat - int(lat)) * 60
signed_lon = float(row['lon'])
lon_sign = 'E' if signed_lon > 0 else 'W'
lon = abs(signed_lon)
lon_min = (lon - int(lon)) * 60
sog = float(row['sog_ms']) * 3600. / 1852.
if 0 <= lat <= 90 and 0 <= lon <= 180:
rmc = f'$GPRMC,{utc.hour:02d}{utc.minute:02d}{utc.second:02d}.{int(utc.microsecond / 1000):03d},' \
f'A,{int(lat):02d}{lat_min:08.5f},{lat_sign},'\
f'{int(lon):03d}{lon_min:08.5f},{lon_sign},'\
f'{sog:.1f},,{utc.day:02d}{utc.month:02d}{utc.year % 100:02d},'
ii = RawInstrData(0, utc, signed_lat, signed_lon, sog)
instr_data.append(ii)
else:
print('GPRO GPMF bug')
if start_utc is None:
t_ms = int(float(row['t']) * 1000)
start_utc = utc - timedelta(milliseconds=t_ms)
stop_utc = utc
else:
rmc = f'$GPRMC,{utc.hour:02d}{utc.minute:02d}{utc.second:02d}.{int(utc.microsecond / 1000):03d},' \
f'V,,,'\
f',,'\
f',,{utc.day:02d}{utc.month:02d}{utc.year % 100:02d},'
body = rmc[1:] # string between $ and *
cc = reduce(lambda i, j: int(i) ^ int(j), [ord(x) for x in body])
nmea = f'{rmc}*{cc:02X}\r\n'
nmea_file.write(nmea)
return start_utc, stop_utc, instr_data
def get_clips_for_time_interval(self, start_utc, stop_utc):
# Find the clip containing the start of interval
clips = []
for start_idx in range(len(self.clips)):
clip = self.clips[start_idx]
if clip['start_utc'] <= start_utc <= clip['stop_utc']:
in_time = (start_utc - clip['start_utc']).seconds
# Now find the clip containing the stop time
for stop_idx in range(start_idx, len(self.clips)):
clip = self.clips[stop_idx]
if stop_utc <= clip['stop_utc']: # Last clip found
# Check for the corner case when the stop_utc falls inbetween start_utc of the previous clip
# and start_utc of this one
if stop_utc < clip['start_utc']:
return clips
out_time = (stop_utc - clip['start_utc']).seconds
clips.append({
'name': clip['name'],
'in_time': in_time,
'out_time': out_time,
})
return clips
else: # The time interval spans to subsequent clips
clips.append({
'name': clip['name'],
'in_time': in_time,
'out_time': None, # Till the end of clip
})
in_time = None # Start from the beginning of the next clip
return clips
if __name__ == '__main__':
parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
parser.add_argument("--work-dir", help="Working directory", default='/tmp')
parser.add_argument("--gopro-dir", help="GoPro SD card directory", default='/Volumes/GOPRO')
args = parser.parse_args()
gopro = GoPro(args.gopro_dir, args.work_dir)
start = datetime.fromisoformat('2021-11-19T18:04:54.825').astimezone(pytz.utc)
stop = datetime.fromisoformat('2021-11-19T19:23:06.190').astimezone(pytz.utc)
print(gopro.get_clips_for_time_interval(start, stop)) | ii.twa = h['twa']
ii.tws = h['tws'] | random_line_split |
Octagoat.py | # Octagoat. 13 December 2019. Author: Mohammed Madi.
# Coursework 2 project for COMP16321.
# The goat background picture was taken on the
# 23rd of November 2019 at Snowdon, Wales by the author.
# The goat pixel drawings and the cage were produced
# using PixilArt online tool and modified using GIMP.
from tkinter import Tk, PhotoImage, Button
from tkinter import Menu, messagebox, Canvas, Label
from PIL import Image, ImageTk
import time
# Window dimensions.
def setWindowDimensions(w, h):
window.title("Octagoat")
# title of window
ws = window.winfo_screenwidth()
# computers screen width used for window dimensions
hs = window.winfo_screenheight()
# computers screen height used for window dimensions
window.geometry(f"{ws}x{hs}")
# window size
return window
# Main menu with buttons and background picture.
def main_menu():
global window, start, start2, start3, over
global start4, start5, bg1, canvas, boss, start6
if(over==True):
Restart.destroy()
exitb.destroy()
menub.destroy()
if(over==False):
canvas = Canvas(window, bg="black", width=width, height=height)
over = False
background = canvas.create_image(0, 0, anchor='nw', image=bg1)
start = Button(window, text="Play Normal mode", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: normal())
start.place(x=400, y=50)
start2 = Button(window, text="Play Hard mode", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: hard())
start2.place(x=750, y=50)
start3 = Button(window, text="Play Mating\nseason mode", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: mating())
start3.place(x=1100, y=50)
start4 = Button(window, text="Leaderboard", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20))
start4.place(x=1450, y=50)
start5 = Button(window, text="Exit", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: window.destroy())
start5.place(x=50, y=200)
start6 = Button(window, text="Tutorial", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: tutorial())
start6.place(x=50, y=50)
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
# Tutorial explaining different game elements and how to play.
def tutorial():
messagebox.showinfo("Welcome to Octagoat",
"The name comes from Mixed Martial arts (MMA) cages, which are octagons,\
put goats in an octagon and you have an octagoat.")
messagebox.showinfo("Welcome to Octagoat",
"Your character is a goat fighting other goats,\
the goats are running from,\
one side of the cage to the other trying to ram your goat,\
and your goat is trying to stomp the other goats (in Super Mario fashion)")
messagebox.showinfo("Welcome to Octagoat",
" Stomp another goat once, and it will count as a win.\
but get rammed from another goat twice and you lose.\
You can stay on the ramp if you are too scared")
messagebox.showinfo("Welcome to Octagoat",
"You can use the arrow keys to move your goat from\
side to side or jump. To get on the ramp jump to the top\
from underneath it. To get off the ramp simply walk off the sides")
messagebox.showinfo("Welcome to Octagoat",
"Press B if someone passes. C stands for cheat and \
D stands for Diaz.\nWHEN PLAYING MATING \
SEASON TRY TO GET ON THE RAMP AS SOON AS YOU SPAWN")
def rightKey(event):
global direction
direction = "right"
move()
def leftKey(event):
global direction
direction = "left"
move()
def upKey(event):
global direction, sprite
direction = "up"
move()
# Movement function for player
def move():
# This function includes movement, and collision with the ramp
global direction, ramp, score, sprite, speed, paused
if paused == 0:
if direction == "left":
canvas.move(sprite, -speed, 0)
elif direction == "right":
canvas.move(sprite, speed, 0)
elif direction == "up":
canvas.move(sprite, 0, -150)
window.after(350, lambda: canvas.move(sprite, 0, 150))
a = canvas.bbox(sprite)
c = canvas.bbox(box)
# Ramp collision
if a[1] in range((c[3]-100), c[3]) and(a[0] in range
(int(c[0]), int(c[2])) or a[2] in range(int(c[0]), int(c[2]))):
canvas.move(sprite, 0, -420)
ramp = True
if a[0] not in range(int(c[0]), int(c[2]-50.0)) and(
a[2] not in range(int(c[0]+50.0), int(c[2])) and ramp):
window.after(10, lambda: canvas.move(sprite, 0, 120))
window.after(250, lambda: canvas.move(sprite, 0, 150))
window.after(450, lambda: canvas.move(sprite, 0, 150))
ramp = False
def checkCollision(g, frame):
global width, dir, hit, score, scoreText
a = canvas.bbox(sprite)
if a[3] in range((g[3]-150), (g[3])) and(a[0] in range
((g[0]), (g[2]-75)) or a[2] in range((g[0]+75), (g[2]))):
if frame == enemy or frame == enemytr:
canvas.move(frame, (width-g[0]), 0)
else:
canvas.move(frame, (-g[0]), 0)
score += 10
txt = "Goats knocked out:" + str(score)
canvas.itemconfigure(scoreText, text=txt)
canvas.move(sprite, 0, -25)
window.after(250, lambda: canvas.move(sprite, 0, 25))
# Enemy collision (Losing condition)
if a[1] in range((g[1]), (g[3])) and(a[0] in range
((g[0]), (g[2]-75)) or a[2] in range((g[0]+75), (g[2]))):
canvas.move(sprite, 0, -250)
window.after(250, lambda: canvas.move(sprite, 0, 250))
hit += 1
canvas.delete(health1)
# Enemy hit player twice (game over).
if hit == 2:
canvas.move(sprite, 0, -height)
canvas.delete(health2)
messagebox.showinfo("Game over",
"Natural selection got you.\nSurvival of the fittest.\n")
gameover()
return
# Enemy movement function (only for normal mode)
# Update: now also controls mating season second goat on right side
def moveenemy():
global width, dir, hit, score, scoreText, paused, dir3
mov1 = 12
mov2 = 8
if paused == 0:
g = canvas.bbox(enemy)
if(diff == "mating"):
g3 = canvas.bbox(enemytr)
if g3[0] < 100:
dir3 = "right"
if g3[2] > (width-100):
dir3 = "left"
if dir3 == "left":
mov2 = -mov2
if g[0] < 100:
dir = "right"
if g[2] > (width-100):
dir = "left"
if dir == "left":
mov1 = -mov1
canvas.move(enemy, mov1, 0)
if(diff == "mating"):
canvas.move(enemytr, mov2, 0)
if(over == False):
if(diff == "mating"):
window.after(7, moveenemy)
else:
window.after(10, moveenemy)
checkCollision(g, enemy)
if(diff == "mating"):
checkCollision(g3, enemytr)
# Second enemy movement function in Hard mode.
# Update: now also controls mating season second goat on left side
def moveenemy2():
global width, dir2, hit, score, scoreText, paused, dir4
mov1 = 12
mov2 = 8
if paused == 0:
g2 = canvas.bbox(enemy2)
if(diff == "mating"):
g4 = canvas.bbox(enemytl)
if g4[0] < 100:
dir4 = "left"
if g4[2] > (width-100):
dir4 = "right"
if dir4 == "right":
mov2 = -mov2
if g2[0] < 100:
dir2 = "left"
if g2[2] > (width-100):
dir2 = "right"
if dir2 == "right":
mov1 = -mov1
canvas.move(enemy2, mov1, 0)
if(diff == "mating"):
canvas.move(enemytl, mov2, 0)
if (over == False):
if(diff == "mating"):
window.after(7, moveenemy2)
else:
window.after(10, moveenemy2)
checkCollision(g2, enemy2)
if(diff == "mating"):
checkCollision(g4, enemytl)
# Normal mode function which is called for hard and Mating season modes.
def normal():
global score, sprite, enemy, health2, health1, box, scoreText
global start, start2, start3, diff, boss, start4, start5, start6
start.destroy()
start2.destroy()
start3.destroy()
start4.destroy()
start5.destroy()
start6.destroy()
diff = "normal"
bg = canvas.create_image(0, 0, anchor='nw', image=mint)
sprite = canvas.create_image((width/2), (height-350),
anchor='nw', image=img)
enemy = canvas.create_image((width-250), (height-350),
anchor='nw', image=img2)
health1 = canvas.create_image(220, 0, anchor='ne', image=snoop)
health2 = canvas.create_image(100, 0, anchor='ne', image=snoop)
box = canvas.create_rectangle((width/2-190), (height-610),
(width/2+120), (height-560), fill="brown")
scoreText = canvas.create_text(width/2, 10, fill="black",
font="terminal 28", text=txt)
if diff == "normal":
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
over = False
moveenemy()
return
# Hard Mode adds an extra goat.
def hard():
global enemy2, diff, boss
normal()
diff = "hard"
enemy2 = canvas.create_image(0, (height-350), anchor='nw', image=img)
if diff == "hard":
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
over = False
moveenemy2()
return
# Mating season adds 2 extra fast goats.
def mating():
global enemytr, enemytl, diff, boss
normal()
hard()
diff = "mating"
enemytr = canvas.create_image((width-250), (height-350), # tr=top right
anchor='nw', image=img2)
enemytl = canvas.create_image(0, (height-350), anchor='nw', image=img) # tl=top left
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
over = False
return
# Pause function
def pause(event):
global paused
if paused == 0:
pausetxt = Label(canvas, text="Game paused\
\nReturn?\nThe game will pause for 3 seconds when\
you press p again", font="terminal 15", bg="green")
pausetxt.place(x=width/3, y=100)
paused += 1
window.after(10000, lambda: pausetxt.destroy())
elif paused == 1:
time.sleep(3)
paused = 0
moveenemy()
move()
moveenemy2()
# Bosskey
def bosskey(event):
|
# Bosskey reversal button
def bossisgone(event):
global boss, width
canvas.move(boss, 0, -width)
def gameover():
global window, Restart, exitb, menub, hit, over, score
Restart = Button(window, text="Restart", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: restart())
Restart.place(x=50, y=50)
exitb = Button(window, text="Exit", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: window.destroy())
exitb.place(x=1450, y=50)
menub = Button(window, text="Return to\nmain menu", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: main_menu())
menub.place(x=1100, y=50)
hit = 0
score = 0
over = True
return
# Restart option after losing
def restart():
global Restart, exitb, menub, diff, speeden, speeden2, hit, over
Restart.destroy()
exitb.destroy()
menub.destroy()
over = False
if diff == "normal":
normal()
if diff == "hard":
hard()
if diff == "mating":
mating()
return
# Cheat code 1
def invincibility(event):
global hit
if hit == 3:
hit = 0
diaz2 = Label(canvas, text="Nick Diaz has retired.\n\
You are no longer invincible", font="terminal 15", bg="green")
diaz2.place(x=width/2, y=100)
window.after(3000, lambda: diaz2.destroy())
else:
hit = 3
diaz = Label(canvas, text="You have been blessed with the powers of Nick Diaz.\
\nYou are now invincible due to high amounts of THC\
(cannabis psychoactive component) in your system.",
font="terminal 15", bg="green")
diaz.place(x=250, y=100)
window.after(3000, lambda: diaz.destroy())
return
# Cheat code 2
def speedcheat(event):
global speed
if speed == 75:
speed = 125
fast = Label(canvas, text="You've been training with Usain Bolt\
\nYou are now faster than even the people on steroids. ",
font="terminal 15", bg="green")
fast.place(x=250, y=100)
window.after(3000, lambda: fast.destroy())
else:
speed = 75
diaz = Label(canvas, text="Your speed is back to normal.\
\nStill pretty fast for a goat.", font="terminal 15", bg="green")
diaz.place(x=250, y=100)
window.after(3000, lambda: diaz.destroy())
# TKinter window commands.
window = Tk()
width = window.winfo_screenwidth()
# width of screen resolution used for program.
height = window.winfo_screenheight()
# height of screen resolution used for program.
window = setWindowDimensions(width, height)
# Resizing background images to fit different resolutions
image = Image.open("goat.png")
image = image.resize((width, height), Image.ANTIALIAS)
image.save("goatnewres.png")
image2 = Image.open("cage.png")
image2 = image2.resize((width, height), Image.ANTIALIAS)
image2.save("cagenewres.png")
# Images loaded for the program.
img = ImageTk.PhotoImage(file="greg.png")
img2 = ImageTk.PhotoImage(file="gregleft.png")
mint = ImageTk.PhotoImage(file="goatnewres.png")
snoop = ImageTk.PhotoImage(file="420.png")
bosspic = ImageTk.PhotoImage(file="bosspic.png")
bg1 = ImageTk.PhotoImage(file="cagenewres.png")
# Variables used for collision functionality.
ramp = False
hit = 0
# Pause.
over = False
paused = 0
# Directions for enemies.
dir = "left"
dir2 = "right"
dir3 = "left"
dir4 = "right"
speed = 75
# Score text and score variable.
score = 0
txt = "Goats knocked out:" + str(score)
main_menu()
# Key bindings
canvas.bind("<Left>", leftKey)
canvas.bind("<Right>", rightKey)
canvas.bind("<Up>", upKey)
canvas.bind("<p>", pause)
canvas.bind("<b>", bosskey)
canvas.bind("<u>", bossisgone)
canvas.bind("<d>", invincibility)
canvas.bind("<c>", speedcheat)
canvas.focus_set()
# Player direction
direction = ""
# TKinter commands foro opening the window and packing the canvas
canvas.pack()
window.mainloop()
| global boss, width
canvas.move(boss, 0, width) | identifier_body |
Octagoat.py | # Octagoat. 13 December 2019. Author: Mohammed Madi.
# Coursework 2 project for COMP16321.
# The goat background picture was taken on the
# 23rd of November 2019 at Snowdon, Wales by the author.
# The goat pixel drawings and the cage were produced
# using PixilArt online tool and modified using GIMP.
from tkinter import Tk, PhotoImage, Button
from tkinter import Menu, messagebox, Canvas, Label
from PIL import Image, ImageTk
import time
# Window dimensions.
def setWindowDimensions(w, h):
window.title("Octagoat")
# title of window
ws = window.winfo_screenwidth()
# computers screen width used for window dimensions
hs = window.winfo_screenheight()
# computers screen height used for window dimensions
window.geometry(f"{ws}x{hs}")
# window size
return window
# Main menu with buttons and background picture.
def main_menu():
global window, start, start2, start3, over
global start4, start5, bg1, canvas, boss, start6
if(over==True):
Restart.destroy()
exitb.destroy()
menub.destroy()
if(over==False):
canvas = Canvas(window, bg="black", width=width, height=height)
over = False
background = canvas.create_image(0, 0, anchor='nw', image=bg1)
start = Button(window, text="Play Normal mode", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: normal())
start.place(x=400, y=50)
start2 = Button(window, text="Play Hard mode", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: hard())
start2.place(x=750, y=50)
start3 = Button(window, text="Play Mating\nseason mode", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: mating())
start3.place(x=1100, y=50)
start4 = Button(window, text="Leaderboard", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20))
start4.place(x=1450, y=50)
start5 = Button(window, text="Exit", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: window.destroy())
start5.place(x=50, y=200)
start6 = Button(window, text="Tutorial", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: tutorial())
start6.place(x=50, y=50)
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
# Tutorial explaining different game elements and how to play.
def tutorial():
messagebox.showinfo("Welcome to Octagoat",
"The name comes from Mixed Martial arts (MMA) cages, which are octagons,\
put goats in an octagon and you have an octagoat.")
messagebox.showinfo("Welcome to Octagoat",
"Your character is a goat fighting other goats,\
the goats are running from,\
one side of the cage to the other trying to ram your goat,\
and your goat is trying to stomp the other goats (in Super Mario fashion)")
messagebox.showinfo("Welcome to Octagoat",
" Stomp another goat once, and it will count as a win.\
but get rammed from another goat twice and you lose.\
You can stay on the ramp if you are too scared")
messagebox.showinfo("Welcome to Octagoat",
"You can use the arrow keys to move your goat from\
side to side or jump. To get on the ramp jump to the top\
from underneath it. To get off the ramp simply walk off the sides")
messagebox.showinfo("Welcome to Octagoat",
"Press B if someone passes. C stands for cheat and \
D stands for Diaz.\nWHEN PLAYING MATING \
SEASON TRY TO GET ON THE RAMP AS SOON AS YOU SPAWN")
def rightKey(event):
global direction
direction = "right"
move()
def leftKey(event):
global direction
direction = "left"
move()
def upKey(event):
global direction, sprite
direction = "up"
move()
# Movement function for player
def move():
# This function includes movement, and collision with the ramp
global direction, ramp, score, sprite, speed, paused
if paused == 0:
if direction == "left":
canvas.move(sprite, -speed, 0)
elif direction == "right":
canvas.move(sprite, speed, 0)
elif direction == "up":
canvas.move(sprite, 0, -150)
window.after(350, lambda: canvas.move(sprite, 0, 150))
a = canvas.bbox(sprite)
c = canvas.bbox(box)
# Ramp collision
if a[1] in range((c[3]-100), c[3]) and(a[0] in range
(int(c[0]), int(c[2])) or a[2] in range(int(c[0]), int(c[2]))):
canvas.move(sprite, 0, -420)
ramp = True
if a[0] not in range(int(c[0]), int(c[2]-50.0)) and(
a[2] not in range(int(c[0]+50.0), int(c[2])) and ramp):
window.after(10, lambda: canvas.move(sprite, 0, 120))
window.after(250, lambda: canvas.move(sprite, 0, 150))
window.after(450, lambda: canvas.move(sprite, 0, 150))
ramp = False
def checkCollision(g, frame):
global width, dir, hit, score, scoreText
a = canvas.bbox(sprite)
if a[3] in range((g[3]-150), (g[3])) and(a[0] in range
((g[0]), (g[2]-75)) or a[2] in range((g[0]+75), (g[2]))):
if frame == enemy or frame == enemytr:
canvas.move(frame, (width-g[0]), 0)
else:
canvas.move(frame, (-g[0]), 0)
score += 10
txt = "Goats knocked out:" + str(score)
canvas.itemconfigure(scoreText, text=txt)
canvas.move(sprite, 0, -25)
window.after(250, lambda: canvas.move(sprite, 0, 25))
# Enemy collision (Losing condition)
if a[1] in range((g[1]), (g[3])) and(a[0] in range
((g[0]), (g[2]-75)) or a[2] in range((g[0]+75), (g[2]))):
canvas.move(sprite, 0, -250)
window.after(250, lambda: canvas.move(sprite, 0, 250))
hit += 1
canvas.delete(health1)
# Enemy hit player twice (game over).
if hit == 2:
canvas.move(sprite, 0, -height)
canvas.delete(health2)
messagebox.showinfo("Game over",
"Natural selection got you.\nSurvival of the fittest.\n")
gameover()
return
# Enemy movement function (only for normal mode)
# Update: now also controls mating season second goat on right side
def moveenemy():
global width, dir, hit, score, scoreText, paused, dir3
mov1 = 12
mov2 = 8
if paused == 0:
g = canvas.bbox(enemy)
if(diff == "mating"):
g3 = canvas.bbox(enemytr)
if g3[0] < 100:
dir3 = "right"
if g3[2] > (width-100):
dir3 = "left"
if dir3 == "left":
mov2 = -mov2
if g[0] < 100:
dir = "right"
if g[2] > (width-100):
dir = "left"
if dir == "left":
mov1 = -mov1
canvas.move(enemy, mov1, 0)
if(diff == "mating"):
canvas.move(enemytr, mov2, 0)
if(over == False):
if(diff == "mating"):
window.after(7, moveenemy)
else:
window.after(10, moveenemy)
checkCollision(g, enemy)
if(diff == "mating"):
checkCollision(g3, enemytr)
# Second enemy movement function in Hard mode.
# Update: now also controls mating season second goat on left side
def moveenemy2():
global width, dir2, hit, score, scoreText, paused, dir4
mov1 = 12
mov2 = 8
if paused == 0:
g2 = canvas.bbox(enemy2)
if(diff == "mating"):
g4 = canvas.bbox(enemytl)
if g4[0] < 100:
|
if g4[2] > (width-100):
dir4 = "right"
if dir4 == "right":
mov2 = -mov2
if g2[0] < 100:
dir2 = "left"
if g2[2] > (width-100):
dir2 = "right"
if dir2 == "right":
mov1 = -mov1
canvas.move(enemy2, mov1, 0)
if(diff == "mating"):
canvas.move(enemytl, mov2, 0)
if (over == False):
if(diff == "mating"):
window.after(7, moveenemy2)
else:
window.after(10, moveenemy2)
checkCollision(g2, enemy2)
if(diff == "mating"):
checkCollision(g4, enemytl)
# Normal mode function which is called for hard and Mating season modes.
def normal():
global score, sprite, enemy, health2, health1, box, scoreText
global start, start2, start3, diff, boss, start4, start5, start6
start.destroy()
start2.destroy()
start3.destroy()
start4.destroy()
start5.destroy()
start6.destroy()
diff = "normal"
bg = canvas.create_image(0, 0, anchor='nw', image=mint)
sprite = canvas.create_image((width/2), (height-350),
anchor='nw', image=img)
enemy = canvas.create_image((width-250), (height-350),
anchor='nw', image=img2)
health1 = canvas.create_image(220, 0, anchor='ne', image=snoop)
health2 = canvas.create_image(100, 0, anchor='ne', image=snoop)
box = canvas.create_rectangle((width/2-190), (height-610),
(width/2+120), (height-560), fill="brown")
scoreText = canvas.create_text(width/2, 10, fill="black",
font="terminal 28", text=txt)
if diff == "normal":
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
over = False
moveenemy()
return
# Hard Mode adds an extra goat.
def hard():
global enemy2, diff, boss
normal()
diff = "hard"
enemy2 = canvas.create_image(0, (height-350), anchor='nw', image=img)
if diff == "hard":
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
over = False
moveenemy2()
return
# Mating season adds 2 extra fast goats.
def mating():
global enemytr, enemytl, diff, boss
normal()
hard()
diff = "mating"
enemytr = canvas.create_image((width-250), (height-350), # tr=top right
anchor='nw', image=img2)
enemytl = canvas.create_image(0, (height-350), anchor='nw', image=img) # tl=top left
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
over = False
return
# Pause function
def pause(event):
global paused
if paused == 0:
pausetxt = Label(canvas, text="Game paused\
\nReturn?\nThe game will pause for 3 seconds when\
you press p again", font="terminal 15", bg="green")
pausetxt.place(x=width/3, y=100)
paused += 1
window.after(10000, lambda: pausetxt.destroy())
elif paused == 1:
time.sleep(3)
paused = 0
moveenemy()
move()
moveenemy2()
# Bosskey
def bosskey(event):
global boss, width
canvas.move(boss, 0, width)
# Bosskey reversal button
def bossisgone(event):
global boss, width
canvas.move(boss, 0, -width)
def gameover():
global window, Restart, exitb, menub, hit, over, score
Restart = Button(window, text="Restart", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: restart())
Restart.place(x=50, y=50)
exitb = Button(window, text="Exit", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: window.destroy())
exitb.place(x=1450, y=50)
menub = Button(window, text="Return to\nmain menu", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: main_menu())
menub.place(x=1100, y=50)
hit = 0
score = 0
over = True
return
# Restart option after losing
def restart():
global Restart, exitb, menub, diff, speeden, speeden2, hit, over
Restart.destroy()
exitb.destroy()
menub.destroy()
over = False
if diff == "normal":
normal()
if diff == "hard":
hard()
if diff == "mating":
mating()
return
# Cheat code 1
def invincibility(event):
global hit
if hit == 3:
hit = 0
diaz2 = Label(canvas, text="Nick Diaz has retired.\n\
You are no longer invincible", font="terminal 15", bg="green")
diaz2.place(x=width/2, y=100)
window.after(3000, lambda: diaz2.destroy())
else:
hit = 3
diaz = Label(canvas, text="You have been blessed with the powers of Nick Diaz.\
\nYou are now invincible due to high amounts of THC\
(cannabis psychoactive component) in your system.",
font="terminal 15", bg="green")
diaz.place(x=250, y=100)
window.after(3000, lambda: diaz.destroy())
return
# Cheat code 2
def speedcheat(event):
global speed
if speed == 75:
speed = 125
fast = Label(canvas, text="You've been training with Usain Bolt\
\nYou are now faster than even the people on steroids. ",
font="terminal 15", bg="green")
fast.place(x=250, y=100)
window.after(3000, lambda: fast.destroy())
else:
speed = 75
diaz = Label(canvas, text="Your speed is back to normal.\
\nStill pretty fast for a goat.", font="terminal 15", bg="green")
diaz.place(x=250, y=100)
window.after(3000, lambda: diaz.destroy())
# TKinter window commands.
window = Tk()
width = window.winfo_screenwidth()
# width of screen resolution used for program.
height = window.winfo_screenheight()
# height of screen resolution used for program.
window = setWindowDimensions(width, height)
# Resizing background images to fit different resolutions
image = Image.open("goat.png")
image = image.resize((width, height), Image.ANTIALIAS)
image.save("goatnewres.png")
image2 = Image.open("cage.png")
image2 = image2.resize((width, height), Image.ANTIALIAS)
image2.save("cagenewres.png")
# Images loaded for the program.
img = ImageTk.PhotoImage(file="greg.png")
img2 = ImageTk.PhotoImage(file="gregleft.png")
mint = ImageTk.PhotoImage(file="goatnewres.png")
snoop = ImageTk.PhotoImage(file="420.png")
bosspic = ImageTk.PhotoImage(file="bosspic.png")
bg1 = ImageTk.PhotoImage(file="cagenewres.png")
# Variables used for collision functionality.
ramp = False
hit = 0
# Pause.
over = False
paused = 0
# Directions for enemies.
dir = "left"
dir2 = "right"
dir3 = "left"
dir4 = "right"
speed = 75
# Score text and score variable.
score = 0
txt = "Goats knocked out:" + str(score)
main_menu()
# Key bindings
canvas.bind("<Left>", leftKey)
canvas.bind("<Right>", rightKey)
canvas.bind("<Up>", upKey)
canvas.bind("<p>", pause)
canvas.bind("<b>", bosskey)
canvas.bind("<u>", bossisgone)
canvas.bind("<d>", invincibility)
canvas.bind("<c>", speedcheat)
canvas.focus_set()
# Player direction
direction = ""
# TKinter commands foro opening the window and packing the canvas
canvas.pack()
window.mainloop()
| dir4 = "left" | conditional_block |
Octagoat.py | # Octagoat. 13 December 2019. Author: Mohammed Madi.
# Coursework 2 project for COMP16321. |
# The goat pixel drawings and the cage were produced
# using PixilArt online tool and modified using GIMP.
from tkinter import Tk, PhotoImage, Button
from tkinter import Menu, messagebox, Canvas, Label
from PIL import Image, ImageTk
import time
# Window dimensions.
def setWindowDimensions(w, h):
window.title("Octagoat")
# title of window
ws = window.winfo_screenwidth()
# computers screen width used for window dimensions
hs = window.winfo_screenheight()
# computers screen height used for window dimensions
window.geometry(f"{ws}x{hs}")
# window size
return window
# Main menu with buttons and background picture.
def main_menu():
global window, start, start2, start3, over
global start4, start5, bg1, canvas, boss, start6
if(over==True):
Restart.destroy()
exitb.destroy()
menub.destroy()
if(over==False):
canvas = Canvas(window, bg="black", width=width, height=height)
over = False
background = canvas.create_image(0, 0, anchor='nw', image=bg1)
start = Button(window, text="Play Normal mode", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: normal())
start.place(x=400, y=50)
start2 = Button(window, text="Play Hard mode", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: hard())
start2.place(x=750, y=50)
start3 = Button(window, text="Play Mating\nseason mode", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: mating())
start3.place(x=1100, y=50)
start4 = Button(window, text="Leaderboard", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20))
start4.place(x=1450, y=50)
start5 = Button(window, text="Exit", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: window.destroy())
start5.place(x=50, y=200)
start6 = Button(window, text="Tutorial", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: tutorial())
start6.place(x=50, y=50)
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
# Tutorial explaining different game elements and how to play.
def tutorial():
messagebox.showinfo("Welcome to Octagoat",
"The name comes from Mixed Martial arts (MMA) cages, which are octagons,\
put goats in an octagon and you have an octagoat.")
messagebox.showinfo("Welcome to Octagoat",
"Your character is a goat fighting other goats,\
the goats are running from,\
one side of the cage to the other trying to ram your goat,\
and your goat is trying to stomp the other goats (in Super Mario fashion)")
messagebox.showinfo("Welcome to Octagoat",
" Stomp another goat once, and it will count as a win.\
but get rammed from another goat twice and you lose.\
You can stay on the ramp if you are too scared")
messagebox.showinfo("Welcome to Octagoat",
"You can use the arrow keys to move your goat from\
side to side or jump. To get on the ramp jump to the top\
from underneath it. To get off the ramp simply walk off the sides")
messagebox.showinfo("Welcome to Octagoat",
"Press B if someone passes. C stands for cheat and \
D stands for Diaz.\nWHEN PLAYING MATING \
SEASON TRY TO GET ON THE RAMP AS SOON AS YOU SPAWN")
def rightKey(event):
global direction
direction = "right"
move()
def leftKey(event):
global direction
direction = "left"
move()
def upKey(event):
global direction, sprite
direction = "up"
move()
# Movement function for player
def move():
# This function includes movement, and collision with the ramp
global direction, ramp, score, sprite, speed, paused
if paused == 0:
if direction == "left":
canvas.move(sprite, -speed, 0)
elif direction == "right":
canvas.move(sprite, speed, 0)
elif direction == "up":
canvas.move(sprite, 0, -150)
window.after(350, lambda: canvas.move(sprite, 0, 150))
a = canvas.bbox(sprite)
c = canvas.bbox(box)
# Ramp collision
if a[1] in range((c[3]-100), c[3]) and(a[0] in range
(int(c[0]), int(c[2])) or a[2] in range(int(c[0]), int(c[2]))):
canvas.move(sprite, 0, -420)
ramp = True
if a[0] not in range(int(c[0]), int(c[2]-50.0)) and(
a[2] not in range(int(c[0]+50.0), int(c[2])) and ramp):
window.after(10, lambda: canvas.move(sprite, 0, 120))
window.after(250, lambda: canvas.move(sprite, 0, 150))
window.after(450, lambda: canvas.move(sprite, 0, 150))
ramp = False
def checkCollision(g, frame):
global width, dir, hit, score, scoreText
a = canvas.bbox(sprite)
if a[3] in range((g[3]-150), (g[3])) and(a[0] in range
((g[0]), (g[2]-75)) or a[2] in range((g[0]+75), (g[2]))):
if frame == enemy or frame == enemytr:
canvas.move(frame, (width-g[0]), 0)
else:
canvas.move(frame, (-g[0]), 0)
score += 10
txt = "Goats knocked out:" + str(score)
canvas.itemconfigure(scoreText, text=txt)
canvas.move(sprite, 0, -25)
window.after(250, lambda: canvas.move(sprite, 0, 25))
# Enemy collision (Losing condition)
if a[1] in range((g[1]), (g[3])) and(a[0] in range
((g[0]), (g[2]-75)) or a[2] in range((g[0]+75), (g[2]))):
canvas.move(sprite, 0, -250)
window.after(250, lambda: canvas.move(sprite, 0, 250))
hit += 1
canvas.delete(health1)
# Enemy hit player twice (game over).
if hit == 2:
canvas.move(sprite, 0, -height)
canvas.delete(health2)
messagebox.showinfo("Game over",
"Natural selection got you.\nSurvival of the fittest.\n")
gameover()
return
# Enemy movement function (only for normal mode)
# Update: now also controls mating season second goat on right side
def moveenemy():
global width, dir, hit, score, scoreText, paused, dir3
mov1 = 12
mov2 = 8
if paused == 0:
g = canvas.bbox(enemy)
if(diff == "mating"):
g3 = canvas.bbox(enemytr)
if g3[0] < 100:
dir3 = "right"
if g3[2] > (width-100):
dir3 = "left"
if dir3 == "left":
mov2 = -mov2
if g[0] < 100:
dir = "right"
if g[2] > (width-100):
dir = "left"
if dir == "left":
mov1 = -mov1
canvas.move(enemy, mov1, 0)
if(diff == "mating"):
canvas.move(enemytr, mov2, 0)
if(over == False):
if(diff == "mating"):
window.after(7, moveenemy)
else:
window.after(10, moveenemy)
checkCollision(g, enemy)
if(diff == "mating"):
checkCollision(g3, enemytr)
# Second enemy movement function in Hard mode.
# Update: now also controls mating season second goat on left side
def moveenemy2():
global width, dir2, hit, score, scoreText, paused, dir4
mov1 = 12
mov2 = 8
if paused == 0:
g2 = canvas.bbox(enemy2)
if(diff == "mating"):
g4 = canvas.bbox(enemytl)
if g4[0] < 100:
dir4 = "left"
if g4[2] > (width-100):
dir4 = "right"
if dir4 == "right":
mov2 = -mov2
if g2[0] < 100:
dir2 = "left"
if g2[2] > (width-100):
dir2 = "right"
if dir2 == "right":
mov1 = -mov1
canvas.move(enemy2, mov1, 0)
if(diff == "mating"):
canvas.move(enemytl, mov2, 0)
if (over == False):
if(diff == "mating"):
window.after(7, moveenemy2)
else:
window.after(10, moveenemy2)
checkCollision(g2, enemy2)
if(diff == "mating"):
checkCollision(g4, enemytl)
# Normal mode function which is called for hard and Mating season modes.
def normal():
global score, sprite, enemy, health2, health1, box, scoreText
global start, start2, start3, diff, boss, start4, start5, start6
start.destroy()
start2.destroy()
start3.destroy()
start4.destroy()
start5.destroy()
start6.destroy()
diff = "normal"
bg = canvas.create_image(0, 0, anchor='nw', image=mint)
sprite = canvas.create_image((width/2), (height-350),
anchor='nw', image=img)
enemy = canvas.create_image((width-250), (height-350),
anchor='nw', image=img2)
health1 = canvas.create_image(220, 0, anchor='ne', image=snoop)
health2 = canvas.create_image(100, 0, anchor='ne', image=snoop)
box = canvas.create_rectangle((width/2-190), (height-610),
(width/2+120), (height-560), fill="brown")
scoreText = canvas.create_text(width/2, 10, fill="black",
font="terminal 28", text=txt)
if diff == "normal":
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
over = False
moveenemy()
return
# Hard Mode adds an extra goat.
def hard():
global enemy2, diff, boss
normal()
diff = "hard"
enemy2 = canvas.create_image(0, (height-350), anchor='nw', image=img)
if diff == "hard":
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
over = False
moveenemy2()
return
# Mating season adds 2 extra fast goats.
def mating():
global enemytr, enemytl, diff, boss
normal()
hard()
diff = "mating"
enemytr = canvas.create_image((width-250), (height-350), # tr=top right
anchor='nw', image=img2)
enemytl = canvas.create_image(0, (height-350), anchor='nw', image=img) # tl=top left
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
over = False
return
# Pause function
def pause(event):
global paused
if paused == 0:
pausetxt = Label(canvas, text="Game paused\
\nReturn?\nThe game will pause for 3 seconds when\
you press p again", font="terminal 15", bg="green")
pausetxt.place(x=width/3, y=100)
paused += 1
window.after(10000, lambda: pausetxt.destroy())
elif paused == 1:
time.sleep(3)
paused = 0
moveenemy()
move()
moveenemy2()
# Bosskey
def bosskey(event):
global boss, width
canvas.move(boss, 0, width)
# Bosskey reversal button
def bossisgone(event):
global boss, width
canvas.move(boss, 0, -width)
def gameover():
global window, Restart, exitb, menub, hit, over, score
Restart = Button(window, text="Restart", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: restart())
Restart.place(x=50, y=50)
exitb = Button(window, text="Exit", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: window.destroy())
exitb.place(x=1450, y=50)
menub = Button(window, text="Return to\nmain menu", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: main_menu())
menub.place(x=1100, y=50)
hit = 0
score = 0
over = True
return
# Restart option after losing
def restart():
global Restart, exitb, menub, diff, speeden, speeden2, hit, over
Restart.destroy()
exitb.destroy()
menub.destroy()
over = False
if diff == "normal":
normal()
if diff == "hard":
hard()
if diff == "mating":
mating()
return
# Cheat code 1
def invincibility(event):
global hit
if hit == 3:
hit = 0
diaz2 = Label(canvas, text="Nick Diaz has retired.\n\
You are no longer invincible", font="terminal 15", bg="green")
diaz2.place(x=width/2, y=100)
window.after(3000, lambda: diaz2.destroy())
else:
hit = 3
diaz = Label(canvas, text="You have been blessed with the powers of Nick Diaz.\
\nYou are now invincible due to high amounts of THC\
(cannabis psychoactive component) in your system.",
font="terminal 15", bg="green")
diaz.place(x=250, y=100)
window.after(3000, lambda: diaz.destroy())
return
# Cheat code 2
def speedcheat(event):
global speed
if speed == 75:
speed = 125
fast = Label(canvas, text="You've been training with Usain Bolt\
\nYou are now faster than even the people on steroids. ",
font="terminal 15", bg="green")
fast.place(x=250, y=100)
window.after(3000, lambda: fast.destroy())
else:
speed = 75
diaz = Label(canvas, text="Your speed is back to normal.\
\nStill pretty fast for a goat.", font="terminal 15", bg="green")
diaz.place(x=250, y=100)
window.after(3000, lambda: diaz.destroy())
# TKinter window commands.
window = Tk()
width = window.winfo_screenwidth()
# width of screen resolution used for program.
height = window.winfo_screenheight()
# height of screen resolution used for program.
window = setWindowDimensions(width, height)
# Resizing background images to fit different resolutions
image = Image.open("goat.png")
image = image.resize((width, height), Image.ANTIALIAS)
image.save("goatnewres.png")
image2 = Image.open("cage.png")
image2 = image2.resize((width, height), Image.ANTIALIAS)
image2.save("cagenewres.png")
# Images loaded for the program.
img = ImageTk.PhotoImage(file="greg.png")
img2 = ImageTk.PhotoImage(file="gregleft.png")
mint = ImageTk.PhotoImage(file="goatnewres.png")
snoop = ImageTk.PhotoImage(file="420.png")
bosspic = ImageTk.PhotoImage(file="bosspic.png")
bg1 = ImageTk.PhotoImage(file="cagenewres.png")
# Variables used for collision functionality.
ramp = False
hit = 0
# Pause.
over = False
paused = 0
# Directions for enemies.
dir = "left"
dir2 = "right"
dir3 = "left"
dir4 = "right"
speed = 75
# Score text and score variable.
score = 0
txt = "Goats knocked out:" + str(score)
main_menu()
# Key bindings
canvas.bind("<Left>", leftKey)
canvas.bind("<Right>", rightKey)
canvas.bind("<Up>", upKey)
canvas.bind("<p>", pause)
canvas.bind("<b>", bosskey)
canvas.bind("<u>", bossisgone)
canvas.bind("<d>", invincibility)
canvas.bind("<c>", speedcheat)
canvas.focus_set()
# Player direction
direction = ""
# TKinter commands foro opening the window and packing the canvas
canvas.pack()
window.mainloop() |
# The goat background picture was taken on the
# 23rd of November 2019 at Snowdon, Wales by the author. | random_line_split |
Octagoat.py | # Octagoat. 13 December 2019. Author: Mohammed Madi.
# Coursework 2 project for COMP16321.
# The goat background picture was taken on the
# 23rd of November 2019 at Snowdon, Wales by the author.
# The goat pixel drawings and the cage were produced
# using PixilArt online tool and modified using GIMP.
from tkinter import Tk, PhotoImage, Button
from tkinter import Menu, messagebox, Canvas, Label
from PIL import Image, ImageTk
import time
# Window dimensions.
def setWindowDimensions(w, h):
window.title("Octagoat")
# title of window
ws = window.winfo_screenwidth()
# computers screen width used for window dimensions
hs = window.winfo_screenheight()
# computers screen height used for window dimensions
window.geometry(f"{ws}x{hs}")
# window size
return window
# Main menu with buttons and background picture.
def main_menu():
global window, start, start2, start3, over
global start4, start5, bg1, canvas, boss, start6
if(over==True):
Restart.destroy()
exitb.destroy()
menub.destroy()
if(over==False):
canvas = Canvas(window, bg="black", width=width, height=height)
over = False
background = canvas.create_image(0, 0, anchor='nw', image=bg1)
start = Button(window, text="Play Normal mode", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: normal())
start.place(x=400, y=50)
start2 = Button(window, text="Play Hard mode", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: hard())
start2.place(x=750, y=50)
start3 = Button(window, text="Play Mating\nseason mode", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: mating())
start3.place(x=1100, y=50)
start4 = Button(window, text="Leaderboard", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20))
start4.place(x=1450, y=50)
start5 = Button(window, text="Exit", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: window.destroy())
start5.place(x=50, y=200)
start6 = Button(window, text="Tutorial", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: tutorial())
start6.place(x=50, y=50)
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
# Tutorial explaining different game elements and how to play.
def tutorial():
messagebox.showinfo("Welcome to Octagoat",
"The name comes from Mixed Martial arts (MMA) cages, which are octagons,\
put goats in an octagon and you have an octagoat.")
messagebox.showinfo("Welcome to Octagoat",
"Your character is a goat fighting other goats,\
the goats are running from,\
one side of the cage to the other trying to ram your goat,\
and your goat is trying to stomp the other goats (in Super Mario fashion)")
messagebox.showinfo("Welcome to Octagoat",
" Stomp another goat once, and it will count as a win.\
but get rammed from another goat twice and you lose.\
You can stay on the ramp if you are too scared")
messagebox.showinfo("Welcome to Octagoat",
"You can use the arrow keys to move your goat from\
side to side or jump. To get on the ramp jump to the top\
from underneath it. To get off the ramp simply walk off the sides")
messagebox.showinfo("Welcome to Octagoat",
"Press B if someone passes. C stands for cheat and \
D stands for Diaz.\nWHEN PLAYING MATING \
SEASON TRY TO GET ON THE RAMP AS SOON AS YOU SPAWN")
def rightKey(event):
global direction
direction = "right"
move()
def leftKey(event):
global direction
direction = "left"
move()
def upKey(event):
global direction, sprite
direction = "up"
move()
# Movement function for player
def move():
# This function includes movement, and collision with the ramp
global direction, ramp, score, sprite, speed, paused
if paused == 0:
if direction == "left":
canvas.move(sprite, -speed, 0)
elif direction == "right":
canvas.move(sprite, speed, 0)
elif direction == "up":
canvas.move(sprite, 0, -150)
window.after(350, lambda: canvas.move(sprite, 0, 150))
a = canvas.bbox(sprite)
c = canvas.bbox(box)
# Ramp collision
if a[1] in range((c[3]-100), c[3]) and(a[0] in range
(int(c[0]), int(c[2])) or a[2] in range(int(c[0]), int(c[2]))):
canvas.move(sprite, 0, -420)
ramp = True
if a[0] not in range(int(c[0]), int(c[2]-50.0)) and(
a[2] not in range(int(c[0]+50.0), int(c[2])) and ramp):
window.after(10, lambda: canvas.move(sprite, 0, 120))
window.after(250, lambda: canvas.move(sprite, 0, 150))
window.after(450, lambda: canvas.move(sprite, 0, 150))
ramp = False
def checkCollision(g, frame):
global width, dir, hit, score, scoreText
a = canvas.bbox(sprite)
if a[3] in range((g[3]-150), (g[3])) and(a[0] in range
((g[0]), (g[2]-75)) or a[2] in range((g[0]+75), (g[2]))):
if frame == enemy or frame == enemytr:
canvas.move(frame, (width-g[0]), 0)
else:
canvas.move(frame, (-g[0]), 0)
score += 10
txt = "Goats knocked out:" + str(score)
canvas.itemconfigure(scoreText, text=txt)
canvas.move(sprite, 0, -25)
window.after(250, lambda: canvas.move(sprite, 0, 25))
# Enemy collision (Losing condition)
if a[1] in range((g[1]), (g[3])) and(a[0] in range
((g[0]), (g[2]-75)) or a[2] in range((g[0]+75), (g[2]))):
canvas.move(sprite, 0, -250)
window.after(250, lambda: canvas.move(sprite, 0, 250))
hit += 1
canvas.delete(health1)
# Enemy hit player twice (game over).
if hit == 2:
canvas.move(sprite, 0, -height)
canvas.delete(health2)
messagebox.showinfo("Game over",
"Natural selection got you.\nSurvival of the fittest.\n")
gameover()
return
# Enemy movement function (only for normal mode)
# Update: now also controls mating season second goat on right side
def moveenemy():
global width, dir, hit, score, scoreText, paused, dir3
mov1 = 12
mov2 = 8
if paused == 0:
g = canvas.bbox(enemy)
if(diff == "mating"):
g3 = canvas.bbox(enemytr)
if g3[0] < 100:
dir3 = "right"
if g3[2] > (width-100):
dir3 = "left"
if dir3 == "left":
mov2 = -mov2
if g[0] < 100:
dir = "right"
if g[2] > (width-100):
dir = "left"
if dir == "left":
mov1 = -mov1
canvas.move(enemy, mov1, 0)
if(diff == "mating"):
canvas.move(enemytr, mov2, 0)
if(over == False):
if(diff == "mating"):
window.after(7, moveenemy)
else:
window.after(10, moveenemy)
checkCollision(g, enemy)
if(diff == "mating"):
checkCollision(g3, enemytr)
# Second enemy movement function in Hard mode.
# Update: now also controls mating season second goat on left side
def moveenemy2():
global width, dir2, hit, score, scoreText, paused, dir4
mov1 = 12
mov2 = 8
if paused == 0:
g2 = canvas.bbox(enemy2)
if(diff == "mating"):
g4 = canvas.bbox(enemytl)
if g4[0] < 100:
dir4 = "left"
if g4[2] > (width-100):
dir4 = "right"
if dir4 == "right":
mov2 = -mov2
if g2[0] < 100:
dir2 = "left"
if g2[2] > (width-100):
dir2 = "right"
if dir2 == "right":
mov1 = -mov1
canvas.move(enemy2, mov1, 0)
if(diff == "mating"):
canvas.move(enemytl, mov2, 0)
if (over == False):
if(diff == "mating"):
window.after(7, moveenemy2)
else:
window.after(10, moveenemy2)
checkCollision(g2, enemy2)
if(diff == "mating"):
checkCollision(g4, enemytl)
# Normal mode function which is called for hard and Mating season modes.
def normal():
global score, sprite, enemy, health2, health1, box, scoreText
global start, start2, start3, diff, boss, start4, start5, start6
start.destroy()
start2.destroy()
start3.destroy()
start4.destroy()
start5.destroy()
start6.destroy()
diff = "normal"
bg = canvas.create_image(0, 0, anchor='nw', image=mint)
sprite = canvas.create_image((width/2), (height-350),
anchor='nw', image=img)
enemy = canvas.create_image((width-250), (height-350),
anchor='nw', image=img2)
health1 = canvas.create_image(220, 0, anchor='ne', image=snoop)
health2 = canvas.create_image(100, 0, anchor='ne', image=snoop)
box = canvas.create_rectangle((width/2-190), (height-610),
(width/2+120), (height-560), fill="brown")
scoreText = canvas.create_text(width/2, 10, fill="black",
font="terminal 28", text=txt)
if diff == "normal":
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
over = False
moveenemy()
return
# Hard Mode adds an extra goat.
def hard():
global enemy2, diff, boss
normal()
diff = "hard"
enemy2 = canvas.create_image(0, (height-350), anchor='nw', image=img)
if diff == "hard":
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
over = False
moveenemy2()
return
# Mating season adds 2 extra fast goats.
def mating():
global enemytr, enemytl, diff, boss
normal()
hard()
diff = "mating"
enemytr = canvas.create_image((width-250), (height-350), # tr=top right
anchor='nw', image=img2)
enemytl = canvas.create_image(0, (height-350), anchor='nw', image=img) # tl=top left
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
over = False
return
# Pause function
def | (event):
global paused
if paused == 0:
pausetxt = Label(canvas, text="Game paused\
\nReturn?\nThe game will pause for 3 seconds when\
you press p again", font="terminal 15", bg="green")
pausetxt.place(x=width/3, y=100)
paused += 1
window.after(10000, lambda: pausetxt.destroy())
elif paused == 1:
time.sleep(3)
paused = 0
moveenemy()
move()
moveenemy2()
# Bosskey
def bosskey(event):
global boss, width
canvas.move(boss, 0, width)
# Bosskey reversal button
def bossisgone(event):
global boss, width
canvas.move(boss, 0, -width)
def gameover():
global window, Restart, exitb, menub, hit, over, score
Restart = Button(window, text="Restart", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: restart())
Restart.place(x=50, y=50)
exitb = Button(window, text="Exit", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: window.destroy())
exitb.place(x=1450, y=50)
menub = Button(window, text="Return to\nmain menu", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: main_menu())
menub.place(x=1100, y=50)
hit = 0
score = 0
over = True
return
# Restart option after losing
def restart():
global Restart, exitb, menub, diff, speeden, speeden2, hit, over
Restart.destroy()
exitb.destroy()
menub.destroy()
over = False
if diff == "normal":
normal()
if diff == "hard":
hard()
if diff == "mating":
mating()
return
# Cheat code 1
def invincibility(event):
global hit
if hit == 3:
hit = 0
diaz2 = Label(canvas, text="Nick Diaz has retired.\n\
You are no longer invincible", font="terminal 15", bg="green")
diaz2.place(x=width/2, y=100)
window.after(3000, lambda: diaz2.destroy())
else:
hit = 3
diaz = Label(canvas, text="You have been blessed with the powers of Nick Diaz.\
\nYou are now invincible due to high amounts of THC\
(cannabis psychoactive component) in your system.",
font="terminal 15", bg="green")
diaz.place(x=250, y=100)
window.after(3000, lambda: diaz.destroy())
return
# Cheat code 2
def speedcheat(event):
global speed
if speed == 75:
speed = 125
fast = Label(canvas, text="You've been training with Usain Bolt\
\nYou are now faster than even the people on steroids. ",
font="terminal 15", bg="green")
fast.place(x=250, y=100)
window.after(3000, lambda: fast.destroy())
else:
speed = 75
diaz = Label(canvas, text="Your speed is back to normal.\
\nStill pretty fast for a goat.", font="terminal 15", bg="green")
diaz.place(x=250, y=100)
window.after(3000, lambda: diaz.destroy())
# TKinter window commands.
window = Tk()
width = window.winfo_screenwidth()
# width of screen resolution used for program.
height = window.winfo_screenheight()
# height of screen resolution used for program.
window = setWindowDimensions(width, height)
# Resizing background images to fit different resolutions
image = Image.open("goat.png")
image = image.resize((width, height), Image.ANTIALIAS)
image.save("goatnewres.png")
image2 = Image.open("cage.png")
image2 = image2.resize((width, height), Image.ANTIALIAS)
image2.save("cagenewres.png")
# Images loaded for the program.
img = ImageTk.PhotoImage(file="greg.png")
img2 = ImageTk.PhotoImage(file="gregleft.png")
mint = ImageTk.PhotoImage(file="goatnewres.png")
snoop = ImageTk.PhotoImage(file="420.png")
bosspic = ImageTk.PhotoImage(file="bosspic.png")
bg1 = ImageTk.PhotoImage(file="cagenewres.png")
# Variables used for collision functionality.
ramp = False
hit = 0
# Pause.
over = False
paused = 0
# Directions for enemies.
dir = "left"
dir2 = "right"
dir3 = "left"
dir4 = "right"
speed = 75
# Score text and score variable.
score = 0
txt = "Goats knocked out:" + str(score)
main_menu()
# Key bindings
canvas.bind("<Left>", leftKey)
canvas.bind("<Right>", rightKey)
canvas.bind("<Up>", upKey)
canvas.bind("<p>", pause)
canvas.bind("<b>", bosskey)
canvas.bind("<u>", bossisgone)
canvas.bind("<d>", invincibility)
canvas.bind("<c>", speedcheat)
canvas.focus_set()
# Player direction
direction = ""
# TKinter commands foro opening the window and packing the canvas
canvas.pack()
window.mainloop()
| pause | identifier_name |
main.py | import math
import random
import os
import shutil
from abc import *
def howItPrint():
print("hello world") # how to print
#-----variables-----
def variables():
name = "Ben Rafalski" # can use single or double quotes for a string in python
age = 20
height = 250.5
alive = True # booleans start with a capital letter
print("hello " + name + " who is: " + str(age) + " years old and " + str(250.5) + "cm tall, is ben alive? " + str(alive))
#-----multiple assignment, assign multiple variables in one line-----
#name, age, alive = "ben", 20, True
#print("name: " + name + ", age: " + str(age) + ", isAlive?: " + str(alive))
#-----string methods-----
#name = "ben"
#print(len(name)) # string length
#print(name.find("b")) # returns the index of the character selected
#print(name.capitalize()) # capitalizes the first letter
#print(name.upper()) #turns string all uppercase
#print(name.lower()) # turns string all lowercase
#print(name.isdigit()) # returns true if it is a number
#print(name.isalpha()) # returns true if the letters in the string are alphabetical
#print(name.count("e")) # returns the count of selected characters
#print(name.replace("e", "a")) # replaces first argument with the second one
#print(name*3) # prints a string multiple times
#-----Type Casting-----
#x, y, z = 1, 2.2, "3" # int, float, string
#print(int(y)) # drops the decimal
#print(float(z)) # adds a .0 to the end
#print(str(x)*3)
#-----user input-----
#name = input("what is your name?: ") # always returns a string, must cast if you need to do math operations
#age = float(input("what is your age?: "))
#age += 1
#print("your name is: " + name + ", next year you will be: " + str(age))
#-----math functions, must use "import math" to start-----
#pi = 3.14
#x, y, z = 1, 2, 3
#print(round(pi)) # rounds the number
#print(math.ceil(pi)) # rounds the number up
#print(math.floor(pi)) # rounds the number down
#print(abs(-pi)) # returns the absolute value of the number
#print(pow(pi, 2)) # takes the first argument to the power of the second argument
#print(math.sqrt(pi)) # returns the square root of the number
#print(max(x,y,z)) # returns the max number from a set of numbers
#print(min(x,y,z)) # returns the min number from a set of numbers
#-----string slicing, creating a substring from a string-----
#name = "Ben Rafalski"
#first_name = name[0:3:1] # indexing[start(inclusive):stop(exclusive):step]
#last_name = name[4:12:1]
#reverse_name = name[::-1] # returns the string in reverse, index [0:end:-1]
#print(first_name + last_name)
#-----If statements-----
#age = int(input("how old are you?: "))
#if age >= 65:
# print("you are a senior")
#elif age >= 18:
# print("you are an adult")
#else:
# print("you are a child")
#-----logical operators (and, or, not)-----
#temp = float(input("what is the tempurature outside?: "))
#if not(temp <= 0 and temp >= 30):
# print("the tempuratue is good today")
#elif temp < 0 or temp > 30:
# print("the tempurature is bad today")
#-----while loops-----
#name = ""
#while len(name) == 0:
# name = input("enter your name: ")
#print("hello " + name)
#-----for loops-----
#for i in range(1,11,1): #executed 10 times 'range(inclusive, exclusive, step)'
# print(i)
#-----nested loops-----
#rows = int(input("how many rows?: "))
#columns = int(input("how many columns?: "))
#symbol = input("enter a symbol to use: ")
#for i in range(rows):
# for j in range(columns):
# print(symbol, end = "") # prevents a newline
# print() # newline
#-----loop control statements, break (terminate loop), continue (skip to next iteration), pass (does nothing)-----
#while True:
# name = input("enter your name: ")
# if name != "":
# break
#phone_number = "123-456-7890"
#for i in phone_number:
# if i == "-":
# continue
# print(i, end = "")
#for i in range(1,21):
# if i == 13:
# pass # does nothing, place holder for the word 'nothing'
# else:
# print(i)
#-----lists, like arrays-----
#food = ["pizza", "cookies", "hamburger", "hot dog"]
#food.append("ice cream") # adds the element to the end of the list
#food.remove("hot dog") # removes the selected element from the list
#food.pop() # removes the last element from the list
#food.insert(0,"cake") # inserts the second argument as an element into the index of the first argument in the list
#food.sort() # sorts the list alphabetically
#food.clear() # clears all items from the list
#-----2D lists, list of lists-----
#drinks = ["coffee", "soda", "tea"]
#dinner = ["pizza", "hamburger", "hotdog"]
#dessert = ["pie", "ice cream", "cake"]
#food = [drinks, dinner, dessert]
#for i in range(3):
# for j in range(3):
# print(food[i][j], end="")
# if(j != 3):
# print(", ", end="")
# print()
#-----tuples, collections which are ordered and unchangeable, useful for related data-----
#student = ("ben", 21, "male")
#student.count("ben") # returns how many times a value appears in a tuple
#student.index("male") # returns the index of the selected element in the tuple
#-----sets, collection that is unordered and unindexed, has no dublicates-----
#utensils = {"fork", "spoon", "knife"}
#dishes = {"bowl", "plate", "cup", "knife"}
#utensils.add("napkin") # adds a specified element to the set
#utensils.remove("fork") # removes a specified element from the set
#utensils.clear() # clears all elements from a set
#utensils.update(dishes) # adds the elements of one set to another
#dinner_table = utensils.union(dishes) # takes the union of 2 sets
#for x in dinner_table:
# print(x)
#utensils.difference(dishes) # what does utensils have that dishes does not?
#-----dictionaries = changeable, unordered collection of key:value pairs-----
#capitols = {'USA':'Washington DC', 'India':'New Dehli', 'China':'Beijing', 'Russia':'Moscow'}
#print(capitols['Russia']) # use the key instead of the index of the element
#print(capitols.get('Germany')) # safer way of accessing the elements
#print(capitols.keys()) # prints all the keys
#print(capitols.values()) # prints only the values
#print(capitols.items()) # prints the keys and the values together
#capitols.update({'Germany':'Berlin'}) # adds/changes an element in the dictionary
#capitols.pop('USA') # removes the key value pair from the dictionary
#capitols.clear() # clears the dictionary
#-----index operator [], used in strings, lists, and tuples-----
#name = "ben rafalski"
#first_name = name[:3].upper() # [start:end]
#last_name = name[4:].upper() # [start:end]
#last_character = name[-1] # access element in reverse
#print(first_name+last_name)
#-----functions-----
#def func(name): #use the def keyword
# print("this is " + name + "'s first function")
#input = input("what is your name?: ")
#func(input)
#-----function that returns something-----
#def multiply(factor, multiplier):
# multiplicand = factor * multiplier
# return multiplicand
#number_1 = int(input("enter a number: "))
#number_2 = int(input("enter another number: "))
#multiplying = multiply(number_1, number_2)
#print(multiplying)
#-----keyword arguments, preceded by an identifier when passing into a func-----
#def hello(first, middle, last):
# print("hello "+first+" "+middle+" "+last)
#hello(last = "rafalski", first = "benjamin", middle = "charles")
#-----*args paramenter, packs all arguments into a tuple-----
def argsAndKwargsParameters():
def add(*args):
sum = 0
for i in args:
|
return sum
print(add(5,5,5,5,5,5,5))
#-----**kwargs, packs all arguments into a dictionary-----
def hello(**kwargs):
print("hello", end = " ")
for key,value in kwargs.items():
print(value, end=" ")
hello(first="ben", middle = "charles", last = "rafalski")
#-----format method, helps display output-----
def formatMethod():
animal = "cow"
item = "moon"
pi = 3.1415
print("the {0:^10} jumped over the {1:^10}".format(animal, item)) # uses placeholders {}
print("the number pi is {:.2f}".format(pi))
#-----random module, use 'import random'-----
def randomModule():
x = random.randint(1,6)
y = random.random()
myList = ["rock", "paper", "scissors"]
z = random.choice(myList)
print(x, y, z)
cards = [1,2,3,4,5,6,7,8,9,"J", "Q", "K", "A"]
random.shuffle(cards)
print(cards)
#-----exception handling-----
def exceptionHandling():
try:
numerator = int(input("enter a number to divide: "))
denominator = int(input("enter a number to divide by: "))
result = numerator / denominator
except ZeroDivisionError:
print("you cannot divide by zero")
except ValueError:
print("enter only numbers please")
except Exception: # catches all exceptions
print("something went wrong")
else:
print(result)
finally: # executes everytime the try/except statement is executed
print("this will always execute")
#-----file detection, use 'import os'-----
def detectFile():
path = "C:\\Users\\benrafalski\\Documents\\CSE365\\Assignment3\\README.txt"
if os.path.exists(path):
print("that location exists")
if os.path.isfile(path):
print("that is a file")
elif os.path.isdir(path):
print("that is a folder")
else:
print("that location does not exist")
#-----reading a file-----
def readFile():
try:
with open('test.txt') as file:
print(file.read()) # closes file automatically after opening
except FileNotFoundError:
print("that file was not found")
#-----writing a file-----
def writeFile():
text = "this is my written text\nThis is a newline"
with open('test.txt', 'w') as file: # open in write mode
file.write(text) # ovewrites previous file, use 'a' instead of 'w' for appending instead
#-----copying a file, use 'import shutil'-----
def copyFile():
shutil.copyfile('test.txt', 'copy.txt') # src,dest
#-----moving a file, use 'import os'
def moveFile():
source = "test.txt"
destination = "C:\\Users\\benrafalski\\Downloads\\test.txt"
try:
if os.path.exists(destination):
print("this file is already there")
else:
os.replace(source, destination)
print(source+ " was moved")
except FileNotFoundError:
print(source + " was not found")
#-----deleting a file, use 'import os'-----
def deleteFile():
try:
os.remove("copy.txt")
except FileNotFoundError:
print("that file was not found")
#-----classes-----
def createClass():
class Car:
# class variable
wheels = 4
# self is the same as 'this' keyword, __init__ is the constructor
def __init__(self, make, model, year, color):
# instance variables
self.make = make
self.model = model
self.year = year
self.color = color
def drive(self):
print("this car is driving")
def stop(self):
print("this car is stopped")
new_car = Car("toyota", "tacoma", 2002, "silver")
print(new_car.make, new_car.model, new_car.year, new_car.color, new_car.wheels)
new_car.drive()
new_car.stop()
#-----Inheritance-----
def inheritance():
# parent class
class Organism:
alive = True
# child class of organism
class Animal(Organism):
def eat(self):
print("this animal is eating")
def sleep(self):
print("this animal is sleeping")
# child classes of animal
class Rabbit(Animal):
def hop(self):
print("this rabbit is hopping")
class Fish(Animal):
def swim(self):
print("this fish is swimming")
class Hawk(Animal):
def fly(self):
print("this hawk is flying")
rabbit, fish, hawk = Rabbit(), Fish(), Hawk()
print(rabbit.alive, fish.alive, hawk.alive)
fish.eat()
hawk.sleep()
rabbit.hop()
fish.swim()
hawk.fly()
#-----multiple inheritance-----
def multipleInheritance():
class Prey:
def flee(self):
print("this animal is fleeing")
class Predator:
def hunt(self):
print("this animal is hunting")
class Rabbit(Prey):
pass
class Hawk(Predator):
pass
class Fish(Prey, Predator):
pass
rabbit, hawk, fish = Rabbit(), Hawk(), Fish()
rabbit.flee()
hawk.hunt()
fish.flee()
fish.hunt()
#-----method chaining-----
def methodChaining():
class Car:
def turn_on(self):
print("you start the engine")
return self # must have return self on the methods you would like to chain together
def drive(self):
print("you drive the car")
return self
def brake(self):
print("you step on the brakes")
return self
def turn_off(self):
print("you turn off the engine")
return self
car = Car()
car.turn_on().drive()
#-----super function-----
def superFunction():
class Rectangle:
def __init__(self, length, width):
self.length = length
self.width = width
class Square(Rectangle):
def __init__(self, length, width):
super().__init__(length,width)
class Cube(Rectangle):
def __init__(self, length, width, height):
super().__init__(length,width)
self.heigth = height
#-----abstract classes, uses 'from abc import *'-----
def abstractClasses():
class Vehicle:
# this is the abstract method, making this an abstract class
# this abstarct method must be overridden
@abstractmethod
def go(self):
pass
class Car(Vehicle):
def go(self):
print("you drive the car")
class Motorcycle(Vehicle):
def go(self):
print("you ride the motorcycle")
| sum += i | conditional_block |
main.py | import math
import random
import os
import shutil
from abc import *
def howItPrint():
print("hello world") # how to print
#-----variables-----
def variables():
name = "Ben Rafalski" # can use single or double quotes for a string in python
age = 20
height = 250.5
alive = True # booleans start with a capital letter
print("hello " + name + " who is: " + str(age) + " years old and " + str(250.5) + "cm tall, is ben alive? " + str(alive))
#-----multiple assignment, assign multiple variables in one line-----
#name, age, alive = "ben", 20, True
#print("name: " + name + ", age: " + str(age) + ", isAlive?: " + str(alive))
#-----string methods-----
#name = "ben"
#print(len(name)) # string length
#print(name.find("b")) # returns the index of the character selected
#print(name.capitalize()) # capitalizes the first letter
#print(name.upper()) #turns string all uppercase
#print(name.lower()) # turns string all lowercase
#print(name.isdigit()) # returns true if it is a number
#print(name.isalpha()) # returns true if the letters in the string are alphabetical
#print(name.count("e")) # returns the count of selected characters
#print(name.replace("e", "a")) # replaces first argument with the second one
#print(name*3) # prints a string multiple times
#-----Type Casting-----
#x, y, z = 1, 2.2, "3" # int, float, string
#print(int(y)) # drops the decimal
#print(float(z)) # adds a .0 to the end
#print(str(x)*3)
#-----user input-----
#name = input("what is your name?: ") # always returns a string, must cast if you need to do math operations
#age = float(input("what is your age?: "))
#age += 1
#print("your name is: " + name + ", next year you will be: " + str(age))
#-----math functions, must use "import math" to start-----
#pi = 3.14
#x, y, z = 1, 2, 3
#print(round(pi)) # rounds the number
#print(math.ceil(pi)) # rounds the number up
#print(math.floor(pi)) # rounds the number down
#print(abs(-pi)) # returns the absolute value of the number
#print(pow(pi, 2)) # takes the first argument to the power of the second argument
#print(math.sqrt(pi)) # returns the square root of the number
#print(max(x,y,z)) # returns the max number from a set of numbers
#print(min(x,y,z)) # returns the min number from a set of numbers
#-----string slicing, creating a substring from a string-----
#name = "Ben Rafalski"
#first_name = name[0:3:1] # indexing[start(inclusive):stop(exclusive):step]
#last_name = name[4:12:1]
#reverse_name = name[::-1] # returns the string in reverse, index [0:end:-1]
#print(first_name + last_name)
#-----If statements-----
#age = int(input("how old are you?: "))
#if age >= 65:
# print("you are a senior")
#elif age >= 18:
# print("you are an adult")
#else:
# print("you are a child")
#-----logical operators (and, or, not)-----
#temp = float(input("what is the tempurature outside?: "))
#if not(temp <= 0 and temp >= 30):
# print("the tempuratue is good today")
#elif temp < 0 or temp > 30:
# print("the tempurature is bad today")
#-----while loops-----
#name = ""
#while len(name) == 0:
# name = input("enter your name: ")
#print("hello " + name)
#-----for loops-----
#for i in range(1,11,1): #executed 10 times 'range(inclusive, exclusive, step)'
# print(i)
#-----nested loops-----
#rows = int(input("how many rows?: "))
#columns = int(input("how many columns?: "))
#symbol = input("enter a symbol to use: ")
#for i in range(rows):
# for j in range(columns):
# print(symbol, end = "") # prevents a newline
# print() # newline
#-----loop control statements, break (terminate loop), continue (skip to next iteration), pass (does nothing)-----
#while True:
# name = input("enter your name: ")
# if name != "":
# break
#phone_number = "123-456-7890"
#for i in phone_number:
# if i == "-":
# continue
# print(i, end = "")
#for i in range(1,21):
# if i == 13:
# pass # does nothing, place holder for the word 'nothing'
# else:
# print(i)
#-----lists, like arrays-----
#food = ["pizza", "cookies", "hamburger", "hot dog"]
#food.append("ice cream") # adds the element to the end of the list
#food.remove("hot dog") # removes the selected element from the list
#food.pop() # removes the last element from the list
#food.insert(0,"cake") # inserts the second argument as an element into the index of the first argument in the list
#food.sort() # sorts the list alphabetically
#food.clear() # clears all items from the list
#-----2D lists, list of lists-----
#drinks = ["coffee", "soda", "tea"]
#dinner = ["pizza", "hamburger", "hotdog"]
#dessert = ["pie", "ice cream", "cake"]
#food = [drinks, dinner, dessert]
#for i in range(3):
# for j in range(3):
# print(food[i][j], end="")
# if(j != 3):
# print(", ", end="")
# print()
#-----tuples, collections which are ordered and unchangeable, useful for related data-----
#student = ("ben", 21, "male")
#student.count("ben") # returns how many times a value appears in a tuple
#student.index("male") # returns the index of the selected element in the tuple
#-----sets, collection that is unordered and unindexed, has no dublicates-----
#utensils = {"fork", "spoon", "knife"}
#dishes = {"bowl", "plate", "cup", "knife"}
#utensils.add("napkin") # adds a specified element to the set
#utensils.remove("fork") # removes a specified element from the set
#utensils.clear() # clears all elements from a set
#utensils.update(dishes) # adds the elements of one set to another
#dinner_table = utensils.union(dishes) # takes the union of 2 sets
#for x in dinner_table:
# print(x)
#utensils.difference(dishes) # what does utensils have that dishes does not?
#-----dictionaries = changeable, unordered collection of key:value pairs-----
#capitols = {'USA':'Washington DC', 'India':'New Dehli', 'China':'Beijing', 'Russia':'Moscow'}
#print(capitols['Russia']) # use the key instead of the index of the element
#print(capitols.get('Germany')) # safer way of accessing the elements
#print(capitols.keys()) # prints all the keys
#print(capitols.values()) # prints only the values
#print(capitols.items()) # prints the keys and the values together
#capitols.update({'Germany':'Berlin'}) # adds/changes an element in the dictionary
#capitols.pop('USA') # removes the key value pair from the dictionary
#capitols.clear() # clears the dictionary
#-----index operator [], used in strings, lists, and tuples-----
#name = "ben rafalski"
#first_name = name[:3].upper() # [start:end]
#last_name = name[4:].upper() # [start:end]
#last_character = name[-1] # access element in reverse
#print(first_name+last_name)
#-----functions-----
#def func(name): #use the def keyword
# print("this is " + name + "'s first function")
#input = input("what is your name?: ")
#func(input)
#-----function that returns something-----
#def multiply(factor, multiplier):
# multiplicand = factor * multiplier
# return multiplicand
#number_1 = int(input("enter a number: "))
#number_2 = int(input("enter another number: "))
#multiplying = multiply(number_1, number_2)
#print(multiplying)
#-----keyword arguments, preceded by an identifier when passing into a func-----
#def hello(first, middle, last):
# print("hello "+first+" "+middle+" "+last)
#hello(last = "rafalski", first = "benjamin", middle = "charles")
#-----*args paramenter, packs all arguments into a tuple-----
def argsAndKwargsParameters():
def add(*args):
sum = 0
for i in args:
sum += i
return sum
print(add(5,5,5,5,5,5,5))
#-----**kwargs, packs all arguments into a dictionary-----
def hello(**kwargs):
print("hello", end = " ")
for key,value in kwargs.items():
print(value, end=" ")
hello(first="ben", middle = "charles", last = "rafalski")
#-----format method, helps display output-----
def formatMethod():
animal = "cow"
item = "moon"
pi = 3.1415
print("the {0:^10} jumped over the {1:^10}".format(animal, item)) # uses placeholders {}
print("the number pi is {:.2f}".format(pi))
#-----random module, use 'import random'-----
def randomModule():
x = random.randint(1,6)
y = random.random()
myList = ["rock", "paper", "scissors"]
z = random.choice(myList)
print(x, y, z)
cards = [1,2,3,4,5,6,7,8,9,"J", "Q", "K", "A"]
random.shuffle(cards)
print(cards)
#-----exception handling-----
def exceptionHandling():
try:
numerator = int(input("enter a number to divide: "))
denominator = int(input("enter a number to divide by: "))
result = numerator / denominator
except ZeroDivisionError:
print("you cannot divide by zero")
except ValueError:
print("enter only numbers please")
except Exception: # catches all exceptions
print("something went wrong")
else:
print(result)
finally: # executes everytime the try/except statement is executed
print("this will always execute")
#-----file detection, use 'import os'-----
def detectFile():
path = "C:\\Users\\benrafalski\\Documents\\CSE365\\Assignment3\\README.txt"
if os.path.exists(path):
print("that location exists")
if os.path.isfile(path):
print("that is a file")
elif os.path.isdir(path):
print("that is a folder")
else:
print("that location does not exist")
#-----reading a file-----
def readFile():
try:
with open('test.txt') as file:
print(file.read()) # closes file automatically after opening
except FileNotFoundError:
print("that file was not found")
#-----writing a file-----
def writeFile():
text = "this is my written text\nThis is a newline"
with open('test.txt', 'w') as file: # open in write mode
file.write(text) # ovewrites previous file, use 'a' instead of 'w' for appending instead
#-----copying a file, use 'import shutil'-----
def copyFile():
shutil.copyfile('test.txt', 'copy.txt') # src,dest
#-----moving a file, use 'import os'
def moveFile():
source = "test.txt"
destination = "C:\\Users\\benrafalski\\Downloads\\test.txt"
try:
if os.path.exists(destination):
print("this file is already there")
else:
os.replace(source, destination)
print(source+ " was moved")
except FileNotFoundError:
print(source + " was not found")
#-----deleting a file, use 'import os'-----
def deleteFile():
try:
os.remove("copy.txt")
except FileNotFoundError:
print("that file was not found")
#-----classes-----
def createClass():
class Car:
# class variable
wheels = 4
# self is the same as 'this' keyword, __init__ is the constructor
def __init__(self, make, model, year, color):
# instance variables
self.make = make
self.model = model
self.year = year
self.color = color
def drive(self):
print("this car is driving")
def stop(self):
print("this car is stopped")
new_car = Car("toyota", "tacoma", 2002, "silver")
print(new_car.make, new_car.model, new_car.year, new_car.color, new_car.wheels)
new_car.drive()
new_car.stop()
#-----Inheritance-----
def inheritance():
# parent class
class Organism:
alive = True
# child class of organism
class Animal(Organism):
def eat(self):
print("this animal is eating")
def sleep(self):
print("this animal is sleeping")
# child classes of animal
class Rabbit(Animal):
def hop(self):
print("this rabbit is hopping")
class Fish(Animal):
def swim(self):
print("this fish is swimming")
class Hawk(Animal):
def | (self):
print("this hawk is flying")
rabbit, fish, hawk = Rabbit(), Fish(), Hawk()
print(rabbit.alive, fish.alive, hawk.alive)
fish.eat()
hawk.sleep()
rabbit.hop()
fish.swim()
hawk.fly()
#-----multiple inheritance-----
def multipleInheritance():
class Prey:
def flee(self):
print("this animal is fleeing")
class Predator:
def hunt(self):
print("this animal is hunting")
class Rabbit(Prey):
pass
class Hawk(Predator):
pass
class Fish(Prey, Predator):
pass
rabbit, hawk, fish = Rabbit(), Hawk(), Fish()
rabbit.flee()
hawk.hunt()
fish.flee()
fish.hunt()
#-----method chaining-----
def methodChaining():
class Car:
def turn_on(self):
print("you start the engine")
return self # must have return self on the methods you would like to chain together
def drive(self):
print("you drive the car")
return self
def brake(self):
print("you step on the brakes")
return self
def turn_off(self):
print("you turn off the engine")
return self
car = Car()
car.turn_on().drive()
#-----super function-----
def superFunction():
class Rectangle:
def __init__(self, length, width):
self.length = length
self.width = width
class Square(Rectangle):
def __init__(self, length, width):
super().__init__(length,width)
class Cube(Rectangle):
def __init__(self, length, width, height):
super().__init__(length,width)
self.heigth = height
#-----abstract classes, uses 'from abc import *'-----
def abstractClasses():
class Vehicle:
# this is the abstract method, making this an abstract class
# this abstarct method must be overridden
@abstractmethod
def go(self):
pass
class Car(Vehicle):
def go(self):
print("you drive the car")
class Motorcycle(Vehicle):
def go(self):
print("you ride the motorcycle")
| fly | identifier_name |
main.py | import math
import random
import os
import shutil
from abc import *
def howItPrint():
|
#-----variables-----
def variables():
name = "Ben Rafalski" # can use single or double quotes for a string in python
age = 20
height = 250.5
alive = True # booleans start with a capital letter
print("hello " + name + " who is: " + str(age) + " years old and " + str(250.5) + "cm tall, is ben alive? " + str(alive))
#-----multiple assignment, assign multiple variables in one line-----
#name, age, alive = "ben", 20, True
#print("name: " + name + ", age: " + str(age) + ", isAlive?: " + str(alive))
#-----string methods-----
#name = "ben"
#print(len(name)) # string length
#print(name.find("b")) # returns the index of the character selected
#print(name.capitalize()) # capitalizes the first letter
#print(name.upper()) #turns string all uppercase
#print(name.lower()) # turns string all lowercase
#print(name.isdigit()) # returns true if it is a number
#print(name.isalpha()) # returns true if the letters in the string are alphabetical
#print(name.count("e")) # returns the count of selected characters
#print(name.replace("e", "a")) # replaces first argument with the second one
#print(name*3) # prints a string multiple times
#-----Type Casting-----
#x, y, z = 1, 2.2, "3" # int, float, string
#print(int(y)) # drops the decimal
#print(float(z)) # adds a .0 to the end
#print(str(x)*3)
#-----user input-----
#name = input("what is your name?: ") # always returns a string, must cast if you need to do math operations
#age = float(input("what is your age?: "))
#age += 1
#print("your name is: " + name + ", next year you will be: " + str(age))
#-----math functions, must use "import math" to start-----
#pi = 3.14
#x, y, z = 1, 2, 3
#print(round(pi)) # rounds the number
#print(math.ceil(pi)) # rounds the number up
#print(math.floor(pi)) # rounds the number down
#print(abs(-pi)) # returns the absolute value of the number
#print(pow(pi, 2)) # takes the first argument to the power of the second argument
#print(math.sqrt(pi)) # returns the square root of the number
#print(max(x,y,z)) # returns the max number from a set of numbers
#print(min(x,y,z)) # returns the min number from a set of numbers
#-----string slicing, creating a substring from a string-----
#name = "Ben Rafalski"
#first_name = name[0:3:1] # indexing[start(inclusive):stop(exclusive):step]
#last_name = name[4:12:1]
#reverse_name = name[::-1] # returns the string in reverse, index [0:end:-1]
#print(first_name + last_name)
#-----If statements-----
#age = int(input("how old are you?: "))
#if age >= 65:
# print("you are a senior")
#elif age >= 18:
# print("you are an adult")
#else:
# print("you are a child")
#-----logical operators (and, or, not)-----
#temp = float(input("what is the tempurature outside?: "))
#if not(temp <= 0 and temp >= 30):
# print("the tempuratue is good today")
#elif temp < 0 or temp > 30:
# print("the tempurature is bad today")
#-----while loops-----
#name = ""
#while len(name) == 0:
# name = input("enter your name: ")
#print("hello " + name)
#-----for loops-----
#for i in range(1,11,1): #executed 10 times 'range(inclusive, exclusive, step)'
# print(i)
#-----nested loops-----
#rows = int(input("how many rows?: "))
#columns = int(input("how many columns?: "))
#symbol = input("enter a symbol to use: ")
#for i in range(rows):
# for j in range(columns):
# print(symbol, end = "") # prevents a newline
# print() # newline
#-----loop control statements, break (terminate loop), continue (skip to next iteration), pass (does nothing)-----
#while True:
# name = input("enter your name: ")
# if name != "":
# break
#phone_number = "123-456-7890"
#for i in phone_number:
# if i == "-":
# continue
# print(i, end = "")
#for i in range(1,21):
# if i == 13:
# pass # does nothing, place holder for the word 'nothing'
# else:
# print(i)
#-----lists, like arrays-----
#food = ["pizza", "cookies", "hamburger", "hot dog"]
#food.append("ice cream") # adds the element to the end of the list
#food.remove("hot dog") # removes the selected element from the list
#food.pop() # removes the last element from the list
#food.insert(0,"cake") # inserts the second argument as an element into the index of the first argument in the list
#food.sort() # sorts the list alphabetically
#food.clear() # clears all items from the list
#-----2D lists, list of lists-----
#drinks = ["coffee", "soda", "tea"]
#dinner = ["pizza", "hamburger", "hotdog"]
#dessert = ["pie", "ice cream", "cake"]
#food = [drinks, dinner, dessert]
#for i in range(3):
# for j in range(3):
# print(food[i][j], end="")
# if(j != 3):
# print(", ", end="")
# print()
#-----tuples, collections which are ordered and unchangeable, useful for related data-----
#student = ("ben", 21, "male")
#student.count("ben") # returns how many times a value appears in a tuple
#student.index("male") # returns the index of the selected element in the tuple
#-----sets, collection that is unordered and unindexed, has no dublicates-----
#utensils = {"fork", "spoon", "knife"}
#dishes = {"bowl", "plate", "cup", "knife"}
#utensils.add("napkin") # adds a specified element to the set
#utensils.remove("fork") # removes a specified element from the set
#utensils.clear() # clears all elements from a set
#utensils.update(dishes) # adds the elements of one set to another
#dinner_table = utensils.union(dishes) # takes the union of 2 sets
#for x in dinner_table:
# print(x)
#utensils.difference(dishes) # what does utensils have that dishes does not?
#-----dictionaries = changeable, unordered collection of key:value pairs-----
#capitols = {'USA':'Washington DC', 'India':'New Dehli', 'China':'Beijing', 'Russia':'Moscow'}
#print(capitols['Russia']) # use the key instead of the index of the element
#print(capitols.get('Germany')) # safer way of accessing the elements
#print(capitols.keys()) # prints all the keys
#print(capitols.values()) # prints only the values
#print(capitols.items()) # prints the keys and the values together
#capitols.update({'Germany':'Berlin'}) # adds/changes an element in the dictionary
#capitols.pop('USA') # removes the key value pair from the dictionary
#capitols.clear() # clears the dictionary
#-----index operator [], used in strings, lists, and tuples-----
#name = "ben rafalski"
#first_name = name[:3].upper() # [start:end]
#last_name = name[4:].upper() # [start:end]
#last_character = name[-1] # access element in reverse
#print(first_name+last_name)
#-----functions-----
#def func(name): #use the def keyword
# print("this is " + name + "'s first function")
#input = input("what is your name?: ")
#func(input)
#-----function that returns something-----
#def multiply(factor, multiplier):
# multiplicand = factor * multiplier
# return multiplicand
#number_1 = int(input("enter a number: "))
#number_2 = int(input("enter another number: "))
#multiplying = multiply(number_1, number_2)
#print(multiplying)
#-----keyword arguments, preceded by an identifier when passing into a func-----
#def hello(first, middle, last):
# print("hello "+first+" "+middle+" "+last)
#hello(last = "rafalski", first = "benjamin", middle = "charles")
#-----*args paramenter, packs all arguments into a tuple-----
def argsAndKwargsParameters():
def add(*args):
sum = 0
for i in args:
sum += i
return sum
print(add(5,5,5,5,5,5,5))
#-----**kwargs, packs all arguments into a dictionary-----
def hello(**kwargs):
print("hello", end = " ")
for key,value in kwargs.items():
print(value, end=" ")
hello(first="ben", middle = "charles", last = "rafalski")
#-----format method, helps display output-----
def formatMethod():
animal = "cow"
item = "moon"
pi = 3.1415
print("the {0:^10} jumped over the {1:^10}".format(animal, item)) # uses placeholders {}
print("the number pi is {:.2f}".format(pi))
#-----random module, use 'import random'-----
def randomModule():
x = random.randint(1,6)
y = random.random()
myList = ["rock", "paper", "scissors"]
z = random.choice(myList)
print(x, y, z)
cards = [1,2,3,4,5,6,7,8,9,"J", "Q", "K", "A"]
random.shuffle(cards)
print(cards)
#-----exception handling-----
def exceptionHandling():
try:
numerator = int(input("enter a number to divide: "))
denominator = int(input("enter a number to divide by: "))
result = numerator / denominator
except ZeroDivisionError:
print("you cannot divide by zero")
except ValueError:
print("enter only numbers please")
except Exception: # catches all exceptions
print("something went wrong")
else:
print(result)
finally: # executes everytime the try/except statement is executed
print("this will always execute")
#-----file detection, use 'import os'-----
def detectFile():
path = "C:\\Users\\benrafalski\\Documents\\CSE365\\Assignment3\\README.txt"
if os.path.exists(path):
print("that location exists")
if os.path.isfile(path):
print("that is a file")
elif os.path.isdir(path):
print("that is a folder")
else:
print("that location does not exist")
#-----reading a file-----
def readFile():
try:
with open('test.txt') as file:
print(file.read()) # closes file automatically after opening
except FileNotFoundError:
print("that file was not found")
#-----writing a file-----
def writeFile():
text = "this is my written text\nThis is a newline"
with open('test.txt', 'w') as file: # open in write mode
file.write(text) # ovewrites previous file, use 'a' instead of 'w' for appending instead
#-----copying a file, use 'import shutil'-----
def copyFile():
shutil.copyfile('test.txt', 'copy.txt') # src,dest
#-----moving a file, use 'import os'
def moveFile():
source = "test.txt"
destination = "C:\\Users\\benrafalski\\Downloads\\test.txt"
try:
if os.path.exists(destination):
print("this file is already there")
else:
os.replace(source, destination)
print(source+ " was moved")
except FileNotFoundError:
print(source + " was not found")
#-----deleting a file, use 'import os'-----
def deleteFile():
try:
os.remove("copy.txt")
except FileNotFoundError:
print("that file was not found")
#-----classes-----
def createClass():
class Car:
# class variable
wheels = 4
# self is the same as 'this' keyword, __init__ is the constructor
def __init__(self, make, model, year, color):
# instance variables
self.make = make
self.model = model
self.year = year
self.color = color
def drive(self):
print("this car is driving")
def stop(self):
print("this car is stopped")
new_car = Car("toyota", "tacoma", 2002, "silver")
print(new_car.make, new_car.model, new_car.year, new_car.color, new_car.wheels)
new_car.drive()
new_car.stop()
#-----Inheritance-----
def inheritance():
# parent class
class Organism:
alive = True
# child class of organism
class Animal(Organism):
def eat(self):
print("this animal is eating")
def sleep(self):
print("this animal is sleeping")
# child classes of animal
class Rabbit(Animal):
def hop(self):
print("this rabbit is hopping")
class Fish(Animal):
def swim(self):
print("this fish is swimming")
class Hawk(Animal):
def fly(self):
print("this hawk is flying")
rabbit, fish, hawk = Rabbit(), Fish(), Hawk()
print(rabbit.alive, fish.alive, hawk.alive)
fish.eat()
hawk.sleep()
rabbit.hop()
fish.swim()
hawk.fly()
#-----multiple inheritance-----
def multipleInheritance():
class Prey:
def flee(self):
print("this animal is fleeing")
class Predator:
def hunt(self):
print("this animal is hunting")
class Rabbit(Prey):
pass
class Hawk(Predator):
pass
class Fish(Prey, Predator):
pass
rabbit, hawk, fish = Rabbit(), Hawk(), Fish()
rabbit.flee()
hawk.hunt()
fish.flee()
fish.hunt()
#-----method chaining-----
def methodChaining():
class Car:
def turn_on(self):
print("you start the engine")
return self # must have return self on the methods you would like to chain together
def drive(self):
print("you drive the car")
return self
def brake(self):
print("you step on the brakes")
return self
def turn_off(self):
print("you turn off the engine")
return self
car = Car()
car.turn_on().drive()
#-----super function-----
def superFunction():
class Rectangle:
def __init__(self, length, width):
self.length = length
self.width = width
class Square(Rectangle):
def __init__(self, length, width):
super().__init__(length,width)
class Cube(Rectangle):
def __init__(self, length, width, height):
super().__init__(length,width)
self.heigth = height
#-----abstract classes, uses 'from abc import *'-----
def abstractClasses():
class Vehicle:
# this is the abstract method, making this an abstract class
# this abstarct method must be overridden
@abstractmethod
def go(self):
pass
class Car(Vehicle):
def go(self):
print("you drive the car")
class Motorcycle(Vehicle):
def go(self):
print("you ride the motorcycle")
| print("hello world") # how to print
| identifier_body |
main.py | import math
import random
import os
import shutil
from abc import *
def howItPrint():
print("hello world") # how to print
#-----variables-----
def variables():
name = "Ben Rafalski" # can use single or double quotes for a string in python
age = 20
height = 250.5
alive = True # booleans start with a capital letter
print("hello " + name + " who is: " + str(age) + " years old and " + str(250.5) + "cm tall, is ben alive? " + str(alive))
#-----multiple assignment, assign multiple variables in one line-----
#name, age, alive = "ben", 20, True
#print("name: " + name + ", age: " + str(age) + ", isAlive?: " + str(alive))
#-----string methods-----
#name = "ben"
#print(len(name)) # string length
#print(name.find("b")) # returns the index of the character selected
#print(name.capitalize()) # capitalizes the first letter
#print(name.upper()) #turns string all uppercase
#print(name.lower()) # turns string all lowercase
#print(name.isdigit()) # returns true if it is a number
#print(name.isalpha()) # returns true if the letters in the string are alphabetical
#print(name.count("e")) # returns the count of selected characters
#print(name.replace("e", "a")) # replaces first argument with the second one
#print(name*3) # prints a string multiple times
#-----Type Casting-----
#x, y, z = 1, 2.2, "3" # int, float, string
#print(int(y)) # drops the decimal
#print(float(z)) # adds a .0 to the end
#print(str(x)*3)
#-----user input-----
#name = input("what is your name?: ") # always returns a string, must cast if you need to do math operations
#age = float(input("what is your age?: "))
#age += 1
#print("your name is: " + name + ", next year you will be: " + str(age))
#-----math functions, must use "import math" to start-----
#pi = 3.14
#x, y, z = 1, 2, 3
#print(round(pi)) # rounds the number
#print(math.ceil(pi)) # rounds the number up
#print(math.floor(pi)) # rounds the number down
#print(abs(-pi)) # returns the absolute value of the number
#print(pow(pi, 2)) # takes the first argument to the power of the second argument
#print(math.sqrt(pi)) # returns the square root of the number
#print(max(x,y,z)) # returns the max number from a set of numbers
#print(min(x,y,z)) # returns the min number from a set of numbers
#-----string slicing, creating a substring from a string-----
#name = "Ben Rafalski"
#first_name = name[0:3:1] # indexing[start(inclusive):stop(exclusive):step]
#last_name = name[4:12:1]
#reverse_name = name[::-1] # returns the string in reverse, index [0:end:-1]
#print(first_name + last_name)
#-----If statements-----
#age = int(input("how old are you?: "))
#if age >= 65:
# print("you are a senior")
#elif age >= 18:
# print("you are an adult")
#else:
# print("you are a child")
#-----logical operators (and, or, not)-----
#temp = float(input("what is the tempurature outside?: "))
#if not(temp <= 0 and temp >= 30):
# print("the tempuratue is good today")
#elif temp < 0 or temp > 30:
# print("the tempurature is bad today")
#-----while loops-----
#name = ""
#while len(name) == 0:
# name = input("enter your name: ")
#print("hello " + name)
#-----for loops-----
#for i in range(1,11,1): #executed 10 times 'range(inclusive, exclusive, step)'
# print(i)
#-----nested loops-----
#rows = int(input("how many rows?: "))
#columns = int(input("how many columns?: "))
#symbol = input("enter a symbol to use: ")
#for i in range(rows):
# for j in range(columns):
# print(symbol, end = "") # prevents a newline
# print() # newline
#-----loop control statements, break (terminate loop), continue (skip to next iteration), pass (does nothing)-----
#while True:
# name = input("enter your name: ")
# if name != "":
# break
#phone_number = "123-456-7890"
#for i in phone_number:
# if i == "-":
# continue
# print(i, end = "")
#for i in range(1,21):
# if i == 13:
# pass # does nothing, place holder for the word 'nothing'
# else:
# print(i)
#-----lists, like arrays-----
#food = ["pizza", "cookies", "hamburger", "hot dog"]
#food.append("ice cream") # adds the element to the end of the list
#food.remove("hot dog") # removes the selected element from the list
#food.pop() # removes the last element from the list
#food.insert(0,"cake") # inserts the second argument as an element into the index of the first argument in the list
#food.sort() # sorts the list alphabetically
#food.clear() # clears all items from the list
#-----2D lists, list of lists-----
#drinks = ["coffee", "soda", "tea"]
#dinner = ["pizza", "hamburger", "hotdog"]
#dessert = ["pie", "ice cream", "cake"]
#food = [drinks, dinner, dessert]
#for i in range(3):
# for j in range(3):
# print(food[i][j], end="")
# if(j != 3):
# print(", ", end="")
# print()
#-----tuples, collections which are ordered and unchangeable, useful for related data-----
#student = ("ben", 21, "male")
#student.count("ben") # returns how many times a value appears in a tuple
#student.index("male") # returns the index of the selected element in the tuple
#-----sets, collection that is unordered and unindexed, has no dublicates-----
#utensils = {"fork", "spoon", "knife"}
#dishes = {"bowl", "plate", "cup", "knife"}
#utensils.add("napkin") # adds a specified element to the set
#utensils.remove("fork") # removes a specified element from the set
#utensils.clear() # clears all elements from a set
#utensils.update(dishes) # adds the elements of one set to another
#dinner_table = utensils.union(dishes) # takes the union of 2 sets
#for x in dinner_table:
# print(x)
#utensils.difference(dishes) # what does utensils have that dishes does not?
#-----dictionaries = changeable, unordered collection of key:value pairs-----
#capitols = {'USA':'Washington DC', 'India':'New Dehli', 'China':'Beijing', 'Russia':'Moscow'}
#print(capitols['Russia']) # use the key instead of the index of the element
#print(capitols.get('Germany')) # safer way of accessing the elements
#print(capitols.keys()) # prints all the keys
#print(capitols.values()) # prints only the values
#print(capitols.items()) # prints the keys and the values together
#capitols.update({'Germany':'Berlin'}) # adds/changes an element in the dictionary
#capitols.pop('USA') # removes the key value pair from the dictionary
#capitols.clear() # clears the dictionary
#-----index operator [], used in strings, lists, and tuples-----
#name = "ben rafalski"
#first_name = name[:3].upper() # [start:end]
#last_name = name[4:].upper() # [start:end]
#last_character = name[-1] # access element in reverse
#print(first_name+last_name)
#-----functions-----
#def func(name): #use the def keyword
# print("this is " + name + "'s first function")
#input = input("what is your name?: ")
#func(input)
#-----function that returns something-----
#def multiply(factor, multiplier):
# multiplicand = factor * multiplier
# return multiplicand
#number_1 = int(input("enter a number: "))
#number_2 = int(input("enter another number: "))
#multiplying = multiply(number_1, number_2)
#print(multiplying)
#-----keyword arguments, preceded by an identifier when passing into a func-----
#def hello(first, middle, last):
# print("hello "+first+" "+middle+" "+last)
#hello(last = "rafalski", first = "benjamin", middle = "charles")
#-----*args paramenter, packs all arguments into a tuple-----
def argsAndKwargsParameters():
def add(*args):
sum = 0
for i in args:
sum += i
return sum
print(add(5,5,5,5,5,5,5))
#-----**kwargs, packs all arguments into a dictionary-----
def hello(**kwargs):
print("hello", end = " ")
for key,value in kwargs.items():
print(value, end=" ")
hello(first="ben", middle = "charles", last = "rafalski")
#-----format method, helps display output-----
def formatMethod():
animal = "cow"
item = "moon"
pi = 3.1415
print("the {0:^10} jumped over the {1:^10}".format(animal, item)) # uses placeholders {}
print("the number pi is {:.2f}".format(pi))
#-----random module, use 'import random'-----
def randomModule():
x = random.randint(1,6)
y = random.random()
myList = ["rock", "paper", "scissors"]
z = random.choice(myList)
print(x, y, z)
cards = [1,2,3,4,5,6,7,8,9,"J", "Q", "K", "A"]
random.shuffle(cards)
print(cards)
#-----exception handling-----
def exceptionHandling():
try:
numerator = int(input("enter a number to divide: "))
denominator = int(input("enter a number to divide by: "))
result = numerator / denominator
except ZeroDivisionError:
print("you cannot divide by zero")
except ValueError:
print("enter only numbers please")
except Exception: # catches all exceptions
print("something went wrong")
else:
print(result)
finally: # executes everytime the try/except statement is executed
print("this will always execute")
#-----file detection, use 'import os'-----
def detectFile():
path = "C:\\Users\\benrafalski\\Documents\\CSE365\\Assignment3\\README.txt"
if os.path.exists(path):
print("that location exists")
if os.path.isfile(path):
print("that is a file")
elif os.path.isdir(path):
print("that is a folder")
else:
print("that location does not exist")
#-----reading a file-----
def readFile():
try:
with open('test.txt') as file:
print(file.read()) # closes file automatically after opening
except FileNotFoundError:
print("that file was not found")
#-----writing a file-----
def writeFile():
| with open('test.txt', 'w') as file: # open in write mode
file.write(text) # ovewrites previous file, use 'a' instead of 'w' for appending instead
#-----copying a file, use 'import shutil'-----
def copyFile():
shutil.copyfile('test.txt', 'copy.txt') # src,dest
#-----moving a file, use 'import os'
def moveFile():
source = "test.txt"
destination = "C:\\Users\\benrafalski\\Downloads\\test.txt"
try:
if os.path.exists(destination):
print("this file is already there")
else:
os.replace(source, destination)
print(source+ " was moved")
except FileNotFoundError:
print(source + " was not found")
#-----deleting a file, use 'import os'-----
def deleteFile():
try:
os.remove("copy.txt")
except FileNotFoundError:
print("that file was not found")
#-----classes-----
def createClass():
class Car:
# class variable
wheels = 4
# self is the same as 'this' keyword, __init__ is the constructor
def __init__(self, make, model, year, color):
# instance variables
self.make = make
self.model = model
self.year = year
self.color = color
def drive(self):
print("this car is driving")
def stop(self):
print("this car is stopped")
new_car = Car("toyota", "tacoma", 2002, "silver")
print(new_car.make, new_car.model, new_car.year, new_car.color, new_car.wheels)
new_car.drive()
new_car.stop()
#-----Inheritance-----
def inheritance():
# parent class
class Organism:
alive = True
# child class of organism
class Animal(Organism):
def eat(self):
print("this animal is eating")
def sleep(self):
print("this animal is sleeping")
# child classes of animal
class Rabbit(Animal):
def hop(self):
print("this rabbit is hopping")
class Fish(Animal):
def swim(self):
print("this fish is swimming")
class Hawk(Animal):
def fly(self):
print("this hawk is flying")
rabbit, fish, hawk = Rabbit(), Fish(), Hawk()
print(rabbit.alive, fish.alive, hawk.alive)
fish.eat()
hawk.sleep()
rabbit.hop()
fish.swim()
hawk.fly()
#-----multiple inheritance-----
def multipleInheritance():
class Prey:
def flee(self):
print("this animal is fleeing")
class Predator:
def hunt(self):
print("this animal is hunting")
class Rabbit(Prey):
pass
class Hawk(Predator):
pass
class Fish(Prey, Predator):
pass
rabbit, hawk, fish = Rabbit(), Hawk(), Fish()
rabbit.flee()
hawk.hunt()
fish.flee()
fish.hunt()
#-----method chaining-----
def methodChaining():
class Car:
def turn_on(self):
print("you start the engine")
return self # must have return self on the methods you would like to chain together
def drive(self):
print("you drive the car")
return self
def brake(self):
print("you step on the brakes")
return self
def turn_off(self):
print("you turn off the engine")
return self
car = Car()
car.turn_on().drive()
#-----super function-----
def superFunction():
class Rectangle:
def __init__(self, length, width):
self.length = length
self.width = width
class Square(Rectangle):
def __init__(self, length, width):
super().__init__(length,width)
class Cube(Rectangle):
def __init__(self, length, width, height):
super().__init__(length,width)
self.heigth = height
#-----abstract classes, uses 'from abc import *'-----
def abstractClasses():
class Vehicle:
# this is the abstract method, making this an abstract class
# this abstarct method must be overridden
@abstractmethod
def go(self):
pass
class Car(Vehicle):
def go(self):
print("you drive the car")
class Motorcycle(Vehicle):
def go(self):
print("you ride the motorcycle") | text = "this is my written text\nThis is a newline"
| random_line_split |
client.go | /*
Copyright (c) 2023 SAP SE or an SAP affiliate company. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fakeclient
import (
"context"
"errors"
"fmt"
"k8s.io/klog/v2"
"sync"
"time"
fakeuntyped "github.com/gardener/machine-controller-manager/pkg/client/clientset/versioned/fake"
apipolicyv1beta1 "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
fakediscovery "k8s.io/client-go/discovery/fake"
k8sfake "k8s.io/client-go/kubernetes/fake"
policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1"
fakepolicyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake"
k8stesting "k8s.io/client-go/testing"
)
// FakeObjectTracker implements both k8stesting.ObjectTracker as well as watch.Interface.
type FakeObjectTracker struct {
*watch.FakeWatcher
delegatee k8stesting.ObjectTracker
watchers []*watcher
trackerMutex sync.Mutex
fakingOptions
}
// Add receives an add event with the object
func (t *FakeObjectTracker) Add(obj runtime.Object) error {
if t.fakingOptions.failAll != nil {
err := t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return err
}
}
return t.delegatee.Add(obj)
}
// Get receives a get event with the object
func (t *FakeObjectTracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) {
var err error
if t.fakingOptions.failAll != nil {
err = t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return nil, err
}
}
if t.fakingOptions.failAt != nil {
if gvr.Resource == "nodes" {
err = t.fakingOptions.failAt.Node.Get.RunFakeInvocations()
} else if gvr.Resource == "machines" {
err = t.fakingOptions.failAt.Machine.Get.RunFakeInvocations()
} else if gvr.Resource == "machinesets" {
err = t.fakingOptions.failAt.MachineSet.Get.RunFakeInvocations()
} else if gvr.Resource == "machinedeployments" {
err = t.fakingOptions.failAt.MachineDeployment.Get.RunFakeInvocations()
}
if err != nil {
return nil, err
}
}
return t.delegatee.Get(gvr, ns, name)
}
// Create receives a create event with the object. Not needed for CA.
func (t *FakeObjectTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
return nil
}
// Update receives an update event with the object
func (t *FakeObjectTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
var err error
if t.fakingOptions.failAll != nil {
err = t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return err
}
}
if t.fakingOptions.failAt != nil {
if gvr.Resource == "nodes" {
err = t.fakingOptions.failAt.Node.Update.RunFakeInvocations()
} else if gvr.Resource == "machines" {
err = t.fakingOptions.failAt.Machine.Update.RunFakeInvocations()
} else if gvr.Resource == "machinedeployments" {
err = t.fakingOptions.failAt.MachineDeployment.Update.RunFakeInvocations()
}
if err != nil {
return err
}
}
err = t.delegatee.Update(gvr, obj, ns)
if err != nil {
return err
}
if t.FakeWatcher == nil {
return errors.New("error sending event on a tracker with no watch support")
}
if t.IsStopped() {
return errors.New("error sending event on a stopped tracker")
}
t.FakeWatcher.Modify(obj)
return nil
}
// List receives a list event with the object
func (t *FakeObjectTracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) {
if t.fakingOptions.failAll != nil {
err := t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return nil, err
}
}
return t.delegatee.List(gvr, gvk, ns)
}
// Delete receives an delete event with the object. Not needed for CA.
func (t *FakeObjectTracker) Delete(gvr schema.GroupVersionResource, ns, name string) error {
return nil
}
// Watch receives a watch event with the object
func (t *FakeObjectTracker) Watch(gvr schema.GroupVersionResource, name string) (watch.Interface, error) {
if t.fakingOptions.failAll != nil {
err := t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return nil, err
}
}
return t.delegatee.Watch(gvr, name)
}
func (t *FakeObjectTracker) watchReactionFunc(action k8stesting.Action) (bool, watch.Interface, error) {
if t.FakeWatcher == nil {
return false, nil, errors.New("cannot watch on a tracker with no watch support")
}
switch a := action.(type) {
case k8stesting.WatchAction:
w := &watcher{
FakeWatcher: watch.NewFake(),
action: a,
}
go func() {
err := w.dispatchInitialObjects(a, t)
if err != nil {
klog.Errorf("error dispatching initial objects, Err: %v", err)
}
}()
t.trackerMutex.Lock()
defer t.trackerMutex.Unlock()
t.watchers = append(t.watchers, w)
return true, w, nil
default:
return false, nil, fmt.Errorf("expected WatchAction but got %v", action)
}
}
// Start begins tracking of an FakeObjectTracker
func (t *FakeObjectTracker) Start() error {
if t.FakeWatcher == nil {
return errors.New("tracker has no watch support")
}
for event := range t.ResultChan() {
event := event.DeepCopy() // passing a deep copy to avoid race.
t.dispatch(event)
}
return nil
}
func (t *FakeObjectTracker) dispatch(event *watch.Event) {
for _, w := range t.watchers {
go w.dispatch(event)
}
}
// Stop terminates tracking of an FakeObjectTracker
func (t *FakeObjectTracker) Stop() {
if t.FakeWatcher == nil {
panic(errors.New("tracker has no watch support"))
}
t.trackerMutex.Lock()
defer t.trackerMutex.Unlock()
t.FakeWatcher.Stop()
for _, w := range t.watchers {
w.Stop()
}
}
type watcher struct {
*watch.FakeWatcher
action k8stesting.WatchAction
updateMutex sync.Mutex
}
func (w *watcher) Stop() {
w.updateMutex.Lock()
defer w.updateMutex.Unlock()
w.FakeWatcher.Stop()
}
func (w *watcher) handles(event *watch.Event) bool {
if w.IsStopped() {
return false
}
t, err := meta.TypeAccessor(event.Object)
if err != nil {
return false
}
gvr, _ := meta.UnsafeGuessKindToResource(schema.FromAPIVersionAndKind(t.GetAPIVersion(), t.GetKind()))
if !(&k8stesting.SimpleWatchReactor{Resource: gvr.Resource}).Handles(w.action) {
return false
}
o, err := meta.Accessor(event.Object)
if err != nil {
return false
}
info := w.action.GetWatchRestrictions()
rv, fs, ls := info.ResourceVersion, info.Fields, info.Labels
if rv != "" && o.GetResourceVersion() != rv {
return false
}
if fs != nil && !fs.Matches(fields.Set{
"metadata.name": o.GetName(),
"metadata.namespace": o.GetNamespace(),
}) {
return false
}
if ls != nil && !ls.Matches(labels.Set(o.GetLabels())) {
return false
}
return true
}
func (w *watcher) dispatch(event *watch.Event) {
w.updateMutex.Lock()
defer w.updateMutex.Unlock()
if !w.handles(event) {
return
}
w.Action(event.Type, event.Object)
}
func (w *watcher) dispatchInitialObjects(action k8stesting.WatchAction, t k8stesting.ObjectTracker) error {
listObj, err := t.List(action.GetResource(), action.GetResource().GroupVersion().WithKind(action.GetResource().Resource), action.GetNamespace())
if err != nil {
return err
}
itemsPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
items := itemsPtr.([]runtime.Object)
for _, o := range items {
w.dispatch(&watch.Event{
Type: watch.Added,
Object: o,
})
}
return nil
}
// ResourceActions contains of Kubernetes/Machine resources whose response can be faked
type ResourceActions struct {
Node Actions
Machine Actions
MachineSet Actions
MachineDeployment Actions
}
// Actions contains the actions whose response can be faked
type Actions struct {
Get FakeResponse
Update FakeResponse
}
// FakeResponse is the custom error response configuration that are used for responding to client calls
type FakeResponse struct {
counter int
errorMsg string
responseDelay time.Duration
}
// fakingOptions are options that can be set while trying to fake object tracker returns
type fakingOptions struct {
// Fail at different resource action
failAt *ResourceActions
// Fail every action
failAll *FakeResponse
}
// CreateFakeResponse creates a fake response for an action
func CreateFakeResponse(counter int, errorMsg string, responseDelay time.Duration) FakeResponse {
return FakeResponse{
counter: counter,
errorMsg: errorMsg,
responseDelay: responseDelay,
}
}
// DecrementCounter reduces the counter for the particular action response by 1
func (o *FakeResponse) DecrementCounter() {
o.counter--
}
// RunFakeInvocations runs any custom fake configurations/methods before invoking standard ObjectTrackers
func (o *FakeResponse) RunFakeInvocations() error {
if !o.IsFakingEnabled() {
return nil
}
// decrement the counter
o.DecrementCounter()
// Delay while returning call
if o.responseDelay != 0 {
time.Sleep(o.responseDelay)
}
// If error message has been set
if o.errorMsg != "" {
return errors.New(o.errorMsg)
}
return nil
}
// IsFakingEnabled will return true if counter is positive for the fake response
func (o *FakeResponse) IsFakingEnabled() bool {
return o.counter > 0
}
// SetFailAtFakeResourceActions sets up the errorMessage to be returned on specific calls
func (o *fakingOptions) SetFailAtFakeResourceActions(resourceActions *ResourceActions) {
o.failAt = resourceActions
}
// SetFailAllFakeResponse sets the error message for all calls from the client
func (o *fakingOptions) SetFailAllFakeResponse(response *FakeResponse) {
o.failAll = response
}
// NewMachineClientSet returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
func NewMachineClientSet(objects ...runtime.Object) (*fakeuntyped.Clientset, *FakeObjectTracker) {
var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
metav1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
_ = fakeuntyped.AddToScheme(scheme)
o := &FakeObjectTracker{
FakeWatcher: watch.NewFake(),
delegatee: k8stesting.NewObjectTracker(scheme, codecs.UniversalDecoder()),
}
for _, obj := range objects {
if err := o.Add(obj); err != nil {
panic(err)
}
}
cs := &fakeuntyped.Clientset{}
cs.Fake.AddReactor("*", "*", k8stesting.ObjectReaction(o))
cs.Fake.AddWatchReactor("*", o.watchReactionFunc)
return cs, o
}
// FakeObjectTrackers is a struct containing all the controller fake object trackers
type FakeObjectTrackers struct {
ControlMachine, TargetCore *FakeObjectTracker
}
// NewFakeObjectTrackers initializes fakeObjectTrackers initializes the fake object trackers
func NewFakeObjectTrackers(controlMachine, targetCore *FakeObjectTracker) *FakeObjectTrackers {
fakeObjectTrackers := &FakeObjectTrackers{
ControlMachine: controlMachine,
TargetCore: targetCore,
}
return fakeObjectTrackers
}
// Start starts all object trackers as go routines
func (o *FakeObjectTrackers) Start() {
go func() {
err := o.ControlMachine.Start()
if err != nil {
klog.Errorf("failed to start machine object tracker, Err: %v", err)
}
}()
go func() {
err := o.TargetCore.Start()
if err != nil {
klog.Errorf("failed to start target core object tracker, Err: %v", err)
}
}()
}
// Stop stops all object trackers
func (o *FakeObjectTrackers) Stop() {
o.ControlMachine.Stop()
o.TargetCore.Stop()
}
// NewCoreClientSet returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
func NewCoreClientSet(objects ...runtime.Object) (*Clientset, *FakeObjectTracker) {
var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
metav1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
_ = k8sfake.AddToScheme(scheme)
o := &FakeObjectTracker{
FakeWatcher: watch.NewFake(),
delegatee: k8stesting.NewObjectTracker(scheme, codecs.UniversalDecoder()), |
for _, obj := range objects {
if err := o.Add(obj); err != nil {
panic(err)
}
}
cs := &Clientset{Clientset: &k8sfake.Clientset{}}
cs.FakeDiscovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
cs.Fake.AddReactor("*", "*", k8stesting.ObjectReaction(o))
cs.Fake.AddWatchReactor("*", o.watchReactionFunc)
return cs, o
}
// Clientset extends k8sfake.Clientset to override the Policy implementation.
// This is because the default Policy fake implementation does not propagate the
// eviction name.
type Clientset struct {
*k8sfake.Clientset
FakeDiscovery *fakediscovery.FakeDiscovery
}
// Discovery returns the fake discovery implementation.
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
return c.FakeDiscovery
}
// PolicyV1beta1 retrieves the PolicyV1beta1Client
func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface {
return &FakePolicyV1beta1{
FakePolicyV1beta1: &fakepolicyv1beta1.FakePolicyV1beta1{
Fake: &c.Fake,
},
}
}
// Policy retrieves the PolicyV1beta1Client
func (c *Clientset) Policy() policyv1beta1.PolicyV1beta1Interface {
return c.PolicyV1beta1()
}
// FakePolicyV1beta1 extends fakepolicyv1beta1.FakePolicyV1beta1 to override the
// Policy implementation. This is because the default Policy fake implementation
// does not propagate the eviction name.
type FakePolicyV1beta1 struct {
*fakepolicyv1beta1.FakePolicyV1beta1
}
// Evictions extends fakepolicyv1beta1.FakeEvictions to override the
// Policy implementation. This is because the default Policy fake implementation
// does not propagate the eviction name.
func (c *FakePolicyV1beta1) Evictions(namespace string) policyv1beta1.EvictionInterface {
return &FakeEvictions{
FakePolicyV1beta1: c.FakePolicyV1beta1,
ns: namespace,
}
}
// FakeEvictions extends fakepolicyv1beta1.FakeEvictions to override the
// Policy implementation. This is because the default Policy fake implementation
// does not propagate the eviction name.
type FakeEvictions struct {
*fakepolicyv1beta1.FakePolicyV1beta1
ns string
}
// Evict overrides the fakepolicyv1beta1.FakeEvictions to override the
// Policy implementation. This is because the default Policy fake implementation
// does not propagate the eviction name.
func (c *FakeEvictions) Evict(_ context.Context, eviction *apipolicyv1beta1.Eviction) error {
action := k8stesting.GetActionImpl{}
action.Name = eviction.Name
action.Verb = "post"
action.Namespace = c.ns
action.Resource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
action.Subresource = "eviction"
_, err := c.Fake.Invokes(action, eviction)
return err
} | } | random_line_split |
client.go | /*
Copyright (c) 2023 SAP SE or an SAP affiliate company. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fakeclient
import (
"context"
"errors"
"fmt"
"k8s.io/klog/v2"
"sync"
"time"
fakeuntyped "github.com/gardener/machine-controller-manager/pkg/client/clientset/versioned/fake"
apipolicyv1beta1 "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
fakediscovery "k8s.io/client-go/discovery/fake"
k8sfake "k8s.io/client-go/kubernetes/fake"
policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1"
fakepolicyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake"
k8stesting "k8s.io/client-go/testing"
)
// FakeObjectTracker implements both k8stesting.ObjectTracker as well as watch.Interface.
type FakeObjectTracker struct {
*watch.FakeWatcher
delegatee k8stesting.ObjectTracker
watchers []*watcher
trackerMutex sync.Mutex
fakingOptions
}
// Add receives an add event with the object
func (t *FakeObjectTracker) Add(obj runtime.Object) error {
if t.fakingOptions.failAll != nil {
err := t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return err
}
}
return t.delegatee.Add(obj)
}
// Get receives a get event with the object
func (t *FakeObjectTracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) |
// Create receives a create event with the object. Not needed for CA.
func (t *FakeObjectTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
return nil
}
// Update receives an update event with the object
func (t *FakeObjectTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
var err error
if t.fakingOptions.failAll != nil {
err = t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return err
}
}
if t.fakingOptions.failAt != nil {
if gvr.Resource == "nodes" {
err = t.fakingOptions.failAt.Node.Update.RunFakeInvocations()
} else if gvr.Resource == "machines" {
err = t.fakingOptions.failAt.Machine.Update.RunFakeInvocations()
} else if gvr.Resource == "machinedeployments" {
err = t.fakingOptions.failAt.MachineDeployment.Update.RunFakeInvocations()
}
if err != nil {
return err
}
}
err = t.delegatee.Update(gvr, obj, ns)
if err != nil {
return err
}
if t.FakeWatcher == nil {
return errors.New("error sending event on a tracker with no watch support")
}
if t.IsStopped() {
return errors.New("error sending event on a stopped tracker")
}
t.FakeWatcher.Modify(obj)
return nil
}
// List receives a list event with the object
func (t *FakeObjectTracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) {
if t.fakingOptions.failAll != nil {
err := t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return nil, err
}
}
return t.delegatee.List(gvr, gvk, ns)
}
// Delete receives an delete event with the object. Not needed for CA.
func (t *FakeObjectTracker) Delete(gvr schema.GroupVersionResource, ns, name string) error {
return nil
}
// Watch receives a watch event with the object
func (t *FakeObjectTracker) Watch(gvr schema.GroupVersionResource, name string) (watch.Interface, error) {
if t.fakingOptions.failAll != nil {
err := t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return nil, err
}
}
return t.delegatee.Watch(gvr, name)
}
func (t *FakeObjectTracker) watchReactionFunc(action k8stesting.Action) (bool, watch.Interface, error) {
if t.FakeWatcher == nil {
return false, nil, errors.New("cannot watch on a tracker with no watch support")
}
switch a := action.(type) {
case k8stesting.WatchAction:
w := &watcher{
FakeWatcher: watch.NewFake(),
action: a,
}
go func() {
err := w.dispatchInitialObjects(a, t)
if err != nil {
klog.Errorf("error dispatching initial objects, Err: %v", err)
}
}()
t.trackerMutex.Lock()
defer t.trackerMutex.Unlock()
t.watchers = append(t.watchers, w)
return true, w, nil
default:
return false, nil, fmt.Errorf("expected WatchAction but got %v", action)
}
}
// Start begins tracking of an FakeObjectTracker
func (t *FakeObjectTracker) Start() error {
if t.FakeWatcher == nil {
return errors.New("tracker has no watch support")
}
for event := range t.ResultChan() {
event := event.DeepCopy() // passing a deep copy to avoid race.
t.dispatch(event)
}
return nil
}
func (t *FakeObjectTracker) dispatch(event *watch.Event) {
for _, w := range t.watchers {
go w.dispatch(event)
}
}
// Stop terminates tracking of an FakeObjectTracker
func (t *FakeObjectTracker) Stop() {
if t.FakeWatcher == nil {
panic(errors.New("tracker has no watch support"))
}
t.trackerMutex.Lock()
defer t.trackerMutex.Unlock()
t.FakeWatcher.Stop()
for _, w := range t.watchers {
w.Stop()
}
}
type watcher struct {
*watch.FakeWatcher
action k8stesting.WatchAction
updateMutex sync.Mutex
}
func (w *watcher) Stop() {
w.updateMutex.Lock()
defer w.updateMutex.Unlock()
w.FakeWatcher.Stop()
}
func (w *watcher) handles(event *watch.Event) bool {
if w.IsStopped() {
return false
}
t, err := meta.TypeAccessor(event.Object)
if err != nil {
return false
}
gvr, _ := meta.UnsafeGuessKindToResource(schema.FromAPIVersionAndKind(t.GetAPIVersion(), t.GetKind()))
if !(&k8stesting.SimpleWatchReactor{Resource: gvr.Resource}).Handles(w.action) {
return false
}
o, err := meta.Accessor(event.Object)
if err != nil {
return false
}
info := w.action.GetWatchRestrictions()
rv, fs, ls := info.ResourceVersion, info.Fields, info.Labels
if rv != "" && o.GetResourceVersion() != rv {
return false
}
if fs != nil && !fs.Matches(fields.Set{
"metadata.name": o.GetName(),
"metadata.namespace": o.GetNamespace(),
}) {
return false
}
if ls != nil && !ls.Matches(labels.Set(o.GetLabels())) {
return false
}
return true
}
func (w *watcher) dispatch(event *watch.Event) {
w.updateMutex.Lock()
defer w.updateMutex.Unlock()
if !w.handles(event) {
return
}
w.Action(event.Type, event.Object)
}
func (w *watcher) dispatchInitialObjects(action k8stesting.WatchAction, t k8stesting.ObjectTracker) error {
listObj, err := t.List(action.GetResource(), action.GetResource().GroupVersion().WithKind(action.GetResource().Resource), action.GetNamespace())
if err != nil {
return err
}
itemsPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
items := itemsPtr.([]runtime.Object)
for _, o := range items {
w.dispatch(&watch.Event{
Type: watch.Added,
Object: o,
})
}
return nil
}
// ResourceActions contains of Kubernetes/Machine resources whose response can be faked
type ResourceActions struct {
Node Actions
Machine Actions
MachineSet Actions
MachineDeployment Actions
}
// Actions contains the actions whose response can be faked
type Actions struct {
Get FakeResponse
Update FakeResponse
}
// FakeResponse is the custom error response configuration that are used for responding to client calls
type FakeResponse struct {
counter int
errorMsg string
responseDelay time.Duration
}
// fakingOptions are options that can be set while trying to fake object tracker returns
type fakingOptions struct {
// Fail at different resource action
failAt *ResourceActions
// Fail every action
failAll *FakeResponse
}
// CreateFakeResponse creates a fake response for an action
func CreateFakeResponse(counter int, errorMsg string, responseDelay time.Duration) FakeResponse {
return FakeResponse{
counter: counter,
errorMsg: errorMsg,
responseDelay: responseDelay,
}
}
// DecrementCounter reduces the counter for the particular action response by 1
func (o *FakeResponse) DecrementCounter() {
o.counter--
}
// RunFakeInvocations runs any custom fake configurations/methods before invoking standard ObjectTrackers
func (o *FakeResponse) RunFakeInvocations() error {
if !o.IsFakingEnabled() {
return nil
}
// decrement the counter
o.DecrementCounter()
// Delay while returning call
if o.responseDelay != 0 {
time.Sleep(o.responseDelay)
}
// If error message has been set
if o.errorMsg != "" {
return errors.New(o.errorMsg)
}
return nil
}
// IsFakingEnabled will return true if counter is positive for the fake response
func (o *FakeResponse) IsFakingEnabled() bool {
return o.counter > 0
}
// SetFailAtFakeResourceActions sets up the errorMessage to be returned on specific calls
func (o *fakingOptions) SetFailAtFakeResourceActions(resourceActions *ResourceActions) {
o.failAt = resourceActions
}
// SetFailAllFakeResponse sets the error message for all calls from the client
func (o *fakingOptions) SetFailAllFakeResponse(response *FakeResponse) {
o.failAll = response
}
// NewMachineClientSet returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
func NewMachineClientSet(objects ...runtime.Object) (*fakeuntyped.Clientset, *FakeObjectTracker) {
var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
metav1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
_ = fakeuntyped.AddToScheme(scheme)
o := &FakeObjectTracker{
FakeWatcher: watch.NewFake(),
delegatee: k8stesting.NewObjectTracker(scheme, codecs.UniversalDecoder()),
}
for _, obj := range objects {
if err := o.Add(obj); err != nil {
panic(err)
}
}
cs := &fakeuntyped.Clientset{}
cs.Fake.AddReactor("*", "*", k8stesting.ObjectReaction(o))
cs.Fake.AddWatchReactor("*", o.watchReactionFunc)
return cs, o
}
// FakeObjectTrackers is a struct containing all the controller fake object trackers
type FakeObjectTrackers struct {
ControlMachine, TargetCore *FakeObjectTracker
}
// NewFakeObjectTrackers initializes fakeObjectTrackers initializes the fake object trackers
func NewFakeObjectTrackers(controlMachine, targetCore *FakeObjectTracker) *FakeObjectTrackers {
fakeObjectTrackers := &FakeObjectTrackers{
ControlMachine: controlMachine,
TargetCore: targetCore,
}
return fakeObjectTrackers
}
// Start starts all object trackers as go routines
func (o *FakeObjectTrackers) Start() {
go func() {
err := o.ControlMachine.Start()
if err != nil {
klog.Errorf("failed to start machine object tracker, Err: %v", err)
}
}()
go func() {
err := o.TargetCore.Start()
if err != nil {
klog.Errorf("failed to start target core object tracker, Err: %v", err)
}
}()
}
// Stop stops all object trackers
func (o *FakeObjectTrackers) Stop() {
o.ControlMachine.Stop()
o.TargetCore.Stop()
}
// NewCoreClientSet returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
func NewCoreClientSet(objects ...runtime.Object) (*Clientset, *FakeObjectTracker) {
var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
metav1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
_ = k8sfake.AddToScheme(scheme)
o := &FakeObjectTracker{
FakeWatcher: watch.NewFake(),
delegatee: k8stesting.NewObjectTracker(scheme, codecs.UniversalDecoder()),
}
for _, obj := range objects {
if err := o.Add(obj); err != nil {
panic(err)
}
}
cs := &Clientset{Clientset: &k8sfake.Clientset{}}
cs.FakeDiscovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
cs.Fake.AddReactor("*", "*", k8stesting.ObjectReaction(o))
cs.Fake.AddWatchReactor("*", o.watchReactionFunc)
return cs, o
}
// Clientset extends k8sfake.Clientset to override the Policy implementation.
// This is because the default Policy fake implementation does not propagate the
// eviction name.
type Clientset struct {
*k8sfake.Clientset
FakeDiscovery *fakediscovery.FakeDiscovery
}
// Discovery returns the fake discovery implementation.
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
return c.FakeDiscovery
}
// PolicyV1beta1 retrieves the PolicyV1beta1Client
func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface {
return &FakePolicyV1beta1{
FakePolicyV1beta1: &fakepolicyv1beta1.FakePolicyV1beta1{
Fake: &c.Fake,
},
}
}
// Policy retrieves the PolicyV1beta1Client
func (c *Clientset) Policy() policyv1beta1.PolicyV1beta1Interface {
return c.PolicyV1beta1()
}
// FakePolicyV1beta1 extends fakepolicyv1beta1.FakePolicyV1beta1 to override the
// Policy implementation. This is because the default Policy fake implementation
// does not propagate the eviction name.
type FakePolicyV1beta1 struct {
*fakepolicyv1beta1.FakePolicyV1beta1
}
// Evictions extends fakepolicyv1beta1.FakeEvictions to override the
// Policy implementation. This is because the default Policy fake implementation
// does not propagate the eviction name.
func (c *FakePolicyV1beta1) Evictions(namespace string) policyv1beta1.EvictionInterface {
return &FakeEvictions{
FakePolicyV1beta1: c.FakePolicyV1beta1,
ns: namespace,
}
}
// FakeEvictions extends fakepolicyv1beta1.FakeEvictions to override the
// Policy implementation. This is because the default Policy fake implementation
// does not propagate the eviction name.
type FakeEvictions struct {
*fakepolicyv1beta1.FakePolicyV1beta1
ns string
}
// Evict overrides the fakepolicyv1beta1.FakeEvictions to override the
// Policy implementation. This is because the default Policy fake implementation
// does not propagate the eviction name.
func (c *FakeEvictions) Evict(_ context.Context, eviction *apipolicyv1beta1.Eviction) error {
action := k8stesting.GetActionImpl{}
action.Name = eviction.Name
action.Verb = "post"
action.Namespace = c.ns
action.Resource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
action.Subresource = "eviction"
_, err := c.Fake.Invokes(action, eviction)
return err
}
| {
var err error
if t.fakingOptions.failAll != nil {
err = t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return nil, err
}
}
if t.fakingOptions.failAt != nil {
if gvr.Resource == "nodes" {
err = t.fakingOptions.failAt.Node.Get.RunFakeInvocations()
} else if gvr.Resource == "machines" {
err = t.fakingOptions.failAt.Machine.Get.RunFakeInvocations()
} else if gvr.Resource == "machinesets" {
err = t.fakingOptions.failAt.MachineSet.Get.RunFakeInvocations()
} else if gvr.Resource == "machinedeployments" {
err = t.fakingOptions.failAt.MachineDeployment.Get.RunFakeInvocations()
}
if err != nil {
return nil, err
}
}
return t.delegatee.Get(gvr, ns, name)
} | identifier_body |
client.go | /*
Copyright (c) 2023 SAP SE or an SAP affiliate company. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fakeclient
import (
"context"
"errors"
"fmt"
"k8s.io/klog/v2"
"sync"
"time"
fakeuntyped "github.com/gardener/machine-controller-manager/pkg/client/clientset/versioned/fake"
apipolicyv1beta1 "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
fakediscovery "k8s.io/client-go/discovery/fake"
k8sfake "k8s.io/client-go/kubernetes/fake"
policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1"
fakepolicyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake"
k8stesting "k8s.io/client-go/testing"
)
// FakeObjectTracker implements both k8stesting.ObjectTracker as well as watch.Interface.
type FakeObjectTracker struct {
*watch.FakeWatcher
delegatee k8stesting.ObjectTracker
watchers []*watcher
trackerMutex sync.Mutex
fakingOptions
}
// Add receives an add event with the object
func (t *FakeObjectTracker) Add(obj runtime.Object) error {
if t.fakingOptions.failAll != nil {
err := t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return err
}
}
return t.delegatee.Add(obj)
}
// Get receives a get event with the object
func (t *FakeObjectTracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) {
var err error
if t.fakingOptions.failAll != nil {
err = t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return nil, err
}
}
if t.fakingOptions.failAt != nil {
if gvr.Resource == "nodes" {
err = t.fakingOptions.failAt.Node.Get.RunFakeInvocations()
} else if gvr.Resource == "machines" {
err = t.fakingOptions.failAt.Machine.Get.RunFakeInvocations()
} else if gvr.Resource == "machinesets" {
err = t.fakingOptions.failAt.MachineSet.Get.RunFakeInvocations()
} else if gvr.Resource == "machinedeployments" {
err = t.fakingOptions.failAt.MachineDeployment.Get.RunFakeInvocations()
}
if err != nil {
return nil, err
}
}
return t.delegatee.Get(gvr, ns, name)
}
// Create receives a create event with the object. Not needed for CA.
func (t *FakeObjectTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
return nil
}
// Update receives an update event with the object
func (t *FakeObjectTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
var err error
if t.fakingOptions.failAll != nil {
err = t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return err
}
}
if t.fakingOptions.failAt != nil {
if gvr.Resource == "nodes" {
err = t.fakingOptions.failAt.Node.Update.RunFakeInvocations()
} else if gvr.Resource == "machines" {
err = t.fakingOptions.failAt.Machine.Update.RunFakeInvocations()
} else if gvr.Resource == "machinedeployments" {
err = t.fakingOptions.failAt.MachineDeployment.Update.RunFakeInvocations()
}
if err != nil {
return err
}
}
err = t.delegatee.Update(gvr, obj, ns)
if err != nil {
return err
}
if t.FakeWatcher == nil {
return errors.New("error sending event on a tracker with no watch support")
}
if t.IsStopped() {
return errors.New("error sending event on a stopped tracker")
}
t.FakeWatcher.Modify(obj)
return nil
}
// List receives a list event with the object
func (t *FakeObjectTracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) {
if t.fakingOptions.failAll != nil {
err := t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return nil, err
}
}
return t.delegatee.List(gvr, gvk, ns)
}
// Delete receives an delete event with the object. Not needed for CA.
func (t *FakeObjectTracker) Delete(gvr schema.GroupVersionResource, ns, name string) error {
return nil
}
// Watch receives a watch event with the object
func (t *FakeObjectTracker) Watch(gvr schema.GroupVersionResource, name string) (watch.Interface, error) {
if t.fakingOptions.failAll != nil {
err := t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return nil, err
}
}
return t.delegatee.Watch(gvr, name)
}
func (t *FakeObjectTracker) watchReactionFunc(action k8stesting.Action) (bool, watch.Interface, error) {
if t.FakeWatcher == nil {
return false, nil, errors.New("cannot watch on a tracker with no watch support")
}
switch a := action.(type) {
case k8stesting.WatchAction:
w := &watcher{
FakeWatcher: watch.NewFake(),
action: a,
}
go func() {
err := w.dispatchInitialObjects(a, t)
if err != nil {
klog.Errorf("error dispatching initial objects, Err: %v", err)
}
}()
t.trackerMutex.Lock()
defer t.trackerMutex.Unlock()
t.watchers = append(t.watchers, w)
return true, w, nil
default:
return false, nil, fmt.Errorf("expected WatchAction but got %v", action)
}
}
// Start begins tracking of an FakeObjectTracker
func (t *FakeObjectTracker) Start() error {
if t.FakeWatcher == nil {
return errors.New("tracker has no watch support")
}
for event := range t.ResultChan() {
event := event.DeepCopy() // passing a deep copy to avoid race.
t.dispatch(event)
}
return nil
}
func (t *FakeObjectTracker) | (event *watch.Event) {
for _, w := range t.watchers {
go w.dispatch(event)
}
}
// Stop terminates tracking of an FakeObjectTracker
func (t *FakeObjectTracker) Stop() {
if t.FakeWatcher == nil {
panic(errors.New("tracker has no watch support"))
}
t.trackerMutex.Lock()
defer t.trackerMutex.Unlock()
t.FakeWatcher.Stop()
for _, w := range t.watchers {
w.Stop()
}
}
type watcher struct {
*watch.FakeWatcher
action k8stesting.WatchAction
updateMutex sync.Mutex
}
func (w *watcher) Stop() {
w.updateMutex.Lock()
defer w.updateMutex.Unlock()
w.FakeWatcher.Stop()
}
func (w *watcher) handles(event *watch.Event) bool {
if w.IsStopped() {
return false
}
t, err := meta.TypeAccessor(event.Object)
if err != nil {
return false
}
gvr, _ := meta.UnsafeGuessKindToResource(schema.FromAPIVersionAndKind(t.GetAPIVersion(), t.GetKind()))
if !(&k8stesting.SimpleWatchReactor{Resource: gvr.Resource}).Handles(w.action) {
return false
}
o, err := meta.Accessor(event.Object)
if err != nil {
return false
}
info := w.action.GetWatchRestrictions()
rv, fs, ls := info.ResourceVersion, info.Fields, info.Labels
if rv != "" && o.GetResourceVersion() != rv {
return false
}
if fs != nil && !fs.Matches(fields.Set{
"metadata.name": o.GetName(),
"metadata.namespace": o.GetNamespace(),
}) {
return false
}
if ls != nil && !ls.Matches(labels.Set(o.GetLabels())) {
return false
}
return true
}
func (w *watcher) dispatch(event *watch.Event) {
w.updateMutex.Lock()
defer w.updateMutex.Unlock()
if !w.handles(event) {
return
}
w.Action(event.Type, event.Object)
}
func (w *watcher) dispatchInitialObjects(action k8stesting.WatchAction, t k8stesting.ObjectTracker) error {
listObj, err := t.List(action.GetResource(), action.GetResource().GroupVersion().WithKind(action.GetResource().Resource), action.GetNamespace())
if err != nil {
return err
}
itemsPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
items := itemsPtr.([]runtime.Object)
for _, o := range items {
w.dispatch(&watch.Event{
Type: watch.Added,
Object: o,
})
}
return nil
}
// ResourceActions contains of Kubernetes/Machine resources whose response can be faked
type ResourceActions struct {
Node Actions
Machine Actions
MachineSet Actions
MachineDeployment Actions
}
// Actions contains the actions whose response can be faked
type Actions struct {
Get FakeResponse
Update FakeResponse
}
// FakeResponse is the custom error response configuration that are used for responding to client calls
type FakeResponse struct {
counter int
errorMsg string
responseDelay time.Duration
}
// fakingOptions are options that can be set while trying to fake object tracker returns
type fakingOptions struct {
// Fail at different resource action
failAt *ResourceActions
// Fail every action
failAll *FakeResponse
}
// CreateFakeResponse creates a fake response for an action
func CreateFakeResponse(counter int, errorMsg string, responseDelay time.Duration) FakeResponse {
return FakeResponse{
counter: counter,
errorMsg: errorMsg,
responseDelay: responseDelay,
}
}
// DecrementCounter reduces the counter for the particular action response by 1
func (o *FakeResponse) DecrementCounter() {
o.counter--
}
// RunFakeInvocations runs any custom fake configurations/methods before invoking standard ObjectTrackers
func (o *FakeResponse) RunFakeInvocations() error {
if !o.IsFakingEnabled() {
return nil
}
// decrement the counter
o.DecrementCounter()
// Delay while returning call
if o.responseDelay != 0 {
time.Sleep(o.responseDelay)
}
// If error message has been set
if o.errorMsg != "" {
return errors.New(o.errorMsg)
}
return nil
}
// IsFakingEnabled will return true if counter is positive for the fake response
func (o *FakeResponse) IsFakingEnabled() bool {
return o.counter > 0
}
// SetFailAtFakeResourceActions sets up the errorMessage to be returned on specific calls
func (o *fakingOptions) SetFailAtFakeResourceActions(resourceActions *ResourceActions) {
o.failAt = resourceActions
}
// SetFailAllFakeResponse sets the error message for all calls from the client
func (o *fakingOptions) SetFailAllFakeResponse(response *FakeResponse) {
o.failAll = response
}
// NewMachineClientSet returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
func NewMachineClientSet(objects ...runtime.Object) (*fakeuntyped.Clientset, *FakeObjectTracker) {
var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
metav1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
_ = fakeuntyped.AddToScheme(scheme)
o := &FakeObjectTracker{
FakeWatcher: watch.NewFake(),
delegatee: k8stesting.NewObjectTracker(scheme, codecs.UniversalDecoder()),
}
for _, obj := range objects {
if err := o.Add(obj); err != nil {
panic(err)
}
}
cs := &fakeuntyped.Clientset{}
cs.Fake.AddReactor("*", "*", k8stesting.ObjectReaction(o))
cs.Fake.AddWatchReactor("*", o.watchReactionFunc)
return cs, o
}
// FakeObjectTrackers is a struct containing all the controller fake object trackers
type FakeObjectTrackers struct {
ControlMachine, TargetCore *FakeObjectTracker
}
// NewFakeObjectTrackers initializes fakeObjectTrackers initializes the fake object trackers
func NewFakeObjectTrackers(controlMachine, targetCore *FakeObjectTracker) *FakeObjectTrackers {
fakeObjectTrackers := &FakeObjectTrackers{
ControlMachine: controlMachine,
TargetCore: targetCore,
}
return fakeObjectTrackers
}
// Start starts all object trackers as go routines
func (o *FakeObjectTrackers) Start() {
go func() {
err := o.ControlMachine.Start()
if err != nil {
klog.Errorf("failed to start machine object tracker, Err: %v", err)
}
}()
go func() {
err := o.TargetCore.Start()
if err != nil {
klog.Errorf("failed to start target core object tracker, Err: %v", err)
}
}()
}
// Stop stops all object trackers
func (o *FakeObjectTrackers) Stop() {
o.ControlMachine.Stop()
o.TargetCore.Stop()
}
// NewCoreClientSet returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
func NewCoreClientSet(objects ...runtime.Object) (*Clientset, *FakeObjectTracker) {
var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
metav1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
_ = k8sfake.AddToScheme(scheme)
o := &FakeObjectTracker{
FakeWatcher: watch.NewFake(),
delegatee: k8stesting.NewObjectTracker(scheme, codecs.UniversalDecoder()),
}
for _, obj := range objects {
if err := o.Add(obj); err != nil {
panic(err)
}
}
cs := &Clientset{Clientset: &k8sfake.Clientset{}}
cs.FakeDiscovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
cs.Fake.AddReactor("*", "*", k8stesting.ObjectReaction(o))
cs.Fake.AddWatchReactor("*", o.watchReactionFunc)
return cs, o
}
// Clientset extends k8sfake.Clientset to override the Policy implementation.
// This is because the default Policy fake implementation does not propagate the
// eviction name.
type Clientset struct {
*k8sfake.Clientset
FakeDiscovery *fakediscovery.FakeDiscovery
}
// Discovery returns the fake discovery implementation.
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
return c.FakeDiscovery
}
// PolicyV1beta1 retrieves the PolicyV1beta1Client
func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface {
return &FakePolicyV1beta1{
FakePolicyV1beta1: &fakepolicyv1beta1.FakePolicyV1beta1{
Fake: &c.Fake,
},
}
}
// Policy retrieves the PolicyV1beta1Client
func (c *Clientset) Policy() policyv1beta1.PolicyV1beta1Interface {
return c.PolicyV1beta1()
}
// FakePolicyV1beta1 extends fakepolicyv1beta1.FakePolicyV1beta1 to override the
// Policy implementation. This is because the default Policy fake implementation
// does not propagate the eviction name.
type FakePolicyV1beta1 struct {
*fakepolicyv1beta1.FakePolicyV1beta1
}
// Evictions extends fakepolicyv1beta1.FakeEvictions to override the
// Policy implementation. This is because the default Policy fake implementation
// does not propagate the eviction name.
func (c *FakePolicyV1beta1) Evictions(namespace string) policyv1beta1.EvictionInterface {
return &FakeEvictions{
FakePolicyV1beta1: c.FakePolicyV1beta1,
ns: namespace,
}
}
// FakeEvictions extends fakepolicyv1beta1.FakeEvictions to override the
// Policy implementation. This is because the default Policy fake implementation
// does not propagate the eviction name.
type FakeEvictions struct {
*fakepolicyv1beta1.FakePolicyV1beta1
ns string
}
// Evict overrides the fakepolicyv1beta1.FakeEvictions to override the
// Policy implementation. This is because the default Policy fake implementation
// does not propagate the eviction name.
func (c *FakeEvictions) Evict(_ context.Context, eviction *apipolicyv1beta1.Eviction) error {
action := k8stesting.GetActionImpl{}
action.Name = eviction.Name
action.Verb = "post"
action.Namespace = c.ns
action.Resource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
action.Subresource = "eviction"
_, err := c.Fake.Invokes(action, eviction)
return err
}
| dispatch | identifier_name |
client.go | /*
Copyright (c) 2023 SAP SE or an SAP affiliate company. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fakeclient
import (
"context"
"errors"
"fmt"
"k8s.io/klog/v2"
"sync"
"time"
fakeuntyped "github.com/gardener/machine-controller-manager/pkg/client/clientset/versioned/fake"
apipolicyv1beta1 "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
fakediscovery "k8s.io/client-go/discovery/fake"
k8sfake "k8s.io/client-go/kubernetes/fake"
policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1"
fakepolicyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake"
k8stesting "k8s.io/client-go/testing"
)
// FakeObjectTracker implements both k8stesting.ObjectTracker as well as watch.Interface.
type FakeObjectTracker struct {
*watch.FakeWatcher
delegatee k8stesting.ObjectTracker
watchers []*watcher
trackerMutex sync.Mutex
fakingOptions
}
// Add receives an add event with the object
func (t *FakeObjectTracker) Add(obj runtime.Object) error {
if t.fakingOptions.failAll != nil {
err := t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return err
}
}
return t.delegatee.Add(obj)
}
// Get receives a get event with the object
func (t *FakeObjectTracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) {
var err error
if t.fakingOptions.failAll != nil {
err = t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return nil, err
}
}
if t.fakingOptions.failAt != nil {
if gvr.Resource == "nodes" {
err = t.fakingOptions.failAt.Node.Get.RunFakeInvocations()
} else if gvr.Resource == "machines" {
err = t.fakingOptions.failAt.Machine.Get.RunFakeInvocations()
} else if gvr.Resource == "machinesets" {
err = t.fakingOptions.failAt.MachineSet.Get.RunFakeInvocations()
} else if gvr.Resource == "machinedeployments" {
err = t.fakingOptions.failAt.MachineDeployment.Get.RunFakeInvocations()
}
if err != nil |
}
return t.delegatee.Get(gvr, ns, name)
}
// Create receives a create event with the object. Not needed for CA.
func (t *FakeObjectTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
return nil
}
// Update receives an update event with the object
func (t *FakeObjectTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
var err error
if t.fakingOptions.failAll != nil {
err = t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return err
}
}
if t.fakingOptions.failAt != nil {
if gvr.Resource == "nodes" {
err = t.fakingOptions.failAt.Node.Update.RunFakeInvocations()
} else if gvr.Resource == "machines" {
err = t.fakingOptions.failAt.Machine.Update.RunFakeInvocations()
} else if gvr.Resource == "machinedeployments" {
err = t.fakingOptions.failAt.MachineDeployment.Update.RunFakeInvocations()
}
if err != nil {
return err
}
}
err = t.delegatee.Update(gvr, obj, ns)
if err != nil {
return err
}
if t.FakeWatcher == nil {
return errors.New("error sending event on a tracker with no watch support")
}
if t.IsStopped() {
return errors.New("error sending event on a stopped tracker")
}
t.FakeWatcher.Modify(obj)
return nil
}
// List receives a list event with the object
func (t *FakeObjectTracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) {
if t.fakingOptions.failAll != nil {
err := t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return nil, err
}
}
return t.delegatee.List(gvr, gvk, ns)
}
// Delete receives an delete event with the object. Not needed for CA.
func (t *FakeObjectTracker) Delete(gvr schema.GroupVersionResource, ns, name string) error {
return nil
}
// Watch receives a watch event with the object
func (t *FakeObjectTracker) Watch(gvr schema.GroupVersionResource, name string) (watch.Interface, error) {
if t.fakingOptions.failAll != nil {
err := t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return nil, err
}
}
return t.delegatee.Watch(gvr, name)
}
func (t *FakeObjectTracker) watchReactionFunc(action k8stesting.Action) (bool, watch.Interface, error) {
if t.FakeWatcher == nil {
return false, nil, errors.New("cannot watch on a tracker with no watch support")
}
switch a := action.(type) {
case k8stesting.WatchAction:
w := &watcher{
FakeWatcher: watch.NewFake(),
action: a,
}
go func() {
err := w.dispatchInitialObjects(a, t)
if err != nil {
klog.Errorf("error dispatching initial objects, Err: %v", err)
}
}()
t.trackerMutex.Lock()
defer t.trackerMutex.Unlock()
t.watchers = append(t.watchers, w)
return true, w, nil
default:
return false, nil, fmt.Errorf("expected WatchAction but got %v", action)
}
}
// Start begins tracking of an FakeObjectTracker
func (t *FakeObjectTracker) Start() error {
if t.FakeWatcher == nil {
return errors.New("tracker has no watch support")
}
for event := range t.ResultChan() {
event := event.DeepCopy() // passing a deep copy to avoid race.
t.dispatch(event)
}
return nil
}
func (t *FakeObjectTracker) dispatch(event *watch.Event) {
for _, w := range t.watchers {
go w.dispatch(event)
}
}
// Stop terminates tracking of an FakeObjectTracker
func (t *FakeObjectTracker) Stop() {
if t.FakeWatcher == nil {
panic(errors.New("tracker has no watch support"))
}
t.trackerMutex.Lock()
defer t.trackerMutex.Unlock()
t.FakeWatcher.Stop()
for _, w := range t.watchers {
w.Stop()
}
}
type watcher struct {
*watch.FakeWatcher
action k8stesting.WatchAction
updateMutex sync.Mutex
}
func (w *watcher) Stop() {
w.updateMutex.Lock()
defer w.updateMutex.Unlock()
w.FakeWatcher.Stop()
}
func (w *watcher) handles(event *watch.Event) bool {
if w.IsStopped() {
return false
}
t, err := meta.TypeAccessor(event.Object)
if err != nil {
return false
}
gvr, _ := meta.UnsafeGuessKindToResource(schema.FromAPIVersionAndKind(t.GetAPIVersion(), t.GetKind()))
if !(&k8stesting.SimpleWatchReactor{Resource: gvr.Resource}).Handles(w.action) {
return false
}
o, err := meta.Accessor(event.Object)
if err != nil {
return false
}
info := w.action.GetWatchRestrictions()
rv, fs, ls := info.ResourceVersion, info.Fields, info.Labels
if rv != "" && o.GetResourceVersion() != rv {
return false
}
if fs != nil && !fs.Matches(fields.Set{
"metadata.name": o.GetName(),
"metadata.namespace": o.GetNamespace(),
}) {
return false
}
if ls != nil && !ls.Matches(labels.Set(o.GetLabels())) {
return false
}
return true
}
func (w *watcher) dispatch(event *watch.Event) {
w.updateMutex.Lock()
defer w.updateMutex.Unlock()
if !w.handles(event) {
return
}
w.Action(event.Type, event.Object)
}
func (w *watcher) dispatchInitialObjects(action k8stesting.WatchAction, t k8stesting.ObjectTracker) error {
listObj, err := t.List(action.GetResource(), action.GetResource().GroupVersion().WithKind(action.GetResource().Resource), action.GetNamespace())
if err != nil {
return err
}
itemsPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
items := itemsPtr.([]runtime.Object)
for _, o := range items {
w.dispatch(&watch.Event{
Type: watch.Added,
Object: o,
})
}
return nil
}
// ResourceActions contains of Kubernetes/Machine resources whose response can be faked
type ResourceActions struct {
Node Actions
Machine Actions
MachineSet Actions
MachineDeployment Actions
}
// Actions contains the actions whose response can be faked
type Actions struct {
Get FakeResponse
Update FakeResponse
}
// FakeResponse is the custom error response configuration that are used for responding to client calls
type FakeResponse struct {
counter int
errorMsg string
responseDelay time.Duration
}
// fakingOptions are options that can be set while trying to fake object tracker returns
type fakingOptions struct {
// Fail at different resource action
failAt *ResourceActions
// Fail every action
failAll *FakeResponse
}
// CreateFakeResponse creates a fake response for an action
func CreateFakeResponse(counter int, errorMsg string, responseDelay time.Duration) FakeResponse {
return FakeResponse{
counter: counter,
errorMsg: errorMsg,
responseDelay: responseDelay,
}
}
// DecrementCounter reduces the counter for the particular action response by 1
func (o *FakeResponse) DecrementCounter() {
o.counter--
}
// RunFakeInvocations runs any custom fake configurations/methods before invoking standard ObjectTrackers
func (o *FakeResponse) RunFakeInvocations() error {
if !o.IsFakingEnabled() {
return nil
}
// decrement the counter
o.DecrementCounter()
// Delay while returning call
if o.responseDelay != 0 {
time.Sleep(o.responseDelay)
}
// If error message has been set
if o.errorMsg != "" {
return errors.New(o.errorMsg)
}
return nil
}
// IsFakingEnabled will return true if counter is positive for the fake response
func (o *FakeResponse) IsFakingEnabled() bool {
return o.counter > 0
}
// SetFailAtFakeResourceActions sets up the errorMessage to be returned on specific calls
func (o *fakingOptions) SetFailAtFakeResourceActions(resourceActions *ResourceActions) {
o.failAt = resourceActions
}
// SetFailAllFakeResponse sets the error message for all calls from the client
func (o *fakingOptions) SetFailAllFakeResponse(response *FakeResponse) {
o.failAll = response
}
// NewMachineClientSet returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
func NewMachineClientSet(objects ...runtime.Object) (*fakeuntyped.Clientset, *FakeObjectTracker) {
var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
metav1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
_ = fakeuntyped.AddToScheme(scheme)
o := &FakeObjectTracker{
FakeWatcher: watch.NewFake(),
delegatee: k8stesting.NewObjectTracker(scheme, codecs.UniversalDecoder()),
}
for _, obj := range objects {
if err := o.Add(obj); err != nil {
panic(err)
}
}
cs := &fakeuntyped.Clientset{}
cs.Fake.AddReactor("*", "*", k8stesting.ObjectReaction(o))
cs.Fake.AddWatchReactor("*", o.watchReactionFunc)
return cs, o
}
// FakeObjectTrackers is a struct containing all the controller fake object trackers
type FakeObjectTrackers struct {
ControlMachine, TargetCore *FakeObjectTracker
}
// NewFakeObjectTrackers initializes fakeObjectTrackers initializes the fake object trackers
func NewFakeObjectTrackers(controlMachine, targetCore *FakeObjectTracker) *FakeObjectTrackers {
fakeObjectTrackers := &FakeObjectTrackers{
ControlMachine: controlMachine,
TargetCore: targetCore,
}
return fakeObjectTrackers
}
// Start starts all object trackers as go routines
func (o *FakeObjectTrackers) Start() {
go func() {
err := o.ControlMachine.Start()
if err != nil {
klog.Errorf("failed to start machine object tracker, Err: %v", err)
}
}()
go func() {
err := o.TargetCore.Start()
if err != nil {
klog.Errorf("failed to start target core object tracker, Err: %v", err)
}
}()
}
// Stop stops all object trackers
func (o *FakeObjectTrackers) Stop() {
o.ControlMachine.Stop()
o.TargetCore.Stop()
}
// NewCoreClientSet returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
func NewCoreClientSet(objects ...runtime.Object) (*Clientset, *FakeObjectTracker) {
var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
metav1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
_ = k8sfake.AddToScheme(scheme)
o := &FakeObjectTracker{
FakeWatcher: watch.NewFake(),
delegatee: k8stesting.NewObjectTracker(scheme, codecs.UniversalDecoder()),
}
for _, obj := range objects {
if err := o.Add(obj); err != nil {
panic(err)
}
}
cs := &Clientset{Clientset: &k8sfake.Clientset{}}
cs.FakeDiscovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
cs.Fake.AddReactor("*", "*", k8stesting.ObjectReaction(o))
cs.Fake.AddWatchReactor("*", o.watchReactionFunc)
return cs, o
}
// Clientset extends k8sfake.Clientset to override the Policy implementation.
// This is because the default Policy fake implementation does not propagate the
// eviction name.
type Clientset struct {
*k8sfake.Clientset
FakeDiscovery *fakediscovery.FakeDiscovery
}
// Discovery returns the fake discovery implementation.
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
return c.FakeDiscovery
}
// PolicyV1beta1 retrieves the PolicyV1beta1Client
func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface {
return &FakePolicyV1beta1{
FakePolicyV1beta1: &fakepolicyv1beta1.FakePolicyV1beta1{
Fake: &c.Fake,
},
}
}
// Policy retrieves the PolicyV1beta1Client
func (c *Clientset) Policy() policyv1beta1.PolicyV1beta1Interface {
return c.PolicyV1beta1()
}
// FakePolicyV1beta1 extends fakepolicyv1beta1.FakePolicyV1beta1 to override the
// Policy implementation. This is because the default Policy fake implementation
// does not propagate the eviction name.
type FakePolicyV1beta1 struct {
*fakepolicyv1beta1.FakePolicyV1beta1
}
// Evictions extends fakepolicyv1beta1.FakeEvictions to override the
// Policy implementation. This is because the default Policy fake implementation
// does not propagate the eviction name.
func (c *FakePolicyV1beta1) Evictions(namespace string) policyv1beta1.EvictionInterface {
return &FakeEvictions{
FakePolicyV1beta1: c.FakePolicyV1beta1,
ns: namespace,
}
}
// FakeEvictions extends fakepolicyv1beta1.FakeEvictions to override the
// Policy implementation. This is because the default Policy fake implementation
// does not propagate the eviction name.
type FakeEvictions struct {
*fakepolicyv1beta1.FakePolicyV1beta1
ns string
}
// Evict overrides the fakepolicyv1beta1.FakeEvictions to override the
// Policy implementation. This is because the default Policy fake implementation
// does not propagate the eviction name.
func (c *FakeEvictions) Evict(_ context.Context, eviction *apipolicyv1beta1.Eviction) error {
action := k8stesting.GetActionImpl{}
action.Name = eviction.Name
action.Verb = "post"
action.Namespace = c.ns
action.Resource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
action.Subresource = "eviction"
_, err := c.Fake.Invokes(action, eviction)
return err
}
| {
return nil, err
} | conditional_block |
xf_numba.py | #! /usr/bin/env python
# =============================================================================
# Copyright (c) 2012, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
# Written by Joel Bernier <bernier2@llnl.gov> and others.
# LLNL-CODE-529294.
# All rights reserved.
#
# This file is part of HEXRD. For details on dowloading the source,
# see the file COPYING.
#
# Please also see the file LICENSE.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License (as published by the Free
# Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program (see file LICENSE); if not, write to
# the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA or visit <http://www.gnu.org/licenses/>.
# =============================================================================
# ??? do we want to set np.seterr(invalid='ignore') to avoid nan warnings?
# -*- coding: utf-8 -*-
"""Tranforms module implementation using numba.
Currently, this implementation contains code for the following functions:
- angles_to_gvec
- angles_to_dvec
- row_norm
- unit_vector
- make_rmat_of_expmap
- make_beam_rmat
"""
import numpy as np
from numpy import float_ as npfloat
from numpy import int_ as npint
from . import constants as cnst
from .transforms_definitions import xf_api, get_signature
from .xf_numpy import _beam_to_crystal
try:
import numba
except ImportError:
# Numba is an optional dependency. Any code relying on numba should be
# optional
raise ImportError("xf_numba not available: numba not installed")
# Use the following decorator instead of numba.jit for interface functions.
# This is so we can patch certain features.
def xfapi_jit(fn):
out = numba.jit(fn)
out.__signature__ = get_signature(fn)
return out
@numba.njit
def _angles_to_gvec_helper(angs, out=None):
"""
angs are vstacked [2*theta, eta, omega], although omega is optional
This should be equivalent to the one-liner numpy version:
out = np.vstack([[np.cos(0.5*angs[:, 0]) * np.cos(angs[:, 1])],
[np.cos(0.5*angs[:, 0]) * np.sin(angs[:, 1])],
[np.sin(0.5*angs[:, 0])]])
although much faster
"""
count, dim = angs.shape
out = out if out is not None else np.empty((count, 3), dtype=angs.dtype)
for i in range(count):
ca0 = np.cos(0.5*angs[i, 0])
sa0 = np.sin(0.5*angs[i, 0])
ca1 = np.cos(angs[i, 1])
sa1 = np.sin(angs[i, 1])
out[i, 0] = ca0 * ca1
out[i, 1] = ca0 * sa1
out[i, 2] = sa0
return out
@numba.njit
def _angles_to_dvec_helper(angs, out=None):
"""
angs are vstacked [2*theta, eta, omega], although omega is optional
This shoud be equivalent to the one-liner numpy version:
out = np.vstack([[np.sin(angs[:, 0]) * np.cos(angs[:, 1])],
[np.sin(angs[:, 0]) * np.sin(angs[:, 1])],
[-np.cos(angs[:, 0])]])
although much faster
"""
_, dim = angs.shape
out = out if out is not None else np.empty((dim, 3), dtype=angs.dtype)
for i in range(len(angs)):
ca0 = np.cos(angs[i, 0])
sa0 = np.sin(angs[i, 0])
ca1 = np.cos(angs[i, 1])
sa1 = np.sin(angs[i, 1])
out[i, 0] = sa0 * ca1
out[i, 1] = sa0 * sa1
out[i, 2] = -ca0
return out
@numba.njit
def _rmat_s_helper(chi=None, omes=None, out=None):
"""
simple utility for calculating sample rotation matrices based on
standard definition for HEDM
chi is a single value, 0.0 by default
omes is either a 1d array or None.
If None the code should be equivalent to a single ome of value 0.0
out is a preallocated output array. No check is done about it having the
proper size. If None a new array will be allocated. The expected size
of the array is as many 3x3 matrices as omes (n, 3, 3).
"""
if chi is not None:
cx = np.cos(chi)
sx = np.sin(chi)
else:
cx = 1.0
sx = 0.0
if omes is not None:
# omes is an array (vector): output is as many rotation matrices as omes entries.
n = len(omes)
out = out if out is not None else np.empty((n,3,3), dtype=omes.dtype)
if chi is not None:
# ome is array and chi is a value... compute output
cx = np.cos(chi)
sx = np.sin(chi)
for i in range(n):
cw = np.cos(omes[i])
sw = np.sin(omes[i])
out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw
out[i, 1, 0] = sx*sw; out[i, 1, 1] = cx; out[i, 1, 2] = -sx*cw
out[i, 2, 0] = -cx*sw; out[i, 2, 1] = sx; out[i, 2, 2] = cx*cw
else:
# omes is array and chi is None -> equivalent to chi=0.0, but shortcut computations.
# cx IS 1.0, sx IS 0.0
for i in range(n):
cw = np.cos(omes[i])
sw = np.sin(omes[i])
out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw
out[i, 1, 0] = 0.; out[i, 1, 1] = 1.; out[i, 1, 2] = 0.
out[i, 2, 0] = -sw; out[i, 2, 1] = 0.; out[i, 2, 2] = cw
else:
# omes is None, results should be equivalent to an array with a single element 0.0
out = out if out is not None else np.empty((1, 3, 3))
if chi is not None:
# ome is 0.0. cw is 1.0 and sw is 0.0
cx = np.cos(chi)
sx = np.sin(chi)
out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0.
out[0, 1, 0] = 0.; out[0, 1, 1] = cx; out[0, 1, 2] = -sx
out[0, 2, 0] = 0.; out[0, 2, 1] = sx; out[0, 2, 2] = cx
else:
# both omes and chi are None... return a single identity matrix.
out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0.
out[0, 1, 0] = 0.; out[0, 1, 1] = 1.; out[0, 1, 2] = 0.
out[0, 2, 0] = 0.; out[0, 2, 1] = 0.; out[0, 2, 2] = 1.
return out
@xf_api
def angles_to_gvec(angs,
beam_vec=None, eta_vec=None,
chi=None, rmat_c=None):
"""Note about this implementation:
This used to take rmat_b instead of the pair beam_vec, eta_vec. So it may require
some checking.
"""
orig_ndim = angs.ndim
angs = np.atleast_2d(angs)
nvecs, dim = angs.shape
# make vectors in beam frame
gvec_b = _angles_to_gvec_helper(angs[:,0:2])
# _rmat_s_helper could return None to mean "Identity" when chi and ome are None.
omes = angs[:, 2] if dim > 2 else None
if chi is not None or omes is not None:
rmat_s = _rmat_s_helper(chi=chi, omes=omes)
else:
rmat_s = None
# apply defaults to beam_vec and eta_vec.
# TODO: use a default rmat when beam_vec and eta_vec are None so computations
# can be avoided?
beam_vec = beam_vec if beam_vec is not None else cnst.beam_vec
eta_vec = eta_vec if eta_vec is not None else cnst.beam_vec
rmat_b = make_beam_rmat(beam_vec, eta_vec)
out = _beam_to_crystal(gvec_b,
rmat_b=rmat_b, rmat_s=rmat_s, rmat_c=rmat_c)
return out[0] if orig_ndim == 1 else out
@xf_api
def angles_to_dvec(angs,
beam_vec=None, eta_vec=None,
chi=None, rmat_c=None):
"""Note about this implementation:
This used to take rmat_b instead of the pair beam_vec, eta_vec. So it may
require some checking.
"""
angs = np.atleast_2d(angs)
nvecs, dim = angs.shape
# make vectors in beam frame
dvec_b = _angles_to_dvec_helper(angs[:,0:2])
# calculate rmat_s
omes = angs[:, 2] if dim>2 else None
if chi is not None or omes is not None:
rmat_s = _rmat_s_helper(chi=chi, omes=omes)
else:
rmat_s = None
# apply defaults to beam_vec and eta_vec.
# TODO: use a default rmat when beam_vec and eta_vec are None so computations
# can be avoided?
beam_vec = beam_vec if beam_vec is not None else cnst.beam_vec
eta_vec = eta_vec if eta_vec is not None else cnst.beam_vec
rmat_b = make_beam_rmat(beam_vec, eta_vec)
return _beam_to_crystal(dvec_b,
rmat_b=rmat_b, rmat_s=rmat_s, rmat_c=rmat_c)
# this could be a gufunc... (n)->()
@numba.njit
def _row_norm(a, out=None):
n, dim = a.shape
out = out if out is not None else np.empty(n, dtype=a.dtype)
for i in range(n):
nrm = 0.0
for j in range(dim):
x = a[i, j]
nrm += x*x
out[i] = np.sqrt(nrm)
return out
# this and _unit_vector_single would be better as a gufunc.
@numba.njit
def _unit_vector_single(a, out=None):
out = out if out is not None else np.empty_like(a)
n = len(a)
sqr_norm = a[0]*a[0]
for i in range(1, n):
sqr_norm += a[i]*a[i]
# prevent divide by zero
if sqr_norm > cnst.epsf:
recip_norm = 1.0 / np.sqrt(sqr_norm)
out[:] = a[:] * recip_norm
else:
out[:] = a[:]
return out
@numba.njit
def _unit_vector_multi(a, out=None):
out = out if out is not None else np.empty_like(a)
n, dim = a.shape
for i in range(n):
#_unit_vector_single(a[i], out=out[i])
sqr_norm = a[i, 0] * a[i, 0]
for j in range(1, dim):
sqr_norm += a[i, j]*a[i, j]
if sqr_norm > cnst.epsf:
recip_norm = 1.0 / np.sqrt(sqr_norm)
out[i,:] = a[i,:] * recip_norm
else:
out[i,:] = a[i,:]
return out
@xf_api
def row_norm(vec_in):
"""
return row-wise norms for a list of vectors
"""
# TODO: leave this to a PRECONDITION in the xf_api?
if vec_in.ndim == 1:
out = _row_norm(np.atleast_2d(vec_in))[0]
elif vec_in.ndim == 2:
out = _row_norm(vec_in)
else:
raise ValueError(
"incorrect shape: arg must be 1-d or 2-d, yours is %d"
% (len(vec_in.shape)))
return out
@xf_api
def unit_vector(vec_in):
|
@numba.njit
def _make_rmat_of_expmap(x, out=None):
"""
TODO:
Test effectiveness of two options:
1) avoid conditional inside for loop and use np.divide to return NaN
for the phi = 0 cases, and deal with it later; or
2) catch phi = 0 cases inside the loop and just return squeezed answer
"""
n = len(x)
out = out if out is not None else np.empty((n,3,3), dtype=x.dtype)
for i in range(n):
phi = np.sqrt(x[i, 0]*x[i, 0] + x[i, 1]*x[i, 1] + x[i, 2]*x[i, 2])
if phi <= cnst.sqrt_epsf:
out[i, 0, 0] = 1.; out[i, 0, 1] = 0.; out[i, 0, 2] = 0.
out[i, 1, 0] = 0.; out[i, 1, 1] = 1.; out[i, 1, 2] = 0.
out[i, 2, 0] = 0.; out[i, 2, 1] = 0.; out[i, 2, 2] = 1.
else:
f1 = np.sin(phi)/phi
f2 = (1. - np.cos(phi)) / (phi*phi)
out[i, 0, 0] = 1. - f2*(x[i, 2]*x[i, 2] + x[i, 1]*x[i, 1])
out[i, 0, 1] = f2*x[i, 1]*x[i, 0] - f1*x[i, 2]
out[i, 0, 2] = f1*x[i, 1] + f2*x[i, 2]*x[i, 0]
out[i, 1, 0] = f1*x[i, 2] + f2*x[i, 1]*x[i, 0]
out[i, 1, 1] = 1. - f2*(x[i, 2]*x[i, 2] + x[i, 0]*x[i, 0])
out[i, 1, 2] = f2*x[i, 2]*x[i, 1] - f1*x[i, 0]
out[i, 2, 0] = f2*x[i, 2]*x[i, 0] - f1*x[i, 1]
out[i, 2, 1] = f1*x[i, 0] + f2*x[i, 2]*x[i, 1]
out[i, 2, 2] = 1. - f2*(x[i, 1]*x[i, 1] + x[i, 0]*x[i, 0])
return out
"""
if the help above was set up to return nans...
def make_rmat_of_expmap(exp_map):
exp_map = np.atleast_2d(exp_map)
rmats = np.empty((len(exp_map), 3, 3))
_make_rmat_of_expmap(exp_map, rmats)
chk = np.isnan(rmats)
if np.any(chk):
rmats[chk] = np.tile(
[1., 0., 0., 0., 1., 0., 0., 0., 1.], np.sum(chk)/9
)
return rmats
"""
@xf_api
def make_rmat_of_expmap(exp_map):
exp_map = np.atleast_2d(exp_map)
rmats = _make_rmat_of_expmap(exp_map)
return np.squeeze(rmats)
@xf_api
@xfapi_jit
def make_beam_rmat(bvec_l, evec_l):
# bvec_l and evec_l CANNOT have 0 magnitude!
# must catch this case as well as colinear bhat_l/ehat_l elsewhere...
bvec_mag = np.sqrt(bvec_l[0]**2 + bvec_l[1]**2 + bvec_l[2]**2)
if bvec_mag < cnst.sqrt_epsf:
raise RuntimeError("bvec_l MUST NOT be ZERO!")
pass
# assign Ze as -bhat_l
Ze0 = -bvec_l[0] / bvec_mag
Ze1 = -bvec_l[1] / bvec_mag
Ze2 = -bvec_l[2] / bvec_mag
# find Ye as Ze ^ ehat_l
Ye0 = Ze1*evec_l[2] - evec_l[1]*Ze2
Ye1 = Ze2*evec_l[0] - evec_l[2]*Ze0
Ye2 = Ze0*evec_l[1] - evec_l[0]*Ze1
Ye_mag = np.sqrt(Ye0**2 + Ye1**2 + Ye2**2)
if Ye_mag < cnst.sqrt_epsf:
raise RuntimeError("bvec_l and evec_l MUST NOT be collinear!")
pass
out = np.empty((3,3), dtype=bvec_l.dtype)
Ye0 /= Ye_mag
Ye1 /= Ye_mag
Ye2 /= Ye_mag
# find Xe as Ye ^ Ze
Xe0 = Ye1*Ze2 - Ze1*Ye2
Xe1 = Ye2*Ze0 - Ze2*Ye0
Xe2 = Ye0*Ze1 - Ze0*Ye1
out[0, 0] = Xe0
out[0, 1] = Ye0
out[0, 2] = Ze0
out[1, 0] = Xe1
out[1, 1] = Ye1
out[1, 2] = Ze1
out[2, 0] = Xe2
out[2, 1] = Ye2
out[2, 2] = Ze2
return out
| """
normalize array of column vectors (hstacked, axis = 0)
"""
if vec_in.ndim == 1:
out = _unit_vector_single(vec_in)
elif vec_in.ndim == 2:
out = _unit_vector_multi(vec_in)
else:
raise ValueError(
"incorrect arg shape; must be 1-d or 2-d, yours is %d-d"
% (vec_in.ndim)
)
return out | identifier_body |
xf_numba.py | #! /usr/bin/env python
# =============================================================================
# Copyright (c) 2012, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
# Written by Joel Bernier <bernier2@llnl.gov> and others.
# LLNL-CODE-529294.
# All rights reserved.
#
# This file is part of HEXRD. For details on dowloading the source,
# see the file COPYING.
#
# Please also see the file LICENSE.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License (as published by the Free
# Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program (see file LICENSE); if not, write to
# the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA or visit <http://www.gnu.org/licenses/>.
# =============================================================================
# ??? do we want to set np.seterr(invalid='ignore') to avoid nan warnings?
# -*- coding: utf-8 -*-
"""Tranforms module implementation using numba.
Currently, this implementation contains code for the following functions:
- angles_to_gvec
- angles_to_dvec
- row_norm
- unit_vector
- make_rmat_of_expmap
- make_beam_rmat
"""
import numpy as np
from numpy import float_ as npfloat
from numpy import int_ as npint
from . import constants as cnst
from .transforms_definitions import xf_api, get_signature
from .xf_numpy import _beam_to_crystal
try:
import numba
except ImportError:
# Numba is an optional dependency. Any code relying on numba should be
# optional
raise ImportError("xf_numba not available: numba not installed")
# Use the following decorator instead of numba.jit for interface functions.
# This is so we can patch certain features.
def | (fn):
out = numba.jit(fn)
out.__signature__ = get_signature(fn)
return out
@numba.njit
def _angles_to_gvec_helper(angs, out=None):
"""
angs are vstacked [2*theta, eta, omega], although omega is optional
This should be equivalent to the one-liner numpy version:
out = np.vstack([[np.cos(0.5*angs[:, 0]) * np.cos(angs[:, 1])],
[np.cos(0.5*angs[:, 0]) * np.sin(angs[:, 1])],
[np.sin(0.5*angs[:, 0])]])
although much faster
"""
count, dim = angs.shape
out = out if out is not None else np.empty((count, 3), dtype=angs.dtype)
for i in range(count):
ca0 = np.cos(0.5*angs[i, 0])
sa0 = np.sin(0.5*angs[i, 0])
ca1 = np.cos(angs[i, 1])
sa1 = np.sin(angs[i, 1])
out[i, 0] = ca0 * ca1
out[i, 1] = ca0 * sa1
out[i, 2] = sa0
return out
@numba.njit
def _angles_to_dvec_helper(angs, out=None):
"""
angs are vstacked [2*theta, eta, omega], although omega is optional
This shoud be equivalent to the one-liner numpy version:
out = np.vstack([[np.sin(angs[:, 0]) * np.cos(angs[:, 1])],
[np.sin(angs[:, 0]) * np.sin(angs[:, 1])],
[-np.cos(angs[:, 0])]])
although much faster
"""
_, dim = angs.shape
out = out if out is not None else np.empty((dim, 3), dtype=angs.dtype)
for i in range(len(angs)):
ca0 = np.cos(angs[i, 0])
sa0 = np.sin(angs[i, 0])
ca1 = np.cos(angs[i, 1])
sa1 = np.sin(angs[i, 1])
out[i, 0] = sa0 * ca1
out[i, 1] = sa0 * sa1
out[i, 2] = -ca0
return out
@numba.njit
def _rmat_s_helper(chi=None, omes=None, out=None):
"""
simple utility for calculating sample rotation matrices based on
standard definition for HEDM
chi is a single value, 0.0 by default
omes is either a 1d array or None.
If None the code should be equivalent to a single ome of value 0.0
out is a preallocated output array. No check is done about it having the
proper size. If None a new array will be allocated. The expected size
of the array is as many 3x3 matrices as omes (n, 3, 3).
"""
if chi is not None:
cx = np.cos(chi)
sx = np.sin(chi)
else:
cx = 1.0
sx = 0.0
if omes is not None:
# omes is an array (vector): output is as many rotation matrices as omes entries.
n = len(omes)
out = out if out is not None else np.empty((n,3,3), dtype=omes.dtype)
if chi is not None:
# ome is array and chi is a value... compute output
cx = np.cos(chi)
sx = np.sin(chi)
for i in range(n):
cw = np.cos(omes[i])
sw = np.sin(omes[i])
out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw
out[i, 1, 0] = sx*sw; out[i, 1, 1] = cx; out[i, 1, 2] = -sx*cw
out[i, 2, 0] = -cx*sw; out[i, 2, 1] = sx; out[i, 2, 2] = cx*cw
else:
# omes is array and chi is None -> equivalent to chi=0.0, but shortcut computations.
# cx IS 1.0, sx IS 0.0
for i in range(n):
cw = np.cos(omes[i])
sw = np.sin(omes[i])
out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw
out[i, 1, 0] = 0.; out[i, 1, 1] = 1.; out[i, 1, 2] = 0.
out[i, 2, 0] = -sw; out[i, 2, 1] = 0.; out[i, 2, 2] = cw
else:
# omes is None, results should be equivalent to an array with a single element 0.0
out = out if out is not None else np.empty((1, 3, 3))
if chi is not None:
# ome is 0.0. cw is 1.0 and sw is 0.0
cx = np.cos(chi)
sx = np.sin(chi)
out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0.
out[0, 1, 0] = 0.; out[0, 1, 1] = cx; out[0, 1, 2] = -sx
out[0, 2, 0] = 0.; out[0, 2, 1] = sx; out[0, 2, 2] = cx
else:
# both omes and chi are None... return a single identity matrix.
out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0.
out[0, 1, 0] = 0.; out[0, 1, 1] = 1.; out[0, 1, 2] = 0.
out[0, 2, 0] = 0.; out[0, 2, 1] = 0.; out[0, 2, 2] = 1.
return out
@xf_api
def angles_to_gvec(angs,
beam_vec=None, eta_vec=None,
chi=None, rmat_c=None):
"""Note about this implementation:
This used to take rmat_b instead of the pair beam_vec, eta_vec. So it may require
some checking.
"""
orig_ndim = angs.ndim
angs = np.atleast_2d(angs)
nvecs, dim = angs.shape
# make vectors in beam frame
gvec_b = _angles_to_gvec_helper(angs[:,0:2])
# _rmat_s_helper could return None to mean "Identity" when chi and ome are None.
omes = angs[:, 2] if dim > 2 else None
if chi is not None or omes is not None:
rmat_s = _rmat_s_helper(chi=chi, omes=omes)
else:
rmat_s = None
# apply defaults to beam_vec and eta_vec.
# TODO: use a default rmat when beam_vec and eta_vec are None so computations
# can be avoided?
beam_vec = beam_vec if beam_vec is not None else cnst.beam_vec
eta_vec = eta_vec if eta_vec is not None else cnst.beam_vec
rmat_b = make_beam_rmat(beam_vec, eta_vec)
out = _beam_to_crystal(gvec_b,
rmat_b=rmat_b, rmat_s=rmat_s, rmat_c=rmat_c)
return out[0] if orig_ndim == 1 else out
@xf_api
def angles_to_dvec(angs,
beam_vec=None, eta_vec=None,
chi=None, rmat_c=None):
"""Note about this implementation:
This used to take rmat_b instead of the pair beam_vec, eta_vec. So it may
require some checking.
"""
angs = np.atleast_2d(angs)
nvecs, dim = angs.shape
# make vectors in beam frame
dvec_b = _angles_to_dvec_helper(angs[:,0:2])
# calculate rmat_s
omes = angs[:, 2] if dim>2 else None
if chi is not None or omes is not None:
rmat_s = _rmat_s_helper(chi=chi, omes=omes)
else:
rmat_s = None
# apply defaults to beam_vec and eta_vec.
# TODO: use a default rmat when beam_vec and eta_vec are None so computations
# can be avoided?
beam_vec = beam_vec if beam_vec is not None else cnst.beam_vec
eta_vec = eta_vec if eta_vec is not None else cnst.beam_vec
rmat_b = make_beam_rmat(beam_vec, eta_vec)
return _beam_to_crystal(dvec_b,
rmat_b=rmat_b, rmat_s=rmat_s, rmat_c=rmat_c)
# this could be a gufunc... (n)->()
@numba.njit
def _row_norm(a, out=None):
n, dim = a.shape
out = out if out is not None else np.empty(n, dtype=a.dtype)
for i in range(n):
nrm = 0.0
for j in range(dim):
x = a[i, j]
nrm += x*x
out[i] = np.sqrt(nrm)
return out
# this and _unit_vector_single would be better as a gufunc.
@numba.njit
def _unit_vector_single(a, out=None):
out = out if out is not None else np.empty_like(a)
n = len(a)
sqr_norm = a[0]*a[0]
for i in range(1, n):
sqr_norm += a[i]*a[i]
# prevent divide by zero
if sqr_norm > cnst.epsf:
recip_norm = 1.0 / np.sqrt(sqr_norm)
out[:] = a[:] * recip_norm
else:
out[:] = a[:]
return out
@numba.njit
def _unit_vector_multi(a, out=None):
out = out if out is not None else np.empty_like(a)
n, dim = a.shape
for i in range(n):
#_unit_vector_single(a[i], out=out[i])
sqr_norm = a[i, 0] * a[i, 0]
for j in range(1, dim):
sqr_norm += a[i, j]*a[i, j]
if sqr_norm > cnst.epsf:
recip_norm = 1.0 / np.sqrt(sqr_norm)
out[i,:] = a[i,:] * recip_norm
else:
out[i,:] = a[i,:]
return out
@xf_api
def row_norm(vec_in):
"""
return row-wise norms for a list of vectors
"""
# TODO: leave this to a PRECONDITION in the xf_api?
if vec_in.ndim == 1:
out = _row_norm(np.atleast_2d(vec_in))[0]
elif vec_in.ndim == 2:
out = _row_norm(vec_in)
else:
raise ValueError(
"incorrect shape: arg must be 1-d or 2-d, yours is %d"
% (len(vec_in.shape)))
return out
@xf_api
def unit_vector(vec_in):
"""
normalize array of column vectors (hstacked, axis = 0)
"""
if vec_in.ndim == 1:
out = _unit_vector_single(vec_in)
elif vec_in.ndim == 2:
out = _unit_vector_multi(vec_in)
else:
raise ValueError(
"incorrect arg shape; must be 1-d or 2-d, yours is %d-d"
% (vec_in.ndim)
)
return out
@numba.njit
def _make_rmat_of_expmap(x, out=None):
"""
TODO:
Test effectiveness of two options:
1) avoid conditional inside for loop and use np.divide to return NaN
for the phi = 0 cases, and deal with it later; or
2) catch phi = 0 cases inside the loop and just return squeezed answer
"""
n = len(x)
out = out if out is not None else np.empty((n,3,3), dtype=x.dtype)
for i in range(n):
phi = np.sqrt(x[i, 0]*x[i, 0] + x[i, 1]*x[i, 1] + x[i, 2]*x[i, 2])
if phi <= cnst.sqrt_epsf:
out[i, 0, 0] = 1.; out[i, 0, 1] = 0.; out[i, 0, 2] = 0.
out[i, 1, 0] = 0.; out[i, 1, 1] = 1.; out[i, 1, 2] = 0.
out[i, 2, 0] = 0.; out[i, 2, 1] = 0.; out[i, 2, 2] = 1.
else:
f1 = np.sin(phi)/phi
f2 = (1. - np.cos(phi)) / (phi*phi)
out[i, 0, 0] = 1. - f2*(x[i, 2]*x[i, 2] + x[i, 1]*x[i, 1])
out[i, 0, 1] = f2*x[i, 1]*x[i, 0] - f1*x[i, 2]
out[i, 0, 2] = f1*x[i, 1] + f2*x[i, 2]*x[i, 0]
out[i, 1, 0] = f1*x[i, 2] + f2*x[i, 1]*x[i, 0]
out[i, 1, 1] = 1. - f2*(x[i, 2]*x[i, 2] + x[i, 0]*x[i, 0])
out[i, 1, 2] = f2*x[i, 2]*x[i, 1] - f1*x[i, 0]
out[i, 2, 0] = f2*x[i, 2]*x[i, 0] - f1*x[i, 1]
out[i, 2, 1] = f1*x[i, 0] + f2*x[i, 2]*x[i, 1]
out[i, 2, 2] = 1. - f2*(x[i, 1]*x[i, 1] + x[i, 0]*x[i, 0])
return out
"""
if the help above was set up to return nans...
def make_rmat_of_expmap(exp_map):
exp_map = np.atleast_2d(exp_map)
rmats = np.empty((len(exp_map), 3, 3))
_make_rmat_of_expmap(exp_map, rmats)
chk = np.isnan(rmats)
if np.any(chk):
rmats[chk] = np.tile(
[1., 0., 0., 0., 1., 0., 0., 0., 1.], np.sum(chk)/9
)
return rmats
"""
@xf_api
def make_rmat_of_expmap(exp_map):
exp_map = np.atleast_2d(exp_map)
rmats = _make_rmat_of_expmap(exp_map)
return np.squeeze(rmats)
@xf_api
@xfapi_jit
def make_beam_rmat(bvec_l, evec_l):
# bvec_l and evec_l CANNOT have 0 magnitude!
# must catch this case as well as colinear bhat_l/ehat_l elsewhere...
bvec_mag = np.sqrt(bvec_l[0]**2 + bvec_l[1]**2 + bvec_l[2]**2)
if bvec_mag < cnst.sqrt_epsf:
raise RuntimeError("bvec_l MUST NOT be ZERO!")
pass
# assign Ze as -bhat_l
Ze0 = -bvec_l[0] / bvec_mag
Ze1 = -bvec_l[1] / bvec_mag
Ze2 = -bvec_l[2] / bvec_mag
# find Ye as Ze ^ ehat_l
Ye0 = Ze1*evec_l[2] - evec_l[1]*Ze2
Ye1 = Ze2*evec_l[0] - evec_l[2]*Ze0
Ye2 = Ze0*evec_l[1] - evec_l[0]*Ze1
Ye_mag = np.sqrt(Ye0**2 + Ye1**2 + Ye2**2)
if Ye_mag < cnst.sqrt_epsf:
raise RuntimeError("bvec_l and evec_l MUST NOT be collinear!")
pass
out = np.empty((3,3), dtype=bvec_l.dtype)
Ye0 /= Ye_mag
Ye1 /= Ye_mag
Ye2 /= Ye_mag
# find Xe as Ye ^ Ze
Xe0 = Ye1*Ze2 - Ze1*Ye2
Xe1 = Ye2*Ze0 - Ze2*Ye0
Xe2 = Ye0*Ze1 - Ze0*Ye1
out[0, 0] = Xe0
out[0, 1] = Ye0
out[0, 2] = Ze0
out[1, 0] = Xe1
out[1, 1] = Ye1
out[1, 2] = Ze1
out[2, 0] = Xe2
out[2, 1] = Ye2
out[2, 2] = Ze2
return out
| xfapi_jit | identifier_name |
xf_numba.py | #! /usr/bin/env python
# =============================================================================
# Copyright (c) 2012, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
# Written by Joel Bernier <bernier2@llnl.gov> and others.
# LLNL-CODE-529294.
# All rights reserved.
#
# This file is part of HEXRD. For details on dowloading the source,
# see the file COPYING.
#
# Please also see the file LICENSE.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License (as published by the Free
# Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program (see file LICENSE); if not, write to
# the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA or visit <http://www.gnu.org/licenses/>.
# =============================================================================
# ??? do we want to set np.seterr(invalid='ignore') to avoid nan warnings?
# -*- coding: utf-8 -*-
"""Tranforms module implementation using numba.
Currently, this implementation contains code for the following functions:
- angles_to_gvec
- angles_to_dvec
- row_norm
- unit_vector
- make_rmat_of_expmap
- make_beam_rmat
"""
import numpy as np
from numpy import float_ as npfloat
from numpy import int_ as npint
from . import constants as cnst
from .transforms_definitions import xf_api, get_signature
from .xf_numpy import _beam_to_crystal
try:
import numba
except ImportError:
# Numba is an optional dependency. Any code relying on numba should be
# optional
raise ImportError("xf_numba not available: numba not installed")
# Use the following decorator instead of numba.jit for interface functions.
# This is so we can patch certain features.
def xfapi_jit(fn):
out = numba.jit(fn)
out.__signature__ = get_signature(fn)
return out
@numba.njit
def _angles_to_gvec_helper(angs, out=None):
"""
angs are vstacked [2*theta, eta, omega], although omega is optional
This should be equivalent to the one-liner numpy version:
out = np.vstack([[np.cos(0.5*angs[:, 0]) * np.cos(angs[:, 1])],
[np.cos(0.5*angs[:, 0]) * np.sin(angs[:, 1])],
[np.sin(0.5*angs[:, 0])]])
although much faster
"""
count, dim = angs.shape
out = out if out is not None else np.empty((count, 3), dtype=angs.dtype)
for i in range(count):
ca0 = np.cos(0.5*angs[i, 0])
sa0 = np.sin(0.5*angs[i, 0])
ca1 = np.cos(angs[i, 1])
sa1 = np.sin(angs[i, 1])
out[i, 0] = ca0 * ca1
out[i, 1] = ca0 * sa1
out[i, 2] = sa0
return out
@numba.njit
def _angles_to_dvec_helper(angs, out=None):
"""
angs are vstacked [2*theta, eta, omega], although omega is optional
This shoud be equivalent to the one-liner numpy version:
out = np.vstack([[np.sin(angs[:, 0]) * np.cos(angs[:, 1])],
[np.sin(angs[:, 0]) * np.sin(angs[:, 1])],
[-np.cos(angs[:, 0])]])
although much faster
"""
_, dim = angs.shape
out = out if out is not None else np.empty((dim, 3), dtype=angs.dtype)
for i in range(len(angs)):
ca0 = np.cos(angs[i, 0])
sa0 = np.sin(angs[i, 0])
ca1 = np.cos(angs[i, 1])
sa1 = np.sin(angs[i, 1])
out[i, 0] = sa0 * ca1
out[i, 1] = sa0 * sa1
out[i, 2] = -ca0
return out
@numba.njit
def _rmat_s_helper(chi=None, omes=None, out=None):
"""
simple utility for calculating sample rotation matrices based on
standard definition for HEDM
chi is a single value, 0.0 by default
omes is either a 1d array or None.
If None the code should be equivalent to a single ome of value 0.0
out is a preallocated output array. No check is done about it having the
proper size. If None a new array will be allocated. The expected size
of the array is as many 3x3 matrices as omes (n, 3, 3).
"""
if chi is not None:
cx = np.cos(chi)
sx = np.sin(chi)
else:
cx = 1.0
sx = 0.0
if omes is not None:
# omes is an array (vector): output is as many rotation matrices as omes entries.
n = len(omes)
out = out if out is not None else np.empty((n,3,3), dtype=omes.dtype)
if chi is not None:
# ome is array and chi is a value... compute output
cx = np.cos(chi)
sx = np.sin(chi)
for i in range(n):
cw = np.cos(omes[i])
sw = np.sin(omes[i])
out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw
out[i, 1, 0] = sx*sw; out[i, 1, 1] = cx; out[i, 1, 2] = -sx*cw
out[i, 2, 0] = -cx*sw; out[i, 2, 1] = sx; out[i, 2, 2] = cx*cw
else:
# omes is array and chi is None -> equivalent to chi=0.0, but shortcut computations.
# cx IS 1.0, sx IS 0.0
for i in range(n):
cw = np.cos(omes[i])
sw = np.sin(omes[i])
out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw
out[i, 1, 0] = 0.; out[i, 1, 1] = 1.; out[i, 1, 2] = 0.
out[i, 2, 0] = -sw; out[i, 2, 1] = 0.; out[i, 2, 2] = cw
else:
# omes is None, results should be equivalent to an array with a single element 0.0
out = out if out is not None else np.empty((1, 3, 3))
if chi is not None:
# ome is 0.0. cw is 1.0 and sw is 0.0
cx = np.cos(chi)
sx = np.sin(chi)
out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0.
out[0, 1, 0] = 0.; out[0, 1, 1] = cx; out[0, 1, 2] = -sx
out[0, 2, 0] = 0.; out[0, 2, 1] = sx; out[0, 2, 2] = cx
else:
# both omes and chi are None... return a single identity matrix.
out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0.
out[0, 1, 0] = 0.; out[0, 1, 1] = 1.; out[0, 1, 2] = 0.
out[0, 2, 0] = 0.; out[0, 2, 1] = 0.; out[0, 2, 2] = 1.
return out
@xf_api
def angles_to_gvec(angs,
beam_vec=None, eta_vec=None,
chi=None, rmat_c=None):
"""Note about this implementation:
This used to take rmat_b instead of the pair beam_vec, eta_vec. So it may require
some checking.
"""
orig_ndim = angs.ndim
angs = np.atleast_2d(angs)
nvecs, dim = angs.shape
# make vectors in beam frame
gvec_b = _angles_to_gvec_helper(angs[:,0:2])
# _rmat_s_helper could return None to mean "Identity" when chi and ome are None.
omes = angs[:, 2] if dim > 2 else None
if chi is not None or omes is not None:
rmat_s = _rmat_s_helper(chi=chi, omes=omes)
else:
rmat_s = None
# apply defaults to beam_vec and eta_vec.
# TODO: use a default rmat when beam_vec and eta_vec are None so computations
# can be avoided?
beam_vec = beam_vec if beam_vec is not None else cnst.beam_vec
eta_vec = eta_vec if eta_vec is not None else cnst.beam_vec
rmat_b = make_beam_rmat(beam_vec, eta_vec)
out = _beam_to_crystal(gvec_b,
rmat_b=rmat_b, rmat_s=rmat_s, rmat_c=rmat_c)
return out[0] if orig_ndim == 1 else out
@xf_api
def angles_to_dvec(angs,
beam_vec=None, eta_vec=None,
chi=None, rmat_c=None):
"""Note about this implementation:
This used to take rmat_b instead of the pair beam_vec, eta_vec. So it may
require some checking.
"""
angs = np.atleast_2d(angs)
nvecs, dim = angs.shape
# make vectors in beam frame
dvec_b = _angles_to_dvec_helper(angs[:,0:2])
# calculate rmat_s
omes = angs[:, 2] if dim>2 else None
if chi is not None or omes is not None:
rmat_s = _rmat_s_helper(chi=chi, omes=omes)
else:
rmat_s = None
# apply defaults to beam_vec and eta_vec.
# TODO: use a default rmat when beam_vec and eta_vec are None so computations
# can be avoided?
beam_vec = beam_vec if beam_vec is not None else cnst.beam_vec
eta_vec = eta_vec if eta_vec is not None else cnst.beam_vec
rmat_b = make_beam_rmat(beam_vec, eta_vec)
return _beam_to_crystal(dvec_b,
rmat_b=rmat_b, rmat_s=rmat_s, rmat_c=rmat_c)
# this could be a gufunc... (n)->()
@numba.njit
def _row_norm(a, out=None):
n, dim = a.shape
out = out if out is not None else np.empty(n, dtype=a.dtype)
for i in range(n):
nrm = 0.0
for j in range(dim):
x = a[i, j]
nrm += x*x
out[i] = np.sqrt(nrm)
return out
# this and _unit_vector_single would be better as a gufunc.
@numba.njit
def _unit_vector_single(a, out=None):
out = out if out is not None else np.empty_like(a)
n = len(a)
sqr_norm = a[0]*a[0]
for i in range(1, n):
sqr_norm += a[i]*a[i]
# prevent divide by zero
if sqr_norm > cnst.epsf:
recip_norm = 1.0 / np.sqrt(sqr_norm)
out[:] = a[:] * recip_norm
else:
|
return out
@numba.njit
def _unit_vector_multi(a, out=None):
out = out if out is not None else np.empty_like(a)
n, dim = a.shape
for i in range(n):
#_unit_vector_single(a[i], out=out[i])
sqr_norm = a[i, 0] * a[i, 0]
for j in range(1, dim):
sqr_norm += a[i, j]*a[i, j]
if sqr_norm > cnst.epsf:
recip_norm = 1.0 / np.sqrt(sqr_norm)
out[i,:] = a[i,:] * recip_norm
else:
out[i,:] = a[i,:]
return out
@xf_api
def row_norm(vec_in):
"""
return row-wise norms for a list of vectors
"""
# TODO: leave this to a PRECONDITION in the xf_api?
if vec_in.ndim == 1:
out = _row_norm(np.atleast_2d(vec_in))[0]
elif vec_in.ndim == 2:
out = _row_norm(vec_in)
else:
raise ValueError(
"incorrect shape: arg must be 1-d or 2-d, yours is %d"
% (len(vec_in.shape)))
return out
@xf_api
def unit_vector(vec_in):
"""
normalize array of column vectors (hstacked, axis = 0)
"""
if vec_in.ndim == 1:
out = _unit_vector_single(vec_in)
elif vec_in.ndim == 2:
out = _unit_vector_multi(vec_in)
else:
raise ValueError(
"incorrect arg shape; must be 1-d or 2-d, yours is %d-d"
% (vec_in.ndim)
)
return out
@numba.njit
def _make_rmat_of_expmap(x, out=None):
"""
TODO:
Test effectiveness of two options:
1) avoid conditional inside for loop and use np.divide to return NaN
for the phi = 0 cases, and deal with it later; or
2) catch phi = 0 cases inside the loop and just return squeezed answer
"""
n = len(x)
out = out if out is not None else np.empty((n,3,3), dtype=x.dtype)
for i in range(n):
phi = np.sqrt(x[i, 0]*x[i, 0] + x[i, 1]*x[i, 1] + x[i, 2]*x[i, 2])
if phi <= cnst.sqrt_epsf:
out[i, 0, 0] = 1.; out[i, 0, 1] = 0.; out[i, 0, 2] = 0.
out[i, 1, 0] = 0.; out[i, 1, 1] = 1.; out[i, 1, 2] = 0.
out[i, 2, 0] = 0.; out[i, 2, 1] = 0.; out[i, 2, 2] = 1.
else:
f1 = np.sin(phi)/phi
f2 = (1. - np.cos(phi)) / (phi*phi)
out[i, 0, 0] = 1. - f2*(x[i, 2]*x[i, 2] + x[i, 1]*x[i, 1])
out[i, 0, 1] = f2*x[i, 1]*x[i, 0] - f1*x[i, 2]
out[i, 0, 2] = f1*x[i, 1] + f2*x[i, 2]*x[i, 0]
out[i, 1, 0] = f1*x[i, 2] + f2*x[i, 1]*x[i, 0]
out[i, 1, 1] = 1. - f2*(x[i, 2]*x[i, 2] + x[i, 0]*x[i, 0])
out[i, 1, 2] = f2*x[i, 2]*x[i, 1] - f1*x[i, 0]
out[i, 2, 0] = f2*x[i, 2]*x[i, 0] - f1*x[i, 1]
out[i, 2, 1] = f1*x[i, 0] + f2*x[i, 2]*x[i, 1]
out[i, 2, 2] = 1. - f2*(x[i, 1]*x[i, 1] + x[i, 0]*x[i, 0])
return out
"""
if the help above was set up to return nans...
def make_rmat_of_expmap(exp_map):
exp_map = np.atleast_2d(exp_map)
rmats = np.empty((len(exp_map), 3, 3))
_make_rmat_of_expmap(exp_map, rmats)
chk = np.isnan(rmats)
if np.any(chk):
rmats[chk] = np.tile(
[1., 0., 0., 0., 1., 0., 0., 0., 1.], np.sum(chk)/9
)
return rmats
"""
@xf_api
def make_rmat_of_expmap(exp_map):
exp_map = np.atleast_2d(exp_map)
rmats = _make_rmat_of_expmap(exp_map)
return np.squeeze(rmats)
@xf_api
@xfapi_jit
def make_beam_rmat(bvec_l, evec_l):
# bvec_l and evec_l CANNOT have 0 magnitude!
# must catch this case as well as colinear bhat_l/ehat_l elsewhere...
bvec_mag = np.sqrt(bvec_l[0]**2 + bvec_l[1]**2 + bvec_l[2]**2)
if bvec_mag < cnst.sqrt_epsf:
raise RuntimeError("bvec_l MUST NOT be ZERO!")
pass
# assign Ze as -bhat_l
Ze0 = -bvec_l[0] / bvec_mag
Ze1 = -bvec_l[1] / bvec_mag
Ze2 = -bvec_l[2] / bvec_mag
# find Ye as Ze ^ ehat_l
Ye0 = Ze1*evec_l[2] - evec_l[1]*Ze2
Ye1 = Ze2*evec_l[0] - evec_l[2]*Ze0
Ye2 = Ze0*evec_l[1] - evec_l[0]*Ze1
Ye_mag = np.sqrt(Ye0**2 + Ye1**2 + Ye2**2)
if Ye_mag < cnst.sqrt_epsf:
raise RuntimeError("bvec_l and evec_l MUST NOT be collinear!")
pass
out = np.empty((3,3), dtype=bvec_l.dtype)
Ye0 /= Ye_mag
Ye1 /= Ye_mag
Ye2 /= Ye_mag
# find Xe as Ye ^ Ze
Xe0 = Ye1*Ze2 - Ze1*Ye2
Xe1 = Ye2*Ze0 - Ze2*Ye0
Xe2 = Ye0*Ze1 - Ze0*Ye1
out[0, 0] = Xe0
out[0, 1] = Ye0
out[0, 2] = Ze0
out[1, 0] = Xe1
out[1, 1] = Ye1
out[1, 2] = Ze1
out[2, 0] = Xe2
out[2, 1] = Ye2
out[2, 2] = Ze2
return out
| out[:] = a[:] | conditional_block |
xf_numba.py | #! /usr/bin/env python
# =============================================================================
# Copyright (c) 2012, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
# Written by Joel Bernier <bernier2@llnl.gov> and others.
# LLNL-CODE-529294.
# All rights reserved.
#
# This file is part of HEXRD. For details on dowloading the source,
# see the file COPYING.
#
# Please also see the file LICENSE.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License (as published by the Free
# Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program (see file LICENSE); if not, write to
# the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA or visit <http://www.gnu.org/licenses/>.
# =============================================================================
# ??? do we want to set np.seterr(invalid='ignore') to avoid nan warnings?
# -*- coding: utf-8 -*-
"""Tranforms module implementation using numba.
Currently, this implementation contains code for the following functions:
- angles_to_gvec
- angles_to_dvec
- row_norm
- unit_vector
- make_rmat_of_expmap
- make_beam_rmat
"""
import numpy as np
from numpy import float_ as npfloat
from numpy import int_ as npint
from . import constants as cnst
from .transforms_definitions import xf_api, get_signature
from .xf_numpy import _beam_to_crystal
try:
import numba
except ImportError:
# Numba is an optional dependency. Any code relying on numba should be
# optional
raise ImportError("xf_numba not available: numba not installed")
# Use the following decorator instead of numba.jit for interface functions.
# This is so we can patch certain features.
def xfapi_jit(fn):
out = numba.jit(fn)
out.__signature__ = get_signature(fn)
return out
@numba.njit
def _angles_to_gvec_helper(angs, out=None):
"""
angs are vstacked [2*theta, eta, omega], although omega is optional
This should be equivalent to the one-liner numpy version:
out = np.vstack([[np.cos(0.5*angs[:, 0]) * np.cos(angs[:, 1])],
[np.cos(0.5*angs[:, 0]) * np.sin(angs[:, 1])],
[np.sin(0.5*angs[:, 0])]])
although much faster
"""
count, dim = angs.shape
out = out if out is not None else np.empty((count, 3), dtype=angs.dtype)
for i in range(count):
ca0 = np.cos(0.5*angs[i, 0])
sa0 = np.sin(0.5*angs[i, 0])
ca1 = np.cos(angs[i, 1])
sa1 = np.sin(angs[i, 1])
out[i, 0] = ca0 * ca1
out[i, 1] = ca0 * sa1
out[i, 2] = sa0
return out
@numba.njit
def _angles_to_dvec_helper(angs, out=None):
"""
angs are vstacked [2*theta, eta, omega], although omega is optional
This shoud be equivalent to the one-liner numpy version:
out = np.vstack([[np.sin(angs[:, 0]) * np.cos(angs[:, 1])],
[np.sin(angs[:, 0]) * np.sin(angs[:, 1])],
[-np.cos(angs[:, 0])]])
although much faster
"""
_, dim = angs.shape
out = out if out is not None else np.empty((dim, 3), dtype=angs.dtype)
for i in range(len(angs)):
ca0 = np.cos(angs[i, 0])
sa0 = np.sin(angs[i, 0])
ca1 = np.cos(angs[i, 1])
sa1 = np.sin(angs[i, 1])
out[i, 0] = sa0 * ca1
out[i, 1] = sa0 * sa1
out[i, 2] = -ca0
return out
@numba.njit
def _rmat_s_helper(chi=None, omes=None, out=None):
"""
simple utility for calculating sample rotation matrices based on
standard definition for HEDM
chi is a single value, 0.0 by default
omes is either a 1d array or None.
If None the code should be equivalent to a single ome of value 0.0
out is a preallocated output array. No check is done about it having the
proper size. If None a new array will be allocated. The expected size
of the array is as many 3x3 matrices as omes (n, 3, 3).
"""
if chi is not None:
cx = np.cos(chi)
sx = np.sin(chi)
else:
cx = 1.0
sx = 0.0
if omes is not None:
# omes is an array (vector): output is as many rotation matrices as omes entries.
n = len(omes)
out = out if out is not None else np.empty((n,3,3), dtype=omes.dtype)
if chi is not None:
# ome is array and chi is a value... compute output
cx = np.cos(chi)
sx = np.sin(chi)
for i in range(n):
cw = np.cos(omes[i])
sw = np.sin(omes[i])
out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw
out[i, 1, 0] = sx*sw; out[i, 1, 1] = cx; out[i, 1, 2] = -sx*cw
out[i, 2, 0] = -cx*sw; out[i, 2, 1] = sx; out[i, 2, 2] = cx*cw
else:
# omes is array and chi is None -> equivalent to chi=0.0, but shortcut computations.
# cx IS 1.0, sx IS 0.0
for i in range(n):
cw = np.cos(omes[i])
sw = np.sin(omes[i])
out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw
out[i, 1, 0] = 0.; out[i, 1, 1] = 1.; out[i, 1, 2] = 0.
out[i, 2, 0] = -sw; out[i, 2, 1] = 0.; out[i, 2, 2] = cw
else:
# omes is None, results should be equivalent to an array with a single element 0.0
out = out if out is not None else np.empty((1, 3, 3))
if chi is not None:
# ome is 0.0. cw is 1.0 and sw is 0.0
cx = np.cos(chi)
sx = np.sin(chi)
out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0.
out[0, 1, 0] = 0.; out[0, 1, 1] = cx; out[0, 1, 2] = -sx
out[0, 2, 0] = 0.; out[0, 2, 1] = sx; out[0, 2, 2] = cx
else:
# both omes and chi are None... return a single identity matrix.
out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0.
out[0, 1, 0] = 0.; out[0, 1, 1] = 1.; out[0, 1, 2] = 0.
out[0, 2, 0] = 0.; out[0, 2, 1] = 0.; out[0, 2, 2] = 1.
return out
@xf_api
def angles_to_gvec(angs,
beam_vec=None, eta_vec=None,
chi=None, rmat_c=None):
"""Note about this implementation:
This used to take rmat_b instead of the pair beam_vec, eta_vec. So it may require
some checking.
"""
orig_ndim = angs.ndim
angs = np.atleast_2d(angs)
nvecs, dim = angs.shape
# make vectors in beam frame
gvec_b = _angles_to_gvec_helper(angs[:,0:2])
# _rmat_s_helper could return None to mean "Identity" when chi and ome are None.
omes = angs[:, 2] if dim > 2 else None
if chi is not None or omes is not None:
rmat_s = _rmat_s_helper(chi=chi, omes=omes)
else:
rmat_s = None
# apply defaults to beam_vec and eta_vec.
# TODO: use a default rmat when beam_vec and eta_vec are None so computations
# can be avoided?
beam_vec = beam_vec if beam_vec is not None else cnst.beam_vec
eta_vec = eta_vec if eta_vec is not None else cnst.beam_vec
rmat_b = make_beam_rmat(beam_vec, eta_vec)
out = _beam_to_crystal(gvec_b,
rmat_b=rmat_b, rmat_s=rmat_s, rmat_c=rmat_c)
return out[0] if orig_ndim == 1 else out
@xf_api
def angles_to_dvec(angs,
beam_vec=None, eta_vec=None,
chi=None, rmat_c=None):
"""Note about this implementation:
This used to take rmat_b instead of the pair beam_vec, eta_vec. So it may
require some checking.
"""
angs = np.atleast_2d(angs)
nvecs, dim = angs.shape
# make vectors in beam frame
dvec_b = _angles_to_dvec_helper(angs[:,0:2])
# calculate rmat_s
omes = angs[:, 2] if dim>2 else None
if chi is not None or omes is not None:
rmat_s = _rmat_s_helper(chi=chi, omes=omes)
else:
rmat_s = None
# apply defaults to beam_vec and eta_vec.
# TODO: use a default rmat when beam_vec and eta_vec are None so computations
# can be avoided?
beam_vec = beam_vec if beam_vec is not None else cnst.beam_vec
eta_vec = eta_vec if eta_vec is not None else cnst.beam_vec
rmat_b = make_beam_rmat(beam_vec, eta_vec)
return _beam_to_crystal(dvec_b,
rmat_b=rmat_b, rmat_s=rmat_s, rmat_c=rmat_c)
# this could be a gufunc... (n)->()
@numba.njit
def _row_norm(a, out=None):
n, dim = a.shape
out = out if out is not None else np.empty(n, dtype=a.dtype)
for i in range(n):
nrm = 0.0
for j in range(dim):
x = a[i, j]
nrm += x*x
out[i] = np.sqrt(nrm)
return out
# this and _unit_vector_single would be better as a gufunc.
@numba.njit
def _unit_vector_single(a, out=None):
out = out if out is not None else np.empty_like(a)
n = len(a)
sqr_norm = a[0]*a[0]
for i in range(1, n):
sqr_norm += a[i]*a[i]
# prevent divide by zero
if sqr_norm > cnst.epsf:
recip_norm = 1.0 / np.sqrt(sqr_norm)
out[:] = a[:] * recip_norm
else:
out[:] = a[:]
return out | out = out if out is not None else np.empty_like(a)
n, dim = a.shape
for i in range(n):
#_unit_vector_single(a[i], out=out[i])
sqr_norm = a[i, 0] * a[i, 0]
for j in range(1, dim):
sqr_norm += a[i, j]*a[i, j]
if sqr_norm > cnst.epsf:
recip_norm = 1.0 / np.sqrt(sqr_norm)
out[i,:] = a[i,:] * recip_norm
else:
out[i,:] = a[i,:]
return out
@xf_api
def row_norm(vec_in):
"""
return row-wise norms for a list of vectors
"""
# TODO: leave this to a PRECONDITION in the xf_api?
if vec_in.ndim == 1:
out = _row_norm(np.atleast_2d(vec_in))[0]
elif vec_in.ndim == 2:
out = _row_norm(vec_in)
else:
raise ValueError(
"incorrect shape: arg must be 1-d or 2-d, yours is %d"
% (len(vec_in.shape)))
return out
@xf_api
def unit_vector(vec_in):
"""
normalize array of column vectors (hstacked, axis = 0)
"""
if vec_in.ndim == 1:
out = _unit_vector_single(vec_in)
elif vec_in.ndim == 2:
out = _unit_vector_multi(vec_in)
else:
raise ValueError(
"incorrect arg shape; must be 1-d or 2-d, yours is %d-d"
% (vec_in.ndim)
)
return out
@numba.njit
def _make_rmat_of_expmap(x, out=None):
"""
TODO:
Test effectiveness of two options:
1) avoid conditional inside for loop and use np.divide to return NaN
for the phi = 0 cases, and deal with it later; or
2) catch phi = 0 cases inside the loop and just return squeezed answer
"""
n = len(x)
out = out if out is not None else np.empty((n,3,3), dtype=x.dtype)
for i in range(n):
phi = np.sqrt(x[i, 0]*x[i, 0] + x[i, 1]*x[i, 1] + x[i, 2]*x[i, 2])
if phi <= cnst.sqrt_epsf:
out[i, 0, 0] = 1.; out[i, 0, 1] = 0.; out[i, 0, 2] = 0.
out[i, 1, 0] = 0.; out[i, 1, 1] = 1.; out[i, 1, 2] = 0.
out[i, 2, 0] = 0.; out[i, 2, 1] = 0.; out[i, 2, 2] = 1.
else:
f1 = np.sin(phi)/phi
f2 = (1. - np.cos(phi)) / (phi*phi)
out[i, 0, 0] = 1. - f2*(x[i, 2]*x[i, 2] + x[i, 1]*x[i, 1])
out[i, 0, 1] = f2*x[i, 1]*x[i, 0] - f1*x[i, 2]
out[i, 0, 2] = f1*x[i, 1] + f2*x[i, 2]*x[i, 0]
out[i, 1, 0] = f1*x[i, 2] + f2*x[i, 1]*x[i, 0]
out[i, 1, 1] = 1. - f2*(x[i, 2]*x[i, 2] + x[i, 0]*x[i, 0])
out[i, 1, 2] = f2*x[i, 2]*x[i, 1] - f1*x[i, 0]
out[i, 2, 0] = f2*x[i, 2]*x[i, 0] - f1*x[i, 1]
out[i, 2, 1] = f1*x[i, 0] + f2*x[i, 2]*x[i, 1]
out[i, 2, 2] = 1. - f2*(x[i, 1]*x[i, 1] + x[i, 0]*x[i, 0])
return out
"""
if the help above was set up to return nans...
def make_rmat_of_expmap(exp_map):
exp_map = np.atleast_2d(exp_map)
rmats = np.empty((len(exp_map), 3, 3))
_make_rmat_of_expmap(exp_map, rmats)
chk = np.isnan(rmats)
if np.any(chk):
rmats[chk] = np.tile(
[1., 0., 0., 0., 1., 0., 0., 0., 1.], np.sum(chk)/9
)
return rmats
"""
@xf_api
def make_rmat_of_expmap(exp_map):
exp_map = np.atleast_2d(exp_map)
rmats = _make_rmat_of_expmap(exp_map)
return np.squeeze(rmats)
@xf_api
@xfapi_jit
def make_beam_rmat(bvec_l, evec_l):
# bvec_l and evec_l CANNOT have 0 magnitude!
# must catch this case as well as colinear bhat_l/ehat_l elsewhere...
bvec_mag = np.sqrt(bvec_l[0]**2 + bvec_l[1]**2 + bvec_l[2]**2)
if bvec_mag < cnst.sqrt_epsf:
raise RuntimeError("bvec_l MUST NOT be ZERO!")
pass
# assign Ze as -bhat_l
Ze0 = -bvec_l[0] / bvec_mag
Ze1 = -bvec_l[1] / bvec_mag
Ze2 = -bvec_l[2] / bvec_mag
# find Ye as Ze ^ ehat_l
Ye0 = Ze1*evec_l[2] - evec_l[1]*Ze2
Ye1 = Ze2*evec_l[0] - evec_l[2]*Ze0
Ye2 = Ze0*evec_l[1] - evec_l[0]*Ze1
Ye_mag = np.sqrt(Ye0**2 + Ye1**2 + Ye2**2)
if Ye_mag < cnst.sqrt_epsf:
raise RuntimeError("bvec_l and evec_l MUST NOT be collinear!")
pass
out = np.empty((3,3), dtype=bvec_l.dtype)
Ye0 /= Ye_mag
Ye1 /= Ye_mag
Ye2 /= Ye_mag
# find Xe as Ye ^ Ze
Xe0 = Ye1*Ze2 - Ze1*Ye2
Xe1 = Ye2*Ze0 - Ze2*Ye0
Xe2 = Ye0*Ze1 - Ze0*Ye1
out[0, 0] = Xe0
out[0, 1] = Ye0
out[0, 2] = Ze0
out[1, 0] = Xe1
out[1, 1] = Ye1
out[1, 2] = Ze1
out[2, 0] = Xe2
out[2, 1] = Ye2
out[2, 2] = Ze2
return out |
@numba.njit
def _unit_vector_multi(a, out=None): | random_line_split |
report_errors_service.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto
package clouderrorreporting
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// A request for reporting an individual error event.
type ReportErrorEventRequest struct {
// [Required] The resource name of the Google Cloud Platform project. Written
// as `projects/` plus the
// [Google Cloud Platform project ID](https://support.google.com/cloud/answer/6158840).
// Example: `projects/my-project-123`.
ProjectName string `protobuf:"bytes,1,opt,name=project_name,json=projectName" json:"project_name,omitempty"`
// [Required] The error event to be reported.
Event *ReportedErrorEvent `protobuf:"bytes,2,opt,name=event" json:"event,omitempty"`
}
func (m *ReportErrorEventRequest) Reset() { *m = ReportErrorEventRequest{} }
func (m *ReportErrorEventRequest) String() string { return proto.CompactTextString(m) }
func (*ReportErrorEventRequest) ProtoMessage() {}
func (*ReportErrorEventRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} }
func (m *ReportErrorEventRequest) GetProjectName() string {
if m != nil {
return m.ProjectName
}
return ""
}
func (m *ReportErrorEventRequest) GetEvent() *ReportedErrorEvent {
if m != nil {
return m.Event
}
return nil
}
// Response for reporting an individual error event.
// Data may be added to this message in the future.
type ReportErrorEventResponse struct {
}
func (m *ReportErrorEventResponse) Reset() { *m = ReportErrorEventResponse{} }
func (m *ReportErrorEventResponse) | () string { return proto.CompactTextString(m) }
func (*ReportErrorEventResponse) ProtoMessage() {}
func (*ReportErrorEventResponse) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} }
// An error event which is reported to the Error Reporting system.
type ReportedErrorEvent struct {
// [Optional] Time when the event occurred.
// If not provided, the time when the event was received by the
// Error Reporting system will be used.
EventTime *google_protobuf1.Timestamp `protobuf:"bytes,1,opt,name=event_time,json=eventTime" json:"event_time,omitempty"`
// [Required] The service context in which this error has occurred.
ServiceContext *ServiceContext `protobuf:"bytes,2,opt,name=service_context,json=serviceContext" json:"service_context,omitempty"`
// [Required] A message describing the error. The message can contain an
// exception stack in one of the supported programming languages and formats.
// In that case, the message is parsed and detailed exception information
// is returned when retrieving the error event again.
Message string `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"`
// [Optional] A description of the context in which the error occurred.
Context *ErrorContext `protobuf:"bytes,4,opt,name=context" json:"context,omitempty"`
}
func (m *ReportedErrorEvent) Reset() { *m = ReportedErrorEvent{} }
func (m *ReportedErrorEvent) String() string { return proto.CompactTextString(m) }
func (*ReportedErrorEvent) ProtoMessage() {}
func (*ReportedErrorEvent) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{2} }
func (m *ReportedErrorEvent) GetEventTime() *google_protobuf1.Timestamp {
if m != nil {
return m.EventTime
}
return nil
}
func (m *ReportedErrorEvent) GetServiceContext() *ServiceContext {
if m != nil {
return m.ServiceContext
}
return nil
}
func (m *ReportedErrorEvent) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func (m *ReportedErrorEvent) GetContext() *ErrorContext {
if m != nil {
return m.Context
}
return nil
}
func init() {
proto.RegisterType((*ReportErrorEventRequest)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportErrorEventRequest")
proto.RegisterType((*ReportErrorEventResponse)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportErrorEventResponse")
proto.RegisterType((*ReportedErrorEvent)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportedErrorEvent")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for ReportErrorsService service
type ReportErrorsServiceClient interface {
// Report an individual error event.
//
// This endpoint accepts <strong>either</strong> an OAuth token,
// <strong>or</strong> an
// <a href="https://support.google.com/cloud/answer/6158862">API key</a>
// for authentication. To use an API key, append it to the URL as the value of
// a `key` parameter. For example:
// <pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
ReportErrorEvent(ctx context.Context, in *ReportErrorEventRequest, opts ...grpc.CallOption) (*ReportErrorEventResponse, error)
}
type reportErrorsServiceClient struct {
cc *grpc.ClientConn
}
func NewReportErrorsServiceClient(cc *grpc.ClientConn) ReportErrorsServiceClient {
return &reportErrorsServiceClient{cc}
}
func (c *reportErrorsServiceClient) ReportErrorEvent(ctx context.Context, in *ReportErrorEventRequest, opts ...grpc.CallOption) (*ReportErrorEventResponse, error) {
out := new(ReportErrorEventResponse)
err := grpc.Invoke(ctx, "/google.devtools.clouderrorreporting.v1beta1.ReportErrorsService/ReportErrorEvent", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for ReportErrorsService service
type ReportErrorsServiceServer interface {
// Report an individual error event.
//
// This endpoint accepts <strong>either</strong> an OAuth token,
// <strong>or</strong> an
// <a href="https://support.google.com/cloud/answer/6158862">API key</a>
// for authentication. To use an API key, append it to the URL as the value of
// a `key` parameter. For example:
// <pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
ReportErrorEvent(context.Context, *ReportErrorEventRequest) (*ReportErrorEventResponse, error)
}
func RegisterReportErrorsServiceServer(s *grpc.Server, srv ReportErrorsServiceServer) {
s.RegisterService(&_ReportErrorsService_serviceDesc, srv)
}
func _ReportErrorsService_ReportErrorEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReportErrorEventRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ReportErrorsServiceServer).ReportErrorEvent(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.devtools.clouderrorreporting.v1beta1.ReportErrorsService/ReportErrorEvent",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ReportErrorsServiceServer).ReportErrorEvent(ctx, req.(*ReportErrorEventRequest))
}
return interceptor(ctx, in, info, handler)
}
var _ReportErrorsService_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.devtools.clouderrorreporting.v1beta1.ReportErrorsService",
HandlerType: (*ReportErrorsServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "ReportErrorEvent",
Handler: _ReportErrorsService_ReportErrorEvent_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto",
}
func init() {
proto.RegisterFile("google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto", fileDescriptor3)
}
var fileDescriptor3 = []byte{
// 490 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x93, 0xcd, 0x8a, 0x13, 0x41,
0x10, 0xc7, 0x99, 0xf8, 0xb1, 0x6c, 0x47, 0x54, 0xda, 0x83, 0xc3, 0x20, 0xb8, 0xc6, 0xcb, 0xa2,
0x30, 0x6d, 0xe2, 0xc5, 0xec, 0x22, 0x0b, 0x59, 0xc3, 0xde, 0x64, 0x99, 0xd5, 0x3d, 0x48, 0x70,
0xe8, 0x4c, 0xca, 0x61, 0x24, 0xd3, 0x35, 0x76, 0x77, 0x82, 0x20, 0x5e, 0x7c, 0x85, 0x7d, 0x05,
0x4f, 0x3e, 0x8a, 0x57, 0x5f, 0xc0, 0x83, 0x0f, 0xa1, 0x37, 0xe9, 0xaf, 0x25, 0x6b, 0x72, 0x70,
0xf4, 0x58, 0xd3, 0x55, 0xbf, 0xff, 0xbf, 0x3e, 0x86, 0x1c, 0x95, 0x88, 0xe5, 0x1c, 0xd8, 0x0c,
0x96, 0x1a, 0x71, 0xae, 0x58, 0x31, 0xc7, 0xc5, 0x0c, 0xa4, 0x44, 0x29, 0xa1, 0x41, 0xa9, 0x2b,
0x51, 0xb2, 0x65, 0x7f, 0x0a, 0x9a, 0xf7, 0x99, 0xfb, 0x92, 0xdb, 0x57, 0x95, 0x2b, 0x90, 0xcb,
0xaa, 0x80, 0xb4, 0x91, 0xa8, 0x91, 0x3e, 0x74, 0xa0, 0x34, 0x80, 0xd2, 0x0d, 0xa0, 0xd4, 0x83,
0x92, 0x3b, 0x5e, 0x95, 0x37, 0x15, 0xe3, 0x42, 0xa0, 0xe6, 0xba, 0x42, 0xa1, 0x1c, 0x2a, 0x79,
0xd2, 0xc6, 0x53, 0x81, 0x75, 0x8d, 0xc2, 0x57, 0xde, 0xf5, 0x95, 0x36, 0x9a, 0x2e, 0xde, 0x30,
0x5d, 0xd5, 0xa0, 0x34, 0xaf, 0x1b, 0x97, 0xd0, 0x3b, 0x8b, 0xc8, 0xed, 0xcc, 0x32, 0xc6, 0x06,
0x37, 0x5e, 0x82, 0xd0, 0x19, 0xbc, 0x5b, 0x80, 0xd2, 0xf4, 0x1e, 0xb9, 0xd6, 0x48, 0x7c, 0x0b,
0x85, 0xce, 0x05, 0xaf, 0x21, 0x8e, 0x76, 0xa2, 0xdd, 0xed, 0xac, 0xeb, 0xbf, 0x3d, 0xe7, 0x35,
0xd0, 0x97, 0xe4, 0x0a, 0x98, 0x92, 0xb8, 0xb3, 0x13, 0xed, 0x76, 0x07, 0x07, 0x69, 0x8b, 0xa6,
0x53, 0xa7, 0x0b, 0xb3, 0x15, 0x65, 0x47, 0xeb, 0x25, 0x24, 0x5e, 0x37, 0xa5, 0x1a, 0x14, 0x0a,
0x7a, 0x9f, 0x3b, 0x84, 0xae, 0x57, 0xd2, 0x21, 0x21, 0xb6, 0x36, 0x37, 0x1d, 0x5a, 0xab, 0xdd,
0x41, 0x12, 0xec, 0x84, 0xf6, 0xd3, 0x17, 0xa1, 0xfd, 0x6c, 0xdb, 0x66, 0x9b, 0x98, 0xce, 0xc8,
0x0d, 0xbf, 0xba, 0xbc, 0x40, 0xa1, 0xe1, 0x7d, 0x68, 0x67, 0xbf, 0x55, 0x3b, 0x27, 0x8e, 0x71,
0xe8, 0x10, 0xd9, 0x75, 0x75, 0x21, 0xa6, 0x31, 0xd9, 0xaa, 0x41, 0x29, 0x5e, 0x42, 0x7c, 0xc9,
0x0e, 0x32, 0x84, 0xf4, 0x84, 0x6c, 0x05, 0xdd, 0xcb, 0x56, 0x77, 0xd8, 0x4a, 0xd7, 0x0e, 0x21,
0xa8, 0x06, 0xd2, 0xe0, 0x67, 0x44, 0x6e, 0xad, 0xcc, 0x50, 0x79, 0x77, 0xf4, 0x7b, 0x44, 0x6e,
0xfe, 0x39, 0x5b, 0xfa, 0xec, 0x1f, 0xf6, 0xb6, 0x76, 0x2f, 0xc9, 0xf8, 0x3f, 0x29, 0x7e, 0xc1,
0x07, 0x9f, 0xbe, 0xfd, 0x38, 0xeb, 0x0c, 0x7b, 0x8f, 0xce, 0x4f, 0xfa, 0xc3, 0xea, 0x19, 0x3e,
0xf5, 0x81, 0x62, 0x0f, 0x3e, 0x32, 0xbb, 0x44, 0xb5, 0xe7, 0xe8, 0x7b, 0xee, 0x7a, 0x46, 0xbf,
0x22, 0x62, 0xfe, 0x82, 0x36, 0x6e, 0x46, 0xf1, 0x86, 0x59, 0x1d, 0x9b, 0xab, 0x39, 0x8e, 0x5e,
0xbd, 0xf6, 0xa0, 0x12, 0xe7, 0x5c, 0x94, 0x29, 0xca, 0x92, 0x95, 0x20, 0xec, 0x4d, 0x31, 0xf7,
0xc4, 0x9b, 0x4a, 0xfd, 0xd5, 0xdf, 0xb9, 0xbf, 0xe1, 0xed, 0x4b, 0xe7, 0xfe, 0x91, 0x13, 0x38,
0x34, 0x8f, 0x6e, 0x9f, 0xd9, 0xb9, 0xc3, 0xd3, 0xfe, 0xc8, 0x54, 0x7e, 0x0d, 0x59, 0x13, 0x9b,
0x35, 0xb9, 0x98, 0x35, 0x39, 0x75, 0xfc, 0xe9, 0x55, 0x6b, 0xeb, 0xf1, 0xef, 0x00, 0x00, 0x00,
0xff, 0xff, 0x2c, 0xd1, 0x8e, 0x76, 0xc7, 0x04, 0x00, 0x00,
}
| String | identifier_name |
report_errors_service.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto
package clouderrorreporting
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// A request for reporting an individual error event.
type ReportErrorEventRequest struct {
// [Required] The resource name of the Google Cloud Platform project. Written
// as `projects/` plus the
// [Google Cloud Platform project ID](https://support.google.com/cloud/answer/6158840).
// Example: `projects/my-project-123`.
ProjectName string `protobuf:"bytes,1,opt,name=project_name,json=projectName" json:"project_name,omitempty"`
// [Required] The error event to be reported.
Event *ReportedErrorEvent `protobuf:"bytes,2,opt,name=event" json:"event,omitempty"`
}
func (m *ReportErrorEventRequest) Reset() { *m = ReportErrorEventRequest{} }
func (m *ReportErrorEventRequest) String() string { return proto.CompactTextString(m) }
func (*ReportErrorEventRequest) ProtoMessage() {}
func (*ReportErrorEventRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} }
func (m *ReportErrorEventRequest) GetProjectName() string {
if m != nil {
return m.ProjectName
}
return ""
}
func (m *ReportErrorEventRequest) GetEvent() *ReportedErrorEvent {
if m != nil {
return m.Event
}
return nil
}
// Response for reporting an individual error event.
// Data may be added to this message in the future.
type ReportErrorEventResponse struct {
}
func (m *ReportErrorEventResponse) Reset() { *m = ReportErrorEventResponse{} }
func (m *ReportErrorEventResponse) String() string { return proto.CompactTextString(m) }
func (*ReportErrorEventResponse) ProtoMessage() {}
func (*ReportErrorEventResponse) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} }
// An error event which is reported to the Error Reporting system.
type ReportedErrorEvent struct {
// [Optional] Time when the event occurred.
// If not provided, the time when the event was received by the
// Error Reporting system will be used.
EventTime *google_protobuf1.Timestamp `protobuf:"bytes,1,opt,name=event_time,json=eventTime" json:"event_time,omitempty"`
// [Required] The service context in which this error has occurred.
ServiceContext *ServiceContext `protobuf:"bytes,2,opt,name=service_context,json=serviceContext" json:"service_context,omitempty"`
// [Required] A message describing the error. The message can contain an
// exception stack in one of the supported programming languages and formats.
// In that case, the message is parsed and detailed exception information
// is returned when retrieving the error event again.
Message string `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"`
// [Optional] A description of the context in which the error occurred.
Context *ErrorContext `protobuf:"bytes,4,opt,name=context" json:"context,omitempty"`
}
func (m *ReportedErrorEvent) Reset() { *m = ReportedErrorEvent{} }
func (m *ReportedErrorEvent) String() string { return proto.CompactTextString(m) }
func (*ReportedErrorEvent) ProtoMessage() {}
func (*ReportedErrorEvent) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{2} }
func (m *ReportedErrorEvent) GetEventTime() *google_protobuf1.Timestamp {
if m != nil {
return m.EventTime
}
return nil
}
func (m *ReportedErrorEvent) GetServiceContext() *ServiceContext {
if m != nil {
return m.ServiceContext
}
return nil
}
func (m *ReportedErrorEvent) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func (m *ReportedErrorEvent) GetContext() *ErrorContext {
if m != nil {
return m.Context
}
return nil
}
func init() {
proto.RegisterType((*ReportErrorEventRequest)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportErrorEventRequest")
proto.RegisterType((*ReportErrorEventResponse)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportErrorEventResponse")
proto.RegisterType((*ReportedErrorEvent)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportedErrorEvent")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for ReportErrorsService service
type ReportErrorsServiceClient interface {
// Report an individual error event.
//
// This endpoint accepts <strong>either</strong> an OAuth token,
// <strong>or</strong> an
// <a href="https://support.google.com/cloud/answer/6158862">API key</a>
// for authentication. To use an API key, append it to the URL as the value of
// a `key` parameter. For example:
// <pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
ReportErrorEvent(ctx context.Context, in *ReportErrorEventRequest, opts ...grpc.CallOption) (*ReportErrorEventResponse, error)
}
type reportErrorsServiceClient struct {
cc *grpc.ClientConn
}
func NewReportErrorsServiceClient(cc *grpc.ClientConn) ReportErrorsServiceClient {
return &reportErrorsServiceClient{cc}
}
func (c *reportErrorsServiceClient) ReportErrorEvent(ctx context.Context, in *ReportErrorEventRequest, opts ...grpc.CallOption) (*ReportErrorEventResponse, error) {
out := new(ReportErrorEventResponse)
err := grpc.Invoke(ctx, "/google.devtools.clouderrorreporting.v1beta1.ReportErrorsService/ReportErrorEvent", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for ReportErrorsService service
type ReportErrorsServiceServer interface {
// Report an individual error event.
//
// This endpoint accepts <strong>either</strong> an OAuth token,
// <strong>or</strong> an
// <a href="https://support.google.com/cloud/answer/6158862">API key</a>
// for authentication. To use an API key, append it to the URL as the value of
// a `key` parameter. For example:
// <pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
ReportErrorEvent(context.Context, *ReportErrorEventRequest) (*ReportErrorEventResponse, error)
} | func RegisterReportErrorsServiceServer(s *grpc.Server, srv ReportErrorsServiceServer) {
s.RegisterService(&_ReportErrorsService_serviceDesc, srv)
}
func _ReportErrorsService_ReportErrorEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReportErrorEventRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ReportErrorsServiceServer).ReportErrorEvent(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.devtools.clouderrorreporting.v1beta1.ReportErrorsService/ReportErrorEvent",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ReportErrorsServiceServer).ReportErrorEvent(ctx, req.(*ReportErrorEventRequest))
}
return interceptor(ctx, in, info, handler)
}
var _ReportErrorsService_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.devtools.clouderrorreporting.v1beta1.ReportErrorsService",
HandlerType: (*ReportErrorsServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "ReportErrorEvent",
Handler: _ReportErrorsService_ReportErrorEvent_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto",
}
func init() {
proto.RegisterFile("google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto", fileDescriptor3)
}
var fileDescriptor3 = []byte{
// 490 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x93, 0xcd, 0x8a, 0x13, 0x41,
0x10, 0xc7, 0x99, 0xf8, 0xb1, 0x6c, 0x47, 0x54, 0xda, 0x83, 0xc3, 0x20, 0xb8, 0xc6, 0xcb, 0xa2,
0x30, 0x6d, 0xe2, 0xc5, 0xec, 0x22, 0x0b, 0x59, 0xc3, 0xde, 0x64, 0x99, 0xd5, 0x3d, 0x48, 0x70,
0xe8, 0x4c, 0xca, 0x61, 0x24, 0xd3, 0x35, 0x76, 0x77, 0x82, 0x20, 0x5e, 0x7c, 0x85, 0x7d, 0x05,
0x4f, 0x3e, 0x8a, 0x57, 0x5f, 0xc0, 0x83, 0x0f, 0xa1, 0x37, 0xe9, 0xaf, 0x25, 0x6b, 0x72, 0x70,
0xf4, 0x58, 0xd3, 0x55, 0xbf, 0xff, 0xbf, 0x3e, 0x86, 0x1c, 0x95, 0x88, 0xe5, 0x1c, 0xd8, 0x0c,
0x96, 0x1a, 0x71, 0xae, 0x58, 0x31, 0xc7, 0xc5, 0x0c, 0xa4, 0x44, 0x29, 0xa1, 0x41, 0xa9, 0x2b,
0x51, 0xb2, 0x65, 0x7f, 0x0a, 0x9a, 0xf7, 0x99, 0xfb, 0x92, 0xdb, 0x57, 0x95, 0x2b, 0x90, 0xcb,
0xaa, 0x80, 0xb4, 0x91, 0xa8, 0x91, 0x3e, 0x74, 0xa0, 0x34, 0x80, 0xd2, 0x0d, 0xa0, 0xd4, 0x83,
0x92, 0x3b, 0x5e, 0x95, 0x37, 0x15, 0xe3, 0x42, 0xa0, 0xe6, 0xba, 0x42, 0xa1, 0x1c, 0x2a, 0x79,
0xd2, 0xc6, 0x53, 0x81, 0x75, 0x8d, 0xc2, 0x57, 0xde, 0xf5, 0x95, 0x36, 0x9a, 0x2e, 0xde, 0x30,
0x5d, 0xd5, 0xa0, 0x34, 0xaf, 0x1b, 0x97, 0xd0, 0x3b, 0x8b, 0xc8, 0xed, 0xcc, 0x32, 0xc6, 0x06,
0x37, 0x5e, 0x82, 0xd0, 0x19, 0xbc, 0x5b, 0x80, 0xd2, 0xf4, 0x1e, 0xb9, 0xd6, 0x48, 0x7c, 0x0b,
0x85, 0xce, 0x05, 0xaf, 0x21, 0x8e, 0x76, 0xa2, 0xdd, 0xed, 0xac, 0xeb, 0xbf, 0x3d, 0xe7, 0x35,
0xd0, 0x97, 0xe4, 0x0a, 0x98, 0x92, 0xb8, 0xb3, 0x13, 0xed, 0x76, 0x07, 0x07, 0x69, 0x8b, 0xa6,
0x53, 0xa7, 0x0b, 0xb3, 0x15, 0x65, 0x47, 0xeb, 0x25, 0x24, 0x5e, 0x37, 0xa5, 0x1a, 0x14, 0x0a,
0x7a, 0x9f, 0x3b, 0x84, 0xae, 0x57, 0xd2, 0x21, 0x21, 0xb6, 0x36, 0x37, 0x1d, 0x5a, 0xab, 0xdd,
0x41, 0x12, 0xec, 0x84, 0xf6, 0xd3, 0x17, 0xa1, 0xfd, 0x6c, 0xdb, 0x66, 0x9b, 0x98, 0xce, 0xc8,
0x0d, 0xbf, 0xba, 0xbc, 0x40, 0xa1, 0xe1, 0x7d, 0x68, 0x67, 0xbf, 0x55, 0x3b, 0x27, 0x8e, 0x71,
0xe8, 0x10, 0xd9, 0x75, 0x75, 0x21, 0xa6, 0x31, 0xd9, 0xaa, 0x41, 0x29, 0x5e, 0x42, 0x7c, 0xc9,
0x0e, 0x32, 0x84, 0xf4, 0x84, 0x6c, 0x05, 0xdd, 0xcb, 0x56, 0x77, 0xd8, 0x4a, 0xd7, 0x0e, 0x21,
0xa8, 0x06, 0xd2, 0xe0, 0x67, 0x44, 0x6e, 0xad, 0xcc, 0x50, 0x79, 0x77, 0xf4, 0x7b, 0x44, 0x6e,
0xfe, 0x39, 0x5b, 0xfa, 0xec, 0x1f, 0xf6, 0xb6, 0x76, 0x2f, 0xc9, 0xf8, 0x3f, 0x29, 0x7e, 0xc1,
0x07, 0x9f, 0xbe, 0xfd, 0x38, 0xeb, 0x0c, 0x7b, 0x8f, 0xce, 0x4f, 0xfa, 0xc3, 0xea, 0x19, 0x3e,
0xf5, 0x81, 0x62, 0x0f, 0x3e, 0x32, 0xbb, 0x44, 0xb5, 0xe7, 0xe8, 0x7b, 0xee, 0x7a, 0x46, 0xbf,
0x22, 0x62, 0xfe, 0x82, 0x36, 0x6e, 0x46, 0xf1, 0x86, 0x59, 0x1d, 0x9b, 0xab, 0x39, 0x8e, 0x5e,
0xbd, 0xf6, 0xa0, 0x12, 0xe7, 0x5c, 0x94, 0x29, 0xca, 0x92, 0x95, 0x20, 0xec, 0x4d, 0x31, 0xf7,
0xc4, 0x9b, 0x4a, 0xfd, 0xd5, 0xdf, 0xb9, 0xbf, 0xe1, 0xed, 0x4b, 0xe7, 0xfe, 0x91, 0x13, 0x38,
0x34, 0x8f, 0x6e, 0x9f, 0xd9, 0xb9, 0xc3, 0xd3, 0xfe, 0xc8, 0x54, 0x7e, 0x0d, 0x59, 0x13, 0x9b,
0x35, 0xb9, 0x98, 0x35, 0x39, 0x75, 0xfc, 0xe9, 0x55, 0x6b, 0xeb, 0xf1, 0xef, 0x00, 0x00, 0x00,
0xff, 0xff, 0x2c, 0xd1, 0x8e, 0x76, 0xc7, 0x04, 0x00, 0x00,
} | random_line_split | |
report_errors_service.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto
package clouderrorreporting
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// A request for reporting an individual error event.
type ReportErrorEventRequest struct {
// [Required] The resource name of the Google Cloud Platform project. Written
// as `projects/` plus the
// [Google Cloud Platform project ID](https://support.google.com/cloud/answer/6158840).
// Example: `projects/my-project-123`.
ProjectName string `protobuf:"bytes,1,opt,name=project_name,json=projectName" json:"project_name,omitempty"`
// [Required] The error event to be reported.
Event *ReportedErrorEvent `protobuf:"bytes,2,opt,name=event" json:"event,omitempty"`
}
func (m *ReportErrorEventRequest) Reset() { *m = ReportErrorEventRequest{} }
func (m *ReportErrorEventRequest) String() string { return proto.CompactTextString(m) }
func (*ReportErrorEventRequest) ProtoMessage() {}
func (*ReportErrorEventRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} }
func (m *ReportErrorEventRequest) GetProjectName() string {
if m != nil {
return m.ProjectName
}
return ""
}
func (m *ReportErrorEventRequest) GetEvent() *ReportedErrorEvent {
if m != nil |
return nil
}
// Response for reporting an individual error event.
// Data may be added to this message in the future.
type ReportErrorEventResponse struct {
}
func (m *ReportErrorEventResponse) Reset() { *m = ReportErrorEventResponse{} }
func (m *ReportErrorEventResponse) String() string { return proto.CompactTextString(m) }
func (*ReportErrorEventResponse) ProtoMessage() {}
func (*ReportErrorEventResponse) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} }
// An error event which is reported to the Error Reporting system.
type ReportedErrorEvent struct {
// [Optional] Time when the event occurred.
// If not provided, the time when the event was received by the
// Error Reporting system will be used.
EventTime *google_protobuf1.Timestamp `protobuf:"bytes,1,opt,name=event_time,json=eventTime" json:"event_time,omitempty"`
// [Required] The service context in which this error has occurred.
ServiceContext *ServiceContext `protobuf:"bytes,2,opt,name=service_context,json=serviceContext" json:"service_context,omitempty"`
// [Required] A message describing the error. The message can contain an
// exception stack in one of the supported programming languages and formats.
// In that case, the message is parsed and detailed exception information
// is returned when retrieving the error event again.
Message string `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"`
// [Optional] A description of the context in which the error occurred.
Context *ErrorContext `protobuf:"bytes,4,opt,name=context" json:"context,omitempty"`
}
func (m *ReportedErrorEvent) Reset() { *m = ReportedErrorEvent{} }
func (m *ReportedErrorEvent) String() string { return proto.CompactTextString(m) }
func (*ReportedErrorEvent) ProtoMessage() {}
func (*ReportedErrorEvent) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{2} }
func (m *ReportedErrorEvent) GetEventTime() *google_protobuf1.Timestamp {
if m != nil {
return m.EventTime
}
return nil
}
func (m *ReportedErrorEvent) GetServiceContext() *ServiceContext {
if m != nil {
return m.ServiceContext
}
return nil
}
func (m *ReportedErrorEvent) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func (m *ReportedErrorEvent) GetContext() *ErrorContext {
if m != nil {
return m.Context
}
return nil
}
func init() {
proto.RegisterType((*ReportErrorEventRequest)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportErrorEventRequest")
proto.RegisterType((*ReportErrorEventResponse)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportErrorEventResponse")
proto.RegisterType((*ReportedErrorEvent)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportedErrorEvent")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for ReportErrorsService service
type ReportErrorsServiceClient interface {
// Report an individual error event.
//
// This endpoint accepts <strong>either</strong> an OAuth token,
// <strong>or</strong> an
// <a href="https://support.google.com/cloud/answer/6158862">API key</a>
// for authentication. To use an API key, append it to the URL as the value of
// a `key` parameter. For example:
// <pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
ReportErrorEvent(ctx context.Context, in *ReportErrorEventRequest, opts ...grpc.CallOption) (*ReportErrorEventResponse, error)
}
type reportErrorsServiceClient struct {
cc *grpc.ClientConn
}
func NewReportErrorsServiceClient(cc *grpc.ClientConn) ReportErrorsServiceClient {
return &reportErrorsServiceClient{cc}
}
func (c *reportErrorsServiceClient) ReportErrorEvent(ctx context.Context, in *ReportErrorEventRequest, opts ...grpc.CallOption) (*ReportErrorEventResponse, error) {
out := new(ReportErrorEventResponse)
err := grpc.Invoke(ctx, "/google.devtools.clouderrorreporting.v1beta1.ReportErrorsService/ReportErrorEvent", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for ReportErrorsService service
type ReportErrorsServiceServer interface {
// Report an individual error event.
//
// This endpoint accepts <strong>either</strong> an OAuth token,
// <strong>or</strong> an
// <a href="https://support.google.com/cloud/answer/6158862">API key</a>
// for authentication. To use an API key, append it to the URL as the value of
// a `key` parameter. For example:
// <pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
ReportErrorEvent(context.Context, *ReportErrorEventRequest) (*ReportErrorEventResponse, error)
}
func RegisterReportErrorsServiceServer(s *grpc.Server, srv ReportErrorsServiceServer) {
s.RegisterService(&_ReportErrorsService_serviceDesc, srv)
}
func _ReportErrorsService_ReportErrorEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReportErrorEventRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ReportErrorsServiceServer).ReportErrorEvent(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.devtools.clouderrorreporting.v1beta1.ReportErrorsService/ReportErrorEvent",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ReportErrorsServiceServer).ReportErrorEvent(ctx, req.(*ReportErrorEventRequest))
}
return interceptor(ctx, in, info, handler)
}
var _ReportErrorsService_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.devtools.clouderrorreporting.v1beta1.ReportErrorsService",
HandlerType: (*ReportErrorsServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "ReportErrorEvent",
Handler: _ReportErrorsService_ReportErrorEvent_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto",
}
func init() {
proto.RegisterFile("google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto", fileDescriptor3)
}
var fileDescriptor3 = []byte{
// 490 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x93, 0xcd, 0x8a, 0x13, 0x41,
0x10, 0xc7, 0x99, 0xf8, 0xb1, 0x6c, 0x47, 0x54, 0xda, 0x83, 0xc3, 0x20, 0xb8, 0xc6, 0xcb, 0xa2,
0x30, 0x6d, 0xe2, 0xc5, 0xec, 0x22, 0x0b, 0x59, 0xc3, 0xde, 0x64, 0x99, 0xd5, 0x3d, 0x48, 0x70,
0xe8, 0x4c, 0xca, 0x61, 0x24, 0xd3, 0x35, 0x76, 0x77, 0x82, 0x20, 0x5e, 0x7c, 0x85, 0x7d, 0x05,
0x4f, 0x3e, 0x8a, 0x57, 0x5f, 0xc0, 0x83, 0x0f, 0xa1, 0x37, 0xe9, 0xaf, 0x25, 0x6b, 0x72, 0x70,
0xf4, 0x58, 0xd3, 0x55, 0xbf, 0xff, 0xbf, 0x3e, 0x86, 0x1c, 0x95, 0x88, 0xe5, 0x1c, 0xd8, 0x0c,
0x96, 0x1a, 0x71, 0xae, 0x58, 0x31, 0xc7, 0xc5, 0x0c, 0xa4, 0x44, 0x29, 0xa1, 0x41, 0xa9, 0x2b,
0x51, 0xb2, 0x65, 0x7f, 0x0a, 0x9a, 0xf7, 0x99, 0xfb, 0x92, 0xdb, 0x57, 0x95, 0x2b, 0x90, 0xcb,
0xaa, 0x80, 0xb4, 0x91, 0xa8, 0x91, 0x3e, 0x74, 0xa0, 0x34, 0x80, 0xd2, 0x0d, 0xa0, 0xd4, 0x83,
0x92, 0x3b, 0x5e, 0x95, 0x37, 0x15, 0xe3, 0x42, 0xa0, 0xe6, 0xba, 0x42, 0xa1, 0x1c, 0x2a, 0x79,
0xd2, 0xc6, 0x53, 0x81, 0x75, 0x8d, 0xc2, 0x57, 0xde, 0xf5, 0x95, 0x36, 0x9a, 0x2e, 0xde, 0x30,
0x5d, 0xd5, 0xa0, 0x34, 0xaf, 0x1b, 0x97, 0xd0, 0x3b, 0x8b, 0xc8, 0xed, 0xcc, 0x32, 0xc6, 0x06,
0x37, 0x5e, 0x82, 0xd0, 0x19, 0xbc, 0x5b, 0x80, 0xd2, 0xf4, 0x1e, 0xb9, 0xd6, 0x48, 0x7c, 0x0b,
0x85, 0xce, 0x05, 0xaf, 0x21, 0x8e, 0x76, 0xa2, 0xdd, 0xed, 0xac, 0xeb, 0xbf, 0x3d, 0xe7, 0x35,
0xd0, 0x97, 0xe4, 0x0a, 0x98, 0x92, 0xb8, 0xb3, 0x13, 0xed, 0x76, 0x07, 0x07, 0x69, 0x8b, 0xa6,
0x53, 0xa7, 0x0b, 0xb3, 0x15, 0x65, 0x47, 0xeb, 0x25, 0x24, 0x5e, 0x37, 0xa5, 0x1a, 0x14, 0x0a,
0x7a, 0x9f, 0x3b, 0x84, 0xae, 0x57, 0xd2, 0x21, 0x21, 0xb6, 0x36, 0x37, 0x1d, 0x5a, 0xab, 0xdd,
0x41, 0x12, 0xec, 0x84, 0xf6, 0xd3, 0x17, 0xa1, 0xfd, 0x6c, 0xdb, 0x66, 0x9b, 0x98, 0xce, 0xc8,
0x0d, 0xbf, 0xba, 0xbc, 0x40, 0xa1, 0xe1, 0x7d, 0x68, 0x67, 0xbf, 0x55, 0x3b, 0x27, 0x8e, 0x71,
0xe8, 0x10, 0xd9, 0x75, 0x75, 0x21, 0xa6, 0x31, 0xd9, 0xaa, 0x41, 0x29, 0x5e, 0x42, 0x7c, 0xc9,
0x0e, 0x32, 0x84, 0xf4, 0x84, 0x6c, 0x05, 0xdd, 0xcb, 0x56, 0x77, 0xd8, 0x4a, 0xd7, 0x0e, 0x21,
0xa8, 0x06, 0xd2, 0xe0, 0x67, 0x44, 0x6e, 0xad, 0xcc, 0x50, 0x79, 0x77, 0xf4, 0x7b, 0x44, 0x6e,
0xfe, 0x39, 0x5b, 0xfa, 0xec, 0x1f, 0xf6, 0xb6, 0x76, 0x2f, 0xc9, 0xf8, 0x3f, 0x29, 0x7e, 0xc1,
0x07, 0x9f, 0xbe, 0xfd, 0x38, 0xeb, 0x0c, 0x7b, 0x8f, 0xce, 0x4f, 0xfa, 0xc3, 0xea, 0x19, 0x3e,
0xf5, 0x81, 0x62, 0x0f, 0x3e, 0x32, 0xbb, 0x44, 0xb5, 0xe7, 0xe8, 0x7b, 0xee, 0x7a, 0x46, 0xbf,
0x22, 0x62, 0xfe, 0x82, 0x36, 0x6e, 0x46, 0xf1, 0x86, 0x59, 0x1d, 0x9b, 0xab, 0x39, 0x8e, 0x5e,
0xbd, 0xf6, 0xa0, 0x12, 0xe7, 0x5c, 0x94, 0x29, 0xca, 0x92, 0x95, 0x20, 0xec, 0x4d, 0x31, 0xf7,
0xc4, 0x9b, 0x4a, 0xfd, 0xd5, 0xdf, 0xb9, 0xbf, 0xe1, 0xed, 0x4b, 0xe7, 0xfe, 0x91, 0x13, 0x38,
0x34, 0x8f, 0x6e, 0x9f, 0xd9, 0xb9, 0xc3, 0xd3, 0xfe, 0xc8, 0x54, 0x7e, 0x0d, 0x59, 0x13, 0x9b,
0x35, 0xb9, 0x98, 0x35, 0x39, 0x75, 0xfc, 0xe9, 0x55, 0x6b, 0xeb, 0xf1, 0xef, 0x00, 0x00, 0x00,
0xff, 0xff, 0x2c, 0xd1, 0x8e, 0x76, 0xc7, 0x04, 0x00, 0x00,
}
| {
return m.Event
} | conditional_block |
report_errors_service.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto
package clouderrorreporting
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// A request for reporting an individual error event.
type ReportErrorEventRequest struct {
// [Required] The resource name of the Google Cloud Platform project. Written
// as `projects/` plus the
// [Google Cloud Platform project ID](https://support.google.com/cloud/answer/6158840).
// Example: `projects/my-project-123`.
ProjectName string `protobuf:"bytes,1,opt,name=project_name,json=projectName" json:"project_name,omitempty"`
// [Required] The error event to be reported.
Event *ReportedErrorEvent `protobuf:"bytes,2,opt,name=event" json:"event,omitempty"`
}
func (m *ReportErrorEventRequest) Reset() { *m = ReportErrorEventRequest{} }
func (m *ReportErrorEventRequest) String() string { return proto.CompactTextString(m) }
func (*ReportErrorEventRequest) ProtoMessage() {}
func (*ReportErrorEventRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} }
func (m *ReportErrorEventRequest) GetProjectName() string {
if m != nil {
return m.ProjectName
}
return ""
}
func (m *ReportErrorEventRequest) GetEvent() *ReportedErrorEvent {
if m != nil {
return m.Event
}
return nil
}
// Response for reporting an individual error event.
// Data may be added to this message in the future.
type ReportErrorEventResponse struct {
}
func (m *ReportErrorEventResponse) Reset() { *m = ReportErrorEventResponse{} }
func (m *ReportErrorEventResponse) String() string { return proto.CompactTextString(m) }
func (*ReportErrorEventResponse) ProtoMessage() {}
func (*ReportErrorEventResponse) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} }
// An error event which is reported to the Error Reporting system.
type ReportedErrorEvent struct {
// [Optional] Time when the event occurred.
// If not provided, the time when the event was received by the
// Error Reporting system will be used.
EventTime *google_protobuf1.Timestamp `protobuf:"bytes,1,opt,name=event_time,json=eventTime" json:"event_time,omitempty"`
// [Required] The service context in which this error has occurred.
ServiceContext *ServiceContext `protobuf:"bytes,2,opt,name=service_context,json=serviceContext" json:"service_context,omitempty"`
// [Required] A message describing the error. The message can contain an
// exception stack in one of the supported programming languages and formats.
// In that case, the message is parsed and detailed exception information
// is returned when retrieving the error event again.
Message string `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"`
// [Optional] A description of the context in which the error occurred.
Context *ErrorContext `protobuf:"bytes,4,opt,name=context" json:"context,omitempty"`
}
func (m *ReportedErrorEvent) Reset() { *m = ReportedErrorEvent{} }
func (m *ReportedErrorEvent) String() string { return proto.CompactTextString(m) }
func (*ReportedErrorEvent) ProtoMessage() {}
func (*ReportedErrorEvent) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{2} }
func (m *ReportedErrorEvent) GetEventTime() *google_protobuf1.Timestamp {
if m != nil {
return m.EventTime
}
return nil
}
func (m *ReportedErrorEvent) GetServiceContext() *ServiceContext {
if m != nil {
return m.ServiceContext
}
return nil
}
func (m *ReportedErrorEvent) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func (m *ReportedErrorEvent) GetContext() *ErrorContext {
if m != nil {
return m.Context
}
return nil
}
func init() {
proto.RegisterType((*ReportErrorEventRequest)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportErrorEventRequest")
proto.RegisterType((*ReportErrorEventResponse)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportErrorEventResponse")
proto.RegisterType((*ReportedErrorEvent)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportedErrorEvent")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for ReportErrorsService service
type ReportErrorsServiceClient interface {
// Report an individual error event.
//
// This endpoint accepts <strong>either</strong> an OAuth token,
// <strong>or</strong> an
// <a href="https://support.google.com/cloud/answer/6158862">API key</a>
// for authentication. To use an API key, append it to the URL as the value of
// a `key` parameter. For example:
// <pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
ReportErrorEvent(ctx context.Context, in *ReportErrorEventRequest, opts ...grpc.CallOption) (*ReportErrorEventResponse, error)
}
type reportErrorsServiceClient struct {
cc *grpc.ClientConn
}
func NewReportErrorsServiceClient(cc *grpc.ClientConn) ReportErrorsServiceClient {
return &reportErrorsServiceClient{cc}
}
func (c *reportErrorsServiceClient) ReportErrorEvent(ctx context.Context, in *ReportErrorEventRequest, opts ...grpc.CallOption) (*ReportErrorEventResponse, error) {
out := new(ReportErrorEventResponse)
err := grpc.Invoke(ctx, "/google.devtools.clouderrorreporting.v1beta1.ReportErrorsService/ReportErrorEvent", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for ReportErrorsService service
type ReportErrorsServiceServer interface {
// Report an individual error event.
//
// This endpoint accepts <strong>either</strong> an OAuth token,
// <strong>or</strong> an
// <a href="https://support.google.com/cloud/answer/6158862">API key</a>
// for authentication. To use an API key, append it to the URL as the value of
// a `key` parameter. For example:
// <pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
ReportErrorEvent(context.Context, *ReportErrorEventRequest) (*ReportErrorEventResponse, error)
}
func RegisterReportErrorsServiceServer(s *grpc.Server, srv ReportErrorsServiceServer) {
s.RegisterService(&_ReportErrorsService_serviceDesc, srv)
}
func _ReportErrorsService_ReportErrorEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReportErrorEventRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ReportErrorsServiceServer).ReportErrorEvent(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.devtools.clouderrorreporting.v1beta1.ReportErrorsService/ReportErrorEvent",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ReportErrorsServiceServer).ReportErrorEvent(ctx, req.(*ReportErrorEventRequest))
}
return interceptor(ctx, in, info, handler)
}
var _ReportErrorsService_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.devtools.clouderrorreporting.v1beta1.ReportErrorsService",
HandlerType: (*ReportErrorsServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "ReportErrorEvent",
Handler: _ReportErrorsService_ReportErrorEvent_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto",
}
func init() |
var fileDescriptor3 = []byte{
// 490 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x93, 0xcd, 0x8a, 0x13, 0x41,
0x10, 0xc7, 0x99, 0xf8, 0xb1, 0x6c, 0x47, 0x54, 0xda, 0x83, 0xc3, 0x20, 0xb8, 0xc6, 0xcb, 0xa2,
0x30, 0x6d, 0xe2, 0xc5, 0xec, 0x22, 0x0b, 0x59, 0xc3, 0xde, 0x64, 0x99, 0xd5, 0x3d, 0x48, 0x70,
0xe8, 0x4c, 0xca, 0x61, 0x24, 0xd3, 0x35, 0x76, 0x77, 0x82, 0x20, 0x5e, 0x7c, 0x85, 0x7d, 0x05,
0x4f, 0x3e, 0x8a, 0x57, 0x5f, 0xc0, 0x83, 0x0f, 0xa1, 0x37, 0xe9, 0xaf, 0x25, 0x6b, 0x72, 0x70,
0xf4, 0x58, 0xd3, 0x55, 0xbf, 0xff, 0xbf, 0x3e, 0x86, 0x1c, 0x95, 0x88, 0xe5, 0x1c, 0xd8, 0x0c,
0x96, 0x1a, 0x71, 0xae, 0x58, 0x31, 0xc7, 0xc5, 0x0c, 0xa4, 0x44, 0x29, 0xa1, 0x41, 0xa9, 0x2b,
0x51, 0xb2, 0x65, 0x7f, 0x0a, 0x9a, 0xf7, 0x99, 0xfb, 0x92, 0xdb, 0x57, 0x95, 0x2b, 0x90, 0xcb,
0xaa, 0x80, 0xb4, 0x91, 0xa8, 0x91, 0x3e, 0x74, 0xa0, 0x34, 0x80, 0xd2, 0x0d, 0xa0, 0xd4, 0x83,
0x92, 0x3b, 0x5e, 0x95, 0x37, 0x15, 0xe3, 0x42, 0xa0, 0xe6, 0xba, 0x42, 0xa1, 0x1c, 0x2a, 0x79,
0xd2, 0xc6, 0x53, 0x81, 0x75, 0x8d, 0xc2, 0x57, 0xde, 0xf5, 0x95, 0x36, 0x9a, 0x2e, 0xde, 0x30,
0x5d, 0xd5, 0xa0, 0x34, 0xaf, 0x1b, 0x97, 0xd0, 0x3b, 0x8b, 0xc8, 0xed, 0xcc, 0x32, 0xc6, 0x06,
0x37, 0x5e, 0x82, 0xd0, 0x19, 0xbc, 0x5b, 0x80, 0xd2, 0xf4, 0x1e, 0xb9, 0xd6, 0x48, 0x7c, 0x0b,
0x85, 0xce, 0x05, 0xaf, 0x21, 0x8e, 0x76, 0xa2, 0xdd, 0xed, 0xac, 0xeb, 0xbf, 0x3d, 0xe7, 0x35,
0xd0, 0x97, 0xe4, 0x0a, 0x98, 0x92, 0xb8, 0xb3, 0x13, 0xed, 0x76, 0x07, 0x07, 0x69, 0x8b, 0xa6,
0x53, 0xa7, 0x0b, 0xb3, 0x15, 0x65, 0x47, 0xeb, 0x25, 0x24, 0x5e, 0x37, 0xa5, 0x1a, 0x14, 0x0a,
0x7a, 0x9f, 0x3b, 0x84, 0xae, 0x57, 0xd2, 0x21, 0x21, 0xb6, 0x36, 0x37, 0x1d, 0x5a, 0xab, 0xdd,
0x41, 0x12, 0xec, 0x84, 0xf6, 0xd3, 0x17, 0xa1, 0xfd, 0x6c, 0xdb, 0x66, 0x9b, 0x98, 0xce, 0xc8,
0x0d, 0xbf, 0xba, 0xbc, 0x40, 0xa1, 0xe1, 0x7d, 0x68, 0x67, 0xbf, 0x55, 0x3b, 0x27, 0x8e, 0x71,
0xe8, 0x10, 0xd9, 0x75, 0x75, 0x21, 0xa6, 0x31, 0xd9, 0xaa, 0x41, 0x29, 0x5e, 0x42, 0x7c, 0xc9,
0x0e, 0x32, 0x84, 0xf4, 0x84, 0x6c, 0x05, 0xdd, 0xcb, 0x56, 0x77, 0xd8, 0x4a, 0xd7, 0x0e, 0x21,
0xa8, 0x06, 0xd2, 0xe0, 0x67, 0x44, 0x6e, 0xad, 0xcc, 0x50, 0x79, 0x77, 0xf4, 0x7b, 0x44, 0x6e,
0xfe, 0x39, 0x5b, 0xfa, 0xec, 0x1f, 0xf6, 0xb6, 0x76, 0x2f, 0xc9, 0xf8, 0x3f, 0x29, 0x7e, 0xc1,
0x07, 0x9f, 0xbe, 0xfd, 0x38, 0xeb, 0x0c, 0x7b, 0x8f, 0xce, 0x4f, 0xfa, 0xc3, 0xea, 0x19, 0x3e,
0xf5, 0x81, 0x62, 0x0f, 0x3e, 0x32, 0xbb, 0x44, 0xb5, 0xe7, 0xe8, 0x7b, 0xee, 0x7a, 0x46, 0xbf,
0x22, 0x62, 0xfe, 0x82, 0x36, 0x6e, 0x46, 0xf1, 0x86, 0x59, 0x1d, 0x9b, 0xab, 0x39, 0x8e, 0x5e,
0xbd, 0xf6, 0xa0, 0x12, 0xe7, 0x5c, 0x94, 0x29, 0xca, 0x92, 0x95, 0x20, 0xec, 0x4d, 0x31, 0xf7,
0xc4, 0x9b, 0x4a, 0xfd, 0xd5, 0xdf, 0xb9, 0xbf, 0xe1, 0xed, 0x4b, 0xe7, 0xfe, 0x91, 0x13, 0x38,
0x34, 0x8f, 0x6e, 0x9f, 0xd9, 0xb9, 0xc3, 0xd3, 0xfe, 0xc8, 0x54, 0x7e, 0x0d, 0x59, 0x13, 0x9b,
0x35, 0xb9, 0x98, 0x35, 0x39, 0x75, 0xfc, 0xe9, 0x55, 0x6b, 0xeb, 0xf1, 0xef, 0x00, 0x00, 0x00,
0xff, 0xff, 0x2c, 0xd1, 0x8e, 0x76, 0xc7, 0x04, 0x00, 0x00,
}
| {
proto.RegisterFile("google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto", fileDescriptor3)
} | identifier_body |
crunchauto.py | #!/usr/bin/python
#import ConfigParser
import icalendar,sys,os,datetime
import stripe
import pytz
import urllib
import json
from dateutil import tz
from ..templateCommon import *
def utctolocal(dt,endofdate=False):
from_zone = tz.gettz('UTC')
to_zone = tz.gettz('America/New_York')
if isinstance(dt,datetime.datetime):
#dt = dt.replace(tzinfo=from_zone)
|
else:
if endofdate:
dt = datetime.datetime.combine(dt,datetime.time(hour=23,minute=59,second=59,tzinfo=to_zone))
else:
dt = datetime.datetime.combine(dt,datetime.time(tzinfo=to_zone))
return dt
weekday=['Sun','Mon','Tues','Wed','Thurs','Fri','Sat'] # OUR Sunday=0 Convention!!
def crunch_calendar(rundate=None):
#ICAL_URL = Config.get('autoplot','ICAL_URI')
ICAL_URL = current_app.config['globalConfig'].Config.get("autoplot","ICAL_URI")
g = urllib.request.urlopen(ICAL_URL)
data= g.read()
print(data)
cal = icalendar.Calendar.from_ical(data)
g.close()
"""
g = urllib.urlopen(ICAL_URL)
print g.read()
g.close()
"""
if rundate:
now = datetime.datetime.strptime(rundate,"%Y-%m-%d").replace(tzinfo=tz.gettz('America/New York'))
else:
now = datetime.datetime.now().replace(tzinfo=tz.gettz('America/New York'))
#print "CRUNCH EFFECTIVE RUNDATE",rundate
## ADJUST HERE FOR TZ! (i.e. If we run Midnight on Sunday don't want LAST week's run
dow = now.weekday() # 0=Monday
dow = (dow+1) %7 #0=Sunday
weeknum = int(now.strftime("%U"))
#print "weeknum",weeknum,"Weekday",weekday[dow],"DOW",dow
weekstart = (now - datetime.timedelta(days=dow))
weekstart = weekstart.replace(hour=0,minute=0,second=0,microsecond=0)
weekend = weekstart + datetime.timedelta(days=7)
weekend = weekend - datetime.timedelta(seconds=1)
#print "WEEKSTART",weekstart,"through",weekend
errors=[]
warnings=[]
billables=[]
summaries=[]
debug=[]
data={}
debug.append("{2} Week #{3} - {0} through {1}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum))
data['title']="Auto Plot Lease {2} Week #{3} - {0} through {1}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
data['lease-id']="autoplot-lease-{2}-Week{3:02}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
data['weekid']="{2:04}-{3:02}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
for component in cal.walk():
#print component.name
#print dict(component)
#print dir(component)
#print(component.get('summary'))
#print(component.get('dtstart'))
#print(component.get('dtend'))
#print(component.get('dtstamp'))
summary={'errors':[],'warnings':[]}
if component.name != 'VEVENT':
print ("NOT A VEVENT!!!",component.name)
else:
#print "VEVENT",component
billable=False
members=[]
event={}
calstart = component['DTSTART'].dt
#print "CALSTART",calstart
calstart = utctolocal(calstart)
calend = component['DTEND'].dt
calend = utctolocal(calend,endofdate=True)
#print "SUMMARY",component['SUMMARY']
#print "START",calstart
#print "END",calend
if 'ORGANIZER' in component:
# print "ORGANIZER",component['ORGANIZER']
for p in component['ORGANIZER'].params:
pass #print "_ ---- ",p,component['ORGANIZER'].params[p]
#print "CHECK",weekstart,"<",calstart,
#print "aand",calend,"<",weekend
#if (weekstart <= calstart) and (calend <= weekend):
rrule = None
weeks=1
if 'RRULE' in component and 'COUNT' in component['RRULE'] and 'FREQ' in component['RRULE']:
rrule=component['RRULE']
#print "RRULE",calstart.strftime("%b-%d %H:%M ")+component['SUMMARY'],
#print rrule['COUNT'][0],rrule['FREQ'][0]
if rrule['FREQ'][0]== "WEEKLY":
weeks = rrule['COUNT'][0]
for weekno in range(0,weeks):
short = calstart.strftime("%b-%d %H:%M ")+component['SUMMARY']
if (calstart <= weekend) and (weekstart < calend):
#print "THISWEEK calendar",calstart,calend
#print "THISWEEK curweel",weekstart,weekend
#print "PROCESS",short
#print "WEEK IN SERIES",weekno
if 'ATTENDEE' not in component:
summary['errors'].append("No Attendees")
else:
if isinstance(component['ATTENDEE'],list):
attlist = component['ATTENDEE']
else:
attlist = [component['ATTENDEE']]
for a in attlist:
#print " -- Attendee:",a
#print " -- Params:"
for p in a.params:
pass #print "_ ---- ",p,a.params[p]
if 'CUTYPE' in a.params and a.params['CUTYPE'] == 'INDIVIDUAL':
members.append(a.params['CN'])
"""
print " -- DIR",dir(a)
print
print " -- ICAL",type(a.to_ical),dir(a.to_ical())
print
"""
hrs=(calend-calstart).total_seconds()/3600
#print "*** CURRENT!!! {0} Hours total".format(hrs)
if (hrs <= 24):
summary['warnings'].append("Partial day entry - NOT BILLING")
elif (hrs <= 167):
summary['warnings'].append("Entry isn't quite full week, but billing anyway")
if (hrs > 24):
if len(members) > 1:
summary['errors'].append("More than one member assigned: "+str(", ".join(members)))
elif len(members) == 0:
summary['errors'].append("No attendees in calendar entry")
else:
if not members[0].lower().endswith("@makeitlabs.com"):
summary['errors'].append("Non-MIL email: "+str(members[0]))
else:
billable=True
#print "*** BILLABLE"
event['summary']=short
event['member']=members[0]
#if component['SUMMARY'].strip().lower().startswith("rental"):
# print "** IS RENTAL"
# Figure out what to do based on Summary
if (len(summary['errors']) == 0) and billable:
billables.append(event)
for e in summary['errors']:
errors.append(short + ": "+e)
for w in summary['warnings']:
warnings.append(short + ": "+w)
#print "END PARSE"
calstart = calstart + datetime.timedelta(weeks=1)
calend = calend + datetime.timedelta(weeks=1)
# End of FOR for weeks
"""
for x in component:
print x,type(component[x]),
if (isinstance(component[x],icalendar.prop.vDDDTypes)):
print component.decoded(x)
print type(component[x].dt)
print component[x].dt
else:
print component.decoded(x)
#print dir(component[x])
print
"""
if len(billables) ==0:
warnings.append("WARNING - NO BILLABLES THIS WEEK!")
elif len(billables) >1:
errors.append("ERROR - MULTIPLE BILLABLES THIS WEEK!")
if (len(errors) != 0):
data['Decision']='error'
elif (len(billables) == 0):
data['Decision']='no_bill'
else:
data['Decision']='bill'
return (errors,warnings,debug,data,billables)
def do_payment(customer,price,leaseid,description,test=False,pay=False):
errors=[]
warnings=[]
debug=[]
stripe.api_key = current_app.config['globalConfig'].Config.get('autoplot','stripe_token')
#stripe.api_key = "sk_test_4eC39HqLyjWDarjtT1zdp7dc" # TEST KEY
#print stripe.SKU.list(limit=99)
#print stripe.Customer.list(limit=99)
debug.append("Process Payment customer {0} Price {1} leaseid {2}".format(customer,price,leaseid))
#print "Process Payment customer {0} Price {1} leaseid {2}".format(customer,price,leaseid)
debug.append("Description: {0}".format(description))
#print "Description: {0}".format(description)
"""
"""
print ("""
** GET EXISTING INVOICE ITEM
""")
# Get existing outstanding items in Stripe to invoice
lastItem=None
pendingleases={}
while True:
ii= stripe.InvoiceItem.list(
limit=2,
#customer="cus_J0mrDmtpzbfYOk", # Stripe Test Customer
customer=customer, # MIL Brad Goodman
starting_after=lastItem
)
#print "EXISTING ITEMS"
#print ii
if ii:
for d in ii['data']:
lastItem=d['id']
if 'metadata' in d:
#print "Metadata ",d['metadata']
if 'X-MIL-lease-id' in d['metadata']:
pendingleases[d['metadata']['X-MIL-lease-id']] = { 'invoice':d['invoice'],'invoiceitem':d['id']}
warnings.append("Lease already pending: "+d['metadata']['X-MIL-lease-id']+" in invoice "+str(d['invoice']))
else:
warnings.append("No metadata in item")
if not ii['has_more']: break
#print "PENDING LEASES",pendingleases
# If our new entry is not here - create item in stripe
if leaseid not in pendingleases:
print ("""
** ADD INVOICE ITEM
""")
ii= stripe.InvoiceItem.create(
#customer="cus_J0mrDmtpzbfYOk", # Stripe Test Customer
customer=customer, # MIL Brad Goodman
description=description,
#price="sku_IpxYEyVzmdmEy6", # TEST
price=price, # MIL ZERO DOLLAR PLOT
metadata={
'X-MIL-lease-id':leaseid,
'X-MIL-lease-location':'autoplot'
}
)
pendingleases[leaseid]= { 'invoice':None,'invoiceitem':ii['id']}
None # We have a pending now, with no invoice
debug.append("Created Invoice Item {0} for lease {1}".format(ii['id'],leaseid))
# If we have not created an invoice with this item in it - do so
if leaseid not in pendingleases or pendingleases[leaseid]['invoice'] is None:
print ("""
** INVOICE
""")
inv = stripe.Invoice.create(
customer=customer,
description=description,
auto_advance=False,
collection_method="charge_automatically",
metadata={
'X-MIL-lease-id':leaseid,
'X-MIL-lease-location':'autoplot'
}
#period_start=,
#period_end=json
)
pendingleases[leaseid]['invoice']=inv['id']
debug.append("Created Invoice {0} for lease {1}".format(inv['id'],leaseid))
status="invoiced"
else:
status="already_invoiced"
warnings.append("Using existing Invoice {0} for lease {1}".format(pendingleases[leaseid]['invoice'],leaseid))
# We have a current lease - let's look at it!
print ("INSPECT INVOICE")
print ("***")
inv = stripe.Invoice.retrieve(pendingleases[leaseid]['invoice'])
print (json.dumps(inv,indent=2))
# If unpaied - pay it!
if inv['paid'] == True and inv['status']=='paid':
debug.append("Already paid")
#print "** Aleady Paid!"
status="already_paid_stripe"
elif pay:
#print "** Paying!"
debug.append("Paying")
try:
#stripe.Invoice.pay(inv['id'])
stripe.Invoice.pay(inv)
debug.append("Payment Done")
#print "** Paid!"
status="paid"
except BaseException as e:
errors.append("Payment failed on invoice {0}: {1}".format(inv['id'],e))
print ("** Payment failed!")
status="payment_failed"
#print "DELETEING INVOICE"
#print stripe.Invoice.delete(inv['id'])
#debug.append("Created Invoice {0} for lease {1}".format(inv['id'],leaseid))
return (errors,warnings,debug,status)
| dt = dt.astimezone(to_zone) | conditional_block |
crunchauto.py | #!/usr/bin/python
#import ConfigParser
import icalendar,sys,os,datetime
import stripe
import pytz
import urllib
import json
from dateutil import tz
from ..templateCommon import *
def utctolocal(dt,endofdate=False):
from_zone = tz.gettz('UTC')
to_zone = tz.gettz('America/New_York')
if isinstance(dt,datetime.datetime):
#dt = dt.replace(tzinfo=from_zone)
dt = dt.astimezone(to_zone)
else:
if endofdate:
dt = datetime.datetime.combine(dt,datetime.time(hour=23,minute=59,second=59,tzinfo=to_zone))
else:
dt = datetime.datetime.combine(dt,datetime.time(tzinfo=to_zone))
return dt
weekday=['Sun','Mon','Tues','Wed','Thurs','Fri','Sat'] # OUR Sunday=0 Convention!!
def crunch_calendar(rundate=None):
#ICAL_URL = Config.get('autoplot','ICAL_URI')
ICAL_URL = current_app.config['globalConfig'].Config.get("autoplot","ICAL_URI")
g = urllib.request.urlopen(ICAL_URL)
data= g.read()
print(data)
cal = icalendar.Calendar.from_ical(data)
g.close()
"""
g = urllib.urlopen(ICAL_URL)
print g.read()
g.close()
"""
if rundate:
now = datetime.datetime.strptime(rundate,"%Y-%m-%d").replace(tzinfo=tz.gettz('America/New York'))
else:
now = datetime.datetime.now().replace(tzinfo=tz.gettz('America/New York'))
#print "CRUNCH EFFECTIVE RUNDATE",rundate
## ADJUST HERE FOR TZ! (i.e. If we run Midnight on Sunday don't want LAST week's run
dow = now.weekday() # 0=Monday
dow = (dow+1) %7 #0=Sunday
weeknum = int(now.strftime("%U"))
#print "weeknum",weeknum,"Weekday",weekday[dow],"DOW",dow
weekstart = (now - datetime.timedelta(days=dow))
weekstart = weekstart.replace(hour=0,minute=0,second=0,microsecond=0)
weekend = weekstart + datetime.timedelta(days=7)
weekend = weekend - datetime.timedelta(seconds=1)
#print "WEEKSTART",weekstart,"through",weekend
errors=[]
warnings=[]
billables=[]
summaries=[]
debug=[]
data={}
debug.append("{2} Week #{3} - {0} through {1}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum))
data['title']="Auto Plot Lease {2} Week #{3} - {0} through {1}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
data['lease-id']="autoplot-lease-{2}-Week{3:02}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
data['weekid']="{2:04}-{3:02}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
for component in cal.walk():
#print component.name
#print dict(component)
#print dir(component)
#print(component.get('summary'))
#print(component.get('dtstart'))
#print(component.get('dtend'))
#print(component.get('dtstamp'))
summary={'errors':[],'warnings':[]}
if component.name != 'VEVENT':
print ("NOT A VEVENT!!!",component.name)
else:
#print "VEVENT",component
billable=False
members=[]
event={}
calstart = component['DTSTART'].dt
#print "CALSTART",calstart
calstart = utctolocal(calstart)
calend = component['DTEND'].dt
calend = utctolocal(calend,endofdate=True)
#print "SUMMARY",component['SUMMARY']
#print "START",calstart
#print "END",calend
if 'ORGANIZER' in component:
# print "ORGANIZER",component['ORGANIZER']
for p in component['ORGANIZER'].params:
pass #print "_ ---- ",p,component['ORGANIZER'].params[p]
#print "CHECK",weekstart,"<",calstart,
#print "aand",calend,"<",weekend
#if (weekstart <= calstart) and (calend <= weekend):
rrule = None
weeks=1
if 'RRULE' in component and 'COUNT' in component['RRULE'] and 'FREQ' in component['RRULE']:
rrule=component['RRULE']
#print "RRULE",calstart.strftime("%b-%d %H:%M ")+component['SUMMARY'],
#print rrule['COUNT'][0],rrule['FREQ'][0]
if rrule['FREQ'][0]== "WEEKLY":
weeks = rrule['COUNT'][0]
for weekno in range(0,weeks):
short = calstart.strftime("%b-%d %H:%M ")+component['SUMMARY']
if (calstart <= weekend) and (weekstart < calend):
#print "THISWEEK calendar",calstart,calend
#print "THISWEEK curweel",weekstart,weekend
#print "PROCESS",short
#print "WEEK IN SERIES",weekno
if 'ATTENDEE' not in component:
summary['errors'].append("No Attendees")
else:
if isinstance(component['ATTENDEE'],list):
attlist = component['ATTENDEE']
else:
attlist = [component['ATTENDEE']]
for a in attlist:
#print " -- Attendee:",a
#print " -- Params:"
for p in a.params:
pass #print "_ ---- ",p,a.params[p]
if 'CUTYPE' in a.params and a.params['CUTYPE'] == 'INDIVIDUAL':
members.append(a.params['CN'])
"""
print " -- DIR",dir(a)
print
print " -- ICAL",type(a.to_ical),dir(a.to_ical())
print
"""
hrs=(calend-calstart).total_seconds()/3600
#print "*** CURRENT!!! {0} Hours total".format(hrs)
if (hrs <= 24):
summary['warnings'].append("Partial day entry - NOT BILLING")
elif (hrs <= 167):
summary['warnings'].append("Entry isn't quite full week, but billing anyway")
if (hrs > 24):
if len(members) > 1:
summary['errors'].append("More than one member assigned: "+str(", ".join(members)))
elif len(members) == 0:
summary['errors'].append("No attendees in calendar entry")
else:
if not members[0].lower().endswith("@makeitlabs.com"):
summary['errors'].append("Non-MIL email: "+str(members[0]))
else:
billable=True
#print "*** BILLABLE"
event['summary']=short
event['member']=members[0]
#if component['SUMMARY'].strip().lower().startswith("rental"):
# print "** IS RENTAL"
# Figure out what to do based on Summary
if (len(summary['errors']) == 0) and billable:
billables.append(event)
for e in summary['errors']:
errors.append(short + ": "+e)
for w in summary['warnings']:
warnings.append(short + ": "+w)
#print "END PARSE"
calstart = calstart + datetime.timedelta(weeks=1)
calend = calend + datetime.timedelta(weeks=1)
# End of FOR for weeks
"""
for x in component:
print x,type(component[x]),
if (isinstance(component[x],icalendar.prop.vDDDTypes)):
print component.decoded(x)
print type(component[x].dt)
print component[x].dt
else:
print component.decoded(x)
#print dir(component[x])
print
"""
if len(billables) ==0:
warnings.append("WARNING - NO BILLABLES THIS WEEK!")
elif len(billables) >1:
errors.append("ERROR - MULTIPLE BILLABLES THIS WEEK!")
if (len(errors) != 0):
data['Decision']='error'
elif (len(billables) == 0):
data['Decision']='no_bill'
else:
data['Decision']='bill'
return (errors,warnings,debug,data,billables)
def do_payment(customer,price,leaseid,description,test=False,pay=False):
| errors=[]
warnings=[]
debug=[]
stripe.api_key = current_app.config['globalConfig'].Config.get('autoplot','stripe_token')
#stripe.api_key = "sk_test_4eC39HqLyjWDarjtT1zdp7dc" # TEST KEY
#print stripe.SKU.list(limit=99)
#print stripe.Customer.list(limit=99)
debug.append("Process Payment customer {0} Price {1} leaseid {2}".format(customer,price,leaseid))
#print "Process Payment customer {0} Price {1} leaseid {2}".format(customer,price,leaseid)
debug.append("Description: {0}".format(description))
#print "Description: {0}".format(description)
"""
"""
print ("""
** GET EXISTING INVOICE ITEM
""")
# Get existing outstanding items in Stripe to invoice
lastItem=None
pendingleases={}
while True:
ii= stripe.InvoiceItem.list(
limit=2,
#customer="cus_J0mrDmtpzbfYOk", # Stripe Test Customer
customer=customer, # MIL Brad Goodman
starting_after=lastItem
)
#print "EXISTING ITEMS"
#print ii
if ii:
for d in ii['data']:
lastItem=d['id']
if 'metadata' in d:
#print "Metadata ",d['metadata']
if 'X-MIL-lease-id' in d['metadata']:
pendingleases[d['metadata']['X-MIL-lease-id']] = { 'invoice':d['invoice'],'invoiceitem':d['id']}
warnings.append("Lease already pending: "+d['metadata']['X-MIL-lease-id']+" in invoice "+str(d['invoice']))
else:
warnings.append("No metadata in item")
if not ii['has_more']: break
#print "PENDING LEASES",pendingleases
# If our new entry is not here - create item in stripe
if leaseid not in pendingleases:
print ("""
** ADD INVOICE ITEM
""")
ii= stripe.InvoiceItem.create(
#customer="cus_J0mrDmtpzbfYOk", # Stripe Test Customer
customer=customer, # MIL Brad Goodman
description=description,
#price="sku_IpxYEyVzmdmEy6", # TEST
price=price, # MIL ZERO DOLLAR PLOT
metadata={
'X-MIL-lease-id':leaseid,
'X-MIL-lease-location':'autoplot'
}
)
pendingleases[leaseid]= { 'invoice':None,'invoiceitem':ii['id']}
None # We have a pending now, with no invoice
debug.append("Created Invoice Item {0} for lease {1}".format(ii['id'],leaseid))
# If we have not created an invoice with this item in it - do so
if leaseid not in pendingleases or pendingleases[leaseid]['invoice'] is None:
print ("""
** INVOICE
""")
inv = stripe.Invoice.create(
customer=customer,
description=description,
auto_advance=False,
collection_method="charge_automatically",
metadata={
'X-MIL-lease-id':leaseid,
'X-MIL-lease-location':'autoplot'
}
#period_start=,
#period_end=json
)
pendingleases[leaseid]['invoice']=inv['id']
debug.append("Created Invoice {0} for lease {1}".format(inv['id'],leaseid))
status="invoiced"
else:
status="already_invoiced"
warnings.append("Using existing Invoice {0} for lease {1}".format(pendingleases[leaseid]['invoice'],leaseid))
# We have a current lease - let's look at it!
print ("INSPECT INVOICE")
print ("***")
inv = stripe.Invoice.retrieve(pendingleases[leaseid]['invoice'])
print (json.dumps(inv,indent=2))
# If unpaied - pay it!
if inv['paid'] == True and inv['status']=='paid':
debug.append("Already paid")
#print "** Aleady Paid!"
status="already_paid_stripe"
elif pay:
#print "** Paying!"
debug.append("Paying")
try:
#stripe.Invoice.pay(inv['id'])
stripe.Invoice.pay(inv)
debug.append("Payment Done")
#print "** Paid!"
status="paid"
except BaseException as e:
errors.append("Payment failed on invoice {0}: {1}".format(inv['id'],e))
print ("** Payment failed!")
status="payment_failed"
#print "DELETEING INVOICE"
#print stripe.Invoice.delete(inv['id'])
#debug.append("Created Invoice {0} for lease {1}".format(inv['id'],leaseid))
return (errors,warnings,debug,status) | identifier_body | |
crunchauto.py | #!/usr/bin/python
#import ConfigParser
import icalendar,sys,os,datetime
import stripe
import pytz
import urllib
import json
from dateutil import tz
from ..templateCommon import *
def utctolocal(dt,endofdate=False):
from_zone = tz.gettz('UTC')
to_zone = tz.gettz('America/New_York')
if isinstance(dt,datetime.datetime):
#dt = dt.replace(tzinfo=from_zone)
dt = dt.astimezone(to_zone)
else:
if endofdate:
dt = datetime.datetime.combine(dt,datetime.time(hour=23,minute=59,second=59,tzinfo=to_zone))
else:
dt = datetime.datetime.combine(dt,datetime.time(tzinfo=to_zone))
return dt
weekday=['Sun','Mon','Tues','Wed','Thurs','Fri','Sat'] # OUR Sunday=0 Convention!!
def crunch_calendar(rundate=None):
#ICAL_URL = Config.get('autoplot','ICAL_URI')
ICAL_URL = current_app.config['globalConfig'].Config.get("autoplot","ICAL_URI")
g = urllib.request.urlopen(ICAL_URL)
data= g.read()
print(data)
cal = icalendar.Calendar.from_ical(data)
g.close()
"""
g = urllib.urlopen(ICAL_URL)
print g.read()
g.close()
"""
if rundate:
now = datetime.datetime.strptime(rundate,"%Y-%m-%d").replace(tzinfo=tz.gettz('America/New York'))
else:
now = datetime.datetime.now().replace(tzinfo=tz.gettz('America/New York'))
#print "CRUNCH EFFECTIVE RUNDATE",rundate
## ADJUST HERE FOR TZ! (i.e. If we run Midnight on Sunday don't want LAST week's run
dow = now.weekday() # 0=Monday
dow = (dow+1) %7 #0=Sunday
weeknum = int(now.strftime("%U"))
#print "weeknum",weeknum,"Weekday",weekday[dow],"DOW",dow
weekstart = (now - datetime.timedelta(days=dow))
weekstart = weekstart.replace(hour=0,minute=0,second=0,microsecond=0)
weekend = weekstart + datetime.timedelta(days=7)
weekend = weekend - datetime.timedelta(seconds=1)
#print "WEEKSTART",weekstart,"through",weekend
errors=[]
warnings=[]
billables=[]
summaries=[]
debug=[]
data={}
debug.append("{2} Week #{3} - {0} through {1}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum))
data['title']="Auto Plot Lease {2} Week #{3} - {0} through {1}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
data['lease-id']="autoplot-lease-{2}-Week{3:02}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
data['weekid']="{2:04}-{3:02}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
for component in cal.walk():
#print component.name
#print dict(component)
#print dir(component)
#print(component.get('summary'))
#print(component.get('dtstart'))
#print(component.get('dtend'))
#print(component.get('dtstamp'))
summary={'errors':[],'warnings':[]}
if component.name != 'VEVENT':
print ("NOT A VEVENT!!!",component.name)
else:
#print "VEVENT",component
billable=False
members=[]
event={}
calstart = component['DTSTART'].dt
#print "CALSTART",calstart
calstart = utctolocal(calstart)
calend = component['DTEND'].dt
calend = utctolocal(calend,endofdate=True)
#print "SUMMARY",component['SUMMARY']
#print "START",calstart
#print "END",calend
if 'ORGANIZER' in component:
# print "ORGANIZER",component['ORGANIZER']
for p in component['ORGANIZER'].params:
pass #print "_ ---- ",p,component['ORGANIZER'].params[p]
#print "CHECK",weekstart,"<",calstart,
#print "aand",calend,"<",weekend
#if (weekstart <= calstart) and (calend <= weekend):
rrule = None
weeks=1
if 'RRULE' in component and 'COUNT' in component['RRULE'] and 'FREQ' in component['RRULE']:
rrule=component['RRULE']
#print "RRULE",calstart.strftime("%b-%d %H:%M ")+component['SUMMARY'],
#print rrule['COUNT'][0],rrule['FREQ'][0]
if rrule['FREQ'][0]== "WEEKLY":
weeks = rrule['COUNT'][0]
for weekno in range(0,weeks):
short = calstart.strftime("%b-%d %H:%M ")+component['SUMMARY']
if (calstart <= weekend) and (weekstart < calend):
#print "THISWEEK calendar",calstart,calend
#print "THISWEEK curweel",weekstart,weekend
#print "PROCESS",short
#print "WEEK IN SERIES",weekno
if 'ATTENDEE' not in component:
summary['errors'].append("No Attendees")
else:
if isinstance(component['ATTENDEE'],list):
attlist = component['ATTENDEE']
else:
attlist = [component['ATTENDEE']]
for a in attlist:
#print " -- Attendee:",a
#print " -- Params:"
for p in a.params:
pass #print "_ ---- ",p,a.params[p]
if 'CUTYPE' in a.params and a.params['CUTYPE'] == 'INDIVIDUAL':
members.append(a.params['CN'])
"""
print " -- DIR",dir(a)
print
print " -- ICAL",type(a.to_ical),dir(a.to_ical())
print
"""
hrs=(calend-calstart).total_seconds()/3600
#print "*** CURRENT!!! {0} Hours total".format(hrs)
if (hrs <= 24):
summary['warnings'].append("Partial day entry - NOT BILLING")
elif (hrs <= 167):
summary['warnings'].append("Entry isn't quite full week, but billing anyway")
if (hrs > 24):
if len(members) > 1:
summary['errors'].append("More than one member assigned: "+str(", ".join(members)))
elif len(members) == 0:
summary['errors'].append("No attendees in calendar entry")
else:
if not members[0].lower().endswith("@makeitlabs.com"):
summary['errors'].append("Non-MIL email: "+str(members[0]))
else:
billable=True
#print "*** BILLABLE"
event['summary']=short
event['member']=members[0]
#if component['SUMMARY'].strip().lower().startswith("rental"):
# print "** IS RENTAL"
# Figure out what to do based on Summary
if (len(summary['errors']) == 0) and billable:
billables.append(event)
for e in summary['errors']:
errors.append(short + ": "+e)
for w in summary['warnings']:
warnings.append(short + ": "+w)
#print "END PARSE"
calstart = calstart + datetime.timedelta(weeks=1)
calend = calend + datetime.timedelta(weeks=1)
# End of FOR for weeks
"""
for x in component:
print x,type(component[x]),
if (isinstance(component[x],icalendar.prop.vDDDTypes)):
print component.decoded(x)
print type(component[x].dt)
print component[x].dt
else:
print component.decoded(x)
#print dir(component[x])
print
"""
if len(billables) ==0:
warnings.append("WARNING - NO BILLABLES THIS WEEK!")
elif len(billables) >1:
errors.append("ERROR - MULTIPLE BILLABLES THIS WEEK!")
if (len(errors) != 0):
data['Decision']='error'
elif (len(billables) == 0):
data['Decision']='no_bill'
else:
data['Decision']='bill'
return (errors,warnings,debug,data,billables)
def | (customer,price,leaseid,description,test=False,pay=False):
errors=[]
warnings=[]
debug=[]
stripe.api_key = current_app.config['globalConfig'].Config.get('autoplot','stripe_token')
#stripe.api_key = "sk_test_4eC39HqLyjWDarjtT1zdp7dc" # TEST KEY
#print stripe.SKU.list(limit=99)
#print stripe.Customer.list(limit=99)
debug.append("Process Payment customer {0} Price {1} leaseid {2}".format(customer,price,leaseid))
#print "Process Payment customer {0} Price {1} leaseid {2}".format(customer,price,leaseid)
debug.append("Description: {0}".format(description))
#print "Description: {0}".format(description)
"""
"""
print ("""
** GET EXISTING INVOICE ITEM
""")
# Get existing outstanding items in Stripe to invoice
lastItem=None
pendingleases={}
while True:
ii= stripe.InvoiceItem.list(
limit=2,
#customer="cus_J0mrDmtpzbfYOk", # Stripe Test Customer
customer=customer, # MIL Brad Goodman
starting_after=lastItem
)
#print "EXISTING ITEMS"
#print ii
if ii:
for d in ii['data']:
lastItem=d['id']
if 'metadata' in d:
#print "Metadata ",d['metadata']
if 'X-MIL-lease-id' in d['metadata']:
pendingleases[d['metadata']['X-MIL-lease-id']] = { 'invoice':d['invoice'],'invoiceitem':d['id']}
warnings.append("Lease already pending: "+d['metadata']['X-MIL-lease-id']+" in invoice "+str(d['invoice']))
else:
warnings.append("No metadata in item")
if not ii['has_more']: break
#print "PENDING LEASES",pendingleases
# If our new entry is not here - create item in stripe
if leaseid not in pendingleases:
print ("""
** ADD INVOICE ITEM
""")
ii= stripe.InvoiceItem.create(
#customer="cus_J0mrDmtpzbfYOk", # Stripe Test Customer
customer=customer, # MIL Brad Goodman
description=description,
#price="sku_IpxYEyVzmdmEy6", # TEST
price=price, # MIL ZERO DOLLAR PLOT
metadata={
'X-MIL-lease-id':leaseid,
'X-MIL-lease-location':'autoplot'
}
)
pendingleases[leaseid]= { 'invoice':None,'invoiceitem':ii['id']}
None # We have a pending now, with no invoice
debug.append("Created Invoice Item {0} for lease {1}".format(ii['id'],leaseid))
# If we have not created an invoice with this item in it - do so
if leaseid not in pendingleases or pendingleases[leaseid]['invoice'] is None:
print ("""
** INVOICE
""")
inv = stripe.Invoice.create(
customer=customer,
description=description,
auto_advance=False,
collection_method="charge_automatically",
metadata={
'X-MIL-lease-id':leaseid,
'X-MIL-lease-location':'autoplot'
}
#period_start=,
#period_end=json
)
pendingleases[leaseid]['invoice']=inv['id']
debug.append("Created Invoice {0} for lease {1}".format(inv['id'],leaseid))
status="invoiced"
else:
status="already_invoiced"
warnings.append("Using existing Invoice {0} for lease {1}".format(pendingleases[leaseid]['invoice'],leaseid))
# We have a current lease - let's look at it!
print ("INSPECT INVOICE")
print ("***")
inv = stripe.Invoice.retrieve(pendingleases[leaseid]['invoice'])
print (json.dumps(inv,indent=2))
# If unpaied - pay it!
if inv['paid'] == True and inv['status']=='paid':
debug.append("Already paid")
#print "** Aleady Paid!"
status="already_paid_stripe"
elif pay:
#print "** Paying!"
debug.append("Paying")
try:
#stripe.Invoice.pay(inv['id'])
stripe.Invoice.pay(inv)
debug.append("Payment Done")
#print "** Paid!"
status="paid"
except BaseException as e:
errors.append("Payment failed on invoice {0}: {1}".format(inv['id'],e))
print ("** Payment failed!")
status="payment_failed"
#print "DELETEING INVOICE"
#print stripe.Invoice.delete(inv['id'])
#debug.append("Created Invoice {0} for lease {1}".format(inv['id'],leaseid))
return (errors,warnings,debug,status)
| do_payment | identifier_name |
crunchauto.py | #!/usr/bin/python
#import ConfigParser
import icalendar,sys,os,datetime
import stripe
import pytz
import urllib
import json
from dateutil import tz
from ..templateCommon import *
def utctolocal(dt,endofdate=False):
from_zone = tz.gettz('UTC')
to_zone = tz.gettz('America/New_York')
if isinstance(dt,datetime.datetime):
#dt = dt.replace(tzinfo=from_zone)
dt = dt.astimezone(to_zone)
else:
if endofdate:
dt = datetime.datetime.combine(dt,datetime.time(hour=23,minute=59,second=59,tzinfo=to_zone))
else:
dt = datetime.datetime.combine(dt,datetime.time(tzinfo=to_zone))
return dt
weekday=['Sun','Mon','Tues','Wed','Thurs','Fri','Sat'] # OUR Sunday=0 Convention!!
def crunch_calendar(rundate=None):
#ICAL_URL = Config.get('autoplot','ICAL_URI')
ICAL_URL = current_app.config['globalConfig'].Config.get("autoplot","ICAL_URI")
g = urllib.request.urlopen(ICAL_URL)
data= g.read()
print(data)
cal = icalendar.Calendar.from_ical(data)
g.close()
"""
g = urllib.urlopen(ICAL_URL)
print g.read()
g.close()
"""
if rundate:
now = datetime.datetime.strptime(rundate,"%Y-%m-%d").replace(tzinfo=tz.gettz('America/New York'))
else:
now = datetime.datetime.now().replace(tzinfo=tz.gettz('America/New York'))
#print "CRUNCH EFFECTIVE RUNDATE",rundate
## ADJUST HERE FOR TZ! (i.e. If we run Midnight on Sunday don't want LAST week's run
dow = now.weekday() # 0=Monday | weekstart = weekstart.replace(hour=0,minute=0,second=0,microsecond=0)
weekend = weekstart + datetime.timedelta(days=7)
weekend = weekend - datetime.timedelta(seconds=1)
#print "WEEKSTART",weekstart,"through",weekend
errors=[]
warnings=[]
billables=[]
summaries=[]
debug=[]
data={}
debug.append("{2} Week #{3} - {0} through {1}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum))
data['title']="Auto Plot Lease {2} Week #{3} - {0} through {1}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
data['lease-id']="autoplot-lease-{2}-Week{3:02}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
data['weekid']="{2:04}-{3:02}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
for component in cal.walk():
#print component.name
#print dict(component)
#print dir(component)
#print(component.get('summary'))
#print(component.get('dtstart'))
#print(component.get('dtend'))
#print(component.get('dtstamp'))
summary={'errors':[],'warnings':[]}
if component.name != 'VEVENT':
print ("NOT A VEVENT!!!",component.name)
else:
#print "VEVENT",component
billable=False
members=[]
event={}
calstart = component['DTSTART'].dt
#print "CALSTART",calstart
calstart = utctolocal(calstart)
calend = component['DTEND'].dt
calend = utctolocal(calend,endofdate=True)
#print "SUMMARY",component['SUMMARY']
#print "START",calstart
#print "END",calend
if 'ORGANIZER' in component:
# print "ORGANIZER",component['ORGANIZER']
for p in component['ORGANIZER'].params:
pass #print "_ ---- ",p,component['ORGANIZER'].params[p]
#print "CHECK",weekstart,"<",calstart,
#print "aand",calend,"<",weekend
#if (weekstart <= calstart) and (calend <= weekend):
rrule = None
weeks=1
if 'RRULE' in component and 'COUNT' in component['RRULE'] and 'FREQ' in component['RRULE']:
rrule=component['RRULE']
#print "RRULE",calstart.strftime("%b-%d %H:%M ")+component['SUMMARY'],
#print rrule['COUNT'][0],rrule['FREQ'][0]
if rrule['FREQ'][0]== "WEEKLY":
weeks = rrule['COUNT'][0]
for weekno in range(0,weeks):
short = calstart.strftime("%b-%d %H:%M ")+component['SUMMARY']
if (calstart <= weekend) and (weekstart < calend):
#print "THISWEEK calendar",calstart,calend
#print "THISWEEK curweel",weekstart,weekend
#print "PROCESS",short
#print "WEEK IN SERIES",weekno
if 'ATTENDEE' not in component:
summary['errors'].append("No Attendees")
else:
if isinstance(component['ATTENDEE'],list):
attlist = component['ATTENDEE']
else:
attlist = [component['ATTENDEE']]
for a in attlist:
#print " -- Attendee:",a
#print " -- Params:"
for p in a.params:
pass #print "_ ---- ",p,a.params[p]
if 'CUTYPE' in a.params and a.params['CUTYPE'] == 'INDIVIDUAL':
members.append(a.params['CN'])
"""
print " -- DIR",dir(a)
print
print " -- ICAL",type(a.to_ical),dir(a.to_ical())
print
"""
hrs=(calend-calstart).total_seconds()/3600
#print "*** CURRENT!!! {0} Hours total".format(hrs)
if (hrs <= 24):
summary['warnings'].append("Partial day entry - NOT BILLING")
elif (hrs <= 167):
summary['warnings'].append("Entry isn't quite full week, but billing anyway")
if (hrs > 24):
if len(members) > 1:
summary['errors'].append("More than one member assigned: "+str(", ".join(members)))
elif len(members) == 0:
summary['errors'].append("No attendees in calendar entry")
else:
if not members[0].lower().endswith("@makeitlabs.com"):
summary['errors'].append("Non-MIL email: "+str(members[0]))
else:
billable=True
#print "*** BILLABLE"
event['summary']=short
event['member']=members[0]
#if component['SUMMARY'].strip().lower().startswith("rental"):
# print "** IS RENTAL"
# Figure out what to do based on Summary
if (len(summary['errors']) == 0) and billable:
billables.append(event)
for e in summary['errors']:
errors.append(short + ": "+e)
for w in summary['warnings']:
warnings.append(short + ": "+w)
#print "END PARSE"
calstart = calstart + datetime.timedelta(weeks=1)
calend = calend + datetime.timedelta(weeks=1)
# End of FOR for weeks
"""
for x in component:
print x,type(component[x]),
if (isinstance(component[x],icalendar.prop.vDDDTypes)):
print component.decoded(x)
print type(component[x].dt)
print component[x].dt
else:
print component.decoded(x)
#print dir(component[x])
print
"""
if len(billables) ==0:
warnings.append("WARNING - NO BILLABLES THIS WEEK!")
elif len(billables) >1:
errors.append("ERROR - MULTIPLE BILLABLES THIS WEEK!")
if (len(errors) != 0):
data['Decision']='error'
elif (len(billables) == 0):
data['Decision']='no_bill'
else:
data['Decision']='bill'
return (errors,warnings,debug,data,billables)
def do_payment(customer,price,leaseid,description,test=False,pay=False):
errors=[]
warnings=[]
debug=[]
stripe.api_key = current_app.config['globalConfig'].Config.get('autoplot','stripe_token')
#stripe.api_key = "sk_test_4eC39HqLyjWDarjtT1zdp7dc" # TEST KEY
#print stripe.SKU.list(limit=99)
#print stripe.Customer.list(limit=99)
debug.append("Process Payment customer {0} Price {1} leaseid {2}".format(customer,price,leaseid))
#print "Process Payment customer {0} Price {1} leaseid {2}".format(customer,price,leaseid)
debug.append("Description: {0}".format(description))
#print "Description: {0}".format(description)
"""
"""
print ("""
** GET EXISTING INVOICE ITEM
""")
# Get existing outstanding items in Stripe to invoice
lastItem=None
pendingleases={}
while True:
ii= stripe.InvoiceItem.list(
limit=2,
#customer="cus_J0mrDmtpzbfYOk", # Stripe Test Customer
customer=customer, # MIL Brad Goodman
starting_after=lastItem
)
#print "EXISTING ITEMS"
#print ii
if ii:
for d in ii['data']:
lastItem=d['id']
if 'metadata' in d:
#print "Metadata ",d['metadata']
if 'X-MIL-lease-id' in d['metadata']:
pendingleases[d['metadata']['X-MIL-lease-id']] = { 'invoice':d['invoice'],'invoiceitem':d['id']}
warnings.append("Lease already pending: "+d['metadata']['X-MIL-lease-id']+" in invoice "+str(d['invoice']))
else:
warnings.append("No metadata in item")
if not ii['has_more']: break
#print "PENDING LEASES",pendingleases
# If our new entry is not here - create item in stripe
if leaseid not in pendingleases:
print ("""
** ADD INVOICE ITEM
""")
ii= stripe.InvoiceItem.create(
#customer="cus_J0mrDmtpzbfYOk", # Stripe Test Customer
customer=customer, # MIL Brad Goodman
description=description,
#price="sku_IpxYEyVzmdmEy6", # TEST
price=price, # MIL ZERO DOLLAR PLOT
metadata={
'X-MIL-lease-id':leaseid,
'X-MIL-lease-location':'autoplot'
}
)
pendingleases[leaseid]= { 'invoice':None,'invoiceitem':ii['id']}
None # We have a pending now, with no invoice
debug.append("Created Invoice Item {0} for lease {1}".format(ii['id'],leaseid))
# If we have not created an invoice with this item in it - do so
if leaseid not in pendingleases or pendingleases[leaseid]['invoice'] is None:
print ("""
** INVOICE
""")
inv = stripe.Invoice.create(
customer=customer,
description=description,
auto_advance=False,
collection_method="charge_automatically",
metadata={
'X-MIL-lease-id':leaseid,
'X-MIL-lease-location':'autoplot'
}
#period_start=,
#period_end=json
)
pendingleases[leaseid]['invoice']=inv['id']
debug.append("Created Invoice {0} for lease {1}".format(inv['id'],leaseid))
status="invoiced"
else:
status="already_invoiced"
warnings.append("Using existing Invoice {0} for lease {1}".format(pendingleases[leaseid]['invoice'],leaseid))
# We have a current lease - let's look at it!
print ("INSPECT INVOICE")
print ("***")
inv = stripe.Invoice.retrieve(pendingleases[leaseid]['invoice'])
print (json.dumps(inv,indent=2))
# If unpaied - pay it!
if inv['paid'] == True and inv['status']=='paid':
debug.append("Already paid")
#print "** Aleady Paid!"
status="already_paid_stripe"
elif pay:
#print "** Paying!"
debug.append("Paying")
try:
#stripe.Invoice.pay(inv['id'])
stripe.Invoice.pay(inv)
debug.append("Payment Done")
#print "** Paid!"
status="paid"
except BaseException as e:
errors.append("Payment failed on invoice {0}: {1}".format(inv['id'],e))
print ("** Payment failed!")
status="payment_failed"
#print "DELETEING INVOICE"
#print stripe.Invoice.delete(inv['id'])
#debug.append("Created Invoice {0} for lease {1}".format(inv['id'],leaseid))
return (errors,warnings,debug,status) | dow = (dow+1) %7 #0=Sunday
weeknum = int(now.strftime("%U"))
#print "weeknum",weeknum,"Weekday",weekday[dow],"DOW",dow
weekstart = (now - datetime.timedelta(days=dow)) | random_line_split |
lib.rs | //! Salak is a multi layered configuration loader and zero-boilerplate configuration parser, with many predefined sources.
//!
//! 1. [About](#about)
//! 2. [Quick Start](#quick-start)
//! 3. [Features](#features)
//! * [Predefined Sources](#predefined-sources)
//! * [Key Convention](#key-convention)
//! * [Value Placeholder Parsing](#value-placeholder-parsing)
//! * [Attributes For Derive](#attributes-for-derive)
//! * [Reload Configuration](#reload-configuration)
//! * [Resource Factory](#resource-factory)
//!
//! ## About
//! `salak` is a multi layered configuration loader with many predefined sources. Also it
//! is a zero-boilerplate configuration parser which provides an auto-derive procedure macro
//! to derive [`FromEnvironment`] so that we can parse configuration structs without any additional codes.
//!
//! ## Quick Start
//! A simple example of `salak`:
//!
//! ```
//! use salak::*;
//!
//! #[derive(Debug, FromEnvironment)]
//! #[salak(prefix = "config")]
//! struct Config {
//! #[salak(default = false)]
//! verbose: bool,
//! optional: Option<String>,
//! #[salak(name = "val")]
//! value: i64,
//! }
//! let env = Salak::builder()
//! .set("config.val", "2021")
//! .build()
//! .unwrap();
//! let config = env.get::<Config>().unwrap();
//! assert_eq!(2021, config.value);
//! assert_eq!(None, config.optional);
//! assert_eq!(false, config.verbose);
//! ```
//!
//! ## Features
//!
//! #### Predefined Sources
//! Predefined sources has the following order, [`Salak`] will find by sequence of these orders,
//! if the property with specified key is found at the current source, than return immediately. Otherwise,
//! it will search the next source.
//!
//! 1. Random source provides a group of keys can return random values.
//! * `random.u8`
//! * `random.u16`
//! * `random.u32`
//! * `random.u64`
//! * `random.u128`
//! * `random.usize`
//! * `random.i8`
//! * `random.i16`
//! * `random.i32`
//! * `random.i64`
//! * `random.i128`
//! * `random.isize`
//! 2. Custom arguments source. [`SalakBuilder::set()`] can set a single kv,
//! and [`SalakBuilder::set_args()`] can set a group of kvs.
//! 3. System environment source. Implemented by [`source::system_environment`].
//! 4. Profile specified file source, eg. `app-dev.toml`, supports reloading.
//! 5. No profile file source, eg. `app.toml`, supports reloading.
//! 6. Custom sources, which can register by [`Salak::register()`].
//!
//! #### Key Convention
//! Key is used for search configuration from [`Environment`], normally it is represented by string.
//! Key is a group of SubKey separated by dot(`.`), and SubKey is a name or a name followed by index.
//! 1. SubKey Format (`[a-z][_a-z0-9]+(\[[0-9]+\])*`)
//! * `a`
//! * `a0`
//! * `a_b`
//! * `a[0]`
//! * `a[0][0]`
//! 2. Key Format (`SubKey(\.SubKey)*`)
//! * `a`
//! * `a.b`
//! * `a.val[0]`
//! * `a_b[0]`
//!
//! #### Value Placeholder Parsing
//! 1. Placeholder Format
//! * `${key}` => Get value of `key`.
//! * `${key:default}` => Get value of `key`, if not exists return `default`.
//! 2. Escape Format
//! * `\$\{key\}` => Return `${key}`.
//! * `$`, `\`, `{`, `}` must use escape format.
//!
//! #### Attributes For Derive
//! `salak` supports some attributes for automatically derive [`FromEnvironment`].
//! All attributes have format `#[salak(..)]`, eg. `#[salak(default = "default value")]`.
//! 1. Struct Header Attribute.
//! * `#[salak(prefix = "salak.application")]`, has this attr will auto implement [`PrefixedFromEnvironment`].
//! 2. Struct Field Attribute.
//! * `#[salak(default = "value")]`, this attr can specify default value.
//! * `#[salak(name = "key")]`, this attr can specify property key, default convension is use field name.
//! * `#[salak(desc = "Field Description")]`, this attr can be describe this property.
//!
//! #### Reload Configuration
//! `salak` supports reload configurations. Since in rust mutable
//! and alias can't be used together, here we introduce a wrapper
//! [`wrapper::IORef`] for updating values when reloading.
//!
//! #### Resource Factory
//! [`Resource`] defines a standard way to create instance. [`Factory`] provides functions to initialize resource
//! and cache resource. Please refer to [salak_factory](https://docs.rs/salak_factory) for resource usage.
//! Feature 'app' should be open for this feature.
//!
#![cfg_attr(docsrs, feature(doc_cfg))]
#![warn(
anonymous_parameters,
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
nonstandard_style,
rust_2018_idioms,
single_use_lifetimes,
trivial_casts,
trivial_numeric_casts,
unreachable_pub,
unused_extern_crates,
unused_qualifications,
variant_size_differences
)]
use parking_lot::Mutex;
#[cfg(feature = "derive")]
use crate::derive::KeyDesc;
#[cfg(feature = "derive")]
mod derive;
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
pub use crate::derive::{
AutoDeriveFromEnvironment, DescFromEnvironment, PrefixedFromEnvironment, SalakDescContext,
};
use raw_ioref::IORefT;
/// Auto derive [`FromEnvironment`] for struct.
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
pub use salak_derive::FromEnvironment;
/// Auto derive [`Service`] for struct.
#[cfg(all(feature = "derive", feature = "app"))]
#[cfg_attr(docsrs, doc(cfg(all(feature = "derive", feature = "app"))))]
pub use salak_derive::Service;
use source_raw::PropertyRegistryInternal;
#[cfg(feature = "args")]
#[cfg_attr(docsrs, doc(cfg(feature = "args")))]
mod args;
#[cfg(feature = "args")]
#[cfg_attr(docsrs, doc(cfg(feature = "args")))]
pub use crate::args::AppInfo;
mod err;
mod raw;
use crate::raw::SubKey;
pub use crate::raw::{IsProperty, Property};
mod raw_ioref;
mod raw_vec;
use crate::env::PREFIX;
pub use crate::env::{Salak, SalakBuilder};
mod env;
mod raw_enum;
pub use crate::err::PropertyError;
pub use crate::raw_enum::EnumProperty;
mod source_map;
#[cfg(feature = "rand")]
#[cfg_attr(docsrs, doc(cfg(feature = "rand")))]
mod source_rand;
mod source_raw;
#[cfg(feature = "toml")]
#[cfg_attr(docsrs, doc(cfg(feature = "toml")))]
mod source_toml;
#[cfg(feature = "yaml")]
#[cfg_attr(docsrs, doc(cfg(feature = "yaml")))]
mod source_yaml;
use crate::source::Key;
use crate::source::SubKeys;
#[cfg(feature = "app")]
#[cfg_attr(docsrs, doc(cfg(feature = "app")))]
mod app;
#[cfg(feature = "app")]
#[cfg_attr(docsrs, doc(cfg(feature = "app")))]
pub use crate::app::*;
#[cfg(test)]
#[macro_use(quickcheck)]
extern crate quickcheck_macros;
/// Salak wrapper for configuration parsing.
///
/// Wrapper can determine extra behavior for parsing.
/// Such as check empty of vec or update when reloading.
pub mod wrapper {
pub use crate::raw_ioref::IORef;
pub use crate::raw_vec::NonEmptyVec;
}
/// Salak sources.
///
/// This mod exports all pub sources.
pub mod source {
#[cfg(feature = "args")]
#[cfg_attr(docsrs, doc(cfg(feature = "args")))]
pub(crate) use crate::args::from_args;
pub use crate::raw::Key;
pub use crate::raw::SubKeys;
pub use crate::source_map::system_environment;
pub use crate::source_map::HashMapSource;
}
pub(crate) type Res<T> = Result<T, PropertyError>;
pub(crate) type Void = Res<()>;
/// A property source defines how to load properties.
/// `salak` has some predefined sources, user can
/// provide custom source by implementing this trait.
///
/// Sources provided by `salak`.
///
/// * hashmap source
/// * std::env source
/// * toml source
/// * yaml source
pub trait PropertySource: Send + Sync {
/// [`PropertySource`] name.
fn name(&self) -> &str;
/// Get property by key.
fn get_property(&self, key: &Key<'_>) -> Option<Property<'_>>;
/// Get all subkeys with given key.
///
/// Subkeys are keys without dot('.').
/// This method is unstable, and will be simplified by hidding
/// Key and SubKeys.
fn get_sub_keys<'a>(&'a self, key: &Key<'_>, sub_keys: &mut SubKeys<'a>);
/// Check whether the [`PropertySource`] is empty.
/// Empty source will be ignored when registering to `salak`.
fn is_empty(&self) -> bool;
/// Reload source, if nothing changes, then return none.
#[inline]
fn reload_source(&self) -> Res<Option<Box<dyn PropertySource>>> {
Ok(None)
}
}
/// Environment defines interface for getting values, and reloading
/// configurations.
///
/// The implementor of this trait is [`Salak`].
pub trait Environment {
/// Get value by key.
/// * `key` - Configuration key.
///
/// Require means is if the value `T` is not found,
/// then error will be returned. But if you try to get
/// `Option<T>`, then not found will return `None`.
fn require<T: FromEnvironment>(&self, key: &str) -> Res<T>;
/// Reload configuration. If reloading is completed,
/// all values wrapped by [`wrapper::IORef`] will be updated.
///
/// Currently, this feature is unstable, the returned bool
/// value means reloading is completed without error.
fn reload(&self) -> Res<bool>;
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
#[inline]
/// Get value with predefined key.
///
/// [`PrefixedFromEnvironment`] can be auto derives by
/// [`salak_derive::FromEnvironment`] macro. It provides
/// a standard key for getting value `T`.
fn | <T: PrefixedFromEnvironment>(&self) -> Res<T> {
self.require::<T>(T::prefix())
}
}
/// Context for implementing [`FromEnvironment`].
#[allow(missing_debug_implementations)]
pub struct SalakContext<'a> {
registry: &'a PropertyRegistryInternal<'a>,
iorefs: &'a Mutex<Vec<Box<dyn IORefT + Send>>>,
key: &'a mut Key<'a>,
}
/// Parsing value from environment by [`SalakContext`].
pub trait FromEnvironment: Sized {
/// Generate object from [`SalakContext`].
/// * `val` - Property value can be parsed from.
/// * `env` - Context.
///
/// ```no_run
/// use salak::*;
/// pub struct Config {
/// key: String
/// }
/// impl FromEnvironment for Config {
/// fn from_env(
/// val: Option<Property<'_>>,
/// env: &mut SalakContext<'_>,
/// ) -> Result<Self, PropertyError> {
/// Ok(Self{
/// key: env.require_def("key", None)?,
/// })
/// }
/// }
///
/// ```
fn from_env(val: Option<Property<'_>>, env: &mut SalakContext<'_>) -> Res<Self>;
}
| get | identifier_name |
lib.rs | //! Salak is a multi layered configuration loader and zero-boilerplate configuration parser, with many predefined sources.
//!
//! 1. [About](#about)
//! 2. [Quick Start](#quick-start)
//! 3. [Features](#features)
//! * [Predefined Sources](#predefined-sources)
//! * [Key Convention](#key-convention)
//! * [Value Placeholder Parsing](#value-placeholder-parsing)
//! * [Attributes For Derive](#attributes-for-derive)
//! * [Reload Configuration](#reload-configuration)
//! * [Resource Factory](#resource-factory)
//!
//! ## About
//! `salak` is a multi layered configuration loader with many predefined sources. Also it
//! is a zero-boilerplate configuration parser which provides an auto-derive procedure macro
//! to derive [`FromEnvironment`] so that we can parse configuration structs without any additional codes.
//!
//! ## Quick Start
//! A simple example of `salak`:
//!
//! ```
//! use salak::*;
//!
//! #[derive(Debug, FromEnvironment)]
//! #[salak(prefix = "config")]
//! struct Config {
//! #[salak(default = false)]
//! verbose: bool,
//! optional: Option<String>,
//! #[salak(name = "val")]
//! value: i64,
//! }
//! let env = Salak::builder()
//! .set("config.val", "2021")
//! .build()
//! .unwrap();
//! let config = env.get::<Config>().unwrap();
//! assert_eq!(2021, config.value);
//! assert_eq!(None, config.optional);
//! assert_eq!(false, config.verbose);
//! ```
//!
//! ## Features
//!
//! #### Predefined Sources
//! Predefined sources has the following order, [`Salak`] will find by sequence of these orders,
//! if the property with specified key is found at the current source, than return immediately. Otherwise,
//! it will search the next source.
//!
//! 1. Random source provides a group of keys can return random values.
//! * `random.u8`
//! * `random.u16`
//! * `random.u32`
//! * `random.u64`
//! * `random.u128`
//! * `random.usize`
//! * `random.i8`
//! * `random.i16`
//! * `random.i32`
//! * `random.i64`
//! * `random.i128`
//! * `random.isize`
//! 2. Custom arguments source. [`SalakBuilder::set()`] can set a single kv,
//! and [`SalakBuilder::set_args()`] can set a group of kvs.
//! 3. System environment source. Implemented by [`source::system_environment`].
//! 4. Profile specified file source, eg. `app-dev.toml`, supports reloading.
//! 5. No profile file source, eg. `app.toml`, supports reloading.
//! 6. Custom sources, which can register by [`Salak::register()`].
//!
//! #### Key Convention
//! Key is used for search configuration from [`Environment`], normally it is represented by string.
//! Key is a group of SubKey separated by dot(`.`), and SubKey is a name or a name followed by index.
//! 1. SubKey Format (`[a-z][_a-z0-9]+(\[[0-9]+\])*`)
//! * `a`
//! * `a0`
//! * `a_b`
//! * `a[0]`
//! * `a[0][0]`
//! 2. Key Format (`SubKey(\.SubKey)*`)
//! * `a`
//! * `a.b`
//! * `a.val[0]`
//! * `a_b[0]`
//!
//! #### Value Placeholder Parsing
//! 1. Placeholder Format
//! * `${key}` => Get value of `key`.
//! * `${key:default}` => Get value of `key`, if not exists return `default`.
//! 2. Escape Format
//! * `\$\{key\}` => Return `${key}`.
//! * `$`, `\`, `{`, `}` must use escape format.
//!
//! #### Attributes For Derive
//! `salak` supports some attributes for automatically derive [`FromEnvironment`].
//! All attributes have format `#[salak(..)]`, eg. `#[salak(default = "default value")]`.
//! 1. Struct Header Attribute.
//! * `#[salak(prefix = "salak.application")]`, has this attr will auto implement [`PrefixedFromEnvironment`].
//! 2. Struct Field Attribute.
//! * `#[salak(default = "value")]`, this attr can specify default value.
//! * `#[salak(name = "key")]`, this attr can specify property key, default convension is use field name.
//! * `#[salak(desc = "Field Description")]`, this attr can be describe this property.
//!
//! #### Reload Configuration
//! `salak` supports reload configurations. Since in rust mutable
//! and alias can't be used together, here we introduce a wrapper
//! [`wrapper::IORef`] for updating values when reloading.
//!
//! #### Resource Factory
//! [`Resource`] defines a standard way to create instance. [`Factory`] provides functions to initialize resource
//! and cache resource. Please refer to [salak_factory](https://docs.rs/salak_factory) for resource usage.
//! Feature 'app' should be open for this feature.
//!
#![cfg_attr(docsrs, feature(doc_cfg))]
#![warn(
anonymous_parameters,
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
nonstandard_style,
rust_2018_idioms,
single_use_lifetimes,
trivial_casts,
trivial_numeric_casts,
unreachable_pub,
unused_extern_crates,
unused_qualifications,
variant_size_differences
)]
use parking_lot::Mutex;
#[cfg(feature = "derive")]
use crate::derive::KeyDesc;
#[cfg(feature = "derive")]
mod derive;
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
pub use crate::derive::{
AutoDeriveFromEnvironment, DescFromEnvironment, PrefixedFromEnvironment, SalakDescContext,
};
use raw_ioref::IORefT;
/// Auto derive [`FromEnvironment`] for struct.
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
pub use salak_derive::FromEnvironment;
/// Auto derive [`Service`] for struct.
#[cfg(all(feature = "derive", feature = "app"))]
#[cfg_attr(docsrs, doc(cfg(all(feature = "derive", feature = "app"))))]
pub use salak_derive::Service;
use source_raw::PropertyRegistryInternal;
#[cfg(feature = "args")]
#[cfg_attr(docsrs, doc(cfg(feature = "args")))]
mod args;
#[cfg(feature = "args")]
#[cfg_attr(docsrs, doc(cfg(feature = "args")))]
pub use crate::args::AppInfo;
mod err;
mod raw;
use crate::raw::SubKey;
pub use crate::raw::{IsProperty, Property};
mod raw_ioref;
mod raw_vec;
use crate::env::PREFIX;
pub use crate::env::{Salak, SalakBuilder};
mod env;
mod raw_enum;
pub use crate::err::PropertyError;
pub use crate::raw_enum::EnumProperty;
mod source_map;
#[cfg(feature = "rand")]
#[cfg_attr(docsrs, doc(cfg(feature = "rand")))]
mod source_rand;
mod source_raw;
#[cfg(feature = "toml")]
#[cfg_attr(docsrs, doc(cfg(feature = "toml")))]
mod source_toml;
#[cfg(feature = "yaml")]
#[cfg_attr(docsrs, doc(cfg(feature = "yaml")))]
mod source_yaml;
use crate::source::Key;
use crate::source::SubKeys;
#[cfg(feature = "app")]
#[cfg_attr(docsrs, doc(cfg(feature = "app")))]
mod app;
#[cfg(feature = "app")]
#[cfg_attr(docsrs, doc(cfg(feature = "app")))]
pub use crate::app::*;
#[cfg(test)]
#[macro_use(quickcheck)]
extern crate quickcheck_macros;
/// Salak wrapper for configuration parsing.
///
/// Wrapper can determine extra behavior for parsing.
/// Such as check empty of vec or update when reloading.
pub mod wrapper {
pub use crate::raw_ioref::IORef;
pub use crate::raw_vec::NonEmptyVec;
}
/// Salak sources.
///
/// This mod exports all pub sources.
pub mod source {
#[cfg(feature = "args")]
#[cfg_attr(docsrs, doc(cfg(feature = "args")))]
pub(crate) use crate::args::from_args;
pub use crate::raw::Key;
pub use crate::raw::SubKeys;
pub use crate::source_map::system_environment;
pub use crate::source_map::HashMapSource;
}
pub(crate) type Res<T> = Result<T, PropertyError>;
pub(crate) type Void = Res<()>;
/// A property source defines how to load properties.
/// `salak` has some predefined sources, user can
/// provide custom source by implementing this trait.
///
/// Sources provided by `salak`.
///
/// * hashmap source
/// * std::env source
/// * toml source
/// * yaml source
pub trait PropertySource: Send + Sync {
/// [`PropertySource`] name.
fn name(&self) -> &str;
/// Get property by key.
fn get_property(&self, key: &Key<'_>) -> Option<Property<'_>>;
/// Get all subkeys with given key.
///
/// Subkeys are keys without dot('.').
/// This method is unstable, and will be simplified by hidding
/// Key and SubKeys.
fn get_sub_keys<'a>(&'a self, key: &Key<'_>, sub_keys: &mut SubKeys<'a>);
/// Check whether the [`PropertySource`] is empty.
/// Empty source will be ignored when registering to `salak`.
fn is_empty(&self) -> bool;
/// Reload source, if nothing changes, then return none.
#[inline]
fn reload_source(&self) -> Res<Option<Box<dyn PropertySource>>> {
Ok(None)
}
}
/// Environment defines interface for getting values, and reloading
/// configurations.
///
/// The implementor of this trait is [`Salak`].
pub trait Environment {
/// Get value by key.
/// * `key` - Configuration key.
///
/// Require means is if the value `T` is not found,
/// then error will be returned. But if you try to get
/// `Option<T>`, then not found will return `None`.
fn require<T: FromEnvironment>(&self, key: &str) -> Res<T>;
/// Reload configuration. If reloading is completed,
/// all values wrapped by [`wrapper::IORef`] will be updated.
///
/// Currently, this feature is unstable, the returned bool
/// value means reloading is completed without error.
fn reload(&self) -> Res<bool>;
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
#[inline]
/// Get value with predefined key.
///
/// [`PrefixedFromEnvironment`] can be auto derives by
/// [`salak_derive::FromEnvironment`] macro. It provides
/// a standard key for getting value `T`.
fn get<T: PrefixedFromEnvironment>(&self) -> Res<T> {
self.require::<T>(T::prefix())
}
}
/// Context for implementing [`FromEnvironment`].
#[allow(missing_debug_implementations)]
pub struct SalakContext<'a> {
registry: &'a PropertyRegistryInternal<'a>, | }
/// Parsing value from environment by [`SalakContext`].
pub trait FromEnvironment: Sized {
/// Generate object from [`SalakContext`].
/// * `val` - Property value can be parsed from.
/// * `env` - Context.
///
/// ```no_run
/// use salak::*;
/// pub struct Config {
/// key: String
/// }
/// impl FromEnvironment for Config {
/// fn from_env(
/// val: Option<Property<'_>>,
/// env: &mut SalakContext<'_>,
/// ) -> Result<Self, PropertyError> {
/// Ok(Self{
/// key: env.require_def("key", None)?,
/// })
/// }
/// }
///
/// ```
fn from_env(val: Option<Property<'_>>, env: &mut SalakContext<'_>) -> Res<Self>;
} | iorefs: &'a Mutex<Vec<Box<dyn IORefT + Send>>>,
key: &'a mut Key<'a>, | random_line_split |
lib.rs | //! Salak is a multi layered configuration loader and zero-boilerplate configuration parser, with many predefined sources.
//!
//! 1. [About](#about)
//! 2. [Quick Start](#quick-start)
//! 3. [Features](#features)
//! * [Predefined Sources](#predefined-sources)
//! * [Key Convention](#key-convention)
//! * [Value Placeholder Parsing](#value-placeholder-parsing)
//! * [Attributes For Derive](#attributes-for-derive)
//! * [Reload Configuration](#reload-configuration)
//! * [Resource Factory](#resource-factory)
//!
//! ## About
//! `salak` is a multi layered configuration loader with many predefined sources. Also it
//! is a zero-boilerplate configuration parser which provides an auto-derive procedure macro
//! to derive [`FromEnvironment`] so that we can parse configuration structs without any additional codes.
//!
//! ## Quick Start
//! A simple example of `salak`:
//!
//! ```
//! use salak::*;
//!
//! #[derive(Debug, FromEnvironment)]
//! #[salak(prefix = "config")]
//! struct Config {
//! #[salak(default = false)]
//! verbose: bool,
//! optional: Option<String>,
//! #[salak(name = "val")]
//! value: i64,
//! }
//! let env = Salak::builder()
//! .set("config.val", "2021")
//! .build()
//! .unwrap();
//! let config = env.get::<Config>().unwrap();
//! assert_eq!(2021, config.value);
//! assert_eq!(None, config.optional);
//! assert_eq!(false, config.verbose);
//! ```
//!
//! ## Features
//!
//! #### Predefined Sources
//! Predefined sources has the following order, [`Salak`] will find by sequence of these orders,
//! if the property with specified key is found at the current source, than return immediately. Otherwise,
//! it will search the next source.
//!
//! 1. Random source provides a group of keys can return random values.
//! * `random.u8`
//! * `random.u16`
//! * `random.u32`
//! * `random.u64`
//! * `random.u128`
//! * `random.usize`
//! * `random.i8`
//! * `random.i16`
//! * `random.i32`
//! * `random.i64`
//! * `random.i128`
//! * `random.isize`
//! 2. Custom arguments source. [`SalakBuilder::set()`] can set a single kv,
//! and [`SalakBuilder::set_args()`] can set a group of kvs.
//! 3. System environment source. Implemented by [`source::system_environment`].
//! 4. Profile specified file source, eg. `app-dev.toml`, supports reloading.
//! 5. No profile file source, eg. `app.toml`, supports reloading.
//! 6. Custom sources, which can register by [`Salak::register()`].
//!
//! #### Key Convention
//! Key is used for search configuration from [`Environment`], normally it is represented by string.
//! Key is a group of SubKey separated by dot(`.`), and SubKey is a name or a name followed by index.
//! 1. SubKey Format (`[a-z][_a-z0-9]+(\[[0-9]+\])*`)
//! * `a`
//! * `a0`
//! * `a_b`
//! * `a[0]`
//! * `a[0][0]`
//! 2. Key Format (`SubKey(\.SubKey)*`)
//! * `a`
//! * `a.b`
//! * `a.val[0]`
//! * `a_b[0]`
//!
//! #### Value Placeholder Parsing
//! 1. Placeholder Format
//! * `${key}` => Get value of `key`.
//! * `${key:default}` => Get value of `key`, if not exists return `default`.
//! 2. Escape Format
//! * `\$\{key\}` => Return `${key}`.
//! * `$`, `\`, `{`, `}` must use escape format.
//!
//! #### Attributes For Derive
//! `salak` supports some attributes for automatically derive [`FromEnvironment`].
//! All attributes have format `#[salak(..)]`, eg. `#[salak(default = "default value")]`.
//! 1. Struct Header Attribute.
//! * `#[salak(prefix = "salak.application")]`, has this attr will auto implement [`PrefixedFromEnvironment`].
//! 2. Struct Field Attribute.
//! * `#[salak(default = "value")]`, this attr can specify default value.
//! * `#[salak(name = "key")]`, this attr can specify property key, default convension is use field name.
//! * `#[salak(desc = "Field Description")]`, this attr can be describe this property.
//!
//! #### Reload Configuration
//! `salak` supports reload configurations. Since in rust mutable
//! and alias can't be used together, here we introduce a wrapper
//! [`wrapper::IORef`] for updating values when reloading.
//!
//! #### Resource Factory
//! [`Resource`] defines a standard way to create instance. [`Factory`] provides functions to initialize resource
//! and cache resource. Please refer to [salak_factory](https://docs.rs/salak_factory) for resource usage.
//! Feature 'app' should be open for this feature.
//!
#![cfg_attr(docsrs, feature(doc_cfg))]
#![warn(
anonymous_parameters,
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
nonstandard_style,
rust_2018_idioms,
single_use_lifetimes,
trivial_casts,
trivial_numeric_casts,
unreachable_pub,
unused_extern_crates,
unused_qualifications,
variant_size_differences
)]
use parking_lot::Mutex;
#[cfg(feature = "derive")]
use crate::derive::KeyDesc;
#[cfg(feature = "derive")]
mod derive;
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
pub use crate::derive::{
AutoDeriveFromEnvironment, DescFromEnvironment, PrefixedFromEnvironment, SalakDescContext,
};
use raw_ioref::IORefT;
/// Auto derive [`FromEnvironment`] for struct.
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
pub use salak_derive::FromEnvironment;
/// Auto derive [`Service`] for struct.
#[cfg(all(feature = "derive", feature = "app"))]
#[cfg_attr(docsrs, doc(cfg(all(feature = "derive", feature = "app"))))]
pub use salak_derive::Service;
use source_raw::PropertyRegistryInternal;
#[cfg(feature = "args")]
#[cfg_attr(docsrs, doc(cfg(feature = "args")))]
mod args;
#[cfg(feature = "args")]
#[cfg_attr(docsrs, doc(cfg(feature = "args")))]
pub use crate::args::AppInfo;
mod err;
mod raw;
use crate::raw::SubKey;
pub use crate::raw::{IsProperty, Property};
mod raw_ioref;
mod raw_vec;
use crate::env::PREFIX;
pub use crate::env::{Salak, SalakBuilder};
mod env;
mod raw_enum;
pub use crate::err::PropertyError;
pub use crate::raw_enum::EnumProperty;
mod source_map;
#[cfg(feature = "rand")]
#[cfg_attr(docsrs, doc(cfg(feature = "rand")))]
mod source_rand;
mod source_raw;
#[cfg(feature = "toml")]
#[cfg_attr(docsrs, doc(cfg(feature = "toml")))]
mod source_toml;
#[cfg(feature = "yaml")]
#[cfg_attr(docsrs, doc(cfg(feature = "yaml")))]
mod source_yaml;
use crate::source::Key;
use crate::source::SubKeys;
#[cfg(feature = "app")]
#[cfg_attr(docsrs, doc(cfg(feature = "app")))]
mod app;
#[cfg(feature = "app")]
#[cfg_attr(docsrs, doc(cfg(feature = "app")))]
pub use crate::app::*;
#[cfg(test)]
#[macro_use(quickcheck)]
extern crate quickcheck_macros;
/// Salak wrapper for configuration parsing.
///
/// Wrapper can determine extra behavior for parsing.
/// Such as check empty of vec or update when reloading.
pub mod wrapper {
pub use crate::raw_ioref::IORef;
pub use crate::raw_vec::NonEmptyVec;
}
/// Salak sources.
///
/// This mod exports all pub sources.
pub mod source {
#[cfg(feature = "args")]
#[cfg_attr(docsrs, doc(cfg(feature = "args")))]
pub(crate) use crate::args::from_args;
pub use crate::raw::Key;
pub use crate::raw::SubKeys;
pub use crate::source_map::system_environment;
pub use crate::source_map::HashMapSource;
}
pub(crate) type Res<T> = Result<T, PropertyError>;
pub(crate) type Void = Res<()>;
/// A property source defines how to load properties.
/// `salak` has some predefined sources, user can
/// provide custom source by implementing this trait.
///
/// Sources provided by `salak`.
///
/// * hashmap source
/// * std::env source
/// * toml source
/// * yaml source
pub trait PropertySource: Send + Sync {
/// [`PropertySource`] name.
fn name(&self) -> &str;
/// Get property by key.
fn get_property(&self, key: &Key<'_>) -> Option<Property<'_>>;
/// Get all subkeys with given key.
///
/// Subkeys are keys without dot('.').
/// This method is unstable, and will be simplified by hidding
/// Key and SubKeys.
fn get_sub_keys<'a>(&'a self, key: &Key<'_>, sub_keys: &mut SubKeys<'a>);
/// Check whether the [`PropertySource`] is empty.
/// Empty source will be ignored when registering to `salak`.
fn is_empty(&self) -> bool;
/// Reload source, if nothing changes, then return none.
#[inline]
fn reload_source(&self) -> Res<Option<Box<dyn PropertySource>>> {
Ok(None)
}
}
/// Environment defines interface for getting values, and reloading
/// configurations.
///
/// The implementor of this trait is [`Salak`].
pub trait Environment {
/// Get value by key.
/// * `key` - Configuration key.
///
/// Require means is if the value `T` is not found,
/// then error will be returned. But if you try to get
/// `Option<T>`, then not found will return `None`.
fn require<T: FromEnvironment>(&self, key: &str) -> Res<T>;
/// Reload configuration. If reloading is completed,
/// all values wrapped by [`wrapper::IORef`] will be updated.
///
/// Currently, this feature is unstable, the returned bool
/// value means reloading is completed without error.
fn reload(&self) -> Res<bool>;
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
#[inline]
/// Get value with predefined key.
///
/// [`PrefixedFromEnvironment`] can be auto derives by
/// [`salak_derive::FromEnvironment`] macro. It provides
/// a standard key for getting value `T`.
fn get<T: PrefixedFromEnvironment>(&self) -> Res<T> |
}
/// Context for implementing [`FromEnvironment`].
#[allow(missing_debug_implementations)]
pub struct SalakContext<'a> {
registry: &'a PropertyRegistryInternal<'a>,
iorefs: &'a Mutex<Vec<Box<dyn IORefT + Send>>>,
key: &'a mut Key<'a>,
}
/// Parsing value from environment by [`SalakContext`].
pub trait FromEnvironment: Sized {
/// Generate object from [`SalakContext`].
/// * `val` - Property value can be parsed from.
/// * `env` - Context.
///
/// ```no_run
/// use salak::*;
/// pub struct Config {
/// key: String
/// }
/// impl FromEnvironment for Config {
/// fn from_env(
/// val: Option<Property<'_>>,
/// env: &mut SalakContext<'_>,
/// ) -> Result<Self, PropertyError> {
/// Ok(Self{
/// key: env.require_def("key", None)?,
/// })
/// }
/// }
///
/// ```
fn from_env(val: Option<Property<'_>>, env: &mut SalakContext<'_>) -> Res<Self>;
}
| {
self.require::<T>(T::prefix())
} | identifier_body |
core.py | import json
import os
import random
import shutil
import subprocess
import tempfile
import time
from dataclasses import asdict, fields
from distutils.version import StrictVersion
from typing import IO, Any, Dict, List
import jinja2
from jmeslog import model
from jmeslog.constants import DEFAULT_TEMPLATE, VALID_CHARS
from jmeslog.errors import NoChangesFoundError, ValidationError
class EditorRetriever:
def prompt_entry_values(self, entry: model.JMESLogEntry) -> None:
with tempfile.NamedTemporaryFile('w') as f:
self._write_template_to_tempfile(f, entry)
self._open_tempfile_in_editor(f.name)
contents = self._read_tempfile(f.name)
return self._parse_filled_in_contents(contents, entry)
def _open_tempfile_in_editor(self, filename: str) -> None:
env = os.environ
editor = env.get('VISUAL', env.get('EDITOR', 'vim'))
subprocess.run([editor, filename], check=True)
def _write_template_to_tempfile(
self, f: IO[str], entry: model.JMESLogEntry
) -> None:
contents = DEFAULT_TEMPLATE.format(
type=entry.type,
category=entry.category,
description=entry.description,
)
f.write(contents)
f.flush()
def _read_tempfile(self, filename: str) -> str:
with open(filename) as f:
filled_in_contents = f.read()
return filled_in_contents
def _parse_filled_in_contents(
self, contents: str, entry: model.JMESLogEntry
) -> None:
parsed_entry = EntryFileParser().parse_contents(contents)
self._update_values_from_new_entry(entry, parsed_entry)
def _update_values_from_new_entry(
self, entry: model.JMESLogEntry, new_entry: model.JMESLogEntry
) -> None:
for key, value in asdict(new_entry).items():
if value:
setattr(entry, key, value)
class EntryFileParser:
def parse_contents(self, contents: str) -> model.JMESLogEntry:
entry = model.JMESLogEntry.empty()
if not contents.strip():
return entry
field_names = [f.name for f in fields(entry)]
line_starts = tuple([f'{name}:' for name in field_names])
for line in contents.splitlines():
line = line.lstrip()
if line.startswith('#') or not line:
continue
if line.startswith(line_starts):
field_name, remaining = line.split(':', 1)
setattr(entry, field_name, remaining.strip())
return entry
class EntryGenerator:
def __init__(self, entry: model.JMESLogEntry, retriever: EditorRetriever):
self._entry = entry
self._retriever = retriever
def complete_entry(self) -> None:
if not self._entry.is_completed():
self._retriever.prompt_entry_values(self._entry)
@property
def change_entry(self) -> model.JMESLogEntry:
return self._entry
class EntryFileWriter:
def write_next_release_entry(
self, entry: model.JMESLogEntry, change_dir: str
) -> str:
self._create_next_release_dir(change_dir)
abs_filename = self._generate_random_file(entry, change_dir)
with open(abs_filename, 'w') as f:
f.write(entry.to_json())
f.write('\n')
return abs_filename
def _create_next_release_dir(self, change_dir: str) -> None:
next_release = os.path.join(change_dir, 'next-release')
if not os.path.isdir(next_release):
os.mkdir(next_release)
def _generate_random_file(
self, entry: model.JMESLogEntry, change_dir: str
) -> str:
next_release = os.path.join(change_dir, 'next-release')
# Need to generate a unique filename for this change.
short_summary = ''.join(
ch for ch in entry.category if ch in VALID_CHARS
)
filename = f'{entry.type}-{short_summary}'
possible_filename = self._random_filename(next_release, filename)
while os.path.isfile(possible_filename):
possible_filename = self._random_filename(next_release, filename)
return possible_filename
def _random_filename(self, next_release: str, filename: str) -> str:
return os.path.join(
next_release,
'%s-%s-%s.json'
% (time.monotonic_ns(), filename, str(random.randint(1, 100000))),
)
class EntryRecorder:
def __init__(
self,
entry_gen: EntryGenerator,
schema: model.EntrySchema,
file_writer: EntryFileWriter,
output_dir: str = '.changes',
):
self._entry_gen = entry_gen
self._schema = schema
self._file_writer = file_writer
self._output_dir = output_dir
def write_change_file_entry(self) -> str:
self._entry_gen.complete_entry()
entry = self._entry_gen.change_entry
validate_change_entry(entry, self._schema)
filename = self._file_writer.write_next_release_entry(
entry, change_dir=self._output_dir
)
return filename
def validate_change_entry(
entry: model.JMESLogEntry, schema: model.EntrySchema
) -> None:
entry_dict = asdict(entry)
schema_dict = asdict(schema)
errors = []
for schema_field in fields(schema):
value = entry_dict[schema_field.name]
allowed_values = schema_dict[schema_field.name]
if allowed_values and value not in allowed_values:
errors.append(
f'The "{schema_field.name}" value must be one of: '
f'{", ".join(allowed_values)}, received: "{value}"'
)
for key, value in entry_dict.items():
if not value:
errors.append(f'The "{key}" value cannot be empty.')
if errors:
raise ValidationError(errors)
def consolidate_next_release(
next_version: str, change_dir: str, changes: model.JMESLogEntryCollection
) -> str:
# Creates a new x.y.x.json file in .changes/ with the changes in
# .changes/next-release.
# It'll then remove the .changes/next-release directory.
release_file = os.path.join(change_dir, f'{next_version}.json')
with open(release_file, 'w') as f:
f.write(json.dumps(changes.to_dict(), indent=2))
f.write('\n')
next_release_dir = os.path.join(change_dir, 'next-release')
shutil.rmtree(next_release_dir)
return release_file
def find_last_released_version(change_dir: str) -> str:
results = sorted_versioned_releases(change_dir)
if results:
return results[-1]
return '0.0.0'
def sorted_versioned_releases(change_dir: str) -> List[str]:
# Strip off the '.json' suffix.
files = [f[:-5] for f in os.listdir(change_dir) if f.endswith('.json')]
return sorted(files, key=lambda x: StrictVersion(x))
def determine_next_version(
last_released_version: str, version_bump_type: model.VersionBump
) -> str:
parts = last_released_version.split('.')
if version_bump_type == model.VersionBump.PATCH_VERSION:
parts[2] = str(int(parts[2]) + 1)
elif version_bump_type == model.VersionBump.MINOR_VERSION:
parts[1] = str(int(parts[1]) + 1)
parts[2] = '0'
elif version_bump_type == model.VersionBump.MAJOR_VERSION:
parts[0] = str(int(parts[0]) + 1)
parts[1] = '0'
parts[2] = '0'
return '.'.join(parts)
def load_next_changes(change_dir: str) -> model.JMESLogEntryCollection:
next_release = os.path.join(change_dir, 'next-release')
if not os.path.isdir(next_release):
raise NoChangesFoundError()
changes = []
for change in sorted(os.listdir(next_release)):
entry = parse_entry(os.path.join(next_release, change))
changes.append(entry)
return model.JMESLogEntryCollection(changes=changes)
def parse_entry(filename: str) -> model.JMESLogEntry:
with open(filename) as f:
data = json.load(f)
return model.JMESLogEntry(**data)
def create_entry_recorder(
entry: model.JMESLogEntry, change_dir: str
) -> EntryRecorder:
recorder = EntryRecorder(
entry_gen=EntryGenerator(
entry=entry,
retriever=EditorRetriever(),
),
schema=model.EntrySchema(),
file_writer=EntryFileWriter(),
output_dir=change_dir,
)
return recorder
def render_changes(
changes: Dict[str, model.JMESLogEntryCollection],
out: IO[str],
template_contents: str,
) -> None:
context = {
'releases': reversed(list(changes.items())),
}
template = jinja2.Template(template_contents)
result = template.render(**context)
out.write(result)
def load_all_changes(
change_dir: str,
) -> Dict[str, model.JMESLogEntryCollection]:
releases = {}
for version_number in sorted_versioned_releases(change_dir):
filename = os.path.join(change_dir, f'{version_number}.json')
with open(filename) as f:
data = json.load(f)
releases[version_number] = model.JMESLogEntryCollection.from_dict(
data
)
return releases
def render_single_release_changes(
change_collection: model.JMESLogEntryCollection, out: IO[str]
) -> None:
for change in change_collection.changes:
description = '\n '.join(change.description.splitlines())
out.write(f'* {change.type}:{change.category}:{description}\n')
out.write('\n\n')
class ChangeQuery:
def __init__(self, change_dir: str) -> None:
self._change_dir = change_dir
def run_query(self, query_for: str) -> Any:
try:
handler = getattr(self, f'query_{query_for.replace("-", "_")}')
except AttributeError:
raise RuntimeError(f"Unknown query type: {query_for}")
return handler()
def | (self) -> str:
return find_last_released_version(self._change_dir)
def query_next_version(self) -> str:
changes = load_next_changes(self._change_dir)
last_released_version = find_last_released_version(self._change_dir)
next_version = determine_next_version(
last_released_version, changes.version_bump_type
)
return next_version
def query_next_release_type(self) -> str:
changes = load_next_changes(self._change_dir)
return changes.version_bump_type.value
| query_last_release_version | identifier_name |
core.py | import json
import os
import random
import shutil
import subprocess
import tempfile
import time
from dataclasses import asdict, fields
from distutils.version import StrictVersion
from typing import IO, Any, Dict, List
import jinja2
from jmeslog import model
from jmeslog.constants import DEFAULT_TEMPLATE, VALID_CHARS
from jmeslog.errors import NoChangesFoundError, ValidationError
class EditorRetriever:
def prompt_entry_values(self, entry: model.JMESLogEntry) -> None:
with tempfile.NamedTemporaryFile('w') as f:
self._write_template_to_tempfile(f, entry)
self._open_tempfile_in_editor(f.name)
contents = self._read_tempfile(f.name)
return self._parse_filled_in_contents(contents, entry)
def _open_tempfile_in_editor(self, filename: str) -> None:
env = os.environ
editor = env.get('VISUAL', env.get('EDITOR', 'vim'))
subprocess.run([editor, filename], check=True)
def _write_template_to_tempfile(
self, f: IO[str], entry: model.JMESLogEntry
) -> None:
contents = DEFAULT_TEMPLATE.format(
type=entry.type,
category=entry.category,
description=entry.description,
)
f.write(contents)
f.flush()
def _read_tempfile(self, filename: str) -> str:
with open(filename) as f:
filled_in_contents = f.read()
return filled_in_contents
def _parse_filled_in_contents(
self, contents: str, entry: model.JMESLogEntry
) -> None:
parsed_entry = EntryFileParser().parse_contents(contents)
self._update_values_from_new_entry(entry, parsed_entry)
def _update_values_from_new_entry(
self, entry: model.JMESLogEntry, new_entry: model.JMESLogEntry
) -> None:
for key, value in asdict(new_entry).items():
if value:
setattr(entry, key, value)
class EntryFileParser:
def parse_contents(self, contents: str) -> model.JMESLogEntry:
entry = model.JMESLogEntry.empty()
if not contents.strip():
return entry
field_names = [f.name for f in fields(entry)]
line_starts = tuple([f'{name}:' for name in field_names])
for line in contents.splitlines():
line = line.lstrip()
if line.startswith('#') or not line:
continue
if line.startswith(line_starts):
field_name, remaining = line.split(':', 1)
setattr(entry, field_name, remaining.strip())
return entry
class EntryGenerator:
def __init__(self, entry: model.JMESLogEntry, retriever: EditorRetriever):
self._entry = entry
self._retriever = retriever
def complete_entry(self) -> None:
if not self._entry.is_completed():
self._retriever.prompt_entry_values(self._entry)
@property
def change_entry(self) -> model.JMESLogEntry:
return self._entry
class EntryFileWriter:
def write_next_release_entry(
self, entry: model.JMESLogEntry, change_dir: str
) -> str:
self._create_next_release_dir(change_dir)
abs_filename = self._generate_random_file(entry, change_dir)
with open(abs_filename, 'w') as f:
f.write(entry.to_json())
f.write('\n')
return abs_filename
def _create_next_release_dir(self, change_dir: str) -> None:
next_release = os.path.join(change_dir, 'next-release')
if not os.path.isdir(next_release):
os.mkdir(next_release)
def _generate_random_file(
self, entry: model.JMESLogEntry, change_dir: str
) -> str:
next_release = os.path.join(change_dir, 'next-release')
# Need to generate a unique filename for this change.
short_summary = ''.join(
ch for ch in entry.category if ch in VALID_CHARS
)
filename = f'{entry.type}-{short_summary}'
possible_filename = self._random_filename(next_release, filename)
while os.path.isfile(possible_filename):
possible_filename = self._random_filename(next_release, filename)
return possible_filename
def _random_filename(self, next_release: str, filename: str) -> str:
return os.path.join(
next_release,
'%s-%s-%s.json'
% (time.monotonic_ns(), filename, str(random.randint(1, 100000))),
)
class EntryRecorder:
def __init__(
self,
entry_gen: EntryGenerator,
schema: model.EntrySchema,
file_writer: EntryFileWriter,
output_dir: str = '.changes',
):
self._entry_gen = entry_gen
self._schema = schema
self._file_writer = file_writer
self._output_dir = output_dir
def write_change_file_entry(self) -> str:
self._entry_gen.complete_entry()
entry = self._entry_gen.change_entry
validate_change_entry(entry, self._schema)
filename = self._file_writer.write_next_release_entry(
entry, change_dir=self._output_dir
)
return filename
def validate_change_entry(
entry: model.JMESLogEntry, schema: model.EntrySchema
) -> None:
entry_dict = asdict(entry)
schema_dict = asdict(schema)
errors = []
for schema_field in fields(schema):
value = entry_dict[schema_field.name]
allowed_values = schema_dict[schema_field.name]
if allowed_values and value not in allowed_values:
errors.append(
f'The "{schema_field.name}" value must be one of: '
f'{", ".join(allowed_values)}, received: "{value}"'
)
for key, value in entry_dict.items():
if not value:
errors.append(f'The "{key}" value cannot be empty.')
if errors:
raise ValidationError(errors)
def consolidate_next_release(
next_version: str, change_dir: str, changes: model.JMESLogEntryCollection
) -> str:
# Creates a new x.y.x.json file in .changes/ with the changes in
# .changes/next-release.
# It'll then remove the .changes/next-release directory.
release_file = os.path.join(change_dir, f'{next_version}.json')
with open(release_file, 'w') as f:
f.write(json.dumps(changes.to_dict(), indent=2))
f.write('\n')
next_release_dir = os.path.join(change_dir, 'next-release')
shutil.rmtree(next_release_dir)
return release_file
def find_last_released_version(change_dir: str) -> str:
results = sorted_versioned_releases(change_dir)
if results:
return results[-1]
return '0.0.0'
def sorted_versioned_releases(change_dir: str) -> List[str]:
# Strip off the '.json' suffix.
files = [f[:-5] for f in os.listdir(change_dir) if f.endswith('.json')]
return sorted(files, key=lambda x: StrictVersion(x))
def determine_next_version(
last_released_version: str, version_bump_type: model.VersionBump
) -> str:
parts = last_released_version.split('.')
if version_bump_type == model.VersionBump.PATCH_VERSION:
parts[2] = str(int(parts[2]) + 1)
elif version_bump_type == model.VersionBump.MINOR_VERSION:
parts[1] = str(int(parts[1]) + 1)
parts[2] = '0'
elif version_bump_type == model.VersionBump.MAJOR_VERSION:
parts[0] = str(int(parts[0]) + 1)
parts[1] = '0'
parts[2] = '0'
return '.'.join(parts)
def load_next_changes(change_dir: str) -> model.JMESLogEntryCollection:
next_release = os.path.join(change_dir, 'next-release')
if not os.path.isdir(next_release):
raise NoChangesFoundError()
changes = []
for change in sorted(os.listdir(next_release)):
entry = parse_entry(os.path.join(next_release, change))
changes.append(entry)
return model.JMESLogEntryCollection(changes=changes)
def parse_entry(filename: str) -> model.JMESLogEntry:
with open(filename) as f:
data = json.load(f)
return model.JMESLogEntry(**data)
def create_entry_recorder(
entry: model.JMESLogEntry, change_dir: str
) -> EntryRecorder:
recorder = EntryRecorder(
entry_gen=EntryGenerator(
entry=entry,
retriever=EditorRetriever(),
),
schema=model.EntrySchema(),
file_writer=EntryFileWriter(),
output_dir=change_dir,
)
return recorder
def render_changes(
changes: Dict[str, model.JMESLogEntryCollection],
out: IO[str],
template_contents: str,
) -> None:
context = {
'releases': reversed(list(changes.items())),
}
template = jinja2.Template(template_contents)
result = template.render(**context)
out.write(result)
def load_all_changes(
change_dir: str,
) -> Dict[str, model.JMESLogEntryCollection]:
releases = {}
for version_number in sorted_versioned_releases(change_dir):
filename = os.path.join(change_dir, f'{version_number}.json')
with open(filename) as f:
data = json.load(f)
releases[version_number] = model.JMESLogEntryCollection.from_dict(
data
)
return releases
def render_single_release_changes(
change_collection: model.JMESLogEntryCollection, out: IO[str]
) -> None:
for change in change_collection.changes:
description = '\n '.join(change.description.splitlines())
out.write(f'* {change.type}:{change.category}:{description}\n')
out.write('\n\n')
class ChangeQuery:
def __init__(self, change_dir: str) -> None:
self._change_dir = change_dir
def run_query(self, query_for: str) -> Any:
try:
handler = getattr(self, f'query_{query_for.replace("-", "_")}')
except AttributeError:
raise RuntimeError(f"Unknown query type: {query_for}")
return handler()
def query_last_release_version(self) -> str:
|
def query_next_version(self) -> str:
changes = load_next_changes(self._change_dir)
last_released_version = find_last_released_version(self._change_dir)
next_version = determine_next_version(
last_released_version, changes.version_bump_type
)
return next_version
def query_next_release_type(self) -> str:
changes = load_next_changes(self._change_dir)
return changes.version_bump_type.value
| return find_last_released_version(self._change_dir) | identifier_body |
core.py | import json
import os
import random
import shutil
import subprocess
import tempfile
import time
from dataclasses import asdict, fields
from distutils.version import StrictVersion
from typing import IO, Any, Dict, List
import jinja2
from jmeslog import model
from jmeslog.constants import DEFAULT_TEMPLATE, VALID_CHARS
from jmeslog.errors import NoChangesFoundError, ValidationError
class EditorRetriever:
def prompt_entry_values(self, entry: model.JMESLogEntry) -> None:
with tempfile.NamedTemporaryFile('w') as f:
self._write_template_to_tempfile(f, entry)
self._open_tempfile_in_editor(f.name)
contents = self._read_tempfile(f.name)
return self._parse_filled_in_contents(contents, entry)
def _open_tempfile_in_editor(self, filename: str) -> None:
env = os.environ
editor = env.get('VISUAL', env.get('EDITOR', 'vim'))
subprocess.run([editor, filename], check=True)
def _write_template_to_tempfile(
self, f: IO[str], entry: model.JMESLogEntry
) -> None:
contents = DEFAULT_TEMPLATE.format(
type=entry.type,
category=entry.category,
description=entry.description,
)
f.write(contents)
f.flush()
def _read_tempfile(self, filename: str) -> str:
with open(filename) as f:
filled_in_contents = f.read()
return filled_in_contents
def _parse_filled_in_contents(
self, contents: str, entry: model.JMESLogEntry
) -> None:
parsed_entry = EntryFileParser().parse_contents(contents)
self._update_values_from_new_entry(entry, parsed_entry)
def _update_values_from_new_entry(
self, entry: model.JMESLogEntry, new_entry: model.JMESLogEntry
) -> None:
for key, value in asdict(new_entry).items():
if value:
setattr(entry, key, value)
class EntryFileParser:
def parse_contents(self, contents: str) -> model.JMESLogEntry:
entry = model.JMESLogEntry.empty()
if not contents.strip():
return entry
field_names = [f.name for f in fields(entry)]
line_starts = tuple([f'{name}:' for name in field_names])
for line in contents.splitlines():
line = line.lstrip()
if line.startswith('#') or not line:
continue
if line.startswith(line_starts):
field_name, remaining = line.split(':', 1)
setattr(entry, field_name, remaining.strip())
return entry
class EntryGenerator:
def __init__(self, entry: model.JMESLogEntry, retriever: EditorRetriever):
self._entry = entry
self._retriever = retriever
def complete_entry(self) -> None:
if not self._entry.is_completed():
self._retriever.prompt_entry_values(self._entry)
@property
def change_entry(self) -> model.JMESLogEntry:
return self._entry
class EntryFileWriter:
def write_next_release_entry(
self, entry: model.JMESLogEntry, change_dir: str
) -> str:
self._create_next_release_dir(change_dir)
abs_filename = self._generate_random_file(entry, change_dir)
with open(abs_filename, 'w') as f:
f.write(entry.to_json())
f.write('\n')
return abs_filename
def _create_next_release_dir(self, change_dir: str) -> None:
next_release = os.path.join(change_dir, 'next-release')
if not os.path.isdir(next_release):
os.mkdir(next_release)
def _generate_random_file(
self, entry: model.JMESLogEntry, change_dir: str
) -> str:
next_release = os.path.join(change_dir, 'next-release')
# Need to generate a unique filename for this change.
short_summary = ''.join(
ch for ch in entry.category if ch in VALID_CHARS
)
filename = f'{entry.type}-{short_summary}'
possible_filename = self._random_filename(next_release, filename)
while os.path.isfile(possible_filename):
possible_filename = self._random_filename(next_release, filename)
return possible_filename
def _random_filename(self, next_release: str, filename: str) -> str:
return os.path.join(
next_release,
'%s-%s-%s.json'
% (time.monotonic_ns(), filename, str(random.randint(1, 100000))),
)
class EntryRecorder:
def __init__(
self,
entry_gen: EntryGenerator,
schema: model.EntrySchema,
file_writer: EntryFileWriter,
output_dir: str = '.changes',
):
self._entry_gen = entry_gen
self._schema = schema
self._file_writer = file_writer
self._output_dir = output_dir
def write_change_file_entry(self) -> str:
self._entry_gen.complete_entry()
entry = self._entry_gen.change_entry
validate_change_entry(entry, self._schema)
filename = self._file_writer.write_next_release_entry(
entry, change_dir=self._output_dir
)
return filename
def validate_change_entry(
entry: model.JMESLogEntry, schema: model.EntrySchema
) -> None:
entry_dict = asdict(entry)
schema_dict = asdict(schema)
errors = []
for schema_field in fields(schema):
value = entry_dict[schema_field.name]
allowed_values = schema_dict[schema_field.name]
if allowed_values and value not in allowed_values:
errors.append( | f'The "{schema_field.name}" value must be one of: '
f'{", ".join(allowed_values)}, received: "{value}"'
)
for key, value in entry_dict.items():
if not value:
errors.append(f'The "{key}" value cannot be empty.')
if errors:
raise ValidationError(errors)
def consolidate_next_release(
next_version: str, change_dir: str, changes: model.JMESLogEntryCollection
) -> str:
# Creates a new x.y.x.json file in .changes/ with the changes in
# .changes/next-release.
# It'll then remove the .changes/next-release directory.
release_file = os.path.join(change_dir, f'{next_version}.json')
with open(release_file, 'w') as f:
f.write(json.dumps(changes.to_dict(), indent=2))
f.write('\n')
next_release_dir = os.path.join(change_dir, 'next-release')
shutil.rmtree(next_release_dir)
return release_file
def find_last_released_version(change_dir: str) -> str:
results = sorted_versioned_releases(change_dir)
if results:
return results[-1]
return '0.0.0'
def sorted_versioned_releases(change_dir: str) -> List[str]:
# Strip off the '.json' suffix.
files = [f[:-5] for f in os.listdir(change_dir) if f.endswith('.json')]
return sorted(files, key=lambda x: StrictVersion(x))
def determine_next_version(
last_released_version: str, version_bump_type: model.VersionBump
) -> str:
parts = last_released_version.split('.')
if version_bump_type == model.VersionBump.PATCH_VERSION:
parts[2] = str(int(parts[2]) + 1)
elif version_bump_type == model.VersionBump.MINOR_VERSION:
parts[1] = str(int(parts[1]) + 1)
parts[2] = '0'
elif version_bump_type == model.VersionBump.MAJOR_VERSION:
parts[0] = str(int(parts[0]) + 1)
parts[1] = '0'
parts[2] = '0'
return '.'.join(parts)
def load_next_changes(change_dir: str) -> model.JMESLogEntryCollection:
next_release = os.path.join(change_dir, 'next-release')
if not os.path.isdir(next_release):
raise NoChangesFoundError()
changes = []
for change in sorted(os.listdir(next_release)):
entry = parse_entry(os.path.join(next_release, change))
changes.append(entry)
return model.JMESLogEntryCollection(changes=changes)
def parse_entry(filename: str) -> model.JMESLogEntry:
with open(filename) as f:
data = json.load(f)
return model.JMESLogEntry(**data)
def create_entry_recorder(
entry: model.JMESLogEntry, change_dir: str
) -> EntryRecorder:
recorder = EntryRecorder(
entry_gen=EntryGenerator(
entry=entry,
retriever=EditorRetriever(),
),
schema=model.EntrySchema(),
file_writer=EntryFileWriter(),
output_dir=change_dir,
)
return recorder
def render_changes(
changes: Dict[str, model.JMESLogEntryCollection],
out: IO[str],
template_contents: str,
) -> None:
context = {
'releases': reversed(list(changes.items())),
}
template = jinja2.Template(template_contents)
result = template.render(**context)
out.write(result)
def load_all_changes(
change_dir: str,
) -> Dict[str, model.JMESLogEntryCollection]:
releases = {}
for version_number in sorted_versioned_releases(change_dir):
filename = os.path.join(change_dir, f'{version_number}.json')
with open(filename) as f:
data = json.load(f)
releases[version_number] = model.JMESLogEntryCollection.from_dict(
data
)
return releases
def render_single_release_changes(
change_collection: model.JMESLogEntryCollection, out: IO[str]
) -> None:
for change in change_collection.changes:
description = '\n '.join(change.description.splitlines())
out.write(f'* {change.type}:{change.category}:{description}\n')
out.write('\n\n')
class ChangeQuery:
def __init__(self, change_dir: str) -> None:
self._change_dir = change_dir
def run_query(self, query_for: str) -> Any:
try:
handler = getattr(self, f'query_{query_for.replace("-", "_")}')
except AttributeError:
raise RuntimeError(f"Unknown query type: {query_for}")
return handler()
def query_last_release_version(self) -> str:
return find_last_released_version(self._change_dir)
def query_next_version(self) -> str:
changes = load_next_changes(self._change_dir)
last_released_version = find_last_released_version(self._change_dir)
next_version = determine_next_version(
last_released_version, changes.version_bump_type
)
return next_version
def query_next_release_type(self) -> str:
changes = load_next_changes(self._change_dir)
return changes.version_bump_type.value | random_line_split | |
core.py | import json
import os
import random
import shutil
import subprocess
import tempfile
import time
from dataclasses import asdict, fields
from distutils.version import StrictVersion
from typing import IO, Any, Dict, List
import jinja2
from jmeslog import model
from jmeslog.constants import DEFAULT_TEMPLATE, VALID_CHARS
from jmeslog.errors import NoChangesFoundError, ValidationError
class EditorRetriever:
def prompt_entry_values(self, entry: model.JMESLogEntry) -> None:
with tempfile.NamedTemporaryFile('w') as f:
self._write_template_to_tempfile(f, entry)
self._open_tempfile_in_editor(f.name)
contents = self._read_tempfile(f.name)
return self._parse_filled_in_contents(contents, entry)
def _open_tempfile_in_editor(self, filename: str) -> None:
env = os.environ
editor = env.get('VISUAL', env.get('EDITOR', 'vim'))
subprocess.run([editor, filename], check=True)
def _write_template_to_tempfile(
self, f: IO[str], entry: model.JMESLogEntry
) -> None:
contents = DEFAULT_TEMPLATE.format(
type=entry.type,
category=entry.category,
description=entry.description,
)
f.write(contents)
f.flush()
def _read_tempfile(self, filename: str) -> str:
with open(filename) as f:
filled_in_contents = f.read()
return filled_in_contents
def _parse_filled_in_contents(
self, contents: str, entry: model.JMESLogEntry
) -> None:
parsed_entry = EntryFileParser().parse_contents(contents)
self._update_values_from_new_entry(entry, parsed_entry)
def _update_values_from_new_entry(
self, entry: model.JMESLogEntry, new_entry: model.JMESLogEntry
) -> None:
for key, value in asdict(new_entry).items():
if value:
|
class EntryFileParser:
def parse_contents(self, contents: str) -> model.JMESLogEntry:
entry = model.JMESLogEntry.empty()
if not contents.strip():
return entry
field_names = [f.name for f in fields(entry)]
line_starts = tuple([f'{name}:' for name in field_names])
for line in contents.splitlines():
line = line.lstrip()
if line.startswith('#') or not line:
continue
if line.startswith(line_starts):
field_name, remaining = line.split(':', 1)
setattr(entry, field_name, remaining.strip())
return entry
class EntryGenerator:
def __init__(self, entry: model.JMESLogEntry, retriever: EditorRetriever):
self._entry = entry
self._retriever = retriever
def complete_entry(self) -> None:
if not self._entry.is_completed():
self._retriever.prompt_entry_values(self._entry)
@property
def change_entry(self) -> model.JMESLogEntry:
return self._entry
class EntryFileWriter:
def write_next_release_entry(
self, entry: model.JMESLogEntry, change_dir: str
) -> str:
self._create_next_release_dir(change_dir)
abs_filename = self._generate_random_file(entry, change_dir)
with open(abs_filename, 'w') as f:
f.write(entry.to_json())
f.write('\n')
return abs_filename
def _create_next_release_dir(self, change_dir: str) -> None:
next_release = os.path.join(change_dir, 'next-release')
if not os.path.isdir(next_release):
os.mkdir(next_release)
def _generate_random_file(
self, entry: model.JMESLogEntry, change_dir: str
) -> str:
next_release = os.path.join(change_dir, 'next-release')
# Need to generate a unique filename for this change.
short_summary = ''.join(
ch for ch in entry.category if ch in VALID_CHARS
)
filename = f'{entry.type}-{short_summary}'
possible_filename = self._random_filename(next_release, filename)
while os.path.isfile(possible_filename):
possible_filename = self._random_filename(next_release, filename)
return possible_filename
def _random_filename(self, next_release: str, filename: str) -> str:
return os.path.join(
next_release,
'%s-%s-%s.json'
% (time.monotonic_ns(), filename, str(random.randint(1, 100000))),
)
class EntryRecorder:
def __init__(
self,
entry_gen: EntryGenerator,
schema: model.EntrySchema,
file_writer: EntryFileWriter,
output_dir: str = '.changes',
):
self._entry_gen = entry_gen
self._schema = schema
self._file_writer = file_writer
self._output_dir = output_dir
def write_change_file_entry(self) -> str:
self._entry_gen.complete_entry()
entry = self._entry_gen.change_entry
validate_change_entry(entry, self._schema)
filename = self._file_writer.write_next_release_entry(
entry, change_dir=self._output_dir
)
return filename
def validate_change_entry(
entry: model.JMESLogEntry, schema: model.EntrySchema
) -> None:
entry_dict = asdict(entry)
schema_dict = asdict(schema)
errors = []
for schema_field in fields(schema):
value = entry_dict[schema_field.name]
allowed_values = schema_dict[schema_field.name]
if allowed_values and value not in allowed_values:
errors.append(
f'The "{schema_field.name}" value must be one of: '
f'{", ".join(allowed_values)}, received: "{value}"'
)
for key, value in entry_dict.items():
if not value:
errors.append(f'The "{key}" value cannot be empty.')
if errors:
raise ValidationError(errors)
def consolidate_next_release(
next_version: str, change_dir: str, changes: model.JMESLogEntryCollection
) -> str:
# Creates a new x.y.x.json file in .changes/ with the changes in
# .changes/next-release.
# It'll then remove the .changes/next-release directory.
release_file = os.path.join(change_dir, f'{next_version}.json')
with open(release_file, 'w') as f:
f.write(json.dumps(changes.to_dict(), indent=2))
f.write('\n')
next_release_dir = os.path.join(change_dir, 'next-release')
shutil.rmtree(next_release_dir)
return release_file
def find_last_released_version(change_dir: str) -> str:
results = sorted_versioned_releases(change_dir)
if results:
return results[-1]
return '0.0.0'
def sorted_versioned_releases(change_dir: str) -> List[str]:
# Strip off the '.json' suffix.
files = [f[:-5] for f in os.listdir(change_dir) if f.endswith('.json')]
return sorted(files, key=lambda x: StrictVersion(x))
def determine_next_version(
last_released_version: str, version_bump_type: model.VersionBump
) -> str:
parts = last_released_version.split('.')
if version_bump_type == model.VersionBump.PATCH_VERSION:
parts[2] = str(int(parts[2]) + 1)
elif version_bump_type == model.VersionBump.MINOR_VERSION:
parts[1] = str(int(parts[1]) + 1)
parts[2] = '0'
elif version_bump_type == model.VersionBump.MAJOR_VERSION:
parts[0] = str(int(parts[0]) + 1)
parts[1] = '0'
parts[2] = '0'
return '.'.join(parts)
def load_next_changes(change_dir: str) -> model.JMESLogEntryCollection:
next_release = os.path.join(change_dir, 'next-release')
if not os.path.isdir(next_release):
raise NoChangesFoundError()
changes = []
for change in sorted(os.listdir(next_release)):
entry = parse_entry(os.path.join(next_release, change))
changes.append(entry)
return model.JMESLogEntryCollection(changes=changes)
def parse_entry(filename: str) -> model.JMESLogEntry:
with open(filename) as f:
data = json.load(f)
return model.JMESLogEntry(**data)
def create_entry_recorder(
entry: model.JMESLogEntry, change_dir: str
) -> EntryRecorder:
recorder = EntryRecorder(
entry_gen=EntryGenerator(
entry=entry,
retriever=EditorRetriever(),
),
schema=model.EntrySchema(),
file_writer=EntryFileWriter(),
output_dir=change_dir,
)
return recorder
def render_changes(
changes: Dict[str, model.JMESLogEntryCollection],
out: IO[str],
template_contents: str,
) -> None:
context = {
'releases': reversed(list(changes.items())),
}
template = jinja2.Template(template_contents)
result = template.render(**context)
out.write(result)
def load_all_changes(
change_dir: str,
) -> Dict[str, model.JMESLogEntryCollection]:
releases = {}
for version_number in sorted_versioned_releases(change_dir):
filename = os.path.join(change_dir, f'{version_number}.json')
with open(filename) as f:
data = json.load(f)
releases[version_number] = model.JMESLogEntryCollection.from_dict(
data
)
return releases
def render_single_release_changes(
change_collection: model.JMESLogEntryCollection, out: IO[str]
) -> None:
for change in change_collection.changes:
description = '\n '.join(change.description.splitlines())
out.write(f'* {change.type}:{change.category}:{description}\n')
out.write('\n\n')
class ChangeQuery:
def __init__(self, change_dir: str) -> None:
self._change_dir = change_dir
def run_query(self, query_for: str) -> Any:
try:
handler = getattr(self, f'query_{query_for.replace("-", "_")}')
except AttributeError:
raise RuntimeError(f"Unknown query type: {query_for}")
return handler()
def query_last_release_version(self) -> str:
return find_last_released_version(self._change_dir)
def query_next_version(self) -> str:
changes = load_next_changes(self._change_dir)
last_released_version = find_last_released_version(self._change_dir)
next_version = determine_next_version(
last_released_version, changes.version_bump_type
)
return next_version
def query_next_release_type(self) -> str:
changes = load_next_changes(self._change_dir)
return changes.version_bump_type.value
| setattr(entry, key, value) | conditional_block |
mod.rs | //! TensorFlow Ops
use std::collections::HashMap;
use std::collections::VecDeque;
use std::fmt::Debug;
use std::mem;
use std::ops::{Index, IndexMut};
#[cfg(feature = "serialize")]
use std::result::Result as StdResult;
use std::sync::Arc;
use analyser::interface::{Solver, TensorsProxy};
use analyser::prelude::*;
use ops::nn::local_patch::{DataFormat, Padding};
use {DataType, Result, Tensor};
use downcast_rs::Downcast;
use objekt;
#[cfg(feature = "serialize")]
use serde::ser::{Serialize, Serializer};
#[macro_use]
mod macros;
mod array;
mod cast;
#[cfg(features = "image_ops")]
pub mod image;
pub mod konst;
mod math;
pub mod nn;
pub mod prelude {
pub use super::{Attr, InferenceRulesOp, Op, OpRegister};
pub use super::{OpBuffer, QueuesBuffer, TensorView};
pub use std::collections::HashMap;
pub use std::marker::PhantomData;
pub use tensor::{DataType, Datum, Tensor};
pub use Result;
}
#[derive(Debug, Clone)]
pub enum TensorView {
Owned(Tensor),
Shared(Arc<Tensor>),
}
impl TensorView {
/// Creates a shared TensorView from any TensorView.
pub fn into_shared(self) -> TensorView {
match self {
TensorView::Owned(m) => TensorView::Shared(Arc::new(m)),
TensorView::Shared(_) => self,
}
}
/// Creates a Tensor from a TensorView.
pub fn into_tensor(self) -> Tensor {
match self {
TensorView::Owned(m) => m,
TensorView::Shared(m) => m.as_ref().clone(),
}
}
/// Returns a reference to the Tensor wrapped inside a TensorView.
pub fn | (&self) -> &Tensor {
match self {
&TensorView::Owned(ref m) => &m,
&TensorView::Shared(ref m) => m.as_ref(),
}
}
/// Returns a shared copy of the TensorView, turning the one passed
/// as argument into a TensorView::Shared if necessary.
pub fn share(&mut self) -> TensorView {
// This is somewhat ugly, but sadly we couldn't find any other
// way to implement it. If we try to write something like:
// *self = TensorView::Shared(Arc::new(*m))
// the borrow checker will complain about *m being moved out of
// borrowed content, which makes sense but doesn't apply in our
// case because we will "give m back" to the TensorView, except
// wrapped around an Arc. The only way to get ownership of m is
// to use mem::replace, which means we have to create a "dummy"
// value to replace self first.
if let TensorView::Owned(_) = self {
let dummy = TensorView::Owned(Tensor::i32s(&[], &[0]).unwrap());
let shared = match mem::replace(self, dummy) {
TensorView::Owned(m) => TensorView::Shared(Arc::new(m)),
_ => panic!(),
};
*self = shared;
}
self.clone()
}
}
impl<M> From<M> for TensorView
where
Tensor: From<M>,
{
fn from(m: M) -> TensorView {
TensorView::Owned(m.into())
}
}
impl From<Arc<Tensor>> for TensorView {
fn from(m: Arc<Tensor>) -> TensorView {
TensorView::Shared(m)
}
}
impl ::std::ops::Deref for TensorView {
type Target = Tensor;
fn deref(&self) -> &Tensor {
match self {
&TensorView::Owned(ref m) => &m,
&TensorView::Shared(ref m) => m.as_ref(),
}
}
}
impl PartialEq for TensorView {
fn eq(&self, other: &TensorView) -> bool {
self.as_tensor() == other.as_tensor()
}
}
// TODO(liautaud): Find a more generic way to do this.
#[cfg_attr(feature = "serialize", derive(Serialize))]
#[derive(Debug, Clone)]
pub enum Attr {
I64(i64),
Usize(usize),
DataType(DataType),
DataFormat(DataFormat),
Padding(Padding),
Tensor(Tensor),
UsizeVec(Vec<usize>),
IsizeVec(Vec<isize>),
}
/// A Tensorflow operation.
pub trait Op: Debug + objekt::Clone + Send + Sync + 'static + InferenceOp {
/// Returns the attributes of the operation and their values.
fn get_attributes(&self) -> HashMap<&'static str, Attr>;
/// Evaluates the operation given the input tensors.
fn eval(&self, inputs: Vec<TensorView>) -> Result<Vec<TensorView>>;
/// Returns a new streaming buffer for the operation.
fn new_buffer(&self) -> Box<OpBuffer> {
Box::new(EmptyBuffer {})
}
/// Evaluates one step of the operation on the given input tensors.
/// This is only implemented for operators which support streaming.
///
/// The input tensors are annotated with an Option<usize>:
/// - None if the tensor doesn't have a streaming dimension.
/// - Option(d) if the tensor is being streamed on dimension d.
///
/// If an input tensor has a streaming dimension, the corresponding
/// TensorView will only contain a _chunk_ of input of size 1 along
/// that dimension. Note that each chunk will only be passed once
/// to the step function, so it should use the provided buffer to
/// store whichever chunks it needs for future computations.
///
/// The function should return Some(chunks) when it has computed
/// new chunks, and None if it has computed an intermediary result
/// successfully but doesn't have new output chunks ready yet.
///
/// For operators like Concat, multiple input tensors might have a
/// streaming dimension. In that case, at each call to step, only
/// one of the streaming inputs will receive new chunk while the
/// others will receive None.
fn step(
&self,
_inputs: Vec<(Option<usize>, Option<TensorView>)>,
_buffer: &mut Box<OpBuffer>,
) -> Result<Option<Vec<TensorView>>> {
bail!("Streaming is not available for operator {:?}", self)
}
/// Infers properties about the input and output tensors.
///
/// The `inputs` and `outputs` arguments correspond to properties about
/// the input and output tensors that are already known.
///
/// Returns Err in case of an unrecoverable error during the inference,
/// and the refined properties about the inputs and outputs otherwise.
fn infer_and_propagate(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)> {
let (infered_inputs, infered_outputs) = self.infer(inputs, outputs)?;
if infered_inputs.iter().all(|i| i.value.is_concrete()) {
let input_values = infered_inputs
.iter()
.map(|i| i.value.concretize().unwrap().clone().into())
.collect(); // checked
let output_value = self.eval(input_values)?.pop().unwrap();
Ok((
infered_inputs,
vec![::analyser::helpers::tensor_to_fact(
output_value.into_tensor(),
)],
))
} else {
Ok((infered_inputs, infered_outputs))
}
}
fn const_value(&self) -> Option<Tensor> {
None
}
}
pub trait InferenceOp {
fn infer(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)>;
}
pub trait InferenceRulesOp {
/// Registers the inference rules of the operator.
fn rules<'r, 'p: 'r, 's: 'r>(
&'s self,
solver: &mut Solver<'r>,
inputs: &'p TensorsProxy,
outputs: &'p TensorsProxy,
);
}
impl<O: InferenceRulesOp> InferenceOp for O {
fn infer(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)> {
let inputs_proxy = TensorsProxy::new(vec![0].into());
let outputs_proxy = TensorsProxy::new(vec![1].into());
let mut solver = Solver::default();
self.rules(&mut solver, &inputs_proxy, &outputs_proxy);
solver.infer((inputs, outputs))
}
}
clone_trait_object!(Op);
#[cfg(feature = "serialize")]
impl Serialize for Op {
fn serialize<S>(&self, serializer: S) -> StdResult<S::Ok, S::Error>
where
S: Serializer,
{
self.get_attributes().serialize(serializer)
}
}
pub type OpRegister = HashMap<&'static str, fn(&::tfpb::node_def::NodeDef) -> Result<Box<Op>>>;
pub struct OpBuilder(OpRegister);
impl OpBuilder {
pub fn new() -> OpBuilder {
let mut reg = OpRegister::new();
array::register_all_ops(&mut reg);
cast::register_all_ops(&mut reg);
konst::register_all_ops(&mut reg);
math::register_all_ops(&mut reg);
nn::register_all_ops(&mut reg);
OpBuilder(reg)
}
pub fn build(&self, pb: &::tfpb::node_def::NodeDef) -> Result<Box<Op>> {
match self.0.get(pb.get_op()) {
Some(builder) => builder(pb),
None => Ok(Box::new(UnimplementedOp(
pb.get_op().to_string(),
pb.to_owned(),
))),
}
}
}
#[derive(Debug, Clone)]
pub struct UnimplementedOp(String, ::tfpb::node_def::NodeDef);
impl Op for UnimplementedOp {
/// Evaluates the operation given the input tensors.
fn eval(&self, _inputs: Vec<TensorView>) -> Result<Vec<TensorView>> {
Err(format!("unimplemented operation: {}", self.0))?
}
/// Returns the attributes of the operation and their values.
fn get_attributes(&self) -> HashMap<&'static str, Attr> {
hashmap!{} // FIXME
}
}
impl InferenceRulesOp for UnimplementedOp {
fn rules<'r, 'p: 'r, 's: 'r>(
&'s self,
_: &mut Solver<'r>,
_: &'p TensorsProxy,
_: &'p TensorsProxy,
) {
}
}
/// A streaming buffer for a Tensorflow operation.
///
/// This is used during streaming evaluation of models. Each node is given
/// a mutable reference to a buffer which it can use to store intermediary
/// results between evaluation steps. Every operation must provide its own
/// buffer type (or use one of the general ones defined below), which must
/// implement the OpBuffer trait. It should return a new instance of it in
/// the `Op::new_buffer` method, and downcast it from OpBuffer in `step`.
pub trait OpBuffer: Downcast + Debug + objekt::Clone + Send + 'static {}
clone_trait_object!(OpBuffer);
impl_downcast!(OpBuffer);
/// An empty buffer for operations which don't need one.
#[derive(Debug, Clone)]
pub struct EmptyBuffer {}
impl OpBuffer for EmptyBuffer {}
/// A buffer with a variable number of TensorView queues.
#[derive(Debug, Clone)]
pub struct QueuesBuffer(Vec<VecDeque<TensorView>>);
impl OpBuffer for QueuesBuffer {}
impl QueuesBuffer {
/// Creates a new buffer with a given number of queues.
pub fn new(size: usize) -> QueuesBuffer {
QueuesBuffer(vec![VecDeque::new(); size])
}
/// Appends a new TensorView to each queue in the buffer.
pub fn append(&mut self, views: &mut [(Option<usize>, Option<TensorView>)]) -> Result<()> {
if views.len() > self.0.len() {
bail!("There are more input TensorViews than queues in the buffer.");
}
for (i, view) in views.iter_mut().enumerate() {
if view.1.is_some() {
self.0[i].push_back(view.1.take().unwrap())
}
}
Ok(())
}
/// Returns an iterator over all the queues in the buffer.
pub fn iter<'a>(&'a mut self) -> impl Iterator<Item = &'a VecDeque<TensorView>> {
self.0.iter()
}
/// Returns a mutable iterator over all the queues in the buffer.
pub fn iter_mut<'a>(&'a mut self) -> impl Iterator<Item = &'a mut VecDeque<TensorView>> {
self.0.iter_mut()
}
}
impl Index<usize> for QueuesBuffer {
type Output = VecDeque<TensorView>;
fn index(&self, index: usize) -> &VecDeque<TensorView> {
&self.0[index]
}
}
impl IndexMut<usize> for QueuesBuffer {
fn index_mut(&mut self, index: usize) -> &mut VecDeque<TensorView> {
&mut self.0[index]
}
}
| as_tensor | identifier_name |
mod.rs | //! TensorFlow Ops
use std::collections::HashMap;
use std::collections::VecDeque;
use std::fmt::Debug;
use std::mem;
use std::ops::{Index, IndexMut};
#[cfg(feature = "serialize")]
use std::result::Result as StdResult;
use std::sync::Arc;
use analyser::interface::{Solver, TensorsProxy};
use analyser::prelude::*;
use ops::nn::local_patch::{DataFormat, Padding};
use {DataType, Result, Tensor};
use downcast_rs::Downcast;
use objekt;
#[cfg(feature = "serialize")]
use serde::ser::{Serialize, Serializer};
#[macro_use]
mod macros;
mod array;
mod cast;
#[cfg(features = "image_ops")]
pub mod image;
pub mod konst;
mod math;
pub mod nn;
pub mod prelude {
pub use super::{Attr, InferenceRulesOp, Op, OpRegister};
pub use super::{OpBuffer, QueuesBuffer, TensorView};
pub use std::collections::HashMap;
pub use std::marker::PhantomData;
pub use tensor::{DataType, Datum, Tensor};
pub use Result;
}
#[derive(Debug, Clone)]
pub enum TensorView {
Owned(Tensor),
Shared(Arc<Tensor>),
}
impl TensorView {
/// Creates a shared TensorView from any TensorView.
pub fn into_shared(self) -> TensorView {
match self {
TensorView::Owned(m) => TensorView::Shared(Arc::new(m)),
TensorView::Shared(_) => self,
}
}
/// Creates a Tensor from a TensorView.
pub fn into_tensor(self) -> Tensor {
match self {
TensorView::Owned(m) => m,
TensorView::Shared(m) => m.as_ref().clone(),
}
}
/// Returns a reference to the Tensor wrapped inside a TensorView.
pub fn as_tensor(&self) -> &Tensor {
match self {
&TensorView::Owned(ref m) => &m,
&TensorView::Shared(ref m) => m.as_ref(),
}
}
/// Returns a shared copy of the TensorView, turning the one passed
/// as argument into a TensorView::Shared if necessary.
pub fn share(&mut self) -> TensorView {
// This is somewhat ugly, but sadly we couldn't find any other
// way to implement it. If we try to write something like:
// *self = TensorView::Shared(Arc::new(*m))
// the borrow checker will complain about *m being moved out of
// borrowed content, which makes sense but doesn't apply in our
// case because we will "give m back" to the TensorView, except
// wrapped around an Arc. The only way to get ownership of m is
// to use mem::replace, which means we have to create a "dummy"
// value to replace self first.
if let TensorView::Owned(_) = self {
let dummy = TensorView::Owned(Tensor::i32s(&[], &[0]).unwrap());
let shared = match mem::replace(self, dummy) {
TensorView::Owned(m) => TensorView::Shared(Arc::new(m)),
_ => panic!(),
};
*self = shared;
}
self.clone()
}
}
impl<M> From<M> for TensorView
where
Tensor: From<M>,
{
fn from(m: M) -> TensorView {
TensorView::Owned(m.into())
}
}
impl From<Arc<Tensor>> for TensorView {
fn from(m: Arc<Tensor>) -> TensorView {
TensorView::Shared(m)
}
}
impl ::std::ops::Deref for TensorView {
type Target = Tensor;
fn deref(&self) -> &Tensor {
match self {
&TensorView::Owned(ref m) => &m,
&TensorView::Shared(ref m) => m.as_ref(),
}
}
}
impl PartialEq for TensorView {
fn eq(&self, other: &TensorView) -> bool {
self.as_tensor() == other.as_tensor()
}
}
// TODO(liautaud): Find a more generic way to do this.
#[cfg_attr(feature = "serialize", derive(Serialize))]
#[derive(Debug, Clone)]
pub enum Attr {
I64(i64),
Usize(usize),
DataType(DataType),
DataFormat(DataFormat),
Padding(Padding),
Tensor(Tensor),
UsizeVec(Vec<usize>),
IsizeVec(Vec<isize>),
}
/// A Tensorflow operation.
pub trait Op: Debug + objekt::Clone + Send + Sync + 'static + InferenceOp {
/// Returns the attributes of the operation and their values.
fn get_attributes(&self) -> HashMap<&'static str, Attr>;
/// Evaluates the operation given the input tensors.
fn eval(&self, inputs: Vec<TensorView>) -> Result<Vec<TensorView>>;
/// Returns a new streaming buffer for the operation.
fn new_buffer(&self) -> Box<OpBuffer> {
Box::new(EmptyBuffer {})
}
/// Evaluates one step of the operation on the given input tensors.
/// This is only implemented for operators which support streaming.
///
/// The input tensors are annotated with an Option<usize>:
/// - None if the tensor doesn't have a streaming dimension.
/// - Option(d) if the tensor is being streamed on dimension d.
///
/// If an input tensor has a streaming dimension, the corresponding
/// TensorView will only contain a _chunk_ of input of size 1 along
/// that dimension. Note that each chunk will only be passed once
/// to the step function, so it should use the provided buffer to
/// store whichever chunks it needs for future computations.
///
/// The function should return Some(chunks) when it has computed
/// new chunks, and None if it has computed an intermediary result
/// successfully but doesn't have new output chunks ready yet.
///
/// For operators like Concat, multiple input tensors might have a
/// streaming dimension. In that case, at each call to step, only
/// one of the streaming inputs will receive new chunk while the
/// others will receive None. | bail!("Streaming is not available for operator {:?}", self)
}
/// Infers properties about the input and output tensors.
///
/// The `inputs` and `outputs` arguments correspond to properties about
/// the input and output tensors that are already known.
///
/// Returns Err in case of an unrecoverable error during the inference,
/// and the refined properties about the inputs and outputs otherwise.
fn infer_and_propagate(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)> {
let (infered_inputs, infered_outputs) = self.infer(inputs, outputs)?;
if infered_inputs.iter().all(|i| i.value.is_concrete()) {
let input_values = infered_inputs
.iter()
.map(|i| i.value.concretize().unwrap().clone().into())
.collect(); // checked
let output_value = self.eval(input_values)?.pop().unwrap();
Ok((
infered_inputs,
vec![::analyser::helpers::tensor_to_fact(
output_value.into_tensor(),
)],
))
} else {
Ok((infered_inputs, infered_outputs))
}
}
fn const_value(&self) -> Option<Tensor> {
None
}
}
pub trait InferenceOp {
fn infer(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)>;
}
pub trait InferenceRulesOp {
/// Registers the inference rules of the operator.
fn rules<'r, 'p: 'r, 's: 'r>(
&'s self,
solver: &mut Solver<'r>,
inputs: &'p TensorsProxy,
outputs: &'p TensorsProxy,
);
}
impl<O: InferenceRulesOp> InferenceOp for O {
fn infer(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)> {
let inputs_proxy = TensorsProxy::new(vec![0].into());
let outputs_proxy = TensorsProxy::new(vec![1].into());
let mut solver = Solver::default();
self.rules(&mut solver, &inputs_proxy, &outputs_proxy);
solver.infer((inputs, outputs))
}
}
clone_trait_object!(Op);
#[cfg(feature = "serialize")]
impl Serialize for Op {
fn serialize<S>(&self, serializer: S) -> StdResult<S::Ok, S::Error>
where
S: Serializer,
{
self.get_attributes().serialize(serializer)
}
}
pub type OpRegister = HashMap<&'static str, fn(&::tfpb::node_def::NodeDef) -> Result<Box<Op>>>;
pub struct OpBuilder(OpRegister);
impl OpBuilder {
pub fn new() -> OpBuilder {
let mut reg = OpRegister::new();
array::register_all_ops(&mut reg);
cast::register_all_ops(&mut reg);
konst::register_all_ops(&mut reg);
math::register_all_ops(&mut reg);
nn::register_all_ops(&mut reg);
OpBuilder(reg)
}
pub fn build(&self, pb: &::tfpb::node_def::NodeDef) -> Result<Box<Op>> {
match self.0.get(pb.get_op()) {
Some(builder) => builder(pb),
None => Ok(Box::new(UnimplementedOp(
pb.get_op().to_string(),
pb.to_owned(),
))),
}
}
}
#[derive(Debug, Clone)]
pub struct UnimplementedOp(String, ::tfpb::node_def::NodeDef);
impl Op for UnimplementedOp {
/// Evaluates the operation given the input tensors.
fn eval(&self, _inputs: Vec<TensorView>) -> Result<Vec<TensorView>> {
Err(format!("unimplemented operation: {}", self.0))?
}
/// Returns the attributes of the operation and their values.
fn get_attributes(&self) -> HashMap<&'static str, Attr> {
hashmap!{} // FIXME
}
}
impl InferenceRulesOp for UnimplementedOp {
fn rules<'r, 'p: 'r, 's: 'r>(
&'s self,
_: &mut Solver<'r>,
_: &'p TensorsProxy,
_: &'p TensorsProxy,
) {
}
}
/// A streaming buffer for a Tensorflow operation.
///
/// This is used during streaming evaluation of models. Each node is given
/// a mutable reference to a buffer which it can use to store intermediary
/// results between evaluation steps. Every operation must provide its own
/// buffer type (or use one of the general ones defined below), which must
/// implement the OpBuffer trait. It should return a new instance of it in
/// the `Op::new_buffer` method, and downcast it from OpBuffer in `step`.
pub trait OpBuffer: Downcast + Debug + objekt::Clone + Send + 'static {}
clone_trait_object!(OpBuffer);
impl_downcast!(OpBuffer);
/// An empty buffer for operations which don't need one.
#[derive(Debug, Clone)]
pub struct EmptyBuffer {}
impl OpBuffer for EmptyBuffer {}
/// A buffer with a variable number of TensorView queues.
#[derive(Debug, Clone)]
pub struct QueuesBuffer(Vec<VecDeque<TensorView>>);
impl OpBuffer for QueuesBuffer {}
impl QueuesBuffer {
/// Creates a new buffer with a given number of queues.
pub fn new(size: usize) -> QueuesBuffer {
QueuesBuffer(vec![VecDeque::new(); size])
}
/// Appends a new TensorView to each queue in the buffer.
pub fn append(&mut self, views: &mut [(Option<usize>, Option<TensorView>)]) -> Result<()> {
if views.len() > self.0.len() {
bail!("There are more input TensorViews than queues in the buffer.");
}
for (i, view) in views.iter_mut().enumerate() {
if view.1.is_some() {
self.0[i].push_back(view.1.take().unwrap())
}
}
Ok(())
}
/// Returns an iterator over all the queues in the buffer.
pub fn iter<'a>(&'a mut self) -> impl Iterator<Item = &'a VecDeque<TensorView>> {
self.0.iter()
}
/// Returns a mutable iterator over all the queues in the buffer.
pub fn iter_mut<'a>(&'a mut self) -> impl Iterator<Item = &'a mut VecDeque<TensorView>> {
self.0.iter_mut()
}
}
impl Index<usize> for QueuesBuffer {
type Output = VecDeque<TensorView>;
fn index(&self, index: usize) -> &VecDeque<TensorView> {
&self.0[index]
}
}
impl IndexMut<usize> for QueuesBuffer {
fn index_mut(&mut self, index: usize) -> &mut VecDeque<TensorView> {
&mut self.0[index]
}
} | fn step(
&self,
_inputs: Vec<(Option<usize>, Option<TensorView>)>,
_buffer: &mut Box<OpBuffer>,
) -> Result<Option<Vec<TensorView>>> { | random_line_split |
mod.rs | //! TensorFlow Ops
use std::collections::HashMap;
use std::collections::VecDeque;
use std::fmt::Debug;
use std::mem;
use std::ops::{Index, IndexMut};
#[cfg(feature = "serialize")]
use std::result::Result as StdResult;
use std::sync::Arc;
use analyser::interface::{Solver, TensorsProxy};
use analyser::prelude::*;
use ops::nn::local_patch::{DataFormat, Padding};
use {DataType, Result, Tensor};
use downcast_rs::Downcast;
use objekt;
#[cfg(feature = "serialize")]
use serde::ser::{Serialize, Serializer};
#[macro_use]
mod macros;
mod array;
mod cast;
#[cfg(features = "image_ops")]
pub mod image;
pub mod konst;
mod math;
pub mod nn;
pub mod prelude {
pub use super::{Attr, InferenceRulesOp, Op, OpRegister};
pub use super::{OpBuffer, QueuesBuffer, TensorView};
pub use std::collections::HashMap;
pub use std::marker::PhantomData;
pub use tensor::{DataType, Datum, Tensor};
pub use Result;
}
#[derive(Debug, Clone)]
pub enum TensorView {
Owned(Tensor),
Shared(Arc<Tensor>),
}
impl TensorView {
/// Creates a shared TensorView from any TensorView.
pub fn into_shared(self) -> TensorView |
/// Creates a Tensor from a TensorView.
pub fn into_tensor(self) -> Tensor {
match self {
TensorView::Owned(m) => m,
TensorView::Shared(m) => m.as_ref().clone(),
}
}
/// Returns a reference to the Tensor wrapped inside a TensorView.
pub fn as_tensor(&self) -> &Tensor {
match self {
&TensorView::Owned(ref m) => &m,
&TensorView::Shared(ref m) => m.as_ref(),
}
}
/// Returns a shared copy of the TensorView, turning the one passed
/// as argument into a TensorView::Shared if necessary.
pub fn share(&mut self) -> TensorView {
// This is somewhat ugly, but sadly we couldn't find any other
// way to implement it. If we try to write something like:
// *self = TensorView::Shared(Arc::new(*m))
// the borrow checker will complain about *m being moved out of
// borrowed content, which makes sense but doesn't apply in our
// case because we will "give m back" to the TensorView, except
// wrapped around an Arc. The only way to get ownership of m is
// to use mem::replace, which means we have to create a "dummy"
// value to replace self first.
if let TensorView::Owned(_) = self {
let dummy = TensorView::Owned(Tensor::i32s(&[], &[0]).unwrap());
let shared = match mem::replace(self, dummy) {
TensorView::Owned(m) => TensorView::Shared(Arc::new(m)),
_ => panic!(),
};
*self = shared;
}
self.clone()
}
}
impl<M> From<M> for TensorView
where
Tensor: From<M>,
{
fn from(m: M) -> TensorView {
TensorView::Owned(m.into())
}
}
impl From<Arc<Tensor>> for TensorView {
fn from(m: Arc<Tensor>) -> TensorView {
TensorView::Shared(m)
}
}
impl ::std::ops::Deref for TensorView {
type Target = Tensor;
fn deref(&self) -> &Tensor {
match self {
&TensorView::Owned(ref m) => &m,
&TensorView::Shared(ref m) => m.as_ref(),
}
}
}
impl PartialEq for TensorView {
fn eq(&self, other: &TensorView) -> bool {
self.as_tensor() == other.as_tensor()
}
}
// TODO(liautaud): Find a more generic way to do this.
#[cfg_attr(feature = "serialize", derive(Serialize))]
#[derive(Debug, Clone)]
pub enum Attr {
I64(i64),
Usize(usize),
DataType(DataType),
DataFormat(DataFormat),
Padding(Padding),
Tensor(Tensor),
UsizeVec(Vec<usize>),
IsizeVec(Vec<isize>),
}
/// A Tensorflow operation.
pub trait Op: Debug + objekt::Clone + Send + Sync + 'static + InferenceOp {
/// Returns the attributes of the operation and their values.
fn get_attributes(&self) -> HashMap<&'static str, Attr>;
/// Evaluates the operation given the input tensors.
fn eval(&self, inputs: Vec<TensorView>) -> Result<Vec<TensorView>>;
/// Returns a new streaming buffer for the operation.
fn new_buffer(&self) -> Box<OpBuffer> {
Box::new(EmptyBuffer {})
}
/// Evaluates one step of the operation on the given input tensors.
/// This is only implemented for operators which support streaming.
///
/// The input tensors are annotated with an Option<usize>:
/// - None if the tensor doesn't have a streaming dimension.
/// - Option(d) if the tensor is being streamed on dimension d.
///
/// If an input tensor has a streaming dimension, the corresponding
/// TensorView will only contain a _chunk_ of input of size 1 along
/// that dimension. Note that each chunk will only be passed once
/// to the step function, so it should use the provided buffer to
/// store whichever chunks it needs for future computations.
///
/// The function should return Some(chunks) when it has computed
/// new chunks, and None if it has computed an intermediary result
/// successfully but doesn't have new output chunks ready yet.
///
/// For operators like Concat, multiple input tensors might have a
/// streaming dimension. In that case, at each call to step, only
/// one of the streaming inputs will receive new chunk while the
/// others will receive None.
fn step(
&self,
_inputs: Vec<(Option<usize>, Option<TensorView>)>,
_buffer: &mut Box<OpBuffer>,
) -> Result<Option<Vec<TensorView>>> {
bail!("Streaming is not available for operator {:?}", self)
}
/// Infers properties about the input and output tensors.
///
/// The `inputs` and `outputs` arguments correspond to properties about
/// the input and output tensors that are already known.
///
/// Returns Err in case of an unrecoverable error during the inference,
/// and the refined properties about the inputs and outputs otherwise.
fn infer_and_propagate(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)> {
let (infered_inputs, infered_outputs) = self.infer(inputs, outputs)?;
if infered_inputs.iter().all(|i| i.value.is_concrete()) {
let input_values = infered_inputs
.iter()
.map(|i| i.value.concretize().unwrap().clone().into())
.collect(); // checked
let output_value = self.eval(input_values)?.pop().unwrap();
Ok((
infered_inputs,
vec![::analyser::helpers::tensor_to_fact(
output_value.into_tensor(),
)],
))
} else {
Ok((infered_inputs, infered_outputs))
}
}
fn const_value(&self) -> Option<Tensor> {
None
}
}
pub trait InferenceOp {
fn infer(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)>;
}
pub trait InferenceRulesOp {
/// Registers the inference rules of the operator.
fn rules<'r, 'p: 'r, 's: 'r>(
&'s self,
solver: &mut Solver<'r>,
inputs: &'p TensorsProxy,
outputs: &'p TensorsProxy,
);
}
impl<O: InferenceRulesOp> InferenceOp for O {
fn infer(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)> {
let inputs_proxy = TensorsProxy::new(vec![0].into());
let outputs_proxy = TensorsProxy::new(vec![1].into());
let mut solver = Solver::default();
self.rules(&mut solver, &inputs_proxy, &outputs_proxy);
solver.infer((inputs, outputs))
}
}
clone_trait_object!(Op);
#[cfg(feature = "serialize")]
impl Serialize for Op {
fn serialize<S>(&self, serializer: S) -> StdResult<S::Ok, S::Error>
where
S: Serializer,
{
self.get_attributes().serialize(serializer)
}
}
pub type OpRegister = HashMap<&'static str, fn(&::tfpb::node_def::NodeDef) -> Result<Box<Op>>>;
pub struct OpBuilder(OpRegister);
impl OpBuilder {
pub fn new() -> OpBuilder {
let mut reg = OpRegister::new();
array::register_all_ops(&mut reg);
cast::register_all_ops(&mut reg);
konst::register_all_ops(&mut reg);
math::register_all_ops(&mut reg);
nn::register_all_ops(&mut reg);
OpBuilder(reg)
}
pub fn build(&self, pb: &::tfpb::node_def::NodeDef) -> Result<Box<Op>> {
match self.0.get(pb.get_op()) {
Some(builder) => builder(pb),
None => Ok(Box::new(UnimplementedOp(
pb.get_op().to_string(),
pb.to_owned(),
))),
}
}
}
#[derive(Debug, Clone)]
pub struct UnimplementedOp(String, ::tfpb::node_def::NodeDef);
impl Op for UnimplementedOp {
/// Evaluates the operation given the input tensors.
fn eval(&self, _inputs: Vec<TensorView>) -> Result<Vec<TensorView>> {
Err(format!("unimplemented operation: {}", self.0))?
}
/// Returns the attributes of the operation and their values.
fn get_attributes(&self) -> HashMap<&'static str, Attr> {
hashmap!{} // FIXME
}
}
impl InferenceRulesOp for UnimplementedOp {
fn rules<'r, 'p: 'r, 's: 'r>(
&'s self,
_: &mut Solver<'r>,
_: &'p TensorsProxy,
_: &'p TensorsProxy,
) {
}
}
/// A streaming buffer for a Tensorflow operation.
///
/// This is used during streaming evaluation of models. Each node is given
/// a mutable reference to a buffer which it can use to store intermediary
/// results between evaluation steps. Every operation must provide its own
/// buffer type (or use one of the general ones defined below), which must
/// implement the OpBuffer trait. It should return a new instance of it in
/// the `Op::new_buffer` method, and downcast it from OpBuffer in `step`.
pub trait OpBuffer: Downcast + Debug + objekt::Clone + Send + 'static {}
clone_trait_object!(OpBuffer);
impl_downcast!(OpBuffer);
/// An empty buffer for operations which don't need one.
#[derive(Debug, Clone)]
pub struct EmptyBuffer {}
impl OpBuffer for EmptyBuffer {}
/// A buffer with a variable number of TensorView queues.
#[derive(Debug, Clone)]
pub struct QueuesBuffer(Vec<VecDeque<TensorView>>);
impl OpBuffer for QueuesBuffer {}
impl QueuesBuffer {
/// Creates a new buffer with a given number of queues.
pub fn new(size: usize) -> QueuesBuffer {
QueuesBuffer(vec![VecDeque::new(); size])
}
/// Appends a new TensorView to each queue in the buffer.
pub fn append(&mut self, views: &mut [(Option<usize>, Option<TensorView>)]) -> Result<()> {
if views.len() > self.0.len() {
bail!("There are more input TensorViews than queues in the buffer.");
}
for (i, view) in views.iter_mut().enumerate() {
if view.1.is_some() {
self.0[i].push_back(view.1.take().unwrap())
}
}
Ok(())
}
/// Returns an iterator over all the queues in the buffer.
pub fn iter<'a>(&'a mut self) -> impl Iterator<Item = &'a VecDeque<TensorView>> {
self.0.iter()
}
/// Returns a mutable iterator over all the queues in the buffer.
pub fn iter_mut<'a>(&'a mut self) -> impl Iterator<Item = &'a mut VecDeque<TensorView>> {
self.0.iter_mut()
}
}
impl Index<usize> for QueuesBuffer {
type Output = VecDeque<TensorView>;
fn index(&self, index: usize) -> &VecDeque<TensorView> {
&self.0[index]
}
}
impl IndexMut<usize> for QueuesBuffer {
fn index_mut(&mut self, index: usize) -> &mut VecDeque<TensorView> {
&mut self.0[index]
}
}
| {
match self {
TensorView::Owned(m) => TensorView::Shared(Arc::new(m)),
TensorView::Shared(_) => self,
}
} | identifier_body |
import.go | package main
import (
"bytes"
"context"
"encoding/json"
"flag"
"fmt"
"github.com/alexashley/terraform-provider-kong/kong/kong"
"github.com/google/subcommands"
"io/ioutil"
"os"
"os/exec"
"strings"
)
type importCommand struct {
adminApiUrl string
rbacToken string
isDryRun bool
importFileName string
tfConfigPath string
}
var pluginsToResourceImplementations = map[string]string{
"openid-connect": "kong_plugin_openid_connect",
"request-transformer-advanced": "kong_plugin_request_transformer_advanced",
}
func (*importCommand) Name() string {
return "import"
}
func (*importCommand) Synopsis() string {
return "Import consumers, services, and routes from Kong."
}
func (*importCommand) Usage() string {
return `import -admin-api-url=https://kong-admin.foo.com`
}
func (cmd *importCommand) SetFlags(flags *flag.FlagSet) {
flags.StringVar(
&cmd.adminApiUrl,
"admin-api-url",
"http://localhost:8001",
"Kong's admin api url. Usually listening on port 8001.",
)
flags.StringVar(
&cmd.rbacToken,
"rbac-token",
"",
"Kong EE RBAC token. Only necessary if your Kong Enterprise installation is secured with RBAC.",
)
flags.BoolVar(
&cmd.isDryRun,
"dry-run",
false,
"List the resources that will be imported, but do not actually import them.",
)
flags.StringVar(
&cmd.importFileName,
"state",
"import-state.json",
"Holds the current import state and any exclusions",
)
flags.StringVar(
&cmd.tfConfigPath,
"tf-config",
"",
"Path to Terraform config directory",
)
}
func (cmd *importCommand) Execute(_ context.Context, flags *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
fmt.Println("Importing resources from: " + cmd.adminApiUrl)
client, err := kong.NewKongClient(kong.KongConfig{
AdminApiUrl: cmd.adminApiUrl,
RbacToken: cmd.rbacToken,
})
if err != nil {
fmt.Printf("error initializing Kong client: %v\n", err)
return subcommands.ExitFailure
}
state := &kongState{
client: client,
}
if err := state.loadState(cmd.importFileName); err != nil {
fmt.Printf("error loading import state file %v\n", err)
return subcommands.ExitFailure
}
if err := state.discover(); err != nil {
fmt.Printf("error while discovering resources %v\n", err)
return subcommands.ExitFailure
}
fmt.Println("\nDiscovery:")
fmt.Println(state.discoveryReport())
if !cmd.isDryRun {
if err := state.importResources(cmd); err != nil {
fmt.Printf("error occurred while importing resources: %v\n", err)
err := state.finish(cmd.importFileName)
if err != nil {
fmt.Println("Additional error saving progress ", err)
}
return subcommands.ExitFailure
} else {
if err := state.finish(cmd.importFileName); err != nil {
fmt.Printf("Error occurred saving import file %v\n", err)
}
}
}
return subcommands.ExitSuccess
}
type kongState struct {
services []kong.KongService
routes []kong.KongRoute
plugins []kong.KongPlugin
consumers []kong.KongConsumer
imports map[string][]string // { services: [<uuid>], routes: [<uuid>,] }
client *kong.KongClient
}
func (s *kongState) loadState(fileName string) error {
if stateFile, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0755); err != nil {
return err
} else {
defer stateFile.Close()
s.imports = make(map[string][]string)
raw, err := ioutil.ReadAll(stateFile)
if err != nil {
return err
}
if len(raw) == 0 {
s.imports["services"] = make([]string, 0)
s.imports["routes"] = make([]string, 0)
s.imports["plugins"] = make([]string, 0)
s.imports["consumers"] = make([]string, 0)
} else if err := json.Unmarshal(raw, &s.imports); err != nil {
return err
}
}
return nil
}
func (s *kongState) discover() error {
if consumers, err := s.client.GetConsumers(); err != nil {
return err
} else {
s.consumers = consumers
}
if services, err := s.client.GetServices(); err != nil {
return err
} else {
s.services = services
}
if routes, err := s.client.GetRoutes(); err != nil {
return err
} else {
s.routes = routes
}
if plugins, err := s.client.GetPlugins(); err != nil {
return err
} else {
s.plugins = plugins
}
return nil
}
func (s *kongState) discoveryReport() string {
lines := make([]string, 0)
if len(s.consumers) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d consumers", len(s.consumers)))
} else {
lines = append(lines, "No consumers discovered.")
}
if len(s.services) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d services", len(s.services)))
} else {
lines = append(lines, "No services discovered.")
}
if len(s.routes) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d routes", len(s.routes)))
} else {
lines = append(lines, "No routes discovered.")
}
if len(s.plugins) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d plugins", len(s.plugins)))
} else {
lines = append(lines, "No plugins discovered.")
}
for index, line := range lines {
lines[index] = fmt.Sprintf("- %s", line)
}
return strings.Join(lines, "\n")
}
func createHclSafeName(name string) string {
invalid := []string{"-", "/", " ", "."}
hclName := name
for _, c := range invalid {
hclName = strings.Replace(hclName, c, "_", -1)
}
return hclName
}
type resourceImport struct {
kongResourceType string // plugin, route, service, consumer
terraformResourceType string // kong_plugin, kong_route, kong_plugin_openid_connect
resourceName string // what's to the right of the terraformResourceType in the HCL
resourceId string
dryRun bool
configPath string // HCL config
}
func (s *kongState) hasResourceBeenImported(resource *resourceImport) bool {
resourceTypePluralized := resource.kongResourceType + "s"
if importedIds, ok := s.imports[resourceTypePluralized]; ok {
for _, id := range importedIds {
if id == resource.resourceId {
return true
}
}
}
return false
}
func (s *kongState) importResource(resourceImport *resourceImport) error {
if s.hasResourceBeenImported(resourceImport) {
return nil
}
terraformResourceName := fmt.Sprintf("%s.%s", resourceImport.terraformResourceType, createHclSafeName(resourceImport.resourceName))
if !resourceImport.dryRun {
// ex: terraform import -config=examples/import kong_service.service_to_import e86f981e-a580-4bd6-aef3-1324adfcc12c
cmd := exec.Command(
"terraform",
"import",
fmt.Sprintf("-config=%s", resourceImport.configPath),
terraformResourceName,
resourceImport.resourceId,
)
var stderr bytes.Buffer
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
fmt.Println(stderr.String())
return err
}
}
fmt.Println("Imported:", terraformResourceName)
return nil
}
func (s *kongState) importResources(cmd *importCommand) error {
if len(s.consumers) > 0 {
fmt.Println("\nImporting consumers:")
for _, consumer := range s.consumers {
resource := &resourceImport{
kongResourceType: "consumer",
terraformResourceType: "kong_consumer",
resourceName: getResourceNameForConsumer(&consumer),
resourceId: consumer.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["consumers"] = append(s.imports["consumers"], consumer.Id)
}
}
if len(s.services) > 0 {
fmt.Println("\nImporting services:")
for _, service := range s.services {
resource := &resourceImport{
kongResourceType: "service",
terraformResourceType: "kong_service",
resourceName: service.Name,
resourceId: service.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["services"] = append(s.imports["services"], service.Id)
}
}
}
if len(s.routes) > 0 {
fmt.Println("\nImporting routes:")
for _, route := range s.routes {
resource := &resourceImport{
kongResourceType: "route",
terraformResourceType: "kong_route",
resourceName: getResourceNameForRoute(s, &route),
resourceId: route.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["routes"] = append(s.imports["routes"], route.Id)
}
}
}
if len(s.plugins) > 0 {
fmt.Println("\nImporting plugins:")
for _, plugin := range s.plugins {
terraformResourceType := "kong_plugin"
if pluginResourceImplementation, ok := pluginsToResourceImplementations[plugin.Name]; ok {
terraformResourceType = pluginResourceImplementation
}
resource := &resourceImport{
kongResourceType: "plugin",
terraformResourceType: terraformResourceType,
resourceName: getResourceNameForPlugin(s, &plugin),
resourceId: plugin.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["plugins"] = append(s.imports["plugins"], plugin.Id)
}
}
}
}
return nil
}
func (s *kongState) finish(fileName string) error {
if data, err := json.Marshal(s.imports); err != nil {
return err
} else {
return ioutil.WriteFile(fileName, data, 0644)
}
}
func getResourceNameForConsumer(consumer *kong.KongConsumer) string {
if len(consumer.Username) > 0 {
return consumer.Username
} else {
return consumer.CustomId
}
}
func getResourceNameForRoute(s *kongState, route *kong.KongRoute) string {
var service kong.KongService
for _, s := range s.services {
if s.Id == route.Service.Id {
service = s
break
}
}
name := service.Name
// TODO: the path/host slices should probably be sorted...
if len(route.Paths) > 0 {
path := strings.Split(route.Paths[0], "/")[1:] // need to remove the trailing space from splitting /path
for index, p := range path {
// if the path was prefixed with the service name, we don't want to repeat it
// e.g., service name: products, route path: /products
// the result should be products, not products_products
if index == 0 && p == name {
continue
}
name = name + "_" + p
}
} else {
name = name + route.Hosts[0]
}
return name
}
func getResourceNameForPlugin(s *kongState, plugin *kong.KongPlugin) string |
func pluginHasSpecificResourceImplementation(plugin *kong.KongPlugin) bool {
_, ok := pluginsToResourceImplementations[plugin.Name]
return ok
}
| {
namePrefix := ""
if plugin.ServiceId != "" {
for _, service := range s.services {
if service.Id == plugin.ServiceId {
namePrefix = service.Name
break
}
}
} else if plugin.RouteId != "" {
for _, route := range s.routes {
if route.Id == plugin.RouteId {
namePrefix = getResourceNameForRoute(s, &route)
break
}
}
} else if plugin.ConsumerId != "" {
for _, consumer := range s.consumers {
if consumer.Id == plugin.ConsumerId {
namePrefix = getResourceNameForConsumer(&consumer)
break
}
}
} else {
namePrefix = "global"
}
// for plugins with specific resource implementations (like openid-connect) we don't want to add the plugin name at the end
// it's redundant. compare:
// kong_plugin_openid_connect.foo_openid_connect
// vs
// kong_plugin_openid_connect.foo
if pluginHasSpecificResourceImplementation(plugin) {
return namePrefix
}
return namePrefix + "_" + plugin.Name
} | identifier_body |
import.go | package main
import (
"bytes"
"context"
"encoding/json"
"flag"
"fmt"
"github.com/alexashley/terraform-provider-kong/kong/kong"
"github.com/google/subcommands"
"io/ioutil"
"os"
"os/exec"
"strings"
)
type importCommand struct {
adminApiUrl string
rbacToken string
isDryRun bool
importFileName string
tfConfigPath string
}
var pluginsToResourceImplementations = map[string]string{
"openid-connect": "kong_plugin_openid_connect",
"request-transformer-advanced": "kong_plugin_request_transformer_advanced",
}
func (*importCommand) Name() string {
return "import"
}
func (*importCommand) Synopsis() string {
return "Import consumers, services, and routes from Kong."
}
func (*importCommand) Usage() string {
return `import -admin-api-url=https://kong-admin.foo.com`
}
func (cmd *importCommand) SetFlags(flags *flag.FlagSet) {
flags.StringVar(
&cmd.adminApiUrl,
"admin-api-url",
"http://localhost:8001",
"Kong's admin api url. Usually listening on port 8001.",
)
flags.StringVar(
&cmd.rbacToken,
"rbac-token",
"",
"Kong EE RBAC token. Only necessary if your Kong Enterprise installation is secured with RBAC.",
)
flags.BoolVar(
&cmd.isDryRun,
"dry-run",
false,
"List the resources that will be imported, but do not actually import them.",
)
flags.StringVar(
&cmd.importFileName,
"state",
"import-state.json",
"Holds the current import state and any exclusions",
)
flags.StringVar(
&cmd.tfConfigPath,
"tf-config",
"",
"Path to Terraform config directory",
)
}
func (cmd *importCommand) Execute(_ context.Context, flags *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
fmt.Println("Importing resources from: " + cmd.adminApiUrl)
client, err := kong.NewKongClient(kong.KongConfig{
AdminApiUrl: cmd.adminApiUrl,
RbacToken: cmd.rbacToken,
})
if err != nil {
fmt.Printf("error initializing Kong client: %v\n", err)
return subcommands.ExitFailure
}
state := &kongState{
client: client,
}
if err := state.loadState(cmd.importFileName); err != nil {
fmt.Printf("error loading import state file %v\n", err)
return subcommands.ExitFailure
}
if err := state.discover(); err != nil {
fmt.Printf("error while discovering resources %v\n", err)
return subcommands.ExitFailure
}
fmt.Println("\nDiscovery:")
fmt.Println(state.discoveryReport())
if !cmd.isDryRun {
if err := state.importResources(cmd); err != nil {
fmt.Printf("error occurred while importing resources: %v\n", err)
err := state.finish(cmd.importFileName)
if err != nil {
fmt.Println("Additional error saving progress ", err)
}
return subcommands.ExitFailure
} else {
if err := state.finish(cmd.importFileName); err != nil {
fmt.Printf("Error occurred saving import file %v\n", err)
}
}
}
return subcommands.ExitSuccess
}
type kongState struct {
services []kong.KongService
routes []kong.KongRoute
plugins []kong.KongPlugin
consumers []kong.KongConsumer
imports map[string][]string // { services: [<uuid>], routes: [<uuid>,] }
client *kong.KongClient
}
func (s *kongState) loadState(fileName string) error {
if stateFile, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0755); err != nil {
return err
} else {
defer stateFile.Close()
s.imports = make(map[string][]string)
raw, err := ioutil.ReadAll(stateFile)
if err != nil {
return err
}
if len(raw) == 0 {
s.imports["services"] = make([]string, 0)
s.imports["routes"] = make([]string, 0)
s.imports["plugins"] = make([]string, 0)
s.imports["consumers"] = make([]string, 0)
} else if err := json.Unmarshal(raw, &s.imports); err != nil {
return err
}
}
return nil
}
func (s *kongState) discover() error {
if consumers, err := s.client.GetConsumers(); err != nil {
return err
} else {
s.consumers = consumers
}
if services, err := s.client.GetServices(); err != nil {
return err
} else {
s.services = services
}
if routes, err := s.client.GetRoutes(); err != nil {
return err
} else {
s.routes = routes
}
if plugins, err := s.client.GetPlugins(); err != nil {
return err
} else {
s.plugins = plugins
}
return nil
}
func (s *kongState) discoveryReport() string {
lines := make([]string, 0)
if len(s.consumers) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d consumers", len(s.consumers)))
} else {
lines = append(lines, "No consumers discovered.")
}
if len(s.services) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d services", len(s.services)))
} else {
lines = append(lines, "No services discovered.")
}
if len(s.routes) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d routes", len(s.routes)))
} else {
lines = append(lines, "No routes discovered.")
}
if len(s.plugins) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d plugins", len(s.plugins)))
} else {
lines = append(lines, "No plugins discovered.")
}
for index, line := range lines {
lines[index] = fmt.Sprintf("- %s", line)
}
return strings.Join(lines, "\n")
}
func createHclSafeName(name string) string {
invalid := []string{"-", "/", " ", "."}
hclName := name
for _, c := range invalid {
hclName = strings.Replace(hclName, c, "_", -1)
}
return hclName
}
type resourceImport struct {
kongResourceType string // plugin, route, service, consumer
terraformResourceType string // kong_plugin, kong_route, kong_plugin_openid_connect
resourceName string // what's to the right of the terraformResourceType in the HCL
resourceId string
dryRun bool
configPath string // HCL config
}
func (s *kongState) hasResourceBeenImported(resource *resourceImport) bool {
resourceTypePluralized := resource.kongResourceType + "s"
if importedIds, ok := s.imports[resourceTypePluralized]; ok {
for _, id := range importedIds {
if id == resource.resourceId {
return true
}
}
}
return false
}
func (s *kongState) importResource(resourceImport *resourceImport) error {
if s.hasResourceBeenImported(resourceImport) {
return nil
}
terraformResourceName := fmt.Sprintf("%s.%s", resourceImport.terraformResourceType, createHclSafeName(resourceImport.resourceName))
if !resourceImport.dryRun {
// ex: terraform import -config=examples/import kong_service.service_to_import e86f981e-a580-4bd6-aef3-1324adfcc12c
cmd := exec.Command(
"terraform",
"import",
fmt.Sprintf("-config=%s", resourceImport.configPath),
terraformResourceName,
resourceImport.resourceId,
)
var stderr bytes.Buffer
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
fmt.Println(stderr.String())
return err
}
}
fmt.Println("Imported:", terraformResourceName)
return nil
}
func (s *kongState) importResources(cmd *importCommand) error {
if len(s.consumers) > 0 |
return nil
}
func (s *kongState) finish(fileName string) error {
if data, err := json.Marshal(s.imports); err != nil {
return err
} else {
return ioutil.WriteFile(fileName, data, 0644)
}
}
func getResourceNameForConsumer(consumer *kong.KongConsumer) string {
if len(consumer.Username) > 0 {
return consumer.Username
} else {
return consumer.CustomId
}
}
func getResourceNameForRoute(s *kongState, route *kong.KongRoute) string {
var service kong.KongService
for _, s := range s.services {
if s.Id == route.Service.Id {
service = s
break
}
}
name := service.Name
// TODO: the path/host slices should probably be sorted...
if len(route.Paths) > 0 {
path := strings.Split(route.Paths[0], "/")[1:] // need to remove the trailing space from splitting /path
for index, p := range path {
// if the path was prefixed with the service name, we don't want to repeat it
// e.g., service name: products, route path: /products
// the result should be products, not products_products
if index == 0 && p == name {
continue
}
name = name + "_" + p
}
} else {
name = name + route.Hosts[0]
}
return name
}
func getResourceNameForPlugin(s *kongState, plugin *kong.KongPlugin) string {
namePrefix := ""
if plugin.ServiceId != "" {
for _, service := range s.services {
if service.Id == plugin.ServiceId {
namePrefix = service.Name
break
}
}
} else if plugin.RouteId != "" {
for _, route := range s.routes {
if route.Id == plugin.RouteId {
namePrefix = getResourceNameForRoute(s, &route)
break
}
}
} else if plugin.ConsumerId != "" {
for _, consumer := range s.consumers {
if consumer.Id == plugin.ConsumerId {
namePrefix = getResourceNameForConsumer(&consumer)
break
}
}
} else {
namePrefix = "global"
}
// for plugins with specific resource implementations (like openid-connect) we don't want to add the plugin name at the end
// it's redundant. compare:
// kong_plugin_openid_connect.foo_openid_connect
// vs
// kong_plugin_openid_connect.foo
if pluginHasSpecificResourceImplementation(plugin) {
return namePrefix
}
return namePrefix + "_" + plugin.Name
}
func pluginHasSpecificResourceImplementation(plugin *kong.KongPlugin) bool {
_, ok := pluginsToResourceImplementations[plugin.Name]
return ok
}
| {
fmt.Println("\nImporting consumers:")
for _, consumer := range s.consumers {
resource := &resourceImport{
kongResourceType: "consumer",
terraformResourceType: "kong_consumer",
resourceName: getResourceNameForConsumer(&consumer),
resourceId: consumer.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["consumers"] = append(s.imports["consumers"], consumer.Id)
}
}
if len(s.services) > 0 {
fmt.Println("\nImporting services:")
for _, service := range s.services {
resource := &resourceImport{
kongResourceType: "service",
terraformResourceType: "kong_service",
resourceName: service.Name,
resourceId: service.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["services"] = append(s.imports["services"], service.Id)
}
}
}
if len(s.routes) > 0 {
fmt.Println("\nImporting routes:")
for _, route := range s.routes {
resource := &resourceImport{
kongResourceType: "route",
terraformResourceType: "kong_route",
resourceName: getResourceNameForRoute(s, &route),
resourceId: route.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["routes"] = append(s.imports["routes"], route.Id)
}
}
}
if len(s.plugins) > 0 {
fmt.Println("\nImporting plugins:")
for _, plugin := range s.plugins {
terraformResourceType := "kong_plugin"
if pluginResourceImplementation, ok := pluginsToResourceImplementations[plugin.Name]; ok {
terraformResourceType = pluginResourceImplementation
}
resource := &resourceImport{
kongResourceType: "plugin",
terraformResourceType: terraformResourceType,
resourceName: getResourceNameForPlugin(s, &plugin),
resourceId: plugin.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["plugins"] = append(s.imports["plugins"], plugin.Id)
}
}
}
} | conditional_block |
import.go | package main
import (
"bytes"
"context"
"encoding/json"
"flag"
"fmt"
"github.com/alexashley/terraform-provider-kong/kong/kong"
"github.com/google/subcommands"
"io/ioutil"
"os"
"os/exec"
"strings"
)
type importCommand struct {
adminApiUrl string
rbacToken string
isDryRun bool
importFileName string
tfConfigPath string
}
var pluginsToResourceImplementations = map[string]string{
"openid-connect": "kong_plugin_openid_connect",
"request-transformer-advanced": "kong_plugin_request_transformer_advanced",
}
func (*importCommand) Name() string {
return "import"
}
func (*importCommand) Synopsis() string {
return "Import consumers, services, and routes from Kong."
}
func (*importCommand) Usage() string {
return `import -admin-api-url=https://kong-admin.foo.com`
}
func (cmd *importCommand) SetFlags(flags *flag.FlagSet) {
flags.StringVar(
&cmd.adminApiUrl,
"admin-api-url",
"http://localhost:8001",
"Kong's admin api url. Usually listening on port 8001.",
)
flags.StringVar(
&cmd.rbacToken,
"rbac-token",
"",
"Kong EE RBAC token. Only necessary if your Kong Enterprise installation is secured with RBAC.",
)
flags.BoolVar(
&cmd.isDryRun,
"dry-run",
false,
"List the resources that will be imported, but do not actually import them.",
)
flags.StringVar(
&cmd.importFileName,
"state",
"import-state.json",
"Holds the current import state and any exclusions",
)
flags.StringVar(
&cmd.tfConfigPath,
"tf-config",
"",
"Path to Terraform config directory",
)
}
func (cmd *importCommand) Execute(_ context.Context, flags *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
fmt.Println("Importing resources from: " + cmd.adminApiUrl)
client, err := kong.NewKongClient(kong.KongConfig{
AdminApiUrl: cmd.adminApiUrl,
RbacToken: cmd.rbacToken,
})
if err != nil {
fmt.Printf("error initializing Kong client: %v\n", err)
return subcommands.ExitFailure
}
state := &kongState{
client: client,
}
if err := state.loadState(cmd.importFileName); err != nil {
fmt.Printf("error loading import state file %v\n", err)
return subcommands.ExitFailure
}
if err := state.discover(); err != nil {
fmt.Printf("error while discovering resources %v\n", err)
return subcommands.ExitFailure
}
fmt.Println("\nDiscovery:")
fmt.Println(state.discoveryReport())
if !cmd.isDryRun {
if err := state.importResources(cmd); err != nil {
fmt.Printf("error occurred while importing resources: %v\n", err)
err := state.finish(cmd.importFileName)
if err != nil {
fmt.Println("Additional error saving progress ", err)
}
return subcommands.ExitFailure
} else {
if err := state.finish(cmd.importFileName); err != nil {
fmt.Printf("Error occurred saving import file %v\n", err)
}
}
}
return subcommands.ExitSuccess
}
type kongState struct {
services []kong.KongService
routes []kong.KongRoute
plugins []kong.KongPlugin
consumers []kong.KongConsumer
imports map[string][]string // { services: [<uuid>], routes: [<uuid>,] }
client *kong.KongClient
}
func (s *kongState) loadState(fileName string) error {
if stateFile, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0755); err != nil {
return err
} else {
defer stateFile.Close()
s.imports = make(map[string][]string)
raw, err := ioutil.ReadAll(stateFile)
if err != nil {
return err
}
if len(raw) == 0 {
s.imports["services"] = make([]string, 0)
s.imports["routes"] = make([]string, 0)
s.imports["plugins"] = make([]string, 0)
s.imports["consumers"] = make([]string, 0)
} else if err := json.Unmarshal(raw, &s.imports); err != nil {
return err
}
}
return nil
}
func (s *kongState) discover() error {
if consumers, err := s.client.GetConsumers(); err != nil {
return err
} else {
s.consumers = consumers
}
if services, err := s.client.GetServices(); err != nil {
return err
} else {
s.services = services
}
if routes, err := s.client.GetRoutes(); err != nil {
return err
} else {
s.routes = routes
}
if plugins, err := s.client.GetPlugins(); err != nil {
return err
} else {
s.plugins = plugins
}
return nil
}
func (s *kongState) discoveryReport() string {
lines := make([]string, 0)
if len(s.consumers) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d consumers", len(s.consumers)))
} else {
lines = append(lines, "No consumers discovered.")
}
if len(s.services) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d services", len(s.services)))
} else {
lines = append(lines, "No services discovered.")
}
if len(s.routes) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d routes", len(s.routes)))
} else {
lines = append(lines, "No routes discovered.")
}
if len(s.plugins) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d plugins", len(s.plugins)))
} else {
lines = append(lines, "No plugins discovered.")
}
for index, line := range lines {
lines[index] = fmt.Sprintf("- %s", line)
}
return strings.Join(lines, "\n")
}
func createHclSafeName(name string) string {
invalid := []string{"-", "/", " ", "."}
hclName := name
for _, c := range invalid {
hclName = strings.Replace(hclName, c, "_", -1)
}
return hclName
}
type resourceImport struct {
kongResourceType string // plugin, route, service, consumer
terraformResourceType string // kong_plugin, kong_route, kong_plugin_openid_connect
resourceName string // what's to the right of the terraformResourceType in the HCL
resourceId string
dryRun bool
configPath string // HCL config
}
func (s *kongState) hasResourceBeenImported(resource *resourceImport) bool {
resourceTypePluralized := resource.kongResourceType + "s"
if importedIds, ok := s.imports[resourceTypePluralized]; ok {
for _, id := range importedIds {
if id == resource.resourceId {
return true
}
}
}
return false
}
func (s *kongState) | (resourceImport *resourceImport) error {
if s.hasResourceBeenImported(resourceImport) {
return nil
}
terraformResourceName := fmt.Sprintf("%s.%s", resourceImport.terraformResourceType, createHclSafeName(resourceImport.resourceName))
if !resourceImport.dryRun {
// ex: terraform import -config=examples/import kong_service.service_to_import e86f981e-a580-4bd6-aef3-1324adfcc12c
cmd := exec.Command(
"terraform",
"import",
fmt.Sprintf("-config=%s", resourceImport.configPath),
terraformResourceName,
resourceImport.resourceId,
)
var stderr bytes.Buffer
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
fmt.Println(stderr.String())
return err
}
}
fmt.Println("Imported:", terraformResourceName)
return nil
}
func (s *kongState) importResources(cmd *importCommand) error {
if len(s.consumers) > 0 {
fmt.Println("\nImporting consumers:")
for _, consumer := range s.consumers {
resource := &resourceImport{
kongResourceType: "consumer",
terraformResourceType: "kong_consumer",
resourceName: getResourceNameForConsumer(&consumer),
resourceId: consumer.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["consumers"] = append(s.imports["consumers"], consumer.Id)
}
}
if len(s.services) > 0 {
fmt.Println("\nImporting services:")
for _, service := range s.services {
resource := &resourceImport{
kongResourceType: "service",
terraformResourceType: "kong_service",
resourceName: service.Name,
resourceId: service.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["services"] = append(s.imports["services"], service.Id)
}
}
}
if len(s.routes) > 0 {
fmt.Println("\nImporting routes:")
for _, route := range s.routes {
resource := &resourceImport{
kongResourceType: "route",
terraformResourceType: "kong_route",
resourceName: getResourceNameForRoute(s, &route),
resourceId: route.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["routes"] = append(s.imports["routes"], route.Id)
}
}
}
if len(s.plugins) > 0 {
fmt.Println("\nImporting plugins:")
for _, plugin := range s.plugins {
terraformResourceType := "kong_plugin"
if pluginResourceImplementation, ok := pluginsToResourceImplementations[plugin.Name]; ok {
terraformResourceType = pluginResourceImplementation
}
resource := &resourceImport{
kongResourceType: "plugin",
terraformResourceType: terraformResourceType,
resourceName: getResourceNameForPlugin(s, &plugin),
resourceId: plugin.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["plugins"] = append(s.imports["plugins"], plugin.Id)
}
}
}
}
return nil
}
// finish persists the import progress map as JSON so a later run can
// resume without re-importing resources.
func (s *kongState) finish(fileName string) error {
	data, err := json.Marshal(s.imports)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(fileName, data, 0644)
}
// getResourceNameForConsumer picks the Terraform resource name for a
// consumer: the username when present, otherwise the custom id.
func getResourceNameForConsumer(consumer *kong.KongConsumer) string {
	if consumer.Username != "" {
		return consumer.Username
	}
	return consumer.CustomId
}
// getResourceNameForRoute derives a Terraform resource name for a route
// from its parent service's name plus either the route's first path
// segments or its first host.
//
// Fix: the original indexed route.Hosts[0] unconditionally in the
// no-paths branch and panicked on routes with neither paths nor hosts;
// such routes now just keep the service name.
func getResourceNameForRoute(s *kongState, route *kong.KongRoute) string {
	// Resolve the owning service; zero value (empty name) if not found.
	var service kong.KongService
	for _, candidate := range s.services {
		if candidate.Id == route.Service.Id {
			service = candidate
			break
		}
	}
	name := service.Name
	// TODO: the path/host slices should probably be sorted...
	if len(route.Paths) > 0 {
		// [1:] drops the empty segment produced by splitting a leading "/".
		segments := strings.Split(route.Paths[0], "/")[1:]
		for index, segment := range segments {
			// If the path was prefixed with the service name, we don't want
			// to repeat it. E.g. service "products", route path "/products"
			// should yield "products", not "products_products".
			if index == 0 && segment == name {
				continue
			}
			name = name + "_" + segment
		}
	} else if len(route.Hosts) > 0 { // guard: host-based routes only
		name = name + route.Hosts[0]
	}
	return name
}
// getResourceNameForPlugin builds the Terraform resource name for a plugin
// by prefixing the plugin name with whatever the plugin is attached to:
// its service's name, its route's derived name, its consumer's name, or
// "global" when unattached. Attachment precedence is service > route >
// consumer; only the first non-empty id field is consulted.
func getResourceNameForPlugin(s *kongState, plugin *kong.KongPlugin) string {
	namePrefix := ""
	if plugin.ServiceId != "" {
		// First matching service wins; prefix stays "" if the id is unknown.
		for _, service := range s.services {
			if service.Id == plugin.ServiceId {
				namePrefix = service.Name
				break
			}
		}
	} else if plugin.RouteId != "" {
		for _, route := range s.routes {
			if route.Id == plugin.RouteId {
				namePrefix = getResourceNameForRoute(s, &route)
				break
			}
		}
	} else if plugin.ConsumerId != "" {
		for _, consumer := range s.consumers {
			if consumer.Id == plugin.ConsumerId {
				namePrefix = getResourceNameForConsumer(&consumer)
				break
			}
		}
	} else {
		namePrefix = "global"
	}
	// for plugins with specific resource implementations (like openid-connect) we don't want to add the plugin name at the end
	// it's redundant. compare:
	// kong_plugin_openid_connect.foo_openid_connect
	// vs
	// kong_plugin_openid_connect.foo
	if pluginHasSpecificResourceImplementation(plugin) {
		return namePrefix
	}
	return namePrefix + "_" + plugin.Name
}
// pluginHasSpecificResourceImplementation reports whether this plugin maps
// to a dedicated Terraform resource type rather than generic kong_plugin.
func pluginHasSpecificResourceImplementation(plugin *kong.KongPlugin) bool {
	_, hasDedicatedResource := pluginsToResourceImplementations[plugin.Name]
	return hasDedicatedResource
}
| importResource | identifier_name |
import.go | package main
import (
"bytes"
"context"
"encoding/json"
"flag"
"fmt"
"github.com/alexashley/terraform-provider-kong/kong/kong"
"github.com/google/subcommands"
"io/ioutil"
"os"
"os/exec"
"strings"
)
// importCommand implements the "import" subcommand: it discovers
// consumers, services, routes, and plugins from a Kong admin API and
// runs `terraform import` for each one not already recorded as imported.
type importCommand struct {
	adminApiUrl    string // base URL of the Kong admin API (default port 8001)
	rbacToken      string // optional Kong Enterprise RBAC token
	isDryRun       bool   // when true, only report what would be imported
	importFileName string // JSON file tracking already-imported resource ids
	tfConfigPath   string // directory passed to `terraform import -config=...`
}
// pluginsToResourceImplementations maps Kong plugin names to their
// dedicated Terraform resource types; plugins not listed here fall back
// to the generic "kong_plugin" resource.
var pluginsToResourceImplementations = map[string]string{
	"openid-connect":               "kong_plugin_openid_connect",
	"request-transformer-advanced": "kong_plugin_request_transformer_advanced",
}
// Name returns the subcommand name used on the command line.
func (*importCommand) Name() string {
	return "import"
}

// Synopsis returns the one-line description shown in help output.
func (*importCommand) Synopsis() string {
	return "Import consumers, services, and routes from Kong."
}

// Usage returns an example invocation for help output.
func (*importCommand) Usage() string {
	return `import -admin-api-url=https://kong-admin.foo.com`
}
// SetFlags registers the subcommand's CLI flags on the given FlagSet.
func (cmd *importCommand) SetFlags(flags *flag.FlagSet) {
	flags.StringVar(
		&cmd.adminApiUrl,
		"admin-api-url",
		"http://localhost:8001",
		"Kong's admin api url. Usually listening on port 8001.",
	)
	flags.StringVar(
		&cmd.rbacToken,
		"rbac-token",
		"",
		"Kong EE RBAC token. Only necessary if your Kong Enterprise installation is secured with RBAC.",
	)
	flags.BoolVar(
		&cmd.isDryRun,
		"dry-run",
		false,
		"List the resources that will be imported, but do not actually import them.",
	)
	flags.StringVar(
		&cmd.importFileName,
		"state",
		"import-state.json",
		"Holds the current import state and any exclusions",
	)
	flags.StringVar(
		&cmd.tfConfigPath,
		"tf-config",
		"",
		"Path to Terraform config directory",
	)
}
// Execute runs the import pipeline: connect to the admin API, load prior
// progress, discover resources, print a report, and (unless dry-run)
// import everything, persisting progress even on partial failure.
func (cmd *importCommand) Execute(_ context.Context, flags *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
	fmt.Println("Importing resources from: " + cmd.adminApiUrl)
	client, err := kong.NewKongClient(kong.KongConfig{
		AdminApiUrl: cmd.adminApiUrl,
		RbacToken:   cmd.rbacToken,
	})
	if err != nil {
		fmt.Printf("error initializing Kong client: %v\n", err)
		return subcommands.ExitFailure
	}
	state := &kongState{
		client: client,
	}
	// Resume support: previously imported ids are skipped later.
	if err := state.loadState(cmd.importFileName); err != nil {
		fmt.Printf("error loading import state file %v\n", err)
		return subcommands.ExitFailure
	}
	if err := state.discover(); err != nil {
		fmt.Printf("error while discovering resources %v\n", err)
		return subcommands.ExitFailure
	}
	fmt.Println("\nDiscovery:")
	fmt.Println(state.discoveryReport())
	if !cmd.isDryRun {
		if err := state.importResources(cmd); err != nil {
			fmt.Printf("error occurred while importing resources: %v\n", err)
			// Save whatever was imported before the failure so the next
			// run can pick up where this one stopped.
			err := state.finish(cmd.importFileName)
			if err != nil {
				fmt.Println("Additional error saving progress ", err)
			}
			return subcommands.ExitFailure
		} else {
			if err := state.finish(cmd.importFileName); err != nil {
				fmt.Printf("Error occurred saving import file %v\n", err)
			}
		}
	}
	return subcommands.ExitSuccess
}
// kongState holds everything discovered from the admin API plus the
// progress of the import, keyed by pluralized resource kind.
type kongState struct {
	services  []kong.KongService
	routes    []kong.KongRoute
	plugins   []kong.KongPlugin
	consumers []kong.KongConsumer
	imports   map[string][]string // { services: [<uuid>], routes: [<uuid>,] }
	client    *kong.KongClient
}
// loadState reads (creating if absent) the JSON progress file into
// s.imports; an empty file seeds empty id lists for every resource kind.
func (s *kongState) loadState(fileName string) error {
	stateFile, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0755)
	if err != nil {
		return err
	}
	defer stateFile.Close()
	s.imports = make(map[string][]string)
	raw, err := ioutil.ReadAll(stateFile)
	if err != nil {
		return err
	}
	if len(raw) == 0 {
		for _, kind := range []string{"services", "routes", "plugins", "consumers"} {
			s.imports[kind] = make([]string, 0)
		}
		return nil
	}
	return json.Unmarshal(raw, &s.imports)
}
// discover pulls every consumer, service, route, and plugin from the
// Kong admin API into the in-memory state, stopping at the first error.
func (s *kongState) discover() error {
	var err error
	if s.consumers, err = s.client.GetConsumers(); err != nil {
		return err
	}
	if s.services, err = s.client.GetServices(); err != nil {
		return err
	}
	if s.routes, err = s.client.GetRoutes(); err != nil {
		return err
	}
	if s.plugins, err = s.client.GetPlugins(); err != nil {
		return err
	}
	return nil
}
// discoveryReport renders a bulleted, one-line-per-resource-kind summary
// of what discover() found.
//
// Refactor: the original repeated the same if/else ladder four times;
// the wording of both branches is preserved exactly.
func (s *kongState) discoveryReport() string {
	// line formats the bullet for one resource kind.
	line := func(kind string, count int) string {
		if count > 0 {
			return fmt.Sprintf("- Discovered %d %s", count, kind)
		}
		return fmt.Sprintf("- No %s discovered.", kind)
	}
	lines := []string{
		line("consumers", len(s.consumers)),
		line("services", len(s.services)),
		line("routes", len(s.routes)),
		line("plugins", len(s.plugins)),
	}
	return strings.Join(lines, "\n")
}
// createHclSafeName rewrites characters that are invalid in an HCL
// resource identifier (dash, slash, space, dot) to underscores.
// Uses a single strings.Replacer pass instead of four Replace calls.
func createHclSafeName(name string) string {
	replacer := strings.NewReplacer("-", "_", "/", "_", " ", "_", ".", "_")
	return replacer.Replace(name)
}
// resourceImport describes one `terraform import` invocation: which Kong
// object to import and how it is named in the Terraform configuration.
type resourceImport struct {
	kongResourceType      string // plugin, route, service, consumer
	terraformResourceType string // kong_plugin, kong_route, kong_plugin_openid_connect
	resourceName          string // what's to the right of the terraformResourceType in the HCL
	resourceId            string // Kong-side UUID of the object
	dryRun                bool   // skip the actual terraform invocation
	configPath            string // HCL config
}
// hasResourceBeenImported reports whether this resource's id is already
// recorded in the progress map (keys are pluralized kind names).
func (s *kongState) hasResourceBeenImported(resource *resourceImport) bool {
	// A missing key yields a nil slice, so the loop simply does not run.
	for _, importedId := range s.imports[resource.kongResourceType+"s"] {
		if importedId == resource.resourceId {
			return true
		}
	}
	return false
}
func (s *kongState) importResource(resourceImport *resourceImport) error {
if s.hasResourceBeenImported(resourceImport) {
return nil
}
terraformResourceName := fmt.Sprintf("%s.%s", resourceImport.terraformResourceType, createHclSafeName(resourceImport.resourceName))
if !resourceImport.dryRun {
// ex: terraform import -config=examples/import kong_service.service_to_import e86f981e-a580-4bd6-aef3-1324adfcc12c
cmd := exec.Command(
"terraform",
"import",
fmt.Sprintf("-config=%s", resourceImport.configPath),
terraformResourceName,
resourceImport.resourceId,
)
var stderr bytes.Buffer
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
fmt.Println(stderr.String())
return err
}
}
fmt.Println("Imported:", terraformResourceName) | if len(s.consumers) > 0 {
fmt.Println("\nImporting consumers:")
for _, consumer := range s.consumers {
resource := &resourceImport{
kongResourceType: "consumer",
terraformResourceType: "kong_consumer",
resourceName: getResourceNameForConsumer(&consumer),
resourceId: consumer.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["consumers"] = append(s.imports["consumers"], consumer.Id)
}
}
if len(s.services) > 0 {
fmt.Println("\nImporting services:")
for _, service := range s.services {
resource := &resourceImport{
kongResourceType: "service",
terraformResourceType: "kong_service",
resourceName: service.Name,
resourceId: service.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["services"] = append(s.imports["services"], service.Id)
}
}
}
if len(s.routes) > 0 {
fmt.Println("\nImporting routes:")
for _, route := range s.routes {
resource := &resourceImport{
kongResourceType: "route",
terraformResourceType: "kong_route",
resourceName: getResourceNameForRoute(s, &route),
resourceId: route.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["routes"] = append(s.imports["routes"], route.Id)
}
}
}
if len(s.plugins) > 0 {
fmt.Println("\nImporting plugins:")
for _, plugin := range s.plugins {
terraformResourceType := "kong_plugin"
if pluginResourceImplementation, ok := pluginsToResourceImplementations[plugin.Name]; ok {
terraformResourceType = pluginResourceImplementation
}
resource := &resourceImport{
kongResourceType: "plugin",
terraformResourceType: terraformResourceType,
resourceName: getResourceNameForPlugin(s, &plugin),
resourceId: plugin.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["plugins"] = append(s.imports["plugins"], plugin.Id)
}
}
}
}
return nil
}
// finish persists the import progress map as JSON so a later run can
// resume without re-importing resources.
func (s *kongState) finish(fileName string) error {
	data, err := json.Marshal(s.imports)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(fileName, data, 0644)
}
// getResourceNameForConsumer picks the Terraform resource name for a
// consumer: the username when present, otherwise the custom id.
func getResourceNameForConsumer(consumer *kong.KongConsumer) string {
	if consumer.Username != "" {
		return consumer.Username
	}
	return consumer.CustomId
}
// getResourceNameForRoute derives a Terraform resource name for a route
// from its parent service's name plus either the route's first path
// segments or its first host.
//
// Fix: the original indexed route.Hosts[0] unconditionally in the
// no-paths branch and panicked on routes with neither paths nor hosts;
// such routes now just keep the service name.
func getResourceNameForRoute(s *kongState, route *kong.KongRoute) string {
	// Resolve the owning service; zero value (empty name) if not found.
	var service kong.KongService
	for _, candidate := range s.services {
		if candidate.Id == route.Service.Id {
			service = candidate
			break
		}
	}
	name := service.Name
	// TODO: the path/host slices should probably be sorted...
	if len(route.Paths) > 0 {
		// [1:] drops the empty segment produced by splitting a leading "/".
		segments := strings.Split(route.Paths[0], "/")[1:]
		for index, segment := range segments {
			// If the path was prefixed with the service name, we don't want
			// to repeat it. E.g. service "products", route path "/products"
			// should yield "products", not "products_products".
			if index == 0 && segment == name {
				continue
			}
			name = name + "_" + segment
		}
	} else if len(route.Hosts) > 0 { // guard: host-based routes only
		name = name + route.Hosts[0]
	}
	return name
}
// getResourceNameForPlugin builds the Terraform resource name for a plugin
// by prefixing the plugin name with whatever the plugin is attached to:
// its service's name, its route's derived name, its consumer's name, or
// "global" when unattached. Attachment precedence is service > route >
// consumer; only the first non-empty id field is consulted.
func getResourceNameForPlugin(s *kongState, plugin *kong.KongPlugin) string {
	namePrefix := ""
	if plugin.ServiceId != "" {
		// First matching service wins; prefix stays "" if the id is unknown.
		for _, service := range s.services {
			if service.Id == plugin.ServiceId {
				namePrefix = service.Name
				break
			}
		}
	} else if plugin.RouteId != "" {
		for _, route := range s.routes {
			if route.Id == plugin.RouteId {
				namePrefix = getResourceNameForRoute(s, &route)
				break
			}
		}
	} else if plugin.ConsumerId != "" {
		for _, consumer := range s.consumers {
			if consumer.Id == plugin.ConsumerId {
				namePrefix = getResourceNameForConsumer(&consumer)
				break
			}
		}
	} else {
		namePrefix = "global"
	}
	// for plugins with specific resource implementations (like openid-connect) we don't want to add the plugin name at the end
	// it's redundant. compare:
	// kong_plugin_openid_connect.foo_openid_connect
	// vs
	// kong_plugin_openid_connect.foo
	if pluginHasSpecificResourceImplementation(plugin) {
		return namePrefix
	}
	return namePrefix + "_" + plugin.Name
}
func pluginHasSpecificResourceImplementation(plugin *kong.KongPlugin) bool {
_, ok := pluginsToResourceImplementations[plugin.Name]
return ok
} |
return nil
}
func (s *kongState) importResources(cmd *importCommand) error { | random_line_split |
mtcnn.py | import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import interpolate
from torchvision.ops.boxes import batched_nms
class MTCNN():
def __init__(self, device=None, model=None):
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.device = device
url = 'https://github.com/deepware/dFace/raw/master/models/mtcnn.pt'
if model is None:
model = torch.hub.load_state_dict_from_url(url)
else:
model = torch.load(model, map_location=device)
self.pnet = PNet().to(device)
self.rnet = RNet().to(device)
self.onet = ONet().to(device)
self.pnet.load_state_dict(model['pnet'])
self.rnet.load_state_dict(model['rnet'])
self.onet.load_state_dict(model['onet'])
def detect(self, imgs, minsize=None):
if len(imgs) == 0:
return []
if isinstance(imgs[0], np.ndarray):
h, w = imgs[0].shape[:2]
else:
w, h = imgs[0].size
if minsize is None:
minsize = max(96 * min(w, h)/1080, 40)
boxes, points = [], []
with torch.no_grad():
batches = [imgs[i:i+10] for i in range(0, len(imgs), 10)]
for batch in batches:
batch_boxes, batch_points = detect_face(
batch, minsize, self.pnet, self.rnet, self.onet,
[0.7, 0.8, 0.9], 0.709, self.device)
boxes += list(batch_boxes)
points += list(batch_points)
result = []
for box, point in zip(boxes, points):
box = np.array(box)
point = np.array(point)
if len(box) == 0:
result.append(None)
else:
result.append((box[:, :4], box[:, 4], point))
return result
def empty_cache(device):
    """Release cached CUDA memory for `device`; a no-op on CPU devices.

    Generalized: `device` may now be either a device string ('cuda:0') or a
    torch.device object — the original `'cuda' in device` only accepted str.
    """
    if 'cuda' in str(device):
        with torch.cuda.device(device):
            torch.cuda.empty_cache()
class PNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=3)
self.prelu1 = nn.PReLU(10)
self.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(10, 16, kernel_size=3)
self.prelu2 = nn.PReLU(16)
self.conv3 = nn.Conv2d(16, 32, kernel_size=3)
self.prelu3 = nn.PReLU(32)
self.conv4_1 = nn.Conv2d(32, 2, kernel_size=1)
self.softmax4_1 = nn.Softmax(dim=1)
self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.conv3(x)
x = self.prelu3(x)
a = self.conv4_1(x)
a = self.softmax4_1(a)
b = self.conv4_2(x)
return b, a
class RNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 28, kernel_size=3)
self.prelu1 = nn.PReLU(28)
self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(28, 48, kernel_size=3)
self.prelu2 = nn.PReLU(48)
self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv3 = nn.Conv2d(48, 64, kernel_size=2)
self.prelu3 = nn.PReLU(64)
self.dense4 = nn.Linear(576, 128)
self.prelu4 = nn.PReLU(128)
self.dense5_1 = nn.Linear(128, 2)
self.softmax5_1 = nn.Softmax(dim=1)
self.dense5_2 = nn.Linear(128, 4)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.prelu3(x)
x = x.permute(0, 3, 2, 1).contiguous()
x = self.dense4(x.view(x.shape[0], -1))
x = self.prelu4(x)
a = self.dense5_1(x)
a = self.softmax5_1(a)
b = self.dense5_2(x)
return b, a
class ONet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3)
self.prelu1 = nn.PReLU(32)
self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.prelu2 = nn.PReLU(64)
self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3)
self.prelu3 = nn.PReLU(64)
self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True)
self.conv4 = nn.Conv2d(64, 128, kernel_size=2)
self.prelu4 = nn.PReLU(128)
self.dense5 = nn.Linear(1152, 256)
self.prelu5 = nn.PReLU(256)
self.dense6_1 = nn.Linear(256, 2)
self.softmax6_1 = nn.Softmax(dim=1)
self.dense6_2 = nn.Linear(256, 4)
self.dense6_3 = nn.Linear(256, 10)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.prelu3(x)
x = self.pool3(x)
x = self.conv4(x)
x = self.prelu4(x)
x = x.permute(0, 3, 2, 1).contiguous()
x = self.dense5(x.view(x.shape[0], -1))
x = self.prelu5(x)
a = self.dense6_1(x)
a = self.softmax6_1(a)
b = self.dense6_2(x)
c = self.dense6_3(x)
return b, c, a
def detect_face(imgs, minsize, pnet, rnet, onet, threshold, factor, device):
if isinstance(imgs, (np.ndarray, torch.Tensor)):
|
else:
if not isinstance(imgs, (list, tuple)):
imgs = [imgs]
if any(img.size != imgs[0].size for img in imgs):
raise Exception("MTCNN batch processing only compatible with equal-dimension images.")
imgs = np.stack([np.uint8(img) for img in imgs])
imgs = torch.as_tensor(imgs, device=device)
model_dtype = next(pnet.parameters()).dtype
imgs = imgs.permute(0, 3, 1, 2).type(model_dtype)
batch_size = len(imgs)
h, w = imgs.shape[2:4]
m = 12.0 / minsize
minl = min(h, w)
minl = minl * m
# Create scale pyramid
scale_i = m
scales = []
while minl >= 12:
scales.append(scale_i)
scale_i = scale_i * factor
minl = minl * factor
# First stage
boxes = []
image_inds = []
all_inds = []
all_i = 0
for scale in scales:
im_data = imresample(imgs, (int(h * scale + 1), int(w * scale + 1)))
im_data = (im_data - 127.5) * 0.0078125
reg, probs = pnet(im_data)
empty_cache(device)
boxes_scale, image_inds_scale = generateBoundingBox(reg, probs[:, 1], scale, threshold[0])
boxes.append(boxes_scale)
image_inds.append(image_inds_scale)
all_inds.append(all_i + image_inds_scale)
all_i += batch_size
boxes = torch.cat(boxes, dim=0)
image_inds = torch.cat(image_inds, dim=0).cpu()
all_inds = torch.cat(all_inds, dim=0)
# NMS within each scale + image
pick = batched_nms(boxes[:, :4], boxes[:, 4], all_inds, 0.5)
boxes, image_inds = boxes[pick], image_inds[pick]
# NMS within each image
pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
boxes, image_inds = boxes[pick], image_inds[pick]
regw = boxes[:, 2] - boxes[:, 0]
regh = boxes[:, 3] - boxes[:, 1]
qq1 = boxes[:, 0] + boxes[:, 5] * regw
qq2 = boxes[:, 1] + boxes[:, 6] * regh
qq3 = boxes[:, 2] + boxes[:, 7] * regw
qq4 = boxes[:, 3] + boxes[:, 8] * regh
boxes = torch.stack([qq1, qq2, qq3, qq4, boxes[:, 4]]).permute(1, 0)
boxes = rerec(boxes)
y, ey, x, ex = pad(boxes, w, h)
# Second stage
if len(boxes) > 0:
im_data = []
for k in range(len(y)):
if ey[k] > (y[k] - 1) and ex[k] > (x[k] - 1):
img_k = imgs[image_inds[k], :, (y[k] - 1):ey[k], (x[k] - 1):ex[k]].unsqueeze(0)
im_data.append(imresample(img_k, (24, 24)))
im_data = torch.cat(im_data, dim=0)
im_data = (im_data - 127.5) * 0.0078125
out = []
for batch in im_data.split(2000):
out += [rnet(batch)]
z = list(zip(*out))
out = (torch.cat(z[0]), torch.cat(z[1]))
empty_cache(device)
out0 = out[0].permute(1, 0)
out1 = out[1].permute(1, 0)
score = out1[1, :]
ipass = score > threshold[1]
boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)
image_inds = image_inds[ipass]
mv = out0[:, ipass].permute(1, 0)
# NMS within each image
pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
boxes, image_inds, mv = boxes[pick], image_inds[pick], mv[pick]
boxes = bbreg(boxes, mv)
boxes = rerec(boxes)
# Third stage
points = torch.zeros(0, 5, 2, device=device)
if len(boxes) > 0:
y, ey, x, ex = pad(boxes, w, h)
im_data = []
for k in range(len(y)):
if ey[k] > (y[k] - 1) and ex[k] > (x[k] - 1):
img_k = imgs[image_inds[k], :, (y[k] - 1):ey[k], (x[k] - 1):ex[k]].unsqueeze(0)
im_data.append(imresample(img_k, (48, 48)))
im_data = torch.cat(im_data, dim=0)
im_data = (im_data - 127.5) * 0.0078125
out = []
for batch in im_data.split(500):
out += [onet(batch)]
z = list(zip(*out))
out = (torch.cat(z[0]), torch.cat(z[1]), torch.cat(z[2]))
empty_cache(device)
out0 = out[0].permute(1, 0)
out1 = out[1].permute(1, 0)
out2 = out[2].permute(1, 0)
score = out2[1, :]
points = out1
ipass = score > threshold[2]
points = points[:, ipass]
boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)
image_inds = image_inds[ipass]
mv = out0[:, ipass].permute(1, 0)
w_i = boxes[:, 2] - boxes[:, 0] + 1
h_i = boxes[:, 3] - boxes[:, 1] + 1
points_x = w_i.repeat(5, 1) * points[:5, :] + boxes[:, 0].repeat(5, 1) - 1
points_y = h_i.repeat(5, 1) * points[5:10, :] + boxes[:, 1].repeat(5, 1) - 1
points = torch.stack((points_x, points_y)).permute(2, 1, 0)
boxes = bbreg(boxes, mv)
# NMS within each image using "Min" strategy
# pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
pick = batched_nms_numpy(boxes[:, :4], boxes[:, 4], image_inds, 0.7, 'Min')
boxes, image_inds, points = boxes[pick], image_inds[pick], points[pick]
boxes = boxes.cpu().numpy()
points = points.cpu().numpy()
batch_boxes = []
batch_points = []
for b_i in range(batch_size):
b_i_inds = np.where(image_inds == b_i)
batch_boxes.append(boxes[b_i_inds].copy())
batch_points.append(points[b_i_inds].copy())
batch_boxes, batch_points = np.array(batch_boxes), np.array(batch_points)
empty_cache(device)
return batch_boxes, batch_points
def bbreg(boundingbox, reg):
    """Refine box corners in place with the network's regression output.

    Each offset in `reg` is scaled by the box width/height (inclusive,
    hence the +1) before being added to the matching corner coordinate.
    """
    if reg.shape[1] == 1:
        reg = torch.reshape(reg, (reg.shape[2], reg.shape[3]))
    width = boundingbox[:, 2] - boundingbox[:, 0] + 1
    height = boundingbox[:, 3] - boundingbox[:, 1] + 1
    # (N, 4) scale matrix: [w, h, w, h] per box, one multiply-add for all corners.
    scale = torch.stack([width, height, width, height]).permute(1, 0)
    boundingbox[:, :4] = boundingbox[:, :4] + reg * scale
    return boundingbox
def generateBoundingBox(reg, probs, scale, thresh):
    """Map PNet heatmap cells scoring >= `thresh` back to image-space boxes.

    Returns (boxes, image_inds): each box row is
    [x1, y1, x2, y2, score, dx1, dy1, dx2, dy2].
    """
    stride, cellsize = 2, 12
    reg = reg.permute(1, 0, 2, 3)           # -> (4, B, H, W)
    mask = probs >= thresh
    hits = mask.nonzero(as_tuple=False)     # rows of (batch, row, col)
    image_inds = hits[:, 0]
    score = probs[mask]
    reg = reg[:, mask].permute(1, 0)        # -> (N, 4) regression offsets
    # flip(1) converts (row, col) to (x, y) ordering.
    cells = hits[:, 1:].type(reg.dtype).flip(1)
    top_left = ((stride * cells + 1) / scale).floor()
    bottom_right = ((stride * cells + cellsize) / scale).floor()
    boundingbox = torch.cat([top_left, bottom_right, score.unsqueeze(1), reg], dim=1)
    return boundingbox, image_inds
def nms_numpy(boxes, scores, threshold, method):
    """Pure-numpy non-maximum suppression supporting the 'Min' criterion.

    Args:
        boxes: (N, 4+) array of [x1, y1, x2, y2, ...] (inclusive coords).
        scores: (N,) confidence per box.
        threshold: overlap above which the lower-scoring box is dropped.
        method: 'Min' divides the intersection by the smaller area;
            anything else uses standard IoU.
    Returns:
        Integer index array of kept boxes, highest score first.

    Fixes vs. original: empty input returned np.empty((0, 3)) — a float 2-D
    array unusable as an index; pick used int16, silently overflowing for
    more than 32767 boxes. Both are now int64 index arrays.
    """
    if boxes.size == 0:
        return np.zeros(0, dtype=np.int64)
    x1 = boxes[:, 0].copy()
    y1 = boxes[:, 1].copy()
    x2 = boxes[:, 2].copy()
    y2 = boxes[:, 3].copy()
    s = scores
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    I = np.argsort(s)
    # int64 indices: int16 would overflow past 32767 boxes.
    pick = np.zeros_like(s, dtype=np.int64)
    counter = 0
    while I.size > 0:
        i = I[-1]  # current highest-scoring survivor
        pick[counter] = i
        counter += 1
        idx = I[0:-1]
        xx1 = np.maximum(x1[i], x1[idx]).copy()
        yy1 = np.maximum(y1[i], y1[idx]).copy()
        xx2 = np.minimum(x2[i], x2[idx]).copy()
        yy2 = np.minimum(y2[i], y2[idx]).copy()
        w = np.maximum(0.0, xx2 - xx1 + 1).copy()
        h = np.maximum(0.0, yy2 - yy1 + 1).copy()
        inter = w * h
        if method == "Min":
            o = inter / np.minimum(area[i], area[idx])
        else:
            o = inter / (area[i] + area[idx] - inter)
        I = I[np.where(o <= threshold)]
    pick = pick[:counter].copy()
    return pick
def batched_nms_numpy(boxes, scores, idxs, threshold, method):
    """Class-aware NMS: boxes with different `idxs` never suppress each other."""
    device = boxes.device
    if boxes.numel() == 0:
        return torch.empty((0,), dtype=torch.int64, device=device)
    # Shift every class into its own disjoint coordinate range so a single
    # NMS pass cannot mix classes: the per-class offset exceeds any real
    # coordinate, making cross-class overlap impossible.
    offsets = idxs.to(boxes) * (boxes.max() + 1)
    shifted = (boxes + offsets[:, None]).cpu().numpy()
    keep = nms_numpy(shifted, scores.cpu().numpy(), threshold, method)
    return torch.as_tensor(keep, dtype=torch.long, device=device)
def pad(boxes, w, h):
    """Clamp truncated box coordinates to the image and return 1-based
    crop bounds as (y, ey, x, ex) numpy integer arrays."""
    coords = boxes.trunc().int().cpu().numpy()
    # Lower bounds clamp to 1 (coordinates are 1-based downstream),
    # upper bounds clamp to the image width/height.
    x = np.maximum(coords[:, 0], 1)
    y = np.maximum(coords[:, 1], 1)
    ex = np.minimum(coords[:, 2], w)
    ey = np.minimum(coords[:, 3], h)
    return y, ey, x, ex
def rerec(bboxA):
    """Expand each box, in place, to a square centred on the original box.

    The side is the larger of width/height; extra columns (score) are kept.
    """
    h = bboxA[:, 3] - bboxA[:, 1]
    w = bboxA[:, 2] - bboxA[:, 0]
    side = torch.max(w, h)
    # Shift the top-left corner so the square stays centred...
    bboxA[:, 0] += w * 0.5 - side * 0.5
    bboxA[:, 1] += h * 0.5 - side * 0.5
    # ...then place the bottom-right corner one side-length away.
    bboxA[:, 2] = bboxA[:, 0] + side
    bboxA[:, 3] = bboxA[:, 1] + side
    return bboxA
def imresample(img, sz):
im_data = interpolate(img, size=sz, mode="area")
return im_data | imgs = torch.as_tensor(imgs, device=device)
if len(imgs.shape) == 3:
imgs = imgs.unsqueeze(0) | conditional_block |
mtcnn.py | import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import interpolate
from torchvision.ops.boxes import batched_nms
class MTCNN():
    """MTCNN face detector: chains P-Net, R-Net and O-Net over image batches."""

    def __init__(self, device=None, model=None):
        # Prefer CUDA when available unless the caller pins a device.
        if device is None:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.device = device
        url = 'https://github.com/deepware/dFace/raw/master/models/mtcnn.pt'
        # `model` may be a local checkpoint path; otherwise the combined
        # state dict (one entry per sub-network) is downloaded.
        if model is None:
            model = torch.hub.load_state_dict_from_url(url)
        else:
            model = torch.load(model, map_location=device)
        self.pnet = PNet().to(device)
        self.rnet = RNet().to(device)
        self.onet = ONet().to(device)
        self.pnet.load_state_dict(model['pnet'])
        self.rnet.load_state_dict(model['rnet'])
        self.onet.load_state_dict(model['onet'])

    def detect(self, imgs, minsize=None):
        """Detect faces in a sequence of equal-size images.

        Returns one entry per image: None when no face was found, else a
        tuple (boxes Nx4, scores N, landmark points).
        """
        if len(imgs) == 0:
            return []
        # numpy arrays are (H, W, C); otherwise assume PIL-style .size (W, H).
        if isinstance(imgs[0], np.ndarray):
            h, w = imgs[0].shape[:2]
        else:
            w, h = imgs[0].size
        if minsize is None:
            # Minimum face size scales with the shorter side (96 px at 1080),
            # floored at 40 px.
            minsize = max(96 * min(w, h)/1080, 40)
        boxes, points = [], []
        with torch.no_grad():
            # Chunks of 10 frames bound peak memory per forward pass.
            batches = [imgs[i:i+10] for i in range(0, len(imgs), 10)]
            for batch in batches:
                batch_boxes, batch_points = detect_face(
                    batch, minsize, self.pnet, self.rnet, self.onet,
                    [0.7, 0.8, 0.9], 0.709, self.device)
                boxes += list(batch_boxes)
                points += list(batch_points)
        result = []
        for box, point in zip(boxes, points):
            box = np.array(box)
            point = np.array(point)
            if len(box) == 0:
                result.append(None)
            else:
                # Split [x1, y1, x2, y2, score] columns into boxes + scores.
                result.append((box[:, :4], box[:, 4], point))
        return result
def empty_cache(device):
    """Release cached CUDA memory for `device`; a no-op on CPU devices.

    Generalized: `device` may now be either a device string ('cuda:0') or a
    torch.device object — the original `'cuda' in device` only accepted str.
    """
    if 'cuda' in str(device):
        with torch.cuda.device(device):
            torch.cuda.empty_cache()
class PNet(nn.Module):
    """Stage-1 proposal network: fully convolutional, run once per pyramid scale.

    NOTE: attribute names must stay exactly as-is — they are the keys of the
    pretrained state dict loaded in MTCNN.__init__.
    """
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 10, kernel_size=3)
        self.prelu1 = nn.PReLU(10)
        self.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True)
        self.conv2 = nn.Conv2d(10, 16, kernel_size=3)
        self.prelu2 = nn.PReLU(16)
        self.conv3 = nn.Conv2d(16, 32, kernel_size=3)
        self.prelu3 = nn.PReLU(32)
        self.conv4_1 = nn.Conv2d(32, 2, kernel_size=1)
        self.softmax4_1 = nn.Softmax(dim=1)
        self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1)

    def forward(self, x):
        # Returns (box regression map, 2-channel face-probability map).
        x = self.conv1(x)
        x = self.prelu1(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.prelu2(x)
        x = self.conv3(x)
        x = self.prelu3(x)
        a = self.conv4_1(x)
        a = self.softmax4_1(a)
        b = self.conv4_2(x)
        return b, a
class RNet(nn.Module):
    """Stage-2 refinement network: re-scores and regresses 24x24 crops.

    NOTE: attribute names must stay exactly as-is — they are the keys of the
    pretrained state dict loaded in MTCNN.__init__.
    """
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 28, kernel_size=3)
        self.prelu1 = nn.PReLU(28)
        self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
        self.conv2 = nn.Conv2d(28, 48, kernel_size=3)
        self.prelu2 = nn.PReLU(48)
        self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
        self.conv3 = nn.Conv2d(48, 64, kernel_size=2)
        self.prelu3 = nn.PReLU(64)
        self.dense4 = nn.Linear(576, 128)
        self.prelu4 = nn.PReLU(128)
        self.dense5_1 = nn.Linear(128, 2)
        self.softmax5_1 = nn.Softmax(dim=1)
        self.dense5_2 = nn.Linear(128, 4)

    def forward(self, x):
        # Returns (box regression, 2-class softmax probabilities).
        x = self.conv1(x)
        x = self.prelu1(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.prelu2(x)
        x = self.pool2(x)
        x = self.conv3(x)
        x = self.prelu3(x)
        # Axis reorder before flattening — presumably matches the layout the
        # pretrained dense weights were trained with; do not change.
        x = x.permute(0, 3, 2, 1).contiguous()
        x = self.dense4(x.view(x.shape[0], -1))
        x = self.prelu4(x)
        a = self.dense5_1(x)
        a = self.softmax5_1(a)
        b = self.dense5_2(x)
        return b, a
class ONet(nn.Module):
    """Stage-3 output network: final scores, box regression and 10 landmark
    coordinates from 48x48 crops.

    NOTE: attribute names must stay exactly as-is — they are the keys of the
    pretrained state dict loaded in MTCNN.__init__.
    """
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3)
        self.prelu1 = nn.PReLU(32)
        self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.prelu2 = nn.PReLU(64)
        self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=3)
        self.prelu3 = nn.PReLU(64)
        self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True)
        self.conv4 = nn.Conv2d(64, 128, kernel_size=2)
        self.prelu4 = nn.PReLU(128)
        self.dense5 = nn.Linear(1152, 256)
        self.prelu5 = nn.PReLU(256)
        self.dense6_1 = nn.Linear(256, 2)
        self.softmax6_1 = nn.Softmax(dim=1)
        self.dense6_2 = nn.Linear(256, 4)
        self.dense6_3 = nn.Linear(256, 10)

    def forward(self, x):
        # Returns (box regression, 10-dim landmark output, probabilities).
        x = self.conv1(x)
        x = self.prelu1(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.prelu2(x)
        x = self.pool2(x)
        x = self.conv3(x)
        x = self.prelu3(x)
        x = self.pool3(x)
        x = self.conv4(x)
        x = self.prelu4(x)
        # Axis reorder before flattening — presumably matches the layout the
        # pretrained dense weights were trained with; do not change.
        x = x.permute(0, 3, 2, 1).contiguous()
        x = self.dense5(x.view(x.shape[0], -1))
        x = self.prelu5(x)
        a = self.dense6_1(x)
        a = self.softmax6_1(a)
        b = self.dense6_2(x)
        c = self.dense6_3(x)
        return b, c, a
def detect_face(imgs, minsize, pnet, rnet, onet, threshold, factor, device):
    """Run the full three-stage MTCNN cascade over a batch of images.

    imgs: tensor/ndarray (N, H, W, C) or a list of equal-size PIL-like images.
    minsize: smallest face size (in pixels) to search for.
    threshold: per-stage score cut-offs [pnet, rnet, onet].
    factor: image-pyramid downscale ratio between consecutive scales.
    Returns (batch_boxes, batch_points): per-image arrays of
    [x1, y1, x2, y2, score] boxes and landmark points.
    """
    # --- Normalize input into one (N, C, H, W) float batch on `device`. ---
    if isinstance(imgs, (np.ndarray, torch.Tensor)):
        imgs = torch.as_tensor(imgs, device=device)
        if len(imgs.shape) == 3:
            imgs = imgs.unsqueeze(0)
    else:
        if not isinstance(imgs, (list, tuple)):
            imgs = [imgs]
        if any(img.size != imgs[0].size for img in imgs):
            raise Exception("MTCNN batch processing only compatible with equal-dimension images.")
        imgs = np.stack([np.uint8(img) for img in imgs])
        imgs = torch.as_tensor(imgs, device=device)
    model_dtype = next(pnet.parameters()).dtype
    imgs = imgs.permute(0, 3, 1, 2).type(model_dtype)
    batch_size = len(imgs)
    h, w = imgs.shape[2:4]
    # 12 px is PNet's window; m rescales so faces of `minsize` map onto it.
    m = 12.0 / minsize
    minl = min(h, w)
    minl = minl * m
    # Create scale pyramid
    scale_i = m
    scales = []
    while minl >= 12:
        scales.append(scale_i)
        scale_i = scale_i * factor
        minl = minl * factor
    # First stage: run PNet at every scale, collecting candidate windows.
    boxes = []
    image_inds = []
    all_inds = []
    all_i = 0
    for scale in scales:
        im_data = imresample(imgs, (int(h * scale + 1), int(w * scale + 1)))
        im_data = (im_data - 127.5) * 0.0078125  # map uint8 range to ~[-1, 1]
        reg, probs = pnet(im_data)
        empty_cache(device)
        boxes_scale, image_inds_scale = generateBoundingBox(reg, probs[:, 1], scale, threshold[0])
        boxes.append(boxes_scale)
        image_inds.append(image_inds_scale)
        # all_inds offsets per scale so the first NMS groups by (scale, image).
        all_inds.append(all_i + image_inds_scale)
        all_i += batch_size
    boxes = torch.cat(boxes, dim=0)
    image_inds = torch.cat(image_inds, dim=0).cpu()
    all_inds = torch.cat(all_inds, dim=0)
    # NMS within each scale + image
    pick = batched_nms(boxes[:, :4], boxes[:, 4], all_inds, 0.5)
    boxes, image_inds = boxes[pick], image_inds[pick]
    # NMS within each image
    pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
    boxes, image_inds = boxes[pick], image_inds[pick]
    # Apply PNet's regression offsets (columns 5-8) to the raw windows.
    regw = boxes[:, 2] - boxes[:, 0]
    regh = boxes[:, 3] - boxes[:, 1]
    qq1 = boxes[:, 0] + boxes[:, 5] * regw
    qq2 = boxes[:, 1] + boxes[:, 6] * regh
    qq3 = boxes[:, 2] + boxes[:, 7] * regw
    qq4 = boxes[:, 3] + boxes[:, 8] * regh
    boxes = torch.stack([qq1, qq2, qq3, qq4, boxes[:, 4]]).permute(1, 0)
    boxes = rerec(boxes)
    y, ey, x, ex = pad(boxes, w, h)
    # Second stage: re-score each surviving crop with RNet at 24x24.
    if len(boxes) > 0:
        im_data = []
        for k in range(len(y)):
            if ey[k] > (y[k] - 1) and ex[k] > (x[k] - 1):
                img_k = imgs[image_inds[k], :, (y[k] - 1):ey[k], (x[k] - 1):ex[k]].unsqueeze(0)
                im_data.append(imresample(img_k, (24, 24)))
        im_data = torch.cat(im_data, dim=0)
        im_data = (im_data - 127.5) * 0.0078125
        # Split into chunks of 2000 crops to bound memory.
        out = []
        for batch in im_data.split(2000):
            out += [rnet(batch)]
        z = list(zip(*out))
        out = (torch.cat(z[0]), torch.cat(z[1]))
        empty_cache(device)
        out0 = out[0].permute(1, 0)
        out1 = out[1].permute(1, 0)
        score = out1[1, :]
        ipass = score > threshold[1]
        boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)
        image_inds = image_inds[ipass]
        mv = out0[:, ipass].permute(1, 0)
        # NMS within each image
        pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
        boxes, image_inds, mv = boxes[pick], image_inds[pick], mv[pick]
        boxes = bbreg(boxes, mv)
        boxes = rerec(boxes)
    # Third stage: final scoring, regression and landmarks via ONet at 48x48.
    points = torch.zeros(0, 5, 2, device=device)
    if len(boxes) > 0:
        y, ey, x, ex = pad(boxes, w, h)
        im_data = []
        for k in range(len(y)):
            if ey[k] > (y[k] - 1) and ex[k] > (x[k] - 1):
                img_k = imgs[image_inds[k], :, (y[k] - 1):ey[k], (x[k] - 1):ex[k]].unsqueeze(0)
                im_data.append(imresample(img_k, (48, 48)))
        im_data = torch.cat(im_data, dim=0)
        im_data = (im_data - 127.5) * 0.0078125
        out = []
        for batch in im_data.split(500):
            out += [onet(batch)]
        z = list(zip(*out))
        out = (torch.cat(z[0]), torch.cat(z[1]), torch.cat(z[2]))
        empty_cache(device)
        out0 = out[0].permute(1, 0)
        out1 = out[1].permute(1, 0)
        out2 = out[2].permute(1, 0)
        score = out2[1, :]
        points = out1
        ipass = score > threshold[2]
        points = points[:, ipass]
        boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)
        image_inds = image_inds[ipass]
        mv = out0[:, ipass].permute(1, 0)
        # Landmarks are emitted relative to the crop; rescale to image coords.
        w_i = boxes[:, 2] - boxes[:, 0] + 1
        h_i = boxes[:, 3] - boxes[:, 1] + 1
        points_x = w_i.repeat(5, 1) * points[:5, :] + boxes[:, 0].repeat(5, 1) - 1
        points_y = h_i.repeat(5, 1) * points[5:10, :] + boxes[:, 1].repeat(5, 1) - 1
        points = torch.stack((points_x, points_y)).permute(2, 1, 0)
        boxes = bbreg(boxes, mv)
        # NMS within each image using "Min" strategy
        # pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
        pick = batched_nms_numpy(boxes[:, :4], boxes[:, 4], image_inds, 0.7, 'Min')
        boxes, image_inds, points = boxes[pick], image_inds[pick], points[pick]
    boxes = boxes.cpu().numpy()
    points = points.cpu().numpy()
    # Re-group flat detections back into per-image lists.
    batch_boxes = []
    batch_points = []
    for b_i in range(batch_size):
        b_i_inds = np.where(image_inds == b_i)
        batch_boxes.append(boxes[b_i_inds].copy())
        batch_points.append(points[b_i_inds].copy())
    batch_boxes, batch_points = np.array(batch_boxes), np.array(batch_points)
    empty_cache(device)
    return batch_boxes, batch_points
def bbreg(boundingbox, reg):
if reg.shape[1] == 1:
reg = torch.reshape(reg, (reg.shape[2], reg.shape[3]))
w = boundingbox[:, 2] - boundingbox[:, 0] + 1
h = boundingbox[:, 3] - boundingbox[:, 1] + 1
b1 = boundingbox[:, 0] + reg[:, 0] * w
b2 = boundingbox[:, 1] + reg[:, 1] * h
b3 = boundingbox[:, 2] + reg[:, 2] * w
b4 = boundingbox[:, 3] + reg[:, 3] * h
boundingbox[:, :4] = torch.stack([b1, b2, b3, b4]).permute(1, 0)
return boundingbox
def generateBoundingBox(reg, probs, scale, thresh):
stride = 2
cellsize = 12
reg = reg.permute(1, 0, 2, 3)
mask = probs >= thresh
mask_inds = mask.nonzero(as_tuple=False)
image_inds = mask_inds[:, 0]
score = probs[mask]
reg = reg[:, mask].permute(1, 0)
bb = mask_inds[:, 1:].type(reg.dtype).flip(1)
q1 = ((stride * bb + 1) / scale).floor()
q2 = ((stride * bb + cellsize - 1 + 1) / scale).floor()
boundingbox = torch.cat([q1, q2, score.unsqueeze(1), reg], dim=1)
return boundingbox, image_inds
def nms_numpy(boxes, scores, threshold, method):
if boxes.size == 0:
return np.empty((0, 3))
x1 = boxes[:, 0].copy()
y1 = boxes[:, 1].copy()
x2 = boxes[:, 2].copy()
y2 = boxes[:, 3].copy()
s = scores
area = (x2 - x1 + 1) * (y2 - y1 + 1)
I = np.argsort(s)
pick = np.zeros_like(s, dtype=np.int16)
counter = 0
while I.size > 0:
i = I[-1]
pick[counter] = i
counter += 1
idx = I[0:-1]
xx1 = np.maximum(x1[i], x1[idx]).copy()
yy1 = np.maximum(y1[i], y1[idx]).copy()
xx2 = np.minimum(x2[i], x2[idx]).copy()
yy2 = np.minimum(y2[i], y2[idx]).copy()
w = np.maximum(0.0, xx2 - xx1 + 1).copy()
h = np.maximum(0.0, yy2 - yy1 + 1).copy()
inter = w * h
if method == "Min":
o = inter / np.minimum(area[i], area[idx])
else:
o = inter / (area[i] + area[idx] - inter)
I = I[np.where(o <= threshold)]
pick = pick[:counter].copy()
return pick
def batched_nms_numpy(boxes, scores, idxs, threshold, method):
device = boxes.device
if boxes.numel() == 0:
return torch.empty((0,), dtype=torch.int64, device=device)
# strategy: in order to perform NMS independently per class.
# we add an offset to all the boxes. The offset is dependent
# only on the class idx, and is large enough so that boxes
# from different classes do not overlap
max_coordinate = boxes.max()
offsets = idxs.to(boxes) * (max_coordinate + 1)
boxes_for_nms = boxes + offsets[:, None]
boxes_for_nms = boxes_for_nms.cpu().numpy()
scores = scores.cpu().numpy()
keep = nms_numpy(boxes_for_nms, scores, threshold, method)
return torch.as_tensor(keep, dtype=torch.long, device=device)
def | (boxes, w, h):
boxes = boxes.trunc().int().cpu().numpy()
x = boxes[:, 0]
y = boxes[:, 1]
ex = boxes[:, 2]
ey = boxes[:, 3]
x[x < 1] = 1
y[y < 1] = 1
ex[ex > w] = w
ey[ey > h] = h
return y, ey, x, ex
def rerec(bboxA):
h = bboxA[:, 3] - bboxA[:, 1]
w = bboxA[:, 2] - bboxA[:, 0]
l = torch.max(w, h)
bboxA[:, 0] = bboxA[:, 0] + w * 0.5 - l * 0.5
bboxA[:, 1] = bboxA[:, 1] + h * 0.5 - l * 0.5
bboxA[:, 2:4] = bboxA[:, :2] + l.repeat(2, 1).permute(1, 0)
return bboxA
def imresample(img, sz):
im_data = interpolate(img, size=sz, mode="area")
return im_data | pad | identifier_name |
mtcnn.py | import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import interpolate
from torchvision.ops.boxes import batched_nms
class MTCNN():
def __init__(self, device=None, model=None):
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.device = device
url = 'https://github.com/deepware/dFace/raw/master/models/mtcnn.pt'
if model is None:
model = torch.hub.load_state_dict_from_url(url)
else:
model = torch.load(model, map_location=device)
self.pnet = PNet().to(device)
self.rnet = RNet().to(device)
self.onet = ONet().to(device)
self.pnet.load_state_dict(model['pnet'])
self.rnet.load_state_dict(model['rnet'])
self.onet.load_state_dict(model['onet'])
def detect(self, imgs, minsize=None):
if len(imgs) == 0:
return []
if isinstance(imgs[0], np.ndarray):
h, w = imgs[0].shape[:2]
else:
w, h = imgs[0].size
if minsize is None:
minsize = max(96 * min(w, h)/1080, 40)
boxes, points = [], []
with torch.no_grad():
batches = [imgs[i:i+10] for i in range(0, len(imgs), 10)]
for batch in batches:
batch_boxes, batch_points = detect_face(
batch, minsize, self.pnet, self.rnet, self.onet,
[0.7, 0.8, 0.9], 0.709, self.device)
boxes += list(batch_boxes)
points += list(batch_points)
result = []
for box, point in zip(boxes, points):
box = np.array(box)
point = np.array(point)
if len(box) == 0:
result.append(None)
else:
result.append((box[:, :4], box[:, 4], point))
return result
def empty_cache(device):
if 'cuda' in device:
with torch.cuda.device(device):
torch.cuda.empty_cache()
class PNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=3)
self.prelu1 = nn.PReLU(10)
self.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(10, 16, kernel_size=3)
self.prelu2 = nn.PReLU(16)
self.conv3 = nn.Conv2d(16, 32, kernel_size=3)
self.prelu3 = nn.PReLU(32)
self.conv4_1 = nn.Conv2d(32, 2, kernel_size=1)
self.softmax4_1 = nn.Softmax(dim=1)
self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.conv3(x)
x = self.prelu3(x)
a = self.conv4_1(x)
a = self.softmax4_1(a)
b = self.conv4_2(x)
return b, a
class RNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 28, kernel_size=3)
self.prelu1 = nn.PReLU(28)
self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(28, 48, kernel_size=3)
self.prelu2 = nn.PReLU(48)
self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv3 = nn.Conv2d(48, 64, kernel_size=2)
self.prelu3 = nn.PReLU(64)
self.dense4 = nn.Linear(576, 128)
self.prelu4 = nn.PReLU(128)
self.dense5_1 = nn.Linear(128, 2)
self.softmax5_1 = nn.Softmax(dim=1)
self.dense5_2 = nn.Linear(128, 4)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.prelu3(x)
x = x.permute(0, 3, 2, 1).contiguous()
x = self.dense4(x.view(x.shape[0], -1))
x = self.prelu4(x)
a = self.dense5_1(x)
a = self.softmax5_1(a)
b = self.dense5_2(x)
return b, a
class ONet(nn.Module):
|
def detect_face(imgs, minsize, pnet, rnet, onet, threshold, factor, device):
if isinstance(imgs, (np.ndarray, torch.Tensor)):
imgs = torch.as_tensor(imgs, device=device)
if len(imgs.shape) == 3:
imgs = imgs.unsqueeze(0)
else:
if not isinstance(imgs, (list, tuple)):
imgs = [imgs]
if any(img.size != imgs[0].size for img in imgs):
raise Exception("MTCNN batch processing only compatible with equal-dimension images.")
imgs = np.stack([np.uint8(img) for img in imgs])
imgs = torch.as_tensor(imgs, device=device)
model_dtype = next(pnet.parameters()).dtype
imgs = imgs.permute(0, 3, 1, 2).type(model_dtype)
batch_size = len(imgs)
h, w = imgs.shape[2:4]
m = 12.0 / minsize
minl = min(h, w)
minl = minl * m
# Create scale pyramid
scale_i = m
scales = []
while minl >= 12:
scales.append(scale_i)
scale_i = scale_i * factor
minl = minl * factor
# First stage
boxes = []
image_inds = []
all_inds = []
all_i = 0
for scale in scales:
im_data = imresample(imgs, (int(h * scale + 1), int(w * scale + 1)))
im_data = (im_data - 127.5) * 0.0078125
reg, probs = pnet(im_data)
empty_cache(device)
boxes_scale, image_inds_scale = generateBoundingBox(reg, probs[:, 1], scale, threshold[0])
boxes.append(boxes_scale)
image_inds.append(image_inds_scale)
all_inds.append(all_i + image_inds_scale)
all_i += batch_size
boxes = torch.cat(boxes, dim=0)
image_inds = torch.cat(image_inds, dim=0).cpu()
all_inds = torch.cat(all_inds, dim=0)
# NMS within each scale + image
pick = batched_nms(boxes[:, :4], boxes[:, 4], all_inds, 0.5)
boxes, image_inds = boxes[pick], image_inds[pick]
# NMS within each image
pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
boxes, image_inds = boxes[pick], image_inds[pick]
regw = boxes[:, 2] - boxes[:, 0]
regh = boxes[:, 3] - boxes[:, 1]
qq1 = boxes[:, 0] + boxes[:, 5] * regw
qq2 = boxes[:, 1] + boxes[:, 6] * regh
qq3 = boxes[:, 2] + boxes[:, 7] * regw
qq4 = boxes[:, 3] + boxes[:, 8] * regh
boxes = torch.stack([qq1, qq2, qq3, qq4, boxes[:, 4]]).permute(1, 0)
boxes = rerec(boxes)
y, ey, x, ex = pad(boxes, w, h)
# Second stage
if len(boxes) > 0:
im_data = []
for k in range(len(y)):
if ey[k] > (y[k] - 1) and ex[k] > (x[k] - 1):
img_k = imgs[image_inds[k], :, (y[k] - 1):ey[k], (x[k] - 1):ex[k]].unsqueeze(0)
im_data.append(imresample(img_k, (24, 24)))
im_data = torch.cat(im_data, dim=0)
im_data = (im_data - 127.5) * 0.0078125
out = []
for batch in im_data.split(2000):
out += [rnet(batch)]
z = list(zip(*out))
out = (torch.cat(z[0]), torch.cat(z[1]))
empty_cache(device)
out0 = out[0].permute(1, 0)
out1 = out[1].permute(1, 0)
score = out1[1, :]
ipass = score > threshold[1]
boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)
image_inds = image_inds[ipass]
mv = out0[:, ipass].permute(1, 0)
# NMS within each image
pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
boxes, image_inds, mv = boxes[pick], image_inds[pick], mv[pick]
boxes = bbreg(boxes, mv)
boxes = rerec(boxes)
# Third stage
points = torch.zeros(0, 5, 2, device=device)
if len(boxes) > 0:
y, ey, x, ex = pad(boxes, w, h)
im_data = []
for k in range(len(y)):
if ey[k] > (y[k] - 1) and ex[k] > (x[k] - 1):
img_k = imgs[image_inds[k], :, (y[k] - 1):ey[k], (x[k] - 1):ex[k]].unsqueeze(0)
im_data.append(imresample(img_k, (48, 48)))
im_data = torch.cat(im_data, dim=0)
im_data = (im_data - 127.5) * 0.0078125
out = []
for batch in im_data.split(500):
out += [onet(batch)]
z = list(zip(*out))
out = (torch.cat(z[0]), torch.cat(z[1]), torch.cat(z[2]))
empty_cache(device)
out0 = out[0].permute(1, 0)
out1 = out[1].permute(1, 0)
out2 = out[2].permute(1, 0)
score = out2[1, :]
points = out1
ipass = score > threshold[2]
points = points[:, ipass]
boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)
image_inds = image_inds[ipass]
mv = out0[:, ipass].permute(1, 0)
w_i = boxes[:, 2] - boxes[:, 0] + 1
h_i = boxes[:, 3] - boxes[:, 1] + 1
points_x = w_i.repeat(5, 1) * points[:5, :] + boxes[:, 0].repeat(5, 1) - 1
points_y = h_i.repeat(5, 1) * points[5:10, :] + boxes[:, 1].repeat(5, 1) - 1
points = torch.stack((points_x, points_y)).permute(2, 1, 0)
boxes = bbreg(boxes, mv)
# NMS within each image using "Min" strategy
# pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
pick = batched_nms_numpy(boxes[:, :4], boxes[:, 4], image_inds, 0.7, 'Min')
boxes, image_inds, points = boxes[pick], image_inds[pick], points[pick]
boxes = boxes.cpu().numpy()
points = points.cpu().numpy()
batch_boxes = []
batch_points = []
for b_i in range(batch_size):
b_i_inds = np.where(image_inds == b_i)
batch_boxes.append(boxes[b_i_inds].copy())
batch_points.append(points[b_i_inds].copy())
batch_boxes, batch_points = np.array(batch_boxes), np.array(batch_points)
empty_cache(device)
return batch_boxes, batch_points
def bbreg(boundingbox, reg):
if reg.shape[1] == 1:
reg = torch.reshape(reg, (reg.shape[2], reg.shape[3]))
w = boundingbox[:, 2] - boundingbox[:, 0] + 1
h = boundingbox[:, 3] - boundingbox[:, 1] + 1
b1 = boundingbox[:, 0] + reg[:, 0] * w
b2 = boundingbox[:, 1] + reg[:, 1] * h
b3 = boundingbox[:, 2] + reg[:, 2] * w
b4 = boundingbox[:, 3] + reg[:, 3] * h
boundingbox[:, :4] = torch.stack([b1, b2, b3, b4]).permute(1, 0)
return boundingbox
def generateBoundingBox(reg, probs, scale, thresh):
stride = 2
cellsize = 12
reg = reg.permute(1, 0, 2, 3)
mask = probs >= thresh
mask_inds = mask.nonzero(as_tuple=False)
image_inds = mask_inds[:, 0]
score = probs[mask]
reg = reg[:, mask].permute(1, 0)
bb = mask_inds[:, 1:].type(reg.dtype).flip(1)
q1 = ((stride * bb + 1) / scale).floor()
q2 = ((stride * bb + cellsize - 1 + 1) / scale).floor()
boundingbox = torch.cat([q1, q2, score.unsqueeze(1), reg], dim=1)
return boundingbox, image_inds
def nms_numpy(boxes, scores, threshold, method):
if boxes.size == 0:
return np.empty((0, 3))
x1 = boxes[:, 0].copy()
y1 = boxes[:, 1].copy()
x2 = boxes[:, 2].copy()
y2 = boxes[:, 3].copy()
s = scores
area = (x2 - x1 + 1) * (y2 - y1 + 1)
I = np.argsort(s)
pick = np.zeros_like(s, dtype=np.int16)
counter = 0
while I.size > 0:
i = I[-1]
pick[counter] = i
counter += 1
idx = I[0:-1]
xx1 = np.maximum(x1[i], x1[idx]).copy()
yy1 = np.maximum(y1[i], y1[idx]).copy()
xx2 = np.minimum(x2[i], x2[idx]).copy()
yy2 = np.minimum(y2[i], y2[idx]).copy()
w = np.maximum(0.0, xx2 - xx1 + 1).copy()
h = np.maximum(0.0, yy2 - yy1 + 1).copy()
inter = w * h
if method == "Min":
o = inter / np.minimum(area[i], area[idx])
else:
o = inter / (area[i] + area[idx] - inter)
I = I[np.where(o <= threshold)]
pick = pick[:counter].copy()
return pick
def batched_nms_numpy(boxes, scores, idxs, threshold, method):
device = boxes.device
if boxes.numel() == 0:
return torch.empty((0,), dtype=torch.int64, device=device)
# strategy: in order to perform NMS independently per class.
# we add an offset to all the boxes. The offset is dependent
# only on the class idx, and is large enough so that boxes
# from different classes do not overlap
max_coordinate = boxes.max()
offsets = idxs.to(boxes) * (max_coordinate + 1)
boxes_for_nms = boxes + offsets[:, None]
boxes_for_nms = boxes_for_nms.cpu().numpy()
scores = scores.cpu().numpy()
keep = nms_numpy(boxes_for_nms, scores, threshold, method)
return torch.as_tensor(keep, dtype=torch.long, device=device)
def pad(boxes, w, h):
boxes = boxes.trunc().int().cpu().numpy()
x = boxes[:, 0]
y = boxes[:, 1]
ex = boxes[:, 2]
ey = boxes[:, 3]
x[x < 1] = 1
y[y < 1] = 1
ex[ex > w] = w
ey[ey > h] = h
return y, ey, x, ex
def rerec(bboxA):
h = bboxA[:, 3] - bboxA[:, 1]
w = bboxA[:, 2] - bboxA[:, 0]
l = torch.max(w, h)
bboxA[:, 0] = bboxA[:, 0] + w * 0.5 - l * 0.5
bboxA[:, 1] = bboxA[:, 1] + h * 0.5 - l * 0.5
bboxA[:, 2:4] = bboxA[:, :2] + l.repeat(2, 1).permute(1, 0)
return bboxA
def imresample(img, sz):
im_data = interpolate(img, size=sz, mode="area")
return im_data | def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3)
self.prelu1 = nn.PReLU(32)
self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.prelu2 = nn.PReLU(64)
self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3)
self.prelu3 = nn.PReLU(64)
self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True)
self.conv4 = nn.Conv2d(64, 128, kernel_size=2)
self.prelu4 = nn.PReLU(128)
self.dense5 = nn.Linear(1152, 256)
self.prelu5 = nn.PReLU(256)
self.dense6_1 = nn.Linear(256, 2)
self.softmax6_1 = nn.Softmax(dim=1)
self.dense6_2 = nn.Linear(256, 4)
self.dense6_3 = nn.Linear(256, 10)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.prelu3(x)
x = self.pool3(x)
x = self.conv4(x)
x = self.prelu4(x)
x = x.permute(0, 3, 2, 1).contiguous()
x = self.dense5(x.view(x.shape[0], -1))
x = self.prelu5(x)
a = self.dense6_1(x)
a = self.softmax6_1(a)
b = self.dense6_2(x)
c = self.dense6_3(x)
return b, c, a | identifier_body |
mtcnn.py | import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import interpolate
from torchvision.ops.boxes import batched_nms
class MTCNN():
def __init__(self, device=None, model=None):
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.device = device
url = 'https://github.com/deepware/dFace/raw/master/models/mtcnn.pt'
if model is None:
model = torch.hub.load_state_dict_from_url(url)
else:
model = torch.load(model, map_location=device)
self.pnet = PNet().to(device)
self.rnet = RNet().to(device)
self.onet = ONet().to(device)
self.pnet.load_state_dict(model['pnet'])
self.rnet.load_state_dict(model['rnet'])
self.onet.load_state_dict(model['onet'])
def detect(self, imgs, minsize=None):
if len(imgs) == 0:
return []
if isinstance(imgs[0], np.ndarray):
h, w = imgs[0].shape[:2]
else:
w, h = imgs[0].size
if minsize is None:
minsize = max(96 * min(w, h)/1080, 40)
boxes, points = [], []
with torch.no_grad():
batches = [imgs[i:i+10] for i in range(0, len(imgs), 10)]
for batch in batches:
batch_boxes, batch_points = detect_face(
batch, minsize, self.pnet, self.rnet, self.onet,
[0.7, 0.8, 0.9], 0.709, self.device)
boxes += list(batch_boxes)
points += list(batch_points)
result = []
for box, point in zip(boxes, points):
box = np.array(box)
point = np.array(point)
if len(box) == 0:
result.append(None)
else:
result.append((box[:, :4], box[:, 4], point))
return result
def empty_cache(device):
if 'cuda' in device:
with torch.cuda.device(device):
torch.cuda.empty_cache()
class PNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=3)
self.prelu1 = nn.PReLU(10)
self.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(10, 16, kernel_size=3)
self.prelu2 = nn.PReLU(16)
self.conv3 = nn.Conv2d(16, 32, kernel_size=3)
self.prelu3 = nn.PReLU(32)
self.conv4_1 = nn.Conv2d(32, 2, kernel_size=1)
self.softmax4_1 = nn.Softmax(dim=1)
self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.conv3(x)
x = self.prelu3(x)
a = self.conv4_1(x)
a = self.softmax4_1(a)
b = self.conv4_2(x)
return b, a
class RNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 28, kernel_size=3)
self.prelu1 = nn.PReLU(28)
self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(28, 48, kernel_size=3)
self.prelu2 = nn.PReLU(48)
self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv3 = nn.Conv2d(48, 64, kernel_size=2)
self.prelu3 = nn.PReLU(64)
self.dense4 = nn.Linear(576, 128)
self.prelu4 = nn.PReLU(128)
self.dense5_1 = nn.Linear(128, 2)
self.softmax5_1 = nn.Softmax(dim=1)
self.dense5_2 = nn.Linear(128, 4)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.prelu3(x)
x = x.permute(0, 3, 2, 1).contiguous()
x = self.dense4(x.view(x.shape[0], -1))
x = self.prelu4(x) |
class ONet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3)
self.prelu1 = nn.PReLU(32)
self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.prelu2 = nn.PReLU(64)
self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3)
self.prelu3 = nn.PReLU(64)
self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True)
self.conv4 = nn.Conv2d(64, 128, kernel_size=2)
self.prelu4 = nn.PReLU(128)
self.dense5 = nn.Linear(1152, 256)
self.prelu5 = nn.PReLU(256)
self.dense6_1 = nn.Linear(256, 2)
self.softmax6_1 = nn.Softmax(dim=1)
self.dense6_2 = nn.Linear(256, 4)
self.dense6_3 = nn.Linear(256, 10)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.prelu3(x)
x = self.pool3(x)
x = self.conv4(x)
x = self.prelu4(x)
x = x.permute(0, 3, 2, 1).contiguous()
x = self.dense5(x.view(x.shape[0], -1))
x = self.prelu5(x)
a = self.dense6_1(x)
a = self.softmax6_1(a)
b = self.dense6_2(x)
c = self.dense6_3(x)
return b, c, a
def detect_face(imgs, minsize, pnet, rnet, onet, threshold, factor, device):
if isinstance(imgs, (np.ndarray, torch.Tensor)):
imgs = torch.as_tensor(imgs, device=device)
if len(imgs.shape) == 3:
imgs = imgs.unsqueeze(0)
else:
if not isinstance(imgs, (list, tuple)):
imgs = [imgs]
if any(img.size != imgs[0].size for img in imgs):
raise Exception("MTCNN batch processing only compatible with equal-dimension images.")
imgs = np.stack([np.uint8(img) for img in imgs])
imgs = torch.as_tensor(imgs, device=device)
model_dtype = next(pnet.parameters()).dtype
imgs = imgs.permute(0, 3, 1, 2).type(model_dtype)
batch_size = len(imgs)
h, w = imgs.shape[2:4]
m = 12.0 / minsize
minl = min(h, w)
minl = minl * m
# Create scale pyramid
scale_i = m
scales = []
while minl >= 12:
scales.append(scale_i)
scale_i = scale_i * factor
minl = minl * factor
# First stage
boxes = []
image_inds = []
all_inds = []
all_i = 0
for scale in scales:
im_data = imresample(imgs, (int(h * scale + 1), int(w * scale + 1)))
im_data = (im_data - 127.5) * 0.0078125
reg, probs = pnet(im_data)
empty_cache(device)
boxes_scale, image_inds_scale = generateBoundingBox(reg, probs[:, 1], scale, threshold[0])
boxes.append(boxes_scale)
image_inds.append(image_inds_scale)
all_inds.append(all_i + image_inds_scale)
all_i += batch_size
boxes = torch.cat(boxes, dim=0)
image_inds = torch.cat(image_inds, dim=0).cpu()
all_inds = torch.cat(all_inds, dim=0)
# NMS within each scale + image
pick = batched_nms(boxes[:, :4], boxes[:, 4], all_inds, 0.5)
boxes, image_inds = boxes[pick], image_inds[pick]
# NMS within each image
pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
boxes, image_inds = boxes[pick], image_inds[pick]
regw = boxes[:, 2] - boxes[:, 0]
regh = boxes[:, 3] - boxes[:, 1]
qq1 = boxes[:, 0] + boxes[:, 5] * regw
qq2 = boxes[:, 1] + boxes[:, 6] * regh
qq3 = boxes[:, 2] + boxes[:, 7] * regw
qq4 = boxes[:, 3] + boxes[:, 8] * regh
boxes = torch.stack([qq1, qq2, qq3, qq4, boxes[:, 4]]).permute(1, 0)
boxes = rerec(boxes)
y, ey, x, ex = pad(boxes, w, h)
# Second stage
if len(boxes) > 0:
im_data = []
for k in range(len(y)):
if ey[k] > (y[k] - 1) and ex[k] > (x[k] - 1):
img_k = imgs[image_inds[k], :, (y[k] - 1):ey[k], (x[k] - 1):ex[k]].unsqueeze(0)
im_data.append(imresample(img_k, (24, 24)))
im_data = torch.cat(im_data, dim=0)
im_data = (im_data - 127.5) * 0.0078125
out = []
for batch in im_data.split(2000):
out += [rnet(batch)]
z = list(zip(*out))
out = (torch.cat(z[0]), torch.cat(z[1]))
empty_cache(device)
out0 = out[0].permute(1, 0)
out1 = out[1].permute(1, 0)
score = out1[1, :]
ipass = score > threshold[1]
boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)
image_inds = image_inds[ipass]
mv = out0[:, ipass].permute(1, 0)
# NMS within each image
pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
boxes, image_inds, mv = boxes[pick], image_inds[pick], mv[pick]
boxes = bbreg(boxes, mv)
boxes = rerec(boxes)
# Third stage
points = torch.zeros(0, 5, 2, device=device)
if len(boxes) > 0:
y, ey, x, ex = pad(boxes, w, h)
im_data = []
for k in range(len(y)):
if ey[k] > (y[k] - 1) and ex[k] > (x[k] - 1):
img_k = imgs[image_inds[k], :, (y[k] - 1):ey[k], (x[k] - 1):ex[k]].unsqueeze(0)
im_data.append(imresample(img_k, (48, 48)))
im_data = torch.cat(im_data, dim=0)
im_data = (im_data - 127.5) * 0.0078125
out = []
for batch in im_data.split(500):
out += [onet(batch)]
z = list(zip(*out))
out = (torch.cat(z[0]), torch.cat(z[1]), torch.cat(z[2]))
empty_cache(device)
out0 = out[0].permute(1, 0)
out1 = out[1].permute(1, 0)
out2 = out[2].permute(1, 0)
score = out2[1, :]
points = out1
ipass = score > threshold[2]
points = points[:, ipass]
boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)
image_inds = image_inds[ipass]
mv = out0[:, ipass].permute(1, 0)
w_i = boxes[:, 2] - boxes[:, 0] + 1
h_i = boxes[:, 3] - boxes[:, 1] + 1
points_x = w_i.repeat(5, 1) * points[:5, :] + boxes[:, 0].repeat(5, 1) - 1
points_y = h_i.repeat(5, 1) * points[5:10, :] + boxes[:, 1].repeat(5, 1) - 1
points = torch.stack((points_x, points_y)).permute(2, 1, 0)
boxes = bbreg(boxes, mv)
# NMS within each image using "Min" strategy
# pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
pick = batched_nms_numpy(boxes[:, :4], boxes[:, 4], image_inds, 0.7, 'Min')
boxes, image_inds, points = boxes[pick], image_inds[pick], points[pick]
boxes = boxes.cpu().numpy()
points = points.cpu().numpy()
batch_boxes = []
batch_points = []
for b_i in range(batch_size):
b_i_inds = np.where(image_inds == b_i)
batch_boxes.append(boxes[b_i_inds].copy())
batch_points.append(points[b_i_inds].copy())
batch_boxes, batch_points = np.array(batch_boxes), np.array(batch_points)
empty_cache(device)
return batch_boxes, batch_points
def bbreg(boundingbox, reg):
if reg.shape[1] == 1:
reg = torch.reshape(reg, (reg.shape[2], reg.shape[3]))
w = boundingbox[:, 2] - boundingbox[:, 0] + 1
h = boundingbox[:, 3] - boundingbox[:, 1] + 1
b1 = boundingbox[:, 0] + reg[:, 0] * w
b2 = boundingbox[:, 1] + reg[:, 1] * h
b3 = boundingbox[:, 2] + reg[:, 2] * w
b4 = boundingbox[:, 3] + reg[:, 3] * h
boundingbox[:, :4] = torch.stack([b1, b2, b3, b4]).permute(1, 0)
return boundingbox
def generateBoundingBox(reg, probs, scale, thresh):
stride = 2
cellsize = 12
reg = reg.permute(1, 0, 2, 3)
mask = probs >= thresh
mask_inds = mask.nonzero(as_tuple=False)
image_inds = mask_inds[:, 0]
score = probs[mask]
reg = reg[:, mask].permute(1, 0)
bb = mask_inds[:, 1:].type(reg.dtype).flip(1)
q1 = ((stride * bb + 1) / scale).floor()
q2 = ((stride * bb + cellsize - 1 + 1) / scale).floor()
boundingbox = torch.cat([q1, q2, score.unsqueeze(1), reg], dim=1)
return boundingbox, image_inds
def nms_numpy(boxes, scores, threshold, method):
if boxes.size == 0:
return np.empty((0, 3))
x1 = boxes[:, 0].copy()
y1 = boxes[:, 1].copy()
x2 = boxes[:, 2].copy()
y2 = boxes[:, 3].copy()
s = scores
area = (x2 - x1 + 1) * (y2 - y1 + 1)
I = np.argsort(s)
pick = np.zeros_like(s, dtype=np.int16)
counter = 0
while I.size > 0:
i = I[-1]
pick[counter] = i
counter += 1
idx = I[0:-1]
xx1 = np.maximum(x1[i], x1[idx]).copy()
yy1 = np.maximum(y1[i], y1[idx]).copy()
xx2 = np.minimum(x2[i], x2[idx]).copy()
yy2 = np.minimum(y2[i], y2[idx]).copy()
w = np.maximum(0.0, xx2 - xx1 + 1).copy()
h = np.maximum(0.0, yy2 - yy1 + 1).copy()
inter = w * h
if method == "Min":
o = inter / np.minimum(area[i], area[idx])
else:
o = inter / (area[i] + area[idx] - inter)
I = I[np.where(o <= threshold)]
pick = pick[:counter].copy()
return pick
def batched_nms_numpy(boxes, scores, idxs, threshold, method):
device = boxes.device
if boxes.numel() == 0:
return torch.empty((0,), dtype=torch.int64, device=device)
# strategy: in order to perform NMS independently per class.
# we add an offset to all the boxes. The offset is dependent
# only on the class idx, and is large enough so that boxes
# from different classes do not overlap
max_coordinate = boxes.max()
offsets = idxs.to(boxes) * (max_coordinate + 1)
boxes_for_nms = boxes + offsets[:, None]
boxes_for_nms = boxes_for_nms.cpu().numpy()
scores = scores.cpu().numpy()
keep = nms_numpy(boxes_for_nms, scores, threshold, method)
return torch.as_tensor(keep, dtype=torch.long, device=device)
def pad(boxes, w, h):
boxes = boxes.trunc().int().cpu().numpy()
x = boxes[:, 0]
y = boxes[:, 1]
ex = boxes[:, 2]
ey = boxes[:, 3]
x[x < 1] = 1
y[y < 1] = 1
ex[ex > w] = w
ey[ey > h] = h
return y, ey, x, ex
def rerec(bboxA):
h = bboxA[:, 3] - bboxA[:, 1]
w = bboxA[:, 2] - bboxA[:, 0]
l = torch.max(w, h)
bboxA[:, 0] = bboxA[:, 0] + w * 0.5 - l * 0.5
bboxA[:, 1] = bboxA[:, 1] + h * 0.5 - l * 0.5
bboxA[:, 2:4] = bboxA[:, :2] + l.repeat(2, 1).permute(1, 0)
return bboxA
def imresample(img, sz):
im_data = interpolate(img, size=sz, mode="area")
return im_data | a = self.dense5_1(x)
a = self.softmax5_1(a)
b = self.dense5_2(x)
return b, a | random_line_split |
lambdaFunction.go | package lambda
import (
"bufio"
"container/list"
"errors"
"fmt"
"log"
"net/http"
"os"
"path/filepath"
"strings"
"time"
"github.com/open-lambda/open-lambda/ol/common"
"github.com/open-lambda/open-lambda/ol/worker/lambda/packages"
"github.com/open-lambda/open-lambda/ol/worker/sandbox"
)
// LambdaFunc represents a single lambda function (the code)
type LambdaFunc struct {
lmgr *LambdaMgr
name string
rtType common.RuntimeType
// lambda code
lastPull *time.Time
codeDir string
meta *sandbox.SandboxMeta
// lambda execution
funcChan chan *Invocation // server to func
instChan chan *Invocation // func to instances
doneChan chan *Invocation // instances to func
instances *list.List
// send chan to the kill chan to destroy the instance, then
// wait for msg on sent chan to block until it is done
killChan chan chan bool
}
func (f *LambdaFunc) Invoke(w http.ResponseWriter, r *http.Request) {
t := common.T0("LambdaFunc.Invoke")
defer t.T1()
done := make(chan bool)
req := &Invocation{w: w, r: r, done: done}
// send invocation to lambda func task, if room in queue
select {
case f.funcChan <- req:
// block until it's done
<-done
default:
// queue cannot accept more, so reply with backoff
req.w.WriteHeader(http.StatusTooManyRequests)
req.w.Write([]byte("lambda function queue is full\n"))
}
}
// add function name to each log message so we know which logs
// correspond to which LambdaFuncs
func (f *LambdaFunc) printf(format string, args ...any) {
msg := fmt.Sprintf(format, args...)
log.Printf("%s [FUNC %s]", strings.TrimRight(msg, "\n"), f.name)
}
// parseMeta reads in a requirements.txt file that was built from pip-compile
func parseMeta(codeDir string) (meta *sandbox.SandboxMeta, err error) {
meta = &sandbox.SandboxMeta{
Installs: []string{},
Imports: []string{},
}
path := filepath.Join(codeDir, "requirements.txt")
file, err := os.Open(path)
if errors.Is(err, os.ErrNotExist) {
// having a requirements.txt is optional
return meta, nil
} else if err != nil {
return nil, err
}
defer file.Close()
scnr := bufio.NewScanner(file)
for scnr.Scan() {
line := strings.ReplaceAll(scnr.Text(), " ", "")
pkg := strings.Split(line, "#")[0]
if pkg != "" {
pkg = packages.NormalizePkg(pkg)
meta.Installs = append(meta.Installs, pkg)
}
}
return meta, nil
}
// if there is any error:
// 1. we won't switch to the new code
// 2. we won't update pull time (so well check for a fix next time)
func (f *LambdaFunc) pullHandlerIfStale() (err error) {
// check if there is newer code, download it if necessary
now := time.Now()
cacheNs := int64(common.Conf.Registry_cache_ms) * 1000000
// should we check for new code?
if f.lastPull != nil && int64(now.Sub(*f.lastPull)) < cacheNs {
return nil
}
// is there new code?
rtType, codeDir, err := f.lmgr.HandlerPuller.Pull(f.name)
if err != nil {
return err
}
if codeDir == f.codeDir {
return nil
}
f.rtType = rtType
defer func() {
if err != nil {
if err := os.RemoveAll(codeDir); err != nil {
log.Printf("could not cleanup %s after failed pull\n", codeDir)
}
if rtType == common.RT_PYTHON {
// we dirty this dir (e.g., by setting up
// symlinks to packages, so we want the
// HandlerPuller to give us a new one next
// time, even if the code hasn't changed
f.lmgr.HandlerPuller.Reset(f.name)
}
}
}()
if rtType == common.RT_PYTHON {
// inspect new code for dependencies; if we can install
// everything necessary, start using new code
meta, err := parseMeta(codeDir)
if err != nil {
return err
}
// make sure all specified dependencies are installed
// (but don't recursively find others)
for _, pkg := range meta.Installs {
if _, err := f.lmgr.PackagePuller.GetPkg(pkg); err != nil {
return err
}
}
f.lmgr.DepTracer.TraceFunction(codeDir, meta.Installs)
f.meta = meta
} else if rtType == common.RT_NATIVE {
log.Printf("Got native function")
}
f.codeDir = codeDir
f.lastPull = &now
return nil
}
// this Task receives lambda requests, fetches new lambda code as
// needed, and dispatches to a set of lambda instances. Task also
// monitors outstanding requests, and scales the number of instances
// up or down as needed.
//
// communication for a given request is as follows (each of the four
// transfers are commented within the function):
//
// client -> function -> instance -> function -> client
//
// each of the 4 handoffs above is over a chan. In order, those chans are:
// 1. LambdaFunc.funcChan
// 2. LambdaFunc.instChan
// 3. LambdaFunc.doneChan
// 4. Invocation.done
//
// If either LambdaFunc.funcChan or LambdaFunc.instChan is full, we
// respond to the client with a backoff message: StatusTooManyRequests
func (f *LambdaFunc) Task() {
f.printf("debug: LambdaFunc.Task() runs on goroutine %d", common.GetGoroutineID())
// we want to perform various cleanup actions, such as killing
// instances and deleting old code. We want to do these
// asynchronously, but in order. Thus, we use a chan to get
// FIFO behavior and a single cleanup task to get async.
//
// two types can be sent to this chan:
//
// 1. string: this is a path to be deleted
//
// 2. chan: this is a signal chan that corresponds to
// previously initiated cleanup work. We block until we
// receive the complete signal, before proceeding to
// subsequent cleanup tasks in the FIFO.
cleanupChan := make(chan any, 32)
cleanupTaskDone := make(chan bool)
go func() {
for {
msg, ok := <-cleanupChan
if !ok {
cleanupTaskDone <- true
return
}
switch op := msg.(type) {
case string:
if err := os.RemoveAll(op); err != nil {
f.printf("Async code cleanup could not delete %s, even after all instances using it killed: %v", op, err)
}
case chan bool:
<-op
}
}
}()
// stats for autoscaling
outstandingReqs := 0
execMs := common.NewRollingAvg(10)
var lastScaling *time.Time = nil
timeout := time.NewTimer(0)
for {
select {
case <-timeout.C:
if f.codeDir == "" {
continue
}
case req := <-f.funcChan:
// msg: client -> function
// check for new code, and cleanup old code
// (and instances that use it) if necessary
oldCodeDir := f.codeDir
if err := f.pullHandlerIfStale(); err != nil {
f.printf("Error checking for new lambda code at `%s`: %v", f.codeDir, err)
req.w.WriteHeader(http.StatusInternalServerError)
req.w.Write([]byte(err.Error() + "\n"))
req.done <- true
continue
}
if oldCodeDir != "" && oldCodeDir != f.codeDir {
el := f.instances.Front()
for el != nil {
waitChan := el.Value.(*LambdaInstance).AsyncKill()
cleanupChan <- waitChan
el = el.Next()
}
f.instances = list.New()
// cleanupChan is a FIFO, so this will
// happen after the cleanup task waits
// for all instance kills to finish
cleanupChan <- oldCodeDir
}
f.lmgr.DepTracer.TraceInvocation(f.codeDir)
select {
case f.instChan <- req:
// msg: function -> instance
outstandingReqs++
default:
// queue cannot accept more, so reply with backoff
req.w.WriteHeader(http.StatusTooManyRequests)
req.w.Write([]byte("lambda instance queue is full\n"))
req.done <- true
}
case req := <-f.doneChan:
// msg: instance -> function
execMs.Add(req.execMs)
outstandingReqs--
// msg: function -> client
req.done <- true
case done := <-f.killChan:
// signal all instances to die, then wait for
// cleanup task to finish and exit
el := f.instances.Front()
for el != nil {
waitChan := el.Value.(*LambdaInstance).AsyncKill()
cleanupChan <- waitChan
el = el.Next()
}
if f.codeDir != "" {
//cleanupChan <- f.codeDir
}
close(cleanupChan)
<-cleanupTaskDone
done <- true
return
}
// POLICY: how many instances (i.e., virtual sandboxes) should we allocate?
// AUTOSCALING STEP 1: decide how many instances we want
// let's aim to have 1 sandbox per second of outstanding work
inProgressWorkMs := outstandingReqs * execMs.Avg
desiredInstances := inProgressWorkMs / 1000
// if we have, say, one job that will take 100
// seconds, spinning up 100 instances won't do any
// good, so cap by number of outstanding reqs
if outstandingReqs < desiredInstances {
desiredInstances = outstandingReqs
}
// always try to have one instance
if desiredInstances < 1 {
desiredInstances = 1
}
// AUTOSCALING STEP 2: tweak how many instances we have, to get closer to our goal
// make at most one scaling adjustment per second
adjustFreq := time.Second
now := time.Now()
if lastScaling != nil {
elapsed := now.Sub(*lastScaling)
if elapsed < adjustFreq {
if desiredInstances != f.instances.Len() {
timeout = time.NewTimer(adjustFreq - elapsed)
}
continue
}
}
// kill or start at most one instance to get closer to
// desired number
if f.instances.Len() < desiredInstances {
f.printf("increase instances to %d", f.instances.Len()+1)
f.newInstance()
lastScaling = &now
} else if f.instances.Len() > desiredInstances {
f.printf("reduce instances to %d", f.instances.Len()-1)
waitChan := f.instances.Back().Value.(*LambdaInstance).AsyncKill()
f.instances.Remove(f.instances.Back())
cleanupChan <- waitChan
lastScaling = &now
}
if f.instances.Len() != desiredInstances {
// we can only adjust quickly, so we want to
// run through this loop again as soon as
// possible, even if there are no requests to
// service.
timeout = time.NewTimer(adjustFreq)
}
}
}
func (f *LambdaFunc) newInstance() {
if f.codeDir == "" {
panic("cannot start instance until code has been fetched")
}
linst := &LambdaInstance{
lfunc: f,
codeDir: f.codeDir,
meta: f.meta,
killChan: make(chan chan bool, 1),
}
f.instances.PushBack(linst)
go linst.Task()
}
func (f *LambdaFunc) Kill() | {
done := make(chan bool)
f.killChan <- done
<-done
} | identifier_body | |
lambdaFunction.go | package lambda
import (
"bufio"
"container/list"
"errors"
"fmt"
"log"
"net/http"
"os"
"path/filepath"
"strings"
"time"
"github.com/open-lambda/open-lambda/ol/common"
"github.com/open-lambda/open-lambda/ol/worker/lambda/packages"
"github.com/open-lambda/open-lambda/ol/worker/sandbox"
)
// LambdaFunc represents a single lambda function (the code)
type LambdaFunc struct {
lmgr *LambdaMgr
name string
rtType common.RuntimeType
// lambda code
lastPull *time.Time
codeDir string
meta *sandbox.SandboxMeta
// lambda execution
funcChan chan *Invocation // server to func
instChan chan *Invocation // func to instances
doneChan chan *Invocation // instances to func
instances *list.List
// send chan to the kill chan to destroy the instance, then
// wait for msg on sent chan to block until it is done
killChan chan chan bool
}
func (f *LambdaFunc) Invoke(w http.ResponseWriter, r *http.Request) {
t := common.T0("LambdaFunc.Invoke")
defer t.T1()
done := make(chan bool)
req := &Invocation{w: w, r: r, done: done}
// send invocation to lambda func task, if room in queue
select {
case f.funcChan <- req:
// block until it's done
<-done
default:
// queue cannot accept more, so reply with backoff
req.w.WriteHeader(http.StatusTooManyRequests)
req.w.Write([]byte("lambda function queue is full\n"))
}
}
// add function name to each log message so we know which logs
// correspond to which LambdaFuncs
func (f *LambdaFunc) printf(format string, args ...any) {
msg := fmt.Sprintf(format, args...)
log.Printf("%s [FUNC %s]", strings.TrimRight(msg, "\n"), f.name)
}
// parseMeta reads in a requirements.txt file that was built from pip-compile
func parseMeta(codeDir string) (meta *sandbox.SandboxMeta, err error) {
meta = &sandbox.SandboxMeta{
Installs: []string{},
Imports: []string{},
}
path := filepath.Join(codeDir, "requirements.txt")
file, err := os.Open(path)
if errors.Is(err, os.ErrNotExist) {
// having a requirements.txt is optional
return meta, nil
} else if err != nil {
return nil, err
}
defer file.Close()
scnr := bufio.NewScanner(file)
for scnr.Scan() {
line := strings.ReplaceAll(scnr.Text(), " ", "")
pkg := strings.Split(line, "#")[0]
if pkg != "" {
pkg = packages.NormalizePkg(pkg)
meta.Installs = append(meta.Installs, pkg)
}
}
return meta, nil
}
// if there is any error:
// 1. we won't switch to the new code
// 2. we won't update pull time (so well check for a fix next time)
func (f *LambdaFunc) pullHandlerIfStale() (err error) {
// check if there is newer code, download it if necessary
now := time.Now()
cacheNs := int64(common.Conf.Registry_cache_ms) * 1000000
// should we check for new code?
if f.lastPull != nil && int64(now.Sub(*f.lastPull)) < cacheNs {
return nil
}
// is there new code?
rtType, codeDir, err := f.lmgr.HandlerPuller.Pull(f.name)
if err != nil {
return err
}
if codeDir == f.codeDir {
return nil
}
f.rtType = rtType
defer func() {
if err != nil {
if err := os.RemoveAll(codeDir); err != nil {
log.Printf("could not cleanup %s after failed pull\n", codeDir)
}
if rtType == common.RT_PYTHON {
// we dirty this dir (e.g., by setting up
// symlinks to packages, so we want the
// HandlerPuller to give us a new one next
// time, even if the code hasn't changed
f.lmgr.HandlerPuller.Reset(f.name)
}
}
}()
if rtType == common.RT_PYTHON {
// inspect new code for dependencies; if we can install
// everything necessary, start using new code
meta, err := parseMeta(codeDir)
if err != nil {
return err
}
// make sure all specified dependencies are installed
// (but don't recursively find others)
for _, pkg := range meta.Installs {
if _, err := f.lmgr.PackagePuller.GetPkg(pkg); err != nil {
return err
}
}
f.lmgr.DepTracer.TraceFunction(codeDir, meta.Installs)
f.meta = meta
} else if rtType == common.RT_NATIVE {
log.Printf("Got native function")
}
f.codeDir = codeDir
f.lastPull = &now
return nil
}
// this Task receives lambda requests, fetches new lambda code as
// needed, and dispatches to a set of lambda instances. Task also
// monitors outstanding requests, and scales the number of instances
// up or down as needed.
//
// communication for a given request is as follows (each of the four
// transfers are commented within the function):
//
// client -> function -> instance -> function -> client
//
// each of the 4 handoffs above is over a chan. In order, those chans are:
// 1. LambdaFunc.funcChan
// 2. LambdaFunc.instChan
// 3. LambdaFunc.doneChan
// 4. Invocation.done
//
// If either LambdaFunc.funcChan or LambdaFunc.instChan is full, we
// respond to the client with a backoff message: StatusTooManyRequests
func (f *LambdaFunc) Task() {
f.printf("debug: LambdaFunc.Task() runs on goroutine %d", common.GetGoroutineID())
// we want to perform various cleanup actions, such as killing
// instances and deleting old code. We want to do these
// asynchronously, but in order. Thus, we use a chan to get
// FIFO behavior and a single cleanup task to get async.
//
// two types can be sent to this chan:
//
// 1. string: this is a path to be deleted
//
// 2. chan: this is a signal chan that corresponds to
// previously initiated cleanup work. We block until we
// receive the complete signal, before proceeding to
// subsequent cleanup tasks in the FIFO.
cleanupChan := make(chan any, 32)
cleanupTaskDone := make(chan bool)
go func() {
for {
msg, ok := <-cleanupChan
if !ok {
cleanupTaskDone <- true
return
}
switch op := msg.(type) {
case string:
if err := os.RemoveAll(op); err != nil {
f.printf("Async code cleanup could not delete %s, even after all instances using it killed: %v", op, err)
}
case chan bool:
<-op
}
}
}()
// stats for autoscaling
outstandingReqs := 0
execMs := common.NewRollingAvg(10)
var lastScaling *time.Time = nil
timeout := time.NewTimer(0)
for |
}
func (f *LambdaFunc) newInstance() {
if f.codeDir == "" {
panic("cannot start instance until code has been fetched")
}
linst := &LambdaInstance{
lfunc: f,
codeDir: f.codeDir,
meta: f.meta,
killChan: make(chan chan bool, 1),
}
f.instances.PushBack(linst)
go linst.Task()
}
func (f *LambdaFunc) Kill() {
done := make(chan bool)
f.killChan <- done
<-done
}
| {
select {
case <-timeout.C:
if f.codeDir == "" {
continue
}
case req := <-f.funcChan:
// msg: client -> function
// check for new code, and cleanup old code
// (and instances that use it) if necessary
oldCodeDir := f.codeDir
if err := f.pullHandlerIfStale(); err != nil {
f.printf("Error checking for new lambda code at `%s`: %v", f.codeDir, err)
req.w.WriteHeader(http.StatusInternalServerError)
req.w.Write([]byte(err.Error() + "\n"))
req.done <- true
continue
}
if oldCodeDir != "" && oldCodeDir != f.codeDir {
el := f.instances.Front()
for el != nil {
waitChan := el.Value.(*LambdaInstance).AsyncKill()
cleanupChan <- waitChan
el = el.Next()
}
f.instances = list.New()
// cleanupChan is a FIFO, so this will
// happen after the cleanup task waits
// for all instance kills to finish
cleanupChan <- oldCodeDir
}
f.lmgr.DepTracer.TraceInvocation(f.codeDir)
select {
case f.instChan <- req:
// msg: function -> instance
outstandingReqs++
default:
// queue cannot accept more, so reply with backoff
req.w.WriteHeader(http.StatusTooManyRequests)
req.w.Write([]byte("lambda instance queue is full\n"))
req.done <- true
}
case req := <-f.doneChan:
// msg: instance -> function
execMs.Add(req.execMs)
outstandingReqs--
// msg: function -> client
req.done <- true
case done := <-f.killChan:
// signal all instances to die, then wait for
// cleanup task to finish and exit
el := f.instances.Front()
for el != nil {
waitChan := el.Value.(*LambdaInstance).AsyncKill()
cleanupChan <- waitChan
el = el.Next()
}
if f.codeDir != "" {
//cleanupChan <- f.codeDir
}
close(cleanupChan)
<-cleanupTaskDone
done <- true
return
}
// POLICY: how many instances (i.e., virtual sandboxes) should we allocate?
// AUTOSCALING STEP 1: decide how many instances we want
// let's aim to have 1 sandbox per second of outstanding work
inProgressWorkMs := outstandingReqs * execMs.Avg
desiredInstances := inProgressWorkMs / 1000
// if we have, say, one job that will take 100
// seconds, spinning up 100 instances won't do any
// good, so cap by number of outstanding reqs
if outstandingReqs < desiredInstances {
desiredInstances = outstandingReqs
}
// always try to have one instance
if desiredInstances < 1 {
desiredInstances = 1
}
// AUTOSCALING STEP 2: tweak how many instances we have, to get closer to our goal
// make at most one scaling adjustment per second
adjustFreq := time.Second
now := time.Now()
if lastScaling != nil {
elapsed := now.Sub(*lastScaling)
if elapsed < adjustFreq {
if desiredInstances != f.instances.Len() {
timeout = time.NewTimer(adjustFreq - elapsed)
}
continue
}
}
// kill or start at most one instance to get closer to
// desired number
if f.instances.Len() < desiredInstances {
f.printf("increase instances to %d", f.instances.Len()+1)
f.newInstance()
lastScaling = &now
} else if f.instances.Len() > desiredInstances {
f.printf("reduce instances to %d", f.instances.Len()-1)
waitChan := f.instances.Back().Value.(*LambdaInstance).AsyncKill()
f.instances.Remove(f.instances.Back())
cleanupChan <- waitChan
lastScaling = &now
}
if f.instances.Len() != desiredInstances {
// we can only adjust quickly, so we want to
// run through this loop again as soon as
// possible, even if there are no requests to
// service.
timeout = time.NewTimer(adjustFreq)
}
} | conditional_block |
lambdaFunction.go | package lambda
import (
"bufio"
"container/list"
"errors"
"fmt"
"log"
"net/http"
"os"
"path/filepath"
"strings"
"time"
"github.com/open-lambda/open-lambda/ol/common"
"github.com/open-lambda/open-lambda/ol/worker/lambda/packages"
"github.com/open-lambda/open-lambda/ol/worker/sandbox"
)
// LambdaFunc represents a single lambda function (the code)
type LambdaFunc struct {
lmgr *LambdaMgr
name string
rtType common.RuntimeType
// lambda code
lastPull *time.Time
codeDir string
meta *sandbox.SandboxMeta
// lambda execution
funcChan chan *Invocation // server to func
instChan chan *Invocation // func to instances
doneChan chan *Invocation // instances to func
instances *list.List
// send chan to the kill chan to destroy the instance, then
// wait for msg on sent chan to block until it is done
killChan chan chan bool
}
func (f *LambdaFunc) Invoke(w http.ResponseWriter, r *http.Request) {
t := common.T0("LambdaFunc.Invoke")
defer t.T1()
done := make(chan bool)
req := &Invocation{w: w, r: r, done: done}
// send invocation to lambda func task, if room in queue
select {
case f.funcChan <- req:
// block until it's done
<-done
default:
// queue cannot accept more, so reply with backoff
req.w.WriteHeader(http.StatusTooManyRequests)
req.w.Write([]byte("lambda function queue is full\n"))
}
}
// add function name to each log message so we know which logs
// correspond to which LambdaFuncs
func (f *LambdaFunc) printf(format string, args ...any) {
msg := fmt.Sprintf(format, args...)
log.Printf("%s [FUNC %s]", strings.TrimRight(msg, "\n"), f.name)
}
// parseMeta reads in a requirements.txt file that was built from pip-compile
func parseMeta(codeDir string) (meta *sandbox.SandboxMeta, err error) {
meta = &sandbox.SandboxMeta{
Installs: []string{},
Imports: []string{},
}
path := filepath.Join(codeDir, "requirements.txt")
file, err := os.Open(path)
if errors.Is(err, os.ErrNotExist) {
// having a requirements.txt is optional
return meta, nil
} else if err != nil {
return nil, err
}
defer file.Close()
scnr := bufio.NewScanner(file)
for scnr.Scan() {
line := strings.ReplaceAll(scnr.Text(), " ", "")
pkg := strings.Split(line, "#")[0]
if pkg != "" {
pkg = packages.NormalizePkg(pkg)
meta.Installs = append(meta.Installs, pkg)
}
}
return meta, nil
}
// if there is any error:
// 1. we won't switch to the new code
// 2. we won't update pull time (so well check for a fix next time)
func (f *LambdaFunc) pullHandlerIfStale() (err error) {
// check if there is newer code, download it if necessary
now := time.Now()
cacheNs := int64(common.Conf.Registry_cache_ms) * 1000000
// should we check for new code?
if f.lastPull != nil && int64(now.Sub(*f.lastPull)) < cacheNs {
return nil
}
// is there new code?
rtType, codeDir, err := f.lmgr.HandlerPuller.Pull(f.name)
if err != nil {
return err
}
if codeDir == f.codeDir {
return nil
}
f.rtType = rtType
defer func() {
if err != nil {
if err := os.RemoveAll(codeDir); err != nil {
log.Printf("could not cleanup %s after failed pull\n", codeDir)
}
if rtType == common.RT_PYTHON {
// we dirty this dir (e.g., by setting up
// symlinks to packages, so we want the
// HandlerPuller to give us a new one next
// time, even if the code hasn't changed
f.lmgr.HandlerPuller.Reset(f.name)
}
}
}()
if rtType == common.RT_PYTHON {
// inspect new code for dependencies; if we can install
// everything necessary, start using new code
meta, err := parseMeta(codeDir)
if err != nil {
return err
}
// make sure all specified dependencies are installed
// (but don't recursively find others)
for _, pkg := range meta.Installs {
if _, err := f.lmgr.PackagePuller.GetPkg(pkg); err != nil {
return err
}
}
f.lmgr.DepTracer.TraceFunction(codeDir, meta.Installs)
f.meta = meta
} else if rtType == common.RT_NATIVE {
log.Printf("Got native function")
}
f.codeDir = codeDir
f.lastPull = &now
return nil
}
// this Task receives lambda requests, fetches new lambda code as
// needed, and dispatches to a set of lambda instances. Task also
// monitors outstanding requests, and scales the number of instances
// up or down as needed.
//
// communication for a given request is as follows (each of the four
// transfers are commented within the function):
//
// client -> function -> instance -> function -> client
//
// each of the 4 handoffs above is over a chan. In order, those chans are:
// 1. LambdaFunc.funcChan
// 2. LambdaFunc.instChan
// 3. LambdaFunc.doneChan
// 4. Invocation.done
//
// If either LambdaFunc.funcChan or LambdaFunc.instChan is full, we
// respond to the client with a backoff message: StatusTooManyRequests
func (f *LambdaFunc) Task() {
f.printf("debug: LambdaFunc.Task() runs on goroutine %d", common.GetGoroutineID())
// we want to perform various cleanup actions, such as killing
// instances and deleting old code. We want to do these
// asynchronously, but in order. Thus, we use a chan to get
// FIFO behavior and a single cleanup task to get async.
//
// two types can be sent to this chan:
//
// 1. string: this is a path to be deleted
//
// 2. chan: this is a signal chan that corresponds to
// previously initiated cleanup work. We block until we
// receive the complete signal, before proceeding to
// subsequent cleanup tasks in the FIFO.
cleanupChan := make(chan any, 32)
cleanupTaskDone := make(chan bool)
go func() {
for {
msg, ok := <-cleanupChan
if !ok {
cleanupTaskDone <- true
return
}
switch op := msg.(type) {
case string:
if err := os.RemoveAll(op); err != nil {
f.printf("Async code cleanup could not delete %s, even after all instances using it killed: %v", op, err)
}
case chan bool:
<-op
}
}
}()
// stats for autoscaling
outstandingReqs := 0
execMs := common.NewRollingAvg(10)
var lastScaling *time.Time = nil
timeout := time.NewTimer(0)
for {
select {
case <-timeout.C:
if f.codeDir == "" {
continue
}
case req := <-f.funcChan:
// msg: client -> function
// check for new code, and cleanup old code
// (and instances that use it) if necessary
oldCodeDir := f.codeDir
if err := f.pullHandlerIfStale(); err != nil {
f.printf("Error checking for new lambda code at `%s`: %v", f.codeDir, err)
req.w.WriteHeader(http.StatusInternalServerError)
req.w.Write([]byte(err.Error() + "\n"))
req.done <- true
continue
}
if oldCodeDir != "" && oldCodeDir != f.codeDir {
el := f.instances.Front()
for el != nil {
waitChan := el.Value.(*LambdaInstance).AsyncKill()
cleanupChan <- waitChan
el = el.Next()
}
f.instances = list.New()
// cleanupChan is a FIFO, so this will
// happen after the cleanup task waits
// for all instance kills to finish
cleanupChan <- oldCodeDir
}
f.lmgr.DepTracer.TraceInvocation(f.codeDir)
select {
case f.instChan <- req:
// msg: function -> instance
outstandingReqs++
default:
// queue cannot accept more, so reply with backoff
req.w.WriteHeader(http.StatusTooManyRequests)
req.w.Write([]byte("lambda instance queue is full\n"))
req.done <- true
}
case req := <-f.doneChan:
// msg: instance -> function
execMs.Add(req.execMs)
outstandingReqs--
// msg: function -> client
req.done <- true
case done := <-f.killChan:
// signal all instances to die, then wait for
// cleanup task to finish and exit
el := f.instances.Front()
for el != nil {
waitChan := el.Value.(*LambdaInstance).AsyncKill()
cleanupChan <- waitChan
el = el.Next()
}
if f.codeDir != "" {
//cleanupChan <- f.codeDir
}
close(cleanupChan)
<-cleanupTaskDone
done <- true
return
}
// POLICY: how many instances (i.e., virtual sandboxes) should we allocate?
// AUTOSCALING STEP 1: decide how many instances we want
// let's aim to have 1 sandbox per second of outstanding work
inProgressWorkMs := outstandingReqs * execMs.Avg
desiredInstances := inProgressWorkMs / 1000
// if we have, say, one job that will take 100
// seconds, spinning up 100 instances won't do any
// good, so cap by number of outstanding reqs
if outstandingReqs < desiredInstances {
desiredInstances = outstandingReqs
}
// always try to have one instance
if desiredInstances < 1 {
desiredInstances = 1
}
// AUTOSCALING STEP 2: tweak how many instances we have, to get closer to our goal
// make at most one scaling adjustment per second
adjustFreq := time.Second
now := time.Now()
if lastScaling != nil {
elapsed := now.Sub(*lastScaling)
if elapsed < adjustFreq {
if desiredInstances != f.instances.Len() {
timeout = time.NewTimer(adjustFreq - elapsed)
}
continue
}
}
// kill or start at most one instance to get closer to
// desired number
if f.instances.Len() < desiredInstances {
f.printf("increase instances to %d", f.instances.Len()+1)
f.newInstance()
lastScaling = &now
} else if f.instances.Len() > desiredInstances {
f.printf("reduce instances to %d", f.instances.Len()-1)
waitChan := f.instances.Back().Value.(*LambdaInstance).AsyncKill()
f.instances.Remove(f.instances.Back())
cleanupChan <- waitChan
lastScaling = &now
}
if f.instances.Len() != desiredInstances {
// we can only adjust quickly, so we want to
// run through this loop again as soon as
// possible, even if there are no requests to
// service.
timeout = time.NewTimer(adjustFreq)
}
}
}
func (f *LambdaFunc) | () {
if f.codeDir == "" {
panic("cannot start instance until code has been fetched")
}
linst := &LambdaInstance{
lfunc: f,
codeDir: f.codeDir,
meta: f.meta,
killChan: make(chan chan bool, 1),
}
f.instances.PushBack(linst)
go linst.Task()
}
func (f *LambdaFunc) Kill() {
done := make(chan bool)
f.killChan <- done
<-done
}
| newInstance | identifier_name |
lambdaFunction.go | package lambda
import (
"bufio"
"container/list"
"errors"
"fmt"
"log"
"net/http"
"os"
"path/filepath"
"strings"
"time"
"github.com/open-lambda/open-lambda/ol/common"
"github.com/open-lambda/open-lambda/ol/worker/lambda/packages"
"github.com/open-lambda/open-lambda/ol/worker/sandbox"
)
// LambdaFunc represents a single lambda function (the code)
type LambdaFunc struct {
lmgr *LambdaMgr
name string
rtType common.RuntimeType
// lambda code
lastPull *time.Time
codeDir string
meta *sandbox.SandboxMeta
// lambda execution
funcChan chan *Invocation // server to func
instChan chan *Invocation // func to instances
doneChan chan *Invocation // instances to func
instances *list.List
// send chan to the kill chan to destroy the instance, then
// wait for msg on sent chan to block until it is done
killChan chan chan bool
}
func (f *LambdaFunc) Invoke(w http.ResponseWriter, r *http.Request) {
t := common.T0("LambdaFunc.Invoke")
defer t.T1()
done := make(chan bool)
req := &Invocation{w: w, r: r, done: done}
// send invocation to lambda func task, if room in queue
select {
case f.funcChan <- req:
// block until it's done
<-done
default:
// queue cannot accept more, so reply with backoff
req.w.WriteHeader(http.StatusTooManyRequests)
req.w.Write([]byte("lambda function queue is full\n"))
}
}
// add function name to each log message so we know which logs
// correspond to which LambdaFuncs
func (f *LambdaFunc) printf(format string, args ...any) {
msg := fmt.Sprintf(format, args...)
log.Printf("%s [FUNC %s]", strings.TrimRight(msg, "\n"), f.name)
}
// parseMeta reads in a requirements.txt file that was built from pip-compile
func parseMeta(codeDir string) (meta *sandbox.SandboxMeta, err error) {
meta = &sandbox.SandboxMeta{
Installs: []string{},
Imports: []string{},
}
path := filepath.Join(codeDir, "requirements.txt")
file, err := os.Open(path)
if errors.Is(err, os.ErrNotExist) {
// having a requirements.txt is optional
return meta, nil
} else if err != nil {
return nil, err
}
defer file.Close()
scnr := bufio.NewScanner(file)
for scnr.Scan() {
line := strings.ReplaceAll(scnr.Text(), " ", "")
pkg := strings.Split(line, "#")[0]
if pkg != "" {
pkg = packages.NormalizePkg(pkg)
meta.Installs = append(meta.Installs, pkg)
}
}
return meta, nil
}
// if there is any error:
// 1. we won't switch to the new code
// 2. we won't update pull time (so well check for a fix next time)
func (f *LambdaFunc) pullHandlerIfStale() (err error) {
// check if there is newer code, download it if necessary
now := time.Now()
cacheNs := int64(common.Conf.Registry_cache_ms) * 1000000
// should we check for new code?
if f.lastPull != nil && int64(now.Sub(*f.lastPull)) < cacheNs {
return nil
}
// is there new code?
rtType, codeDir, err := f.lmgr.HandlerPuller.Pull(f.name)
if err != nil {
return err
}
if codeDir == f.codeDir {
return nil
}
f.rtType = rtType
defer func() {
if err != nil {
if err := os.RemoveAll(codeDir); err != nil {
log.Printf("could not cleanup %s after failed pull\n", codeDir)
}
if rtType == common.RT_PYTHON {
// we dirty this dir (e.g., by setting up
// symlinks to packages, so we want the
// HandlerPuller to give us a new one next
// time, even if the code hasn't changed
f.lmgr.HandlerPuller.Reset(f.name)
}
}
}()
if rtType == common.RT_PYTHON {
// inspect new code for dependencies; if we can install
// everything necessary, start using new code
meta, err := parseMeta(codeDir)
if err != nil {
return err
}
// make sure all specified dependencies are installed
// (but don't recursively find others)
for _, pkg := range meta.Installs {
if _, err := f.lmgr.PackagePuller.GetPkg(pkg); err != nil {
return err
}
}
f.lmgr.DepTracer.TraceFunction(codeDir, meta.Installs)
f.meta = meta
} else if rtType == common.RT_NATIVE {
log.Printf("Got native function")
} |
// this Task receives lambda requests, fetches new lambda code as
// needed, and dispatches to a set of lambda instances. Task also
// monitors outstanding requests, and scales the number of instances
// up or down as needed.
//
// communication for a given request is as follows (each of the four
// transfers are commented within the function):
//
// client -> function -> instance -> function -> client
//
// each of the 4 handoffs above is over a chan. In order, those chans are:
// 1. LambdaFunc.funcChan
// 2. LambdaFunc.instChan
// 3. LambdaFunc.doneChan
// 4. Invocation.done
//
// If either LambdaFunc.funcChan or LambdaFunc.instChan is full, we
// respond to the client with a backoff message: StatusTooManyRequests
func (f *LambdaFunc) Task() {
f.printf("debug: LambdaFunc.Task() runs on goroutine %d", common.GetGoroutineID())
// we want to perform various cleanup actions, such as killing
// instances and deleting old code. We want to do these
// asynchronously, but in order. Thus, we use a chan to get
// FIFO behavior and a single cleanup task to get async.
//
// two types can be sent to this chan:
//
// 1. string: this is a path to be deleted
//
// 2. chan: this is a signal chan that corresponds to
// previously initiated cleanup work. We block until we
// receive the complete signal, before proceeding to
// subsequent cleanup tasks in the FIFO.
cleanupChan := make(chan any, 32)
cleanupTaskDone := make(chan bool)
go func() {
for {
msg, ok := <-cleanupChan
if !ok {
cleanupTaskDone <- true
return
}
switch op := msg.(type) {
case string:
if err := os.RemoveAll(op); err != nil {
f.printf("Async code cleanup could not delete %s, even after all instances using it killed: %v", op, err)
}
case chan bool:
<-op
}
}
}()
// stats for autoscaling
outstandingReqs := 0
execMs := common.NewRollingAvg(10)
var lastScaling *time.Time = nil
timeout := time.NewTimer(0)
for {
select {
case <-timeout.C:
if f.codeDir == "" {
continue
}
case req := <-f.funcChan:
// msg: client -> function
// check for new code, and cleanup old code
// (and instances that use it) if necessary
oldCodeDir := f.codeDir
if err := f.pullHandlerIfStale(); err != nil {
f.printf("Error checking for new lambda code at `%s`: %v", f.codeDir, err)
req.w.WriteHeader(http.StatusInternalServerError)
req.w.Write([]byte(err.Error() + "\n"))
req.done <- true
continue
}
if oldCodeDir != "" && oldCodeDir != f.codeDir {
el := f.instances.Front()
for el != nil {
waitChan := el.Value.(*LambdaInstance).AsyncKill()
cleanupChan <- waitChan
el = el.Next()
}
f.instances = list.New()
// cleanupChan is a FIFO, so this will
// happen after the cleanup task waits
// for all instance kills to finish
cleanupChan <- oldCodeDir
}
f.lmgr.DepTracer.TraceInvocation(f.codeDir)
select {
case f.instChan <- req:
// msg: function -> instance
outstandingReqs++
default:
// queue cannot accept more, so reply with backoff
req.w.WriteHeader(http.StatusTooManyRequests)
req.w.Write([]byte("lambda instance queue is full\n"))
req.done <- true
}
case req := <-f.doneChan:
// msg: instance -> function
execMs.Add(req.execMs)
outstandingReqs--
// msg: function -> client
req.done <- true
case done := <-f.killChan:
// signal all instances to die, then wait for
// cleanup task to finish and exit
el := f.instances.Front()
for el != nil {
waitChan := el.Value.(*LambdaInstance).AsyncKill()
cleanupChan <- waitChan
el = el.Next()
}
if f.codeDir != "" {
//cleanupChan <- f.codeDir
}
close(cleanupChan)
<-cleanupTaskDone
done <- true
return
}
// POLICY: how many instances (i.e., virtual sandboxes) should we allocate?
// AUTOSCALING STEP 1: decide how many instances we want
// let's aim to have 1 sandbox per second of outstanding work
inProgressWorkMs := outstandingReqs * execMs.Avg
desiredInstances := inProgressWorkMs / 1000
// if we have, say, one job that will take 100
// seconds, spinning up 100 instances won't do any
// good, so cap by number of outstanding reqs
if outstandingReqs < desiredInstances {
desiredInstances = outstandingReqs
}
// always try to have one instance
if desiredInstances < 1 {
desiredInstances = 1
}
// AUTOSCALING STEP 2: tweak how many instances we have, to get closer to our goal
// make at most one scaling adjustment per second
adjustFreq := time.Second
now := time.Now()
if lastScaling != nil {
elapsed := now.Sub(*lastScaling)
if elapsed < adjustFreq {
if desiredInstances != f.instances.Len() {
timeout = time.NewTimer(adjustFreq - elapsed)
}
continue
}
}
// kill or start at most one instance to get closer to
// desired number
if f.instances.Len() < desiredInstances {
f.printf("increase instances to %d", f.instances.Len()+1)
f.newInstance()
lastScaling = &now
} else if f.instances.Len() > desiredInstances {
f.printf("reduce instances to %d", f.instances.Len()-1)
waitChan := f.instances.Back().Value.(*LambdaInstance).AsyncKill()
f.instances.Remove(f.instances.Back())
cleanupChan <- waitChan
lastScaling = &now
}
if f.instances.Len() != desiredInstances {
// we can only adjust quickly, so we want to
// run through this loop again as soon as
// possible, even if there are no requests to
// service.
timeout = time.NewTimer(adjustFreq)
}
}
}
func (f *LambdaFunc) newInstance() {
if f.codeDir == "" {
panic("cannot start instance until code has been fetched")
}
linst := &LambdaInstance{
lfunc: f,
codeDir: f.codeDir,
meta: f.meta,
killChan: make(chan chan bool, 1),
}
f.instances.PushBack(linst)
go linst.Task()
}
func (f *LambdaFunc) Kill() {
done := make(chan bool)
f.killChan <- done
<-done
} |
f.codeDir = codeDir
f.lastPull = &now
return nil
} | random_line_split |
lib.rs | mod file_log;
mod formatter;
pub mod log_macro;
use std::env;
use std::fmt;
use std::io::{self, BufWriter};
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;
use std::thread;
use log::{self, SetLoggerError};
use slog::{self, slog_o, Drain, FnValue, Key, OwnedKVList, PushFnValue, Record, KV};
use slog_async::{Async, OverflowStrategy};
use slog_term::{Decorator, PlainDecorator, RecordDecorator};
use self::file_log::{RotateBySize, RotateByTime, RotatingFileLogger, RotatingFileLoggerBuilder};
pub use slog::{FilterFn, Level};
use std::fmt::Arguments;
use std::time::Duration;
// The suffix appended to the end of rotated log files by datetime log rotator
// Warning: Diagnostics service parses log files by file name format.
// Remember to update the corresponding code when suffix layout is changed.
pub const DATETIME_ROTATE_SUFFIX: &str = "%Y-%m-%d-%H:%M:%S%.f";
// Default is 128.
// Extended since blocking is set, and we don't want to block very often.
const SLOG_CHANNEL_SIZE: usize = 10240;
// Default is DropAndReport.
// It is not desirable to have dropped logs in our use case.
const SLOG_CHANNEL_OVERFLOW_STRATEGY: OverflowStrategy = OverflowStrategy::Block;
const TIMESTAMP_FORMAT: &str = "%Y/%m/%d %H:%M:%S%.3f %:z";
static LOG_LEVEL: AtomicUsize = AtomicUsize::new(usize::max_value());
#[derive(Clone, Debug)]
pub enum LogFormat {
Text,
Json,
}
/// Makes a thread name with an additional tag inherited from the current thread.
#[macro_export]
macro_rules! thd_name {
($name:expr) => {{
$crate::get_tag_from_thread_name()
.map(|tag| format!("{}::{}", $name, tag))
.unwrap_or_else(|| $name.to_owned())
}};
}
pub fn get_tag_from_thread_name() -> Option<String> {
thread::current()
.name()
.and_then(|name| name.split("::").skip(1).last())
.map(From::from)
}
pub fn init_log<D>(
drain: D,
level: Level,
use_async: bool,
init_stdlog: bool,
mut disabled_targets: Vec<String>,
slow_threshold: u64,
) -> Result<(), SetLoggerError>
where
D: Drain + Send + 'static,
<D as Drain>::Err: std::fmt::Display,
{
// Set the initial log level used by the Drains
LOG_LEVEL.store(level.as_usize(), Ordering::Relaxed);
// Only for debug purpose, so use environment instead of configuration file.
if let Ok(extra_modules) = env::var("FASTJOB_DISABLE_LOG_TARGETS") {
disabled_targets.extend(extra_modules.split(',').map(ToOwned::to_owned));
}
let filter = move |record: &Record| {
if !disabled_targets.is_empty() {
// Here get the highest level module name to check.
let module = record.module().splitn(2, "::").next().unwrap();
disabled_targets.iter().all(|target| target != module)
} else {
true
}
};
let logger = if use_async {
let drain = Async::new(LogAndFuse(drain))
.chan_size(SLOG_CHANNEL_SIZE)
.overflow_strategy(SLOG_CHANNEL_OVERFLOW_STRATEGY)
.thread_name(thd_name!("slogger"))
.build()
.filter_level(level)
.fuse();
let drain = SlowLogFilter {
threshold: slow_threshold,
inner: drain,
};
let filtered = drain.filter(filter).fuse();
slog::Logger::root(filtered, slog_o!())
} else {
let drain = LogAndFuse(Mutex::new(drain).filter_level(level));
let drain = SlowLogFilter {
threshold: slow_threshold,
inner: drain,
};
let filtered = drain.filter(filter).fuse();
slog::Logger::root(filtered, slog_o!())
};
set_global_logger(level, init_stdlog, logger)
}
pub fn set_global_logger(
level: Level,
init_stdlog: bool,
logger: slog::Logger,
) -> Result<(), SetLoggerError> {
slog_global::set_global(logger);
if init_stdlog {
slog_global::redirect_std_log(Some(level))?;
grpcio::redirect_log();
}
Ok(())
}
/// Constructs a new file writer which outputs log to a file at the specified path.
/// The file writer rotates for the specified timespan.
pub fn file_writer<N>(
path: impl AsRef<Path>,
rotation_timespan: Duration,
rotation_size: u64,
rename: N,
) -> io::Result<BufWriter<RotatingFileLogger>>
where
N: 'static + Send + Fn(&Path) -> io::Result<PathBuf>,
{
let logger = BufWriter::new(
RotatingFileLoggerBuilder::builder(rename)
.add_path(path)
.add_rotator(RotateByTime::new(rotation_timespan))
.add_rotator(RotateBySize::new(rotation_size))
.build()?,
);
Ok(logger)
}
/// Constructs a new terminal writer which outputs logs to stderr.
pub fn term_writer() -> io::Stderr {
io::stderr()
}
/// Formats output logs to "FastJob Log Format".
pub fn text_format<W>(io: W) -> FastJobFormat<PlainDecorator<W>>
where
W: io::Write,
{
let decorator = PlainDecorator::new(io);
FastJobFormat::new(decorator)
}
/// Formats output logs to JSON format.
pub fn json_format<W>(io: W) -> slog_json::Json<W>
where
W: io::Write,
{
slog_json::Json::new(io)
.set_newlines(true)
.set_flush(true)
.add_key_value(slog_o!(
"message" => PushFnValue(|record, ser| ser.emit(record.msg())),
"caller" => PushFnValue(|record, ser| ser.emit(format_args!(
"{}:{}",
Path::new(record.file())
.file_name()
.and_then(|path| path.to_str())
.unwrap_or("<unknown>"),
record.line(),
))),
"level" => FnValue(|record| get_unified_log_level(record.level())),
"time" => FnValue(|_| chrono::Local::now().format(TIMESTAMP_FORMAT).to_string()),
))
.build()
}
pub fn get_level_by_string(lv: &str) -> Option<Level> {
match &*lv.to_owned().to_lowercase() {
"critical" => Some(Level::Critical),
"error" => Some(Level::Error),
// We support `warn` due to legacy.
"warning" | "warn" => Some(Level::Warning),
"debug" => Some(Level::Debug),
"trace" => Some(Level::Trace),
"info" => Some(Level::Info),
_ => None,
}
}
// The `to_string()` function of `slog::Level` produces values like `erro` and `trce` instead of
// the full words. This produces the full word.
pub fn get_string_by_level(lv: Level) -> &'static str {
match lv {
Level::Critical => "critical",
Level::Error => "error",
Level::Warning => "warning",
Level::Debug => "debug",
Level::Trace => "trace",
Level::Info => "info",
}
}
// Converts `slog::Level` to unified log level format.
fn get_unified_log_level(lv: Level) -> &'static str {
match lv {
Level::Critical => "FATAL",
Level::Error => "ERROR",
Level::Warning => "WARN",
Level::Info => "INFO",
Level::Debug => "DEBUG",
Level::Trace => "TRACE",
}
}
pub fn convert_slog_level_to_log_level(lv: Level) -> log::Level {
match lv {
Level::Critical | Level::Error => log::Level::Error,
Level::Warning => log::Level::Warn,
Level::Debug => log::Level::Debug,
Level::Trace => log::Level::Trace,
Level::Info => log::Level::Info,
}
}
pub fn convert_log_level_to_slog_level(lv: log::Level) -> Level {
match lv {
log::Level::Error => Level::Error,
log::Level::Warn => Level::Warning,
log::Level::Debug => Level::Debug,
log::Level::Trace => Level::Trace,
log::Level::Info => Level::Info,
}
}
pub fn get_log_level() -> Option<Level> {
Level::from_usize(LOG_LEVEL.load(Ordering::Relaxed))
}
pub fn set_log_level(new_level: Level) {
LOG_LEVEL.store(new_level.as_usize(), Ordering::SeqCst)
}
pub struct FastJobFormat<D>
where
D: Decorator,
{
decorator: D,
}
impl<D> FastJobFormat<D>
where
D: Decorator,
{
pub fn new(decorator: D) -> Self {
Self { decorator }
}
}
impl<D> Drain for FastJobFormat<D>
where
D: Decorator,
{
type Ok = ();
type Err = io::Error;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.level().as_usize() <= LOG_LEVEL.load(Ordering::Relaxed) {
self.decorator.with_record(record, values, |decorator| {
write_log_header(decorator, record)?;
write_log_msg(decorator, record)?;
write_log_fields(decorator, record, values)?;
decorator.start_whitespace()?;
writeln!(decorator)?;
decorator.flush()?;
Ok(())
})?;
}
Ok(())
}
}
struct LogAndFuse<D>(D);
impl<D> Drain for LogAndFuse<D>
where
D: Drain,
<D as Drain>::Err: std::fmt::Display,
{
type Ok = ();
type Err = slog::Never;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.level().as_usize() <= LOG_LEVEL.load(Ordering::Relaxed) {
if let Err(e) = self.0.log(record, values) {
let fatal_drainer = Mutex::new(text_format(term_writer())).ignore_res();
fatal_drainer.log(record, values).unwrap();
let fatal_logger = slog::Logger::root(fatal_drainer, slog_o!());
slog::slog_crit!(
fatal_logger,
"logger encountered error";
"err" => %e,
)
}
}
Ok(())
}
}
/// Filters logs with operation cost lower than threshold. Otherwise output logs to inner drainer.
struct SlowLogFilter<D> {
threshold: u64,
inner: D,
}
impl<D> Drain for SlowLogFilter<D>
where
D: Drain<Ok = (), Err = slog::Never>,
{
type Ok = ();
type Err = slog::Never;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.tag() == "slow_log" {
let mut s = SlowCostSerializer { cost: None };
let kv = record.kv();
let _ = kv.serialize(record, &mut s);
if let Some(cost) = s.cost {
if cost <= self.threshold {
return Ok(());
}
}
}
self.inner.log(record, values)
}
}
struct SlowCostSerializer {
// None means input record without key `takes`
cost: Option<u64>,
}
impl slog::ser::Serializer for SlowCostSerializer {
fn emit_arguments(&mut self, _key: Key, _val: &fmt::Arguments<'_>) -> slog::Result {
Ok(())
}
fn emit_u64(&mut self, key: Key, val: u64) -> slog::Result {
if key == "takes" {
self.cost = Some(val);
}
Ok(())
}
}
/// Special struct for slow log cost serializing
pub struct LogCost(pub u64);
impl slog::Value for LogCost {
fn serialize(
&self,
_record: &Record,
key: Key,
serializer: &mut dyn slog::Serializer,
) -> slog::Result {
serializer.emit_u64(key, self.0)
}
}
/// Dispatches logs to a normal `Drain` or a slow-log specialized `Drain` by tag
pub struct LogDispatcher<N: Drain, S: Drain> {
normal: N,
slow: Option<S>,
}
impl<N: Drain, S: Drain> LogDispatcher<N, S> {
pub fn new(normal: N, slow: Option<S>) -> Self {
Self { normal, slow }
}
}
impl<N, S> Drain for LogDispatcher<N, S>
where
N: Drain<Ok = (), Err = io::Error>,
S: Drain<Ok = (), Err = io::Error>,
{
type Ok = (); | type Err = io::Error;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
let tag = record.tag();
println!("{}", tag);
if self.slow.is_some() && tag.starts_with("slow_log") {
self.slow.as_ref().unwrap().log(record, values)
} else {
self.normal.log(record, values)
}
}
}
/// Writes log header to decorator.
fn write_log_header(decorator: &mut dyn RecordDecorator, record: &Record<'_>) -> io::Result<()> {
decorator.start_timestamp()?;
write!(
decorator,
"[{}]",
chrono::Local::now().format(TIMESTAMP_FORMAT)
)?;
decorator.start_whitespace()?;
write!(decorator, " ")?;
decorator.start_level()?;
write!(decorator, "[{}]", get_unified_log_level(record.level()))?;
decorator.start_whitespace()?;
write!(decorator, " ")?;
// Write source file info.
decorator.start_msg()?;
if let Some(path) = Path::new(record.file())
.file_name()
.and_then(|path| path.to_str())
{
write!(decorator, "[")?;
formatter::write_file_name(decorator, path)?;
write!(decorator, ":{}]", record.line())?;
} else {
write!(decorator, "[<unknown>]")?;
}
Ok(())
}
/// Writes log message to decorator.
fn write_log_msg(decorator: &mut dyn RecordDecorator, record: &Record<'_>) -> io::Result<()> {
decorator.start_whitespace()?;
write!(decorator, " ")?;
decorator.start_msg()?;
write!(decorator, "[")?;
let msg = format!("{}", record.msg());
formatter::write_escaped_str(decorator, &msg)?;
write!(decorator, "]")?;
Ok(())
}
/// Writes log fields to decorator.
fn write_log_fields(
decorator: &mut dyn RecordDecorator,
record: &Record<'_>,
values: &OwnedKVList,
) -> io::Result<()> {
let mut serializer = Serializer::new(decorator);
record.kv().serialize(record, &mut serializer)?;
values.serialize(record, &mut serializer)?;
serializer.finish();
Ok(())
}
struct Serializer<'a> {
decorator: &'a mut dyn RecordDecorator,
}
impl<'a> Serializer<'a> {
fn new(decorator: &'a mut dyn RecordDecorator) -> Self {
Self { decorator }
}
fn write_whitespace(&mut self) -> io::Result<()> {
self.decorator.start_whitespace()?;
write!(self.decorator, " ")?;
Ok(())
}
fn finish(self) {}
}
impl<'a> Drop for Serializer<'a> {
fn drop(&mut self) {}
}
impl<'a> slog::Serializer for Serializer<'a> {
fn emit_none(&mut self, key: Key) -> slog::Result {
self.emit_arguments(key, &format_args!("None"))
}
fn emit_arguments(&mut self, key: Key, val: &fmt::Arguments<'_>) -> slog::Result {
self.write_whitespace()?;
// Write key
write!(self.decorator, "[")?;
self.decorator.start_key()?;
formatter::write_escaped_str(&mut self.decorator, key as &str)?;
// Write separator
self.decorator.start_separator()?;
write!(self.decorator, "=")?;
// Write value
let value = format!("{}", val);
self.decorator.start_value()?;
formatter::write_escaped_str(self.decorator, &value)?;
self.decorator.reset()?;
write!(self.decorator, "]")?;
Ok(())
}
} | random_line_split | |
lib.rs | mod file_log;
mod formatter;
pub mod log_macro;
use std::env;
use std::fmt;
use std::io::{self, BufWriter};
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;
use std::thread;
use log::{self, SetLoggerError};
use slog::{self, slog_o, Drain, FnValue, Key, OwnedKVList, PushFnValue, Record, KV};
use slog_async::{Async, OverflowStrategy};
use slog_term::{Decorator, PlainDecorator, RecordDecorator};
use self::file_log::{RotateBySize, RotateByTime, RotatingFileLogger, RotatingFileLoggerBuilder};
pub use slog::{FilterFn, Level};
use std::fmt::Arguments;
use std::time::Duration;
// The suffix appended to the end of rotated log files by datetime log rotator
// Warning: Diagnostics service parses log files by file name format.
// Remember to update the corresponding code when suffix layout is changed.
pub const DATETIME_ROTATE_SUFFIX: &str = "%Y-%m-%d-%H:%M:%S%.f";
// Default is 128.
// Extended since blocking is set, and we don't want to block very often.
const SLOG_CHANNEL_SIZE: usize = 10240;
// Default is DropAndReport.
// It is not desirable to have dropped logs in our use case.
const SLOG_CHANNEL_OVERFLOW_STRATEGY: OverflowStrategy = OverflowStrategy::Block;
const TIMESTAMP_FORMAT: &str = "%Y/%m/%d %H:%M:%S%.3f %:z";
static LOG_LEVEL: AtomicUsize = AtomicUsize::new(usize::max_value());
#[derive(Clone, Debug)]
pub enum LogFormat {
Text,
Json,
}
/// Makes a thread name with an additional tag inherited from the current thread.
#[macro_export]
macro_rules! thd_name {
($name:expr) => {{
$crate::get_tag_from_thread_name()
.map(|tag| format!("{}::{}", $name, tag))
.unwrap_or_else(|| $name.to_owned())
}};
}
pub fn get_tag_from_thread_name() -> Option<String> {
thread::current()
.name()
.and_then(|name| name.split("::").skip(1).last())
.map(From::from)
}
pub fn init_log<D>(
drain: D,
level: Level,
use_async: bool,
init_stdlog: bool,
mut disabled_targets: Vec<String>,
slow_threshold: u64,
) -> Result<(), SetLoggerError>
where
D: Drain + Send + 'static,
<D as Drain>::Err: std::fmt::Display,
{
// Set the initial log level used by the Drains
LOG_LEVEL.store(level.as_usize(), Ordering::Relaxed);
// Only for debug purpose, so use environment instead of configuration file.
if let Ok(extra_modules) = env::var("FASTJOB_DISABLE_LOG_TARGETS") {
disabled_targets.extend(extra_modules.split(',').map(ToOwned::to_owned));
}
let filter = move |record: &Record| {
if !disabled_targets.is_empty() {
// Here get the highest level module name to check.
let module = record.module().splitn(2, "::").next().unwrap();
disabled_targets.iter().all(|target| target != module)
} else {
true
}
};
let logger = if use_async {
let drain = Async::new(LogAndFuse(drain))
.chan_size(SLOG_CHANNEL_SIZE)
.overflow_strategy(SLOG_CHANNEL_OVERFLOW_STRATEGY)
.thread_name(thd_name!("slogger"))
.build()
.filter_level(level)
.fuse();
let drain = SlowLogFilter {
threshold: slow_threshold,
inner: drain,
};
let filtered = drain.filter(filter).fuse();
slog::Logger::root(filtered, slog_o!())
} else {
let drain = LogAndFuse(Mutex::new(drain).filter_level(level));
let drain = SlowLogFilter {
threshold: slow_threshold,
inner: drain,
};
let filtered = drain.filter(filter).fuse();
slog::Logger::root(filtered, slog_o!())
};
set_global_logger(level, init_stdlog, logger)
}
pub fn set_global_logger(
level: Level,
init_stdlog: bool,
logger: slog::Logger,
) -> Result<(), SetLoggerError> {
slog_global::set_global(logger);
if init_stdlog {
slog_global::redirect_std_log(Some(level))?;
grpcio::redirect_log();
}
Ok(())
}
/// Constructs a new file writer which outputs log to a file at the specified path.
/// The file writer rotates for the specified timespan.
pub fn file_writer<N>(
path: impl AsRef<Path>,
rotation_timespan: Duration,
rotation_size: u64,
rename: N,
) -> io::Result<BufWriter<RotatingFileLogger>>
where
N: 'static + Send + Fn(&Path) -> io::Result<PathBuf>,
{
let logger = BufWriter::new(
RotatingFileLoggerBuilder::builder(rename)
.add_path(path)
.add_rotator(RotateByTime::new(rotation_timespan))
.add_rotator(RotateBySize::new(rotation_size))
.build()?,
);
Ok(logger)
}
/// Constructs a new terminal writer which outputs logs to stderr.
pub fn term_writer() -> io::Stderr {
io::stderr()
}
/// Formats output logs to "FastJob Log Format".
pub fn text_format<W>(io: W) -> FastJobFormat<PlainDecorator<W>>
where
W: io::Write,
{
let decorator = PlainDecorator::new(io);
FastJobFormat::new(decorator)
}
/// Formats output logs to JSON format.
pub fn json_format<W>(io: W) -> slog_json::Json<W>
where
W: io::Write,
{
slog_json::Json::new(io)
.set_newlines(true)
.set_flush(true)
.add_key_value(slog_o!(
"message" => PushFnValue(|record, ser| ser.emit(record.msg())),
"caller" => PushFnValue(|record, ser| ser.emit(format_args!(
"{}:{}",
Path::new(record.file())
.file_name()
.and_then(|path| path.to_str())
.unwrap_or("<unknown>"),
record.line(),
))),
"level" => FnValue(|record| get_unified_log_level(record.level())),
"time" => FnValue(|_| chrono::Local::now().format(TIMESTAMP_FORMAT).to_string()),
))
.build()
}
pub fn get_level_by_string(lv: &str) -> Option<Level> {
match &*lv.to_owned().to_lowercase() {
"critical" => Some(Level::Critical),
"error" => Some(Level::Error),
// We support `warn` due to legacy.
"warning" | "warn" => Some(Level::Warning),
"debug" => Some(Level::Debug),
"trace" => Some(Level::Trace),
"info" => Some(Level::Info),
_ => None,
}
}
// The `to_string()` function of `slog::Level` produces values like `erro` and `trce` instead of
// the full words. This produces the full word.
pub fn get_string_by_level(lv: Level) -> &'static str |
// Converts `slog::Level` to unified log level format.
fn get_unified_log_level(lv: Level) -> &'static str {
match lv {
Level::Critical => "FATAL",
Level::Error => "ERROR",
Level::Warning => "WARN",
Level::Info => "INFO",
Level::Debug => "DEBUG",
Level::Trace => "TRACE",
}
}
pub fn convert_slog_level_to_log_level(lv: Level) -> log::Level {
match lv {
Level::Critical | Level::Error => log::Level::Error,
Level::Warning => log::Level::Warn,
Level::Debug => log::Level::Debug,
Level::Trace => log::Level::Trace,
Level::Info => log::Level::Info,
}
}
pub fn convert_log_level_to_slog_level(lv: log::Level) -> Level {
match lv {
log::Level::Error => Level::Error,
log::Level::Warn => Level::Warning,
log::Level::Debug => Level::Debug,
log::Level::Trace => Level::Trace,
log::Level::Info => Level::Info,
}
}
pub fn get_log_level() -> Option<Level> {
Level::from_usize(LOG_LEVEL.load(Ordering::Relaxed))
}
pub fn set_log_level(new_level: Level) {
LOG_LEVEL.store(new_level.as_usize(), Ordering::SeqCst)
}
pub struct FastJobFormat<D>
where
D: Decorator,
{
decorator: D,
}
impl<D> FastJobFormat<D>
where
D: Decorator,
{
pub fn new(decorator: D) -> Self {
Self { decorator }
}
}
impl<D> Drain for FastJobFormat<D>
where
D: Decorator,
{
type Ok = ();
type Err = io::Error;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.level().as_usize() <= LOG_LEVEL.load(Ordering::Relaxed) {
self.decorator.with_record(record, values, |decorator| {
write_log_header(decorator, record)?;
write_log_msg(decorator, record)?;
write_log_fields(decorator, record, values)?;
decorator.start_whitespace()?;
writeln!(decorator)?;
decorator.flush()?;
Ok(())
})?;
}
Ok(())
}
}
struct LogAndFuse<D>(D);
impl<D> Drain for LogAndFuse<D>
where
D: Drain,
<D as Drain>::Err: std::fmt::Display,
{
type Ok = ();
type Err = slog::Never;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.level().as_usize() <= LOG_LEVEL.load(Ordering::Relaxed) {
if let Err(e) = self.0.log(record, values) {
let fatal_drainer = Mutex::new(text_format(term_writer())).ignore_res();
fatal_drainer.log(record, values).unwrap();
let fatal_logger = slog::Logger::root(fatal_drainer, slog_o!());
slog::slog_crit!(
fatal_logger,
"logger encountered error";
"err" => %e,
)
}
}
Ok(())
}
}
/// Filters logs with operation cost lower than threshold. Otherwise output logs to inner drainer.
struct SlowLogFilter<D> {
threshold: u64,
inner: D,
}
impl<D> Drain for SlowLogFilter<D>
where
D: Drain<Ok = (), Err = slog::Never>,
{
type Ok = ();
type Err = slog::Never;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.tag() == "slow_log" {
let mut s = SlowCostSerializer { cost: None };
let kv = record.kv();
let _ = kv.serialize(record, &mut s);
if let Some(cost) = s.cost {
if cost <= self.threshold {
return Ok(());
}
}
}
self.inner.log(record, values)
}
}
struct SlowCostSerializer {
// None means input record without key `takes`
cost: Option<u64>,
}
impl slog::ser::Serializer for SlowCostSerializer {
fn emit_arguments(&mut self, _key: Key, _val: &fmt::Arguments<'_>) -> slog::Result {
Ok(())
}
fn emit_u64(&mut self, key: Key, val: u64) -> slog::Result {
if key == "takes" {
self.cost = Some(val);
}
Ok(())
}
}
/// Special struct for slow log cost serializing
pub struct LogCost(pub u64);
impl slog::Value for LogCost {
fn serialize(
&self,
_record: &Record,
key: Key,
serializer: &mut dyn slog::Serializer,
) -> slog::Result {
serializer.emit_u64(key, self.0)
}
}
/// Dispatches logs to a normal `Drain` or a slow-log specialized `Drain` by tag
pub struct LogDispatcher<N: Drain, S: Drain> {
normal: N,
slow: Option<S>,
}
impl<N: Drain, S: Drain> LogDispatcher<N, S> {
pub fn new(normal: N, slow: Option<S>) -> Self {
Self { normal, slow }
}
}
impl<N, S> Drain for LogDispatcher<N, S>
where
N: Drain<Ok = (), Err = io::Error>,
S: Drain<Ok = (), Err = io::Error>,
{
type Ok = ();
type Err = io::Error;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
let tag = record.tag();
println!("{}", tag);
if self.slow.is_some() && tag.starts_with("slow_log") {
self.slow.as_ref().unwrap().log(record, values)
} else {
self.normal.log(record, values)
}
}
}
/// Writes log header to decorator.
fn write_log_header(decorator: &mut dyn RecordDecorator, record: &Record<'_>) -> io::Result<()> {
decorator.start_timestamp()?;
write!(
decorator,
"[{}]",
chrono::Local::now().format(TIMESTAMP_FORMAT)
)?;
decorator.start_whitespace()?;
write!(decorator, " ")?;
decorator.start_level()?;
write!(decorator, "[{}]", get_unified_log_level(record.level()))?;
decorator.start_whitespace()?;
write!(decorator, " ")?;
// Write source file info.
decorator.start_msg()?;
if let Some(path) = Path::new(record.file())
.file_name()
.and_then(|path| path.to_str())
{
write!(decorator, "[")?;
formatter::write_file_name(decorator, path)?;
write!(decorator, ":{}]", record.line())?;
} else {
write!(decorator, "[<unknown>]")?;
}
Ok(())
}
/// Writes log message to decorator.
fn write_log_msg(decorator: &mut dyn RecordDecorator, record: &Record<'_>) -> io::Result<()> {
decorator.start_whitespace()?;
write!(decorator, " ")?;
decorator.start_msg()?;
write!(decorator, "[")?;
let msg = format!("{}", record.msg());
formatter::write_escaped_str(decorator, &msg)?;
write!(decorator, "]")?;
Ok(())
}
/// Writes log fields to decorator.
fn write_log_fields(
decorator: &mut dyn RecordDecorator,
record: &Record<'_>,
values: &OwnedKVList,
) -> io::Result<()> {
let mut serializer = Serializer::new(decorator);
record.kv().serialize(record, &mut serializer)?;
values.serialize(record, &mut serializer)?;
serializer.finish();
Ok(())
}
struct Serializer<'a> {
decorator: &'a mut dyn RecordDecorator,
}
impl<'a> Serializer<'a> {
fn new(decorator: &'a mut dyn RecordDecorator) -> Self {
Self { decorator }
}
fn write_whitespace(&mut self) -> io::Result<()> {
self.decorator.start_whitespace()?;
write!(self.decorator, " ")?;
Ok(())
}
fn finish(self) {}
}
impl<'a> Drop for Serializer<'a> {
fn drop(&mut self) {}
}
impl<'a> slog::Serializer for Serializer<'a> {
fn emit_none(&mut self, key: Key) -> slog::Result {
self.emit_arguments(key, &format_args!("None"))
}
fn emit_arguments(&mut self, key: Key, val: &fmt::Arguments<'_>) -> slog::Result {
self.write_whitespace()?;
// Write key
write!(self.decorator, "[")?;
self.decorator.start_key()?;
formatter::write_escaped_str(&mut self.decorator, key as &str)?;
// Write separator
self.decorator.start_separator()?;
write!(self.decorator, "=")?;
// Write value
let value = format!("{}", val);
self.decorator.start_value()?;
formatter::write_escaped_str(self.decorator, &value)?;
self.decorator.reset()?;
write!(self.decorator, "]")?;
Ok(())
}
}
| {
match lv {
Level::Critical => "critical",
Level::Error => "error",
Level::Warning => "warning",
Level::Debug => "debug",
Level::Trace => "trace",
Level::Info => "info",
}
} | identifier_body |
lib.rs | mod file_log;
mod formatter;
pub mod log_macro;
use std::env;
use std::fmt;
use std::io::{self, BufWriter};
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;
use std::thread;
use log::{self, SetLoggerError};
use slog::{self, slog_o, Drain, FnValue, Key, OwnedKVList, PushFnValue, Record, KV};
use slog_async::{Async, OverflowStrategy};
use slog_term::{Decorator, PlainDecorator, RecordDecorator};
use self::file_log::{RotateBySize, RotateByTime, RotatingFileLogger, RotatingFileLoggerBuilder};
pub use slog::{FilterFn, Level};
use std::fmt::Arguments;
use std::time::Duration;
// The suffix appended to the end of rotated log files by datetime log rotator
// Warning: Diagnostics service parses log files by file name format.
// Remember to update the corresponding code when suffix layout is changed.
pub const DATETIME_ROTATE_SUFFIX: &str = "%Y-%m-%d-%H:%M:%S%.f";
// Default is 128.
// Extended since blocking is set, and we don't want to block very often.
const SLOG_CHANNEL_SIZE: usize = 10240;
// Default is DropAndReport.
// It is not desirable to have dropped logs in our use case.
const SLOG_CHANNEL_OVERFLOW_STRATEGY: OverflowStrategy = OverflowStrategy::Block;
const TIMESTAMP_FORMAT: &str = "%Y/%m/%d %H:%M:%S%.3f %:z";
static LOG_LEVEL: AtomicUsize = AtomicUsize::new(usize::max_value());
#[derive(Clone, Debug)]
pub enum LogFormat {
Text,
Json,
}
/// Makes a thread name with an additional tag inherited from the current thread.
#[macro_export]
macro_rules! thd_name {
($name:expr) => {{
$crate::get_tag_from_thread_name()
.map(|tag| format!("{}::{}", $name, tag))
.unwrap_or_else(|| $name.to_owned())
}};
}
pub fn get_tag_from_thread_name() -> Option<String> {
thread::current()
.name()
.and_then(|name| name.split("::").skip(1).last())
.map(From::from)
}
pub fn init_log<D>(
drain: D,
level: Level,
use_async: bool,
init_stdlog: bool,
mut disabled_targets: Vec<String>,
slow_threshold: u64,
) -> Result<(), SetLoggerError>
where
D: Drain + Send + 'static,
<D as Drain>::Err: std::fmt::Display,
{
// Set the initial log level used by the Drains
LOG_LEVEL.store(level.as_usize(), Ordering::Relaxed);
// Only for debug purpose, so use environment instead of configuration file.
if let Ok(extra_modules) = env::var("FASTJOB_DISABLE_LOG_TARGETS") {
disabled_targets.extend(extra_modules.split(',').map(ToOwned::to_owned));
}
let filter = move |record: &Record| {
if !disabled_targets.is_empty() {
// Here get the highest level module name to check.
let module = record.module().splitn(2, "::").next().unwrap();
disabled_targets.iter().all(|target| target != module)
} else {
true
}
};
let logger = if use_async {
let drain = Async::new(LogAndFuse(drain))
.chan_size(SLOG_CHANNEL_SIZE)
.overflow_strategy(SLOG_CHANNEL_OVERFLOW_STRATEGY)
.thread_name(thd_name!("slogger"))
.build()
.filter_level(level)
.fuse();
let drain = SlowLogFilter {
threshold: slow_threshold,
inner: drain,
};
let filtered = drain.filter(filter).fuse();
slog::Logger::root(filtered, slog_o!())
} else {
let drain = LogAndFuse(Mutex::new(drain).filter_level(level));
let drain = SlowLogFilter {
threshold: slow_threshold,
inner: drain,
};
let filtered = drain.filter(filter).fuse();
slog::Logger::root(filtered, slog_o!())
};
set_global_logger(level, init_stdlog, logger)
}
pub fn set_global_logger(
level: Level,
init_stdlog: bool,
logger: slog::Logger,
) -> Result<(), SetLoggerError> {
slog_global::set_global(logger);
if init_stdlog {
slog_global::redirect_std_log(Some(level))?;
grpcio::redirect_log();
}
Ok(())
}
/// Constructs a new file writer which outputs log to a file at the specified path.
/// The file writer rotates for the specified timespan.
pub fn file_writer<N>(
path: impl AsRef<Path>,
rotation_timespan: Duration,
rotation_size: u64,
rename: N,
) -> io::Result<BufWriter<RotatingFileLogger>>
where
N: 'static + Send + Fn(&Path) -> io::Result<PathBuf>,
{
let logger = BufWriter::new(
RotatingFileLoggerBuilder::builder(rename)
.add_path(path)
.add_rotator(RotateByTime::new(rotation_timespan))
.add_rotator(RotateBySize::new(rotation_size))
.build()?,
);
Ok(logger)
}
/// Constructs a new terminal writer which outputs logs to stderr.
pub fn term_writer() -> io::Stderr {
io::stderr()
}
/// Formats output logs to "FastJob Log Format".
pub fn text_format<W>(io: W) -> FastJobFormat<PlainDecorator<W>>
where
W: io::Write,
{
let decorator = PlainDecorator::new(io);
FastJobFormat::new(decorator)
}
/// Formats output logs to JSON format.
pub fn json_format<W>(io: W) -> slog_json::Json<W>
where
W: io::Write,
{
slog_json::Json::new(io)
.set_newlines(true)
.set_flush(true)
.add_key_value(slog_o!(
"message" => PushFnValue(|record, ser| ser.emit(record.msg())),
"caller" => PushFnValue(|record, ser| ser.emit(format_args!(
"{}:{}",
Path::new(record.file())
.file_name()
.and_then(|path| path.to_str())
.unwrap_or("<unknown>"),
record.line(),
))),
"level" => FnValue(|record| get_unified_log_level(record.level())),
"time" => FnValue(|_| chrono::Local::now().format(TIMESTAMP_FORMAT).to_string()),
))
.build()
}
pub fn get_level_by_string(lv: &str) -> Option<Level> {
match &*lv.to_owned().to_lowercase() {
"critical" => Some(Level::Critical),
"error" => Some(Level::Error),
// We support `warn` due to legacy.
"warning" | "warn" => Some(Level::Warning),
"debug" => Some(Level::Debug),
"trace" => Some(Level::Trace),
"info" => Some(Level::Info),
_ => None,
}
}
// The `to_string()` function of `slog::Level` produces values like `erro` and `trce` instead of
// the full words. This produces the full word.
pub fn get_string_by_level(lv: Level) -> &'static str {
match lv {
Level::Critical => "critical",
Level::Error => "error",
Level::Warning => "warning",
Level::Debug => "debug",
Level::Trace => "trace",
Level::Info => "info",
}
}
// Converts `slog::Level` to unified log level format.
fn get_unified_log_level(lv: Level) -> &'static str {
match lv {
Level::Critical => "FATAL",
Level::Error => "ERROR",
Level::Warning => "WARN",
Level::Info => "INFO",
Level::Debug => "DEBUG",
Level::Trace => "TRACE",
}
}
pub fn convert_slog_level_to_log_level(lv: Level) -> log::Level {
match lv {
Level::Critical | Level::Error => log::Level::Error,
Level::Warning => log::Level::Warn,
Level::Debug => log::Level::Debug,
Level::Trace => log::Level::Trace,
Level::Info => log::Level::Info,
}
}
pub fn convert_log_level_to_slog_level(lv: log::Level) -> Level {
match lv {
log::Level::Error => Level::Error,
log::Level::Warn => Level::Warning,
log::Level::Debug => Level::Debug,
log::Level::Trace => Level::Trace,
log::Level::Info => Level::Info,
}
}
pub fn get_log_level() -> Option<Level> {
Level::from_usize(LOG_LEVEL.load(Ordering::Relaxed))
}
pub fn set_log_level(new_level: Level) {
LOG_LEVEL.store(new_level.as_usize(), Ordering::SeqCst)
}
pub struct FastJobFormat<D>
where
D: Decorator,
{
decorator: D,
}
impl<D> FastJobFormat<D>
where
D: Decorator,
{
pub fn new(decorator: D) -> Self {
Self { decorator }
}
}
impl<D> Drain for FastJobFormat<D>
where
D: Decorator,
{
type Ok = ();
type Err = io::Error;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.level().as_usize() <= LOG_LEVEL.load(Ordering::Relaxed) {
self.decorator.with_record(record, values, |decorator| {
write_log_header(decorator, record)?;
write_log_msg(decorator, record)?;
write_log_fields(decorator, record, values)?;
decorator.start_whitespace()?;
writeln!(decorator)?;
decorator.flush()?;
Ok(())
})?;
}
Ok(())
}
}
struct LogAndFuse<D>(D);
impl<D> Drain for LogAndFuse<D>
where
D: Drain,
<D as Drain>::Err: std::fmt::Display,
{
type Ok = ();
type Err = slog::Never;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.level().as_usize() <= LOG_LEVEL.load(Ordering::Relaxed) {
if let Err(e) = self.0.log(record, values) {
let fatal_drainer = Mutex::new(text_format(term_writer())).ignore_res();
fatal_drainer.log(record, values).unwrap();
let fatal_logger = slog::Logger::root(fatal_drainer, slog_o!());
slog::slog_crit!(
fatal_logger,
"logger encountered error";
"err" => %e,
)
}
}
Ok(())
}
}
/// Filters logs with operation cost lower than threshold. Otherwise output logs to inner drainer.
struct SlowLogFilter<D> {
threshold: u64,
inner: D,
}
impl<D> Drain for SlowLogFilter<D>
where
D: Drain<Ok = (), Err = slog::Never>,
{
type Ok = ();
type Err = slog::Never;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.tag() == "slow_log" {
let mut s = SlowCostSerializer { cost: None };
let kv = record.kv();
let _ = kv.serialize(record, &mut s);
if let Some(cost) = s.cost {
if cost <= self.threshold {
return Ok(());
}
}
}
self.inner.log(record, values)
}
}
struct SlowCostSerializer {
// None means input record without key `takes`
cost: Option<u64>,
}
impl slog::ser::Serializer for SlowCostSerializer {
fn emit_arguments(&mut self, _key: Key, _val: &fmt::Arguments<'_>) -> slog::Result {
Ok(())
}
fn emit_u64(&mut self, key: Key, val: u64) -> slog::Result {
if key == "takes" {
self.cost = Some(val);
}
Ok(())
}
}
/// Special struct for slow log cost serializing
pub struct LogCost(pub u64);
impl slog::Value for LogCost {
fn serialize(
&self,
_record: &Record,
key: Key,
serializer: &mut dyn slog::Serializer,
) -> slog::Result {
serializer.emit_u64(key, self.0)
}
}
/// Dispatches logs to a normal `Drain` or a slow-log specialized `Drain` by tag
pub struct LogDispatcher<N: Drain, S: Drain> {
normal: N,
slow: Option<S>,
}
impl<N: Drain, S: Drain> LogDispatcher<N, S> {
pub fn new(normal: N, slow: Option<S>) -> Self {
Self { normal, slow }
}
}
impl<N, S> Drain for LogDispatcher<N, S>
where
N: Drain<Ok = (), Err = io::Error>,
S: Drain<Ok = (), Err = io::Error>,
{
type Ok = ();
type Err = io::Error;
fn | (&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
let tag = record.tag();
println!("{}", tag);
if self.slow.is_some() && tag.starts_with("slow_log") {
self.slow.as_ref().unwrap().log(record, values)
} else {
self.normal.log(record, values)
}
}
}
/// Writes log header to decorator.
fn write_log_header(decorator: &mut dyn RecordDecorator, record: &Record<'_>) -> io::Result<()> {
decorator.start_timestamp()?;
write!(
decorator,
"[{}]",
chrono::Local::now().format(TIMESTAMP_FORMAT)
)?;
decorator.start_whitespace()?;
write!(decorator, " ")?;
decorator.start_level()?;
write!(decorator, "[{}]", get_unified_log_level(record.level()))?;
decorator.start_whitespace()?;
write!(decorator, " ")?;
// Write source file info.
decorator.start_msg()?;
if let Some(path) = Path::new(record.file())
.file_name()
.and_then(|path| path.to_str())
{
write!(decorator, "[")?;
formatter::write_file_name(decorator, path)?;
write!(decorator, ":{}]", record.line())?;
} else {
write!(decorator, "[<unknown>]")?;
}
Ok(())
}
/// Writes log message to decorator.
fn write_log_msg(decorator: &mut dyn RecordDecorator, record: &Record<'_>) -> io::Result<()> {
decorator.start_whitespace()?;
write!(decorator, " ")?;
decorator.start_msg()?;
write!(decorator, "[")?;
let msg = format!("{}", record.msg());
formatter::write_escaped_str(decorator, &msg)?;
write!(decorator, "]")?;
Ok(())
}
/// Writes log fields to decorator.
fn write_log_fields(
decorator: &mut dyn RecordDecorator,
record: &Record<'_>,
values: &OwnedKVList,
) -> io::Result<()> {
let mut serializer = Serializer::new(decorator);
record.kv().serialize(record, &mut serializer)?;
values.serialize(record, &mut serializer)?;
serializer.finish();
Ok(())
}
struct Serializer<'a> {
decorator: &'a mut dyn RecordDecorator,
}
impl<'a> Serializer<'a> {
fn new(decorator: &'a mut dyn RecordDecorator) -> Self {
Self { decorator }
}
fn write_whitespace(&mut self) -> io::Result<()> {
self.decorator.start_whitespace()?;
write!(self.decorator, " ")?;
Ok(())
}
fn finish(self) {}
}
impl<'a> Drop for Serializer<'a> {
fn drop(&mut self) {}
}
impl<'a> slog::Serializer for Serializer<'a> {
fn emit_none(&mut self, key: Key) -> slog::Result {
self.emit_arguments(key, &format_args!("None"))
}
fn emit_arguments(&mut self, key: Key, val: &fmt::Arguments<'_>) -> slog::Result {
self.write_whitespace()?;
// Write key
write!(self.decorator, "[")?;
self.decorator.start_key()?;
formatter::write_escaped_str(&mut self.decorator, key as &str)?;
// Write separator
self.decorator.start_separator()?;
write!(self.decorator, "=")?;
// Write value
let value = format!("{}", val);
self.decorator.start_value()?;
formatter::write_escaped_str(self.decorator, &value)?;
self.decorator.reset()?;
write!(self.decorator, "]")?;
Ok(())
}
}
| log | identifier_name |
animLib.py |
from filesystem import *
from common import printInfoStr, printErrorStr
from mayaDecorators import d_unifyUndo
import maya.cmds as cmd
import names
import api
import apiExtensions
__author__ = 'mel@macaronikazoo.com'
TOOL_NAME = 'animLib'
kEXT = 'clip'
VER = 3 #version
### clip types
kPOSE = 0
kANIM = 1
kDELTA = 2
kDEFAULT_MAPPING_THRESHOLD = 1
kICON_W_H = 60, 60
mel = api.mel
Mapping = names.Mapping
class AnimLibException(Exception):
def __init__( self, *args ):
Exception.__init__(self, *args)
def getMostLikelyModelView():
'''
returns the panel name for the most likely active panel - the currently active panel can be
ambiguous if the user has been using the outliner, or graph editor or something after viewport
usage... this method simply looks at the currently active panel and if its not a modelPanel,
then it returns the first visible model panel. if no panels are found, returns None
'''
cur = cmd.getPanel(wf=True)
curType = cmd.getPanel(to=cur)
if curType == "modelPanel":
return cur
visPanels = cmd.getPanel(vis=True)
for p in visPanels:
if cmd.getPanel(to=p) == "modelPanel":
return p
return None
def generateIcon( preset ):
'''
given a preset object, this method will generate an icon using the currently active viewport. the
path to the icon is returned
'''
sel = cmd.ls(sl=True)
cmd.select(cl=True)
panel = getMostLikelyModelView()
if panel is None:
raise AnimLibException('cannot determine which panel to use for icon generation')
#store some initial settings, change them to what is required, and then restored at the very end
settings = ["-df", "-cv", "-ca", "-nurbsCurves", "-nurbsSurfaces", "-lt", "-ha", "-dim", "-pv", "-ikh", "-j", "-dy"]
imgFormat = cmd.getAttr("defaultRenderGlobals.imageFormat")
states = []
cmd.setAttr("defaultRenderGlobals.imageFormat", 20)
for setting in settings:
states.append( mel.eval("modelEditor -q %s %s;" % (setting, panel)) )
for setting in settings:
mel.eval("modelEditor -e %s 0 %s;" % (setting, panel))
time = cmd.currentTime(q=True)
#make sure the icon is open for edit if its a global clip
if preset.locale == GLOBAL and preset.icon.exists:
preset.edit()
icon = cmd.playblast(st=time, et=time, w=kICON_W_H[0], h=kICON_W_H[1], fo=True, fmt="image", v=0, p=100, orn=0, cf=str(preset.icon.resolve()))
icon = Path(icon)
if icon.exists:
icon = icon.setExtension('bmp', True)
cmd.setAttr("defaultRenderGlobals.imageFormat", imgFormat)
#restore viewport settings
try: cmd.select(sel)
except TypeError: pass
for setting, initialState in zip(settings, states):
mel.eval("modelEditor -e %s %s %s;" % (setting, initialState, panel))
return icon
class BaseBlender(object):
'''
a blender object is simply a callable object that when called with a percentage arg (0-1) will
apply said percentage of the given clips to the given mapping
'''
def __init__( self, clipA, clipB, mapping=None, attributes=None ):
self.clipA = clipA
self.clipB = clipB
self.__mapping = mapping
if attributes:
attributes = set( attributes )
self.attributes = attributes
def setMapping( self, mapping ):
self.__mapping = mapping
def getMapping( self ):
return self.__mapping
def __call__( self, pct, mapping=None ):
if mapping is not None:
self.setMapping( mapping )
assert self.getMapping() is not None
class PoseBlender(BaseBlender):
def __call__( self, pct, mapping=None, attributes=None ):
BaseBlender.__call__(self, pct, mapping)
cmdQueue = api.CmdQueue()
if mapping is None:
mapping = self.getMapping()
if attributes is None:
attributes = self.attributes
mappingDict = mapping.asDict()
for clipAObj, attrDictA in self.clipA.iteritems():
#if the object isn't in the mapping dict, skip it
if clipAObj not in mappingDict:
continue
clipBObjs = mapping[ clipAObj ]
for a, valueA in attrDictA.iteritems():
if attributes:
if a not in attributes:
continue
if not clipAObj:
continue
attrpath = '%s.%s' % (clipAObj, a)
if not cmd.getAttr( attrpath, settable=True ):
continue
for clipBObj in clipBObjs:
try:
attrDictB = self.clipB[ clipBObj ]
except KeyError: continue
try:
valueB = attrDictB[ a ]
blendedValue = (valueA * (1-pct)) + (valueB * pct)
cmdQueue.append( 'setAttr -clamp %s %f' % (attrpath, blendedValue) )
except KeyError:
cmdQueue.append( 'setAttr -clamp %s %f' % (attrpath, valueA) )
except: pass
cmdQueue()
class AnimBlender(BaseBlender):
def __init__( self, clipA, clipB, mapping=None ):
BaseBlender.__init__(self, clipA, clipB, mapping)
#so now we need to generate a dict to represent the curves for both of the clips
animCurveDictA = self.animCurveDictA = {}
animCurveDictB = self.animCurveDictB = {}
#the curvePairs attribute contains a dict - indexed by attrpath - containing a tuple of (curveA, curveB)
self.curvePairs = {}
for clip, curveDict in zip([self.clipA, self.clipB], [animCurveDictA, animCurveDictB]):
for o, attrDict in clip.iteritems():
curveDict[o] = {}
for a, keyData in attrDict.iteritems():
curve = curveDict[o][a] = animCurve.AnimCurve()
#unpack the key data
weightedTangents, keyList = keyData
#generate the curves
for time, value, itt, ott, ix, iy, ox, oy, isLocked, isWeighted in keyList:
curve.m_bWeighted = weightedTangents
curve.AddKey( time, value, ix, iy, ox, oy )
attrPath = '%s.%s' % (o, a)
try: self.curvePairs[attrPath].append( curve )
except KeyError: self.curvePairs[attrPath] = [ curve ]
#now iterate over each curve pair and make sure they both have keys on the same frames...
for attrPath, curves in self.curvePairs.iteritems():
try:
curveA, curveB = curves
except ValueError: continue
curveTimes = set( curveA.m_keys.keys() + curveB.m_keys.keys() )
for t in curveTimes:
curveA.InsertKey( t )
curveB.InsertKey( t )
print 'keys on', attrPath, 'at times', curveTimes
def __call__( self, pct, mapping=None ):
BaseBlender.__call__(self, pct, mapping)
cmdQueue = api.CmdQueue()
if pct == 0:
self.clipA.apply( self.getMapping() )
elif pct == 1:
self.clipB.apply( self.getMapping() )
else:
for attrPath, curves in self.curvePairs.iteritems():
try:
curveA, curveB = curves
except ValueError: continue
#because we know both curves have the same timings (ie if curveA has a key at time x, curveB is guaranteed to also have a key
#at time x) then we just need to iterate over the keys of one curve, and blend them with the values of the other
for time, keyA in curveA.m_keys.iteritems():
keyB = curveB.m_keys[ time ]
blendedValue = (keyA.m_flValue * (1-pct)) + (keyB.m_flValue * pct)
blendedIX = (keyA.m_flInTanX * (1-pct)) + (keyB.m_flInTanX * pct)
blendedIY = (keyA.m_flInTanY * (1-pct)) + (keyB.m_flInTanY * pct)
blendedOX = (keyA.m_flOutTanX * (1-pct)) + (keyB.m_flOutTanX * pct)
blendedOY = (keyA.m_flOutTanY * (1-pct)) + (keyB.m_flOutTanY * pct)
cmdQueue.append( 'setKeyframe -t %s -v %s %s' % (time, blendedValue, attrPath) )
cmdQueue.append( 'keyTangent -e -t %s -ix %s -iy %s -ox %s -oy %s %s' % (time, blendedIX, blendedIY, blendedOX, blendedOY, attrPath) )
class BaseClip(dict):
'''
baseclass for clips
'''
blender = None
OPTIONS =\
kOPT_ADDITIVE, kOPT_ADDITIVE_WORLD, kOPT_OFFSET, kOPT_CLEAR, kMULT, kOPT_ATTRSELECTION =\
'additive', 'additiveWorld', 'offset', 'clear', 'mult', 'attrSelection'
kOPT_DEFAULTS = { kOPT_ADDITIVE: False,
kOPT_ADDITIVE_WORLD: False,
kOPT_OFFSET: 0,
kOPT_CLEAR: True,
kMULT: 1,
kOPT_ATTRSELECTION: False }
def __init__( self, objects=None ):
if objects is not None:
self.generate( objects )
def generate( self, objects ):
pass
def apply( self, mapping, attributes=None, **kwargs ):
'''
valid kwargs are
additive [False] applys the animation additively
'''
pass
def getObjects( self ):
return self.keys()
def generatePreArgs( self ):
return tuple()
def getObjAttrNames( obj, attrNamesToSkip=() ):
#grab attributes
objAttrs = cmd.listAttr( obj, keyable=True, visible=True, scalar=True ) or []
#also grab alias' - its possible to pass in an alias name, so we need to test against them as well
aliass = cmd.aliasAttr( obj, q=True ) or []
#because the aliasAttr cmd returns a list with the alias, attr pairs in a flat list, we need to iterate over the list, skipping every second entry
itAliass = iter( aliass )
for attr in itAliass:
objAttrs.append( attr )
itAliass.next()
filteredAttrs = []
for attr in objAttrs:
skipAttr = False
for skipName in attrNamesToSkip:
if attr == skipName:
skipAttr = True
elif attr.startswith( skipName +'[' ) or attr.startswith( skipName +'.' ):
skipAttr = True
if skipAttr:
continue
filteredAttrs.append( attr )
return filteredAttrs
#defines a mapping between node type, and the function used to get a list of attributes from that node to save to the clip. by default getObjAttrNames( obj ) is called
GET_ATTR_BY_NODE_TYPE = { 'blendShape': lambda obj: getObjAttrNames( obj, ['envelope', 'weight', 'inputTarget'] ) }
class PoseClip(BaseClip):
blender = PoseBlender
@classmethod
def FromObjects( cls, objects ):
new = cls()
cls.generate(new, objects)
return new
def __add__( self, other ):
'''
for adding multiple pose clips together - returns a new PoseClip instance
'''
pass
def __mult__( self, other ):
'''
for multiplying a clip by a scalar value
'''
assert isinstance(other, (int, long, float))
new = PoseClip
for obj, attrDict in self.iteritems():
for attr, value in attrDict.iteritems():
attrDict[attr] = value * other
def generate( self, objects, attrs=None ):
'''
generates a pose dictionary - its basically just dict with node names for keys. key values
are dictionaries with attribute name keys and attribute value keys
'''
self.clear()
if attrs:
attrs = set( attrs )
for obj in objects:
objType = cmd.nodeType( obj )
attrGetFunction = GET_ATTR_BY_NODE_TYPE.get( objType, getObjAttrNames )
objAttrs = set( attrGetFunction( obj ) )
if attrs:
objAttrs = objAttrs.intersection( attrs )
if objAttrs is None:
continue
self[ obj ] = objDict = {}
for attr in objAttrs:
objDict[ attr ] = cmd.getAttr( '%s.%s' % (obj, attr) )
return True
@d_unifyUndo
def apply( self, mapping, attributes=None, **kwargs ):
'''
construct a mel string to pass to eval - so it can be contained in a single undo...
'''
cmdQueue = api.CmdQueue()
#gather options...
additive = kwargs.get( self.kOPT_ADDITIVE,
self.kOPT_DEFAULTS[ self.kOPT_ADDITIVE ] )
#convert the attribute list to a set for fast lookup
if attributes:
attributes = set( attributes )
for clipObj, tgtObj in mapping.iteritems():
try:
attrDict = self[ clipObj ]
except KeyError: continue
for attr, value in attrDict.iteritems():
if attributes:
if attr not in attributes:
continue
if not tgtObj:
continue
attrpath = '%s.%s' % (tgtObj, attr)
try:
if not cmd.getAttr( attrpath, settable=True ): continue
except TypeError: continue
if additive: value += cmd.getAttr( attrpath )
cmdQueue.append( 'setAttr -clamp %s %f;' % (attrpath, value) )
cmdQueue()
class AnimClip(BaseClip):
blender = AnimBlender
def __init__( self, objects=None ):
self.offset = 0
BaseClip.__init__(self, objects)
def __add__( self, other ):
pass
def __mult__( self, other ):
assert isinstance(other, (int, long, float))
for obj, attrDict in self.iteritems():
for attr, value in attrDict.iteritems():
value *= other
def generate( self, objects, attrs=None, startFrame=None, endFrame=None ):
'''
generates an anim dictionary - its basically just dict with node names for keys. key values
are lists of tuples with the form: (keyTime, attrDict) where attrDict is a dictionary with
attribute name keys and attribute value keys
'''
defaultWeightedTangentOpt = bool(cmd.keyTangent(q=True, g=True, wt=True))
self.clear()
if attrs:
attrs = set( attrs )
if startFrame is None:
startFrame = cmd.playbackOptions( q=True, min=True )
if endFrame is None:
endFrame = cmd.playbackOptions( q=True, max=True )
startFrame, endFrame = list( sorted( [startFrame, endFrame] ) )
self.offset = startFrame
#list all keys on the objects - so we can determine the start frame, and range. all times are stored relative to this time
allKeys = cmd.keyframe( objects, q=True ) or []
allKeys.sort()
allKeys = [ k for k in allKeys if startFrame <= k <= endFrame ]
if not allKeys:
return False
self.offset = offset = allKeys[ 0 ]
self.__range = allKeys[ -1 ] - offset
for obj in objects:
objAttrs = set( cmd.listAttr( obj, keyable=True, visible=True, scalar=True ) or [] )
if attrs:
objAttrs = objAttrs.intersection( attrs )
if not objAttrs:
continue
objDict = {}
self[ obj ] = objDict
for attr in objAttrs:
timeTuple = startFrame, endFrame
#so the attr value dict contains a big fat list containing tuples of the form:
#(time, value, itt, ott, ita, ota, iw, ow, isLockedTangents, isWeightLock)
attrpath = '%s.%s' % (obj, attr)
times = cmd.keyframe( attrpath, q=True, t=timeTuple )
weightedTangents = defaultWeightedTangentOpt
#if there is an animCurve this will return its "weighted tangent" state - otherwise it will return None and a TypeError will be raised
try: weightedTangents = bool(cmd.keyTangent(attrpath, q=True, weightedTangents=True)[0])
except TypeError: pass
if times is None:
#in this case the attr has no animation, so simply record the pose for this attr
objDict[attr] = (False, [(None, cmd.getAttr(attrpath), None, None, None, None, None, None, None, None)])
continue
else:
times = [ t-offset for t in times ]
values = cmd.keyframe(attrpath, q=True, t=timeTuple, vc=True)
itts = cmd.keyTangent(attrpath, q=True, t=timeTuple, itt=True)
otts = cmd.keyTangent(attrpath, q=True, t=timeTuple, ott=True)
ixs = cmd.keyTangent(attrpath, q=True, t=timeTuple, ix=True)
iys = cmd.keyTangent(attrpath, q=True, t=timeTuple, iy=True)
oxs = cmd.keyTangent(attrpath, q=True, t=timeTuple, ox=True)
oys = cmd.keyTangent(attrpath, q=True, t=timeTuple, oy=True)
isLocked = cmd.keyTangent(attrpath, q=True, t=timeTuple, weightLock=True)
isWeighted = cmd.keyTangent(attrpath, q=True, t=timeTuple, weightLock=True)
objDict[ attr ] = weightedTangents, zip(times, values, itts, otts, ixs, iys, oxs, oys, isLocked, isWeighted)
return True
@d_unifyUndo
def apply( self, mapping, attributes=None, **kwargs ):
'''
valid kwargs are:
mult [1.0] apply a mutiplier when applying curve values
additive [False]
clear [True]
'''
beginningWeightedTanState = cmd.keyTangent( q=True, g=True, wt=True )
### gather options...
additive = kwargs.get( self.kOPT_ADDITIVE,
self.kOPT_DEFAULTS[self.kOPT_ADDITIVE] )
worldAdditive = kwargs.get( self.kOPT_ADDITIVE_WORLD,
self.kOPT_DEFAULTS[self.kOPT_ADDITIVE_WORLD] )
clear = kwargs.get( self.kOPT_CLEAR,
self.kOPT_DEFAULTS[self.kOPT_CLEAR] )
mult = kwargs.get( self.kMULT,
self.kOPT_DEFAULTS[self.kMULT] )
timeOffset = kwargs.get( self.kOPT_OFFSET, self.offset )
#if worldAdditive is turned on, then additive is implied
if worldAdditive:
additive = worldAdditive
#determine the time range to clear
clearStart = timeOffset
clearEnd = clearStart + self.range
#convert the attribute list to a set for fast lookup
if attributes:
attributes = set( attributes )
for obj, tgtObj in mapping.iteritems():
if not tgtObj:
continue
try:
attrDict = self[ obj ]
except KeyError: continue
for attr, (weightedTangents, keyList) in attrDict.iteritems():
if attributes:
if attr not in attributes:
continue
attrpath = '%s.%s' % (tgtObj, attr)
try:
if not cmd.getAttr(attrpath, settable=True):
continue
except TypeError: continue
except RuntimeError:
print obj, tgtObj, attrpath
raise
#do the clear... maya doesn't complain if we try to do a cutKey on an attrpath with no
#animation - and this is good to do before we determine whether the attrpath has a curve or not...
if clear:
cmd.cutKey( attrpath, t=(clearStart, clearEnd), cl=True )
#is there an anim curve on the target attrpath already?
curveExists = cmd.keyframe(attrpath, index=(0,), q=True) is not None
preValue = 0
if additive:
if worldAdditive:
isWorld = True
#if the control has space switching setup, see if its value is set to "world" - if its not, we're don't treat the control's animation as additive
try: isWorld = cmd.getAttr('%s.parent' % obj, asString=True) == 'world'
except TypeError: pass
#only treat translation as additive
if isWorld and attr.startswith('translate'):
preValue = cmd.getAttr(attrpath)
else:
preValue = cmd.getAttr(attrpath)
for time, value, itt, ott, ix, iy, ox, oy, isLocked, isWeighted in keyList:
value *= mult
value += preValue
if time is None:
#in this case the attr value was just a pose...
cmd.setAttr( attrpath, value )
else:
time += timeOffset
cmd.setKeyframe( attrpath, t=(time,), v=value )
if weightedTangents:
#this needs to be done as two separate commands - because setting the tangent types in the same cmd as setting tangent weights can result
#in the tangent types being ignored (for the case of stepped mainly, but subtle weirdness with flat happens too)
cmd.keyTangent( attrpath, t=(time,), ix=ix, iy=iy, ox=ox, oy=oy, l=isLocked, wl=isWeighted )
cmd.keyTangent( attrpath, t=(time,), itt=itt, ott=ott )
else:
cmd.keyTangent( attrpath, t=(time,), ix=ix, iy=iy, ox=ox, oy=oy )
#cmd.keyTangent( e=True, g=True, wt=beginningWeightedTanState )
def getKeyTimes( self ):
'''
returns an ordered list of key times
'''
keyTimesSet = set()
for obj, attrDict in self.iteritems():
for attr, (weightedTangents, keyList) in attrDict.iteritems():
if keyList[0][0] is None:
continue
for tup in keyList:
keyTimesSet.add( tup[0] )
keyTimes = list( keyTimesSet )
keyTimes.sort()
return keyTimes
def getRange( self ):
'''
returns a tuple of (start, end)
'''
times = self.getKeyTimes()
try:
start, end = times[0], times[-1]
self.offset = start
except IndexError:
start, end = 0, 0
self.__range = 0
return start, end
def getRangeValue( self ):
try:
return self.__range
except AttributeError:
self.getRange()
return self.__range
range = property(getRangeValue)
def generatePreArgs( self ):
return tuple()
kEXPORT_DICT_THE_CLIP = 'clip'
kEXPORT_DICT_CLIP_TYPE = 'clip_type'
kEXPORT_DICT_OBJECTS = 'objects'
kEXPORT_DICT_WORLDSPACE = 'worldspace'
class ClipPreset(Preset):
'''
a clip preset is different from a normal preset because it is actually two separate files - a
pickled animation data file, and an icon
'''
TYPE_CLASSES = {kPOSE: PoseClip,
kANIM: AnimClip,
kDELTA: None}
TYPE_LABELS = {kPOSE: 'pose',
kANIM: 'anim',
kDELTA: 'delta'}
### auto generate a label types
LABEL_TYPES = {}
for t, l in TYPE_LABELS.iteritems():
LABEL_TYPES[l] = t
def __new__( cls, locale, library, name, type=kPOSE ):
tool = '%s/%s' % (TOOL_NAME, library)
typeLbl = cls.TYPE_LABELS[type]
ext = '%s.%s' % (typeLbl, kEXT)
self = Preset.__new__( cls, locale, tool, name, ext )
self.icon = Preset( locale, tool, name, '%s.bmp' % typeLbl )
return self
def asClip( self ):
presetDict = self.unpickle()
return presetDict[ kEXPORT_DICT_THE_CLIP ]
def niceName( self ):
return self.name().split('.')[0]
def getLibrary( self ):
return self[-2]
def setLibrary( self, library ):
self[-2] = library
def getTypeName( self ):
return self.name().split('.')[ -1 ]
def getType( self ):
typeLbl = self.getTypeName()
return self.LABEL_TYPES[typeLbl]
def move( self, library=None ):
if library is None:
library = self.getLibrary()
newLoc = ClipPreset(self.other(), library, self.niceName(), self.getType())
#perform the move...
Path.move(self, newLoc)
Path.move(self.icon, newLoc.icon)
return newLoc
def copy( self, library=None ):
if library is None:
library = self.library
newLoc = ClipPreset(self.other(), library, self.niceName(), self.getType())
#perform the copy...
Path.copy(self, newLoc)
Path.copy(self.icon, newLoc.icon)
return newLoc
def rename( self, newName ):
'''
newName should be the base name - sans any clip type id or extension...
'''
newName = '%s.%s' % (scrubName(newName), self.getTypeName())
Preset.rename(self, newName)
self.icon.rename(newName)
def delete( self ):
Path.delete(self)
self.icon.delete()
@api.d_noAutoKey
def apply( self, objects, attributes=None, **kwargs ):
presetDict = self.unpickle()
srcObjs = presetDict[ kEXPORT_DICT_OBJECTS ]
clip = presetDict[ kEXPORT_DICT_THE_CLIP ]
#do a version check - if older version clip is being used - perhaps we can write conversion functionality?
try:
ver = presetDict[ kEXPORT_DICT_TOOL_VER ]
if ver != VER:
api.melWarning("the anim clip version don't match!")
except KeyError:
api.melWarning("this is an old VER 1 pose clip - I don't know how to load them anymore...")
return
#generate the name mapping
slamApply = kwargs.get( 'slam', False )
if slamApply:
objects = cmd.ls( typ='transform' )
tgts = names.matchNames( srcObjs, objects, threshold=kDEFAULT_MAPPING_THRESHOLD )
mapping = Mapping( srcObjs, tgts )
else:
tgts = names.matchNames( srcObjs, objects, threshold=kDEFAULT_MAPPING_THRESHOLD )
mapping = Mapping( srcObjs, tgts )
#run the clip's apply method
clip.apply( mapping, attributes, **kwargs )
def getClipObjects( self ):
'''
returns a list of all the object names contained in the clip
'''
presetDict = self.unpickle()
srcObjs = presetDict[ kEXPORT_DICT_OBJECTS ]
return srcObjs
def write( self, objects, **kwargs ):
type = self.getType()
clipDict = api.writeExportDict( TOOL_NAME, VER )
clipDict[ kEXPORT_DICT_CLIP_TYPE ] = type
clipDict[ kEXPORT_DICT_OBJECTS ] = objects
clipDict[ kEXPORT_DICT_WORLDSPACE ] = False
theClip = self.TYPE_CLASSES[ type ]()
success = theClip.generate( objects, **kwargs )
if not success:
printErrorStr( "Failed to generate clip!" )
return
clipDict[ kEXPORT_DICT_THE_CLIP ] = theClip
#write the preset file to disk
self.pickle( clipDict )
#generate the icon for the clip and add it to perforce if appropriate
icon = generateIcon( self )
#icon.asP4().add()
printInfoStr( "Generated clip!" )
class ClipManager(PresetManager):
'''
an abstraction for listing libraries and library clips for clip presets - there are two
main differences between clip presets and other presets - clips have a library which is
a subdir of the main preset dir, and there are also multiple types of clips both with
the same extension.
'''
def __init__( self ):
PresetManager.__init__(self, TOOL_NAME, kEXT)
def getLibraryNames( self ):
'''
returns the names of all libraries under the current mod
'''
libraries = set()
for locale, paths in self.getLibraryPaths().iteritems():
for p in paths:
libName = p.name()
libraries.add(libName)
libraries = list(libraries)
libraries.sort()
return libraries
def getLibraryPaths( self ):
'''
returns a dictionary of library paths keyed using locale. ie:
{LOCAL: [path1, path2, ...], GLOBAL: etc...}
'''
localeDict = {}
for locale in LOCAL, GLOBAL:
localeDict[locale] = libraries = []
dirs = self.getPresetDirs(locale)
libraryNames = set()
for d in dirs:
dLibs = d.dirs()
for dLib in dLibs:
dLibName = dLib[-1]
if dLibName not in libraryNames:
libraries.append(dLib)
libraryNames.add(dLibName)
return localeDict
def createLibrary( self, name ):
newLibraryPath = Preset(LOCAL, TOOL_NAME, name, '')
newLibraryPath.create()
def getLibraryClips( self, library ):
|
def getPathToLibrary( self, library, locale=LOCAL ):
return getPresetDirs(locale, TOOL_NAME)[0] / library
def reload( self ):
pass
#end
| global kEXT
clips = {LOCAL: [], GLOBAL: []}
for locale in LOCAL, GLOBAL:
localeClips = clips[locale]
for dir in getPresetDirs(locale, TOOL_NAME):
dir += library
if not dir.exists:
continue
for f in dir.files():
if f.hasExtension( kEXT ):
f = f.setExtension()
name, type = f[ -1 ].split('.')
f = f[ :-1 ]
type = ClipPreset.LABEL_TYPES[ type ]
localeClips.append( ClipPreset(locale, library, name, type) )
return clips | identifier_body |
animLib.py | from filesystem import *
from common import printInfoStr, printErrorStr
from mayaDecorators import d_unifyUndo
import maya.cmds as cmd
import names
import api
import apiExtensions
__author__ = 'mel@macaronikazoo.com'
TOOL_NAME = 'animLib'
kEXT = 'clip'
VER = 3 #version
### clip types
kPOSE = 0
kANIM = 1
kDELTA = 2
kDEFAULT_MAPPING_THRESHOLD = 1
kICON_W_H = 60, 60
mel = api.mel
Mapping = names.Mapping
class AnimLibException(Exception):
def __init__( self, *args ):
Exception.__init__(self, *args)
def getMostLikelyModelView():
'''
returns the panel name for the most likely active panel - the currently active panel can be
ambiguous if the user has been using the outliner, or graph editor or something after viewport
usage... this method simply looks at the currently active panel and if its not a modelPanel,
then it returns the first visible model panel. if no panels are found, returns None
'''
cur = cmd.getPanel(wf=True)
curType = cmd.getPanel(to=cur)
if curType == "modelPanel":
return cur
visPanels = cmd.getPanel(vis=True)
for p in visPanels:
if cmd.getPanel(to=p) == "modelPanel":
return p
return None
def generateIcon( preset ):
    '''
    given a preset object, this method will generate an icon using the currently active viewport. the
    path to the icon is returned
    '''
    # remember the selection - restored near the end
    sel = cmd.ls(sl=True)
    cmd.select(cl=True)
    panel = getMostLikelyModelView()
    if panel is None:
        raise AnimLibException('cannot determine which panel to use for icon generation')
    #store some initial settings, change them to what is required, and then restored at the very end
    settings = ["-df", "-cv", "-ca", "-nurbsCurves", "-nurbsSurfaces", "-lt", "-ha", "-dim", "-pv", "-ikh", "-j", "-dy"]
    imgFormat = cmd.getAttr("defaultRenderGlobals.imageFormat")
    states = []
    # NOTE(review): image format id 20 - presumably targa; the file is renamed to .bmp below - confirm
    cmd.setAttr("defaultRenderGlobals.imageFormat", 20)
    # record each display option's current state so it can be restored below
    for setting in settings:
        states.append( mel.eval("modelEditor -q %s %s;" % (setting, panel)) )
    # turn every listed display option off so the playblast shows geometry only
    for setting in settings:
        mel.eval("modelEditor -e %s 0 %s;" % (setting, panel))
    time = cmd.currentTime(q=True)
    #make sure the icon is open for edit if its a global clip
    if preset.locale == GLOBAL and preset.icon.exists:
        preset.edit()
    # playblast a single frame at icon size directly over the icon path
    icon = cmd.playblast(st=time, et=time, w=kICON_W_H[0], h=kICON_W_H[1], fo=True, fmt="image", v=0, p=100, orn=0, cf=str(preset.icon.resolve()))
    icon = Path(icon)
    if icon.exists:
        icon = icon.setExtension('bmp', True)
    cmd.setAttr("defaultRenderGlobals.imageFormat", imgFormat)
    #restore viewport settings
    try: cmd.select(sel)
    except TypeError: pass  # nothing was selected originally - select(None) raises TypeError
    for setting, initialState in zip(settings, states):
        mel.eval("modelEditor -e %s %s %s;" % (setting, initialState, panel))
    return icon
class BaseBlender(object):
    '''
    Callable that, when invoked with a percentage (0-1), applies that
    proportion of a blend between two clips onto the stored name mapping.
    '''
    def __init__(self, clipA, clipB, mapping=None, attributes=None):
        self.clipA = clipA
        self.clipB = clipB
        self.__mapping = mapping
        # normalize any attribute filter to a set for fast membership tests;
        # falsy values (None, empty) are stored untouched
        self.attributes = set(attributes) if attributes else attributes

    def setMapping(self, mapping):
        self.__mapping = mapping

    def getMapping(self):
        return self.__mapping

    def __call__(self, pct, mapping=None):
        # an explicitly passed mapping replaces the stored one
        if mapping is not None:
            self.setMapping(mapping)
        assert self.getMapping() is not None
class PoseBlender(BaseBlender):
    '''Blends two pose clips by linearly interpolating attribute values by pct.'''
    def __call__( self, pct, mapping=None, attributes=None ):
        # base call stores the mapping and asserts one exists
        BaseBlender.__call__(self, pct, mapping)
        cmdQueue = api.CmdQueue()
        if mapping is None:
            mapping = self.getMapping()
        if attributes is None:
            attributes = self.attributes
        mappingDict = mapping.asDict()
        for clipAObj, attrDictA in self.clipA.iteritems():
            #if the object isn't in the mapping dict, skip it
            if clipAObj not in mappingDict:
                continue
            clipBObjs = mapping[ clipAObj ]
            for a, valueA in attrDictA.iteritems():
                # honour the optional attribute filter
                if attributes:
                    if a not in attributes:
                        continue
                if not clipAObj:
                    continue
                attrpath = '%s.%s' % (clipAObj, a)
                # skip attributes that can't be set (locked/connected)
                if not cmd.getAttr( attrpath, settable=True ):
                    continue
                for clipBObj in clipBObjs:
                    try:
                        attrDictB = self.clipB[ clipBObj ]
                    except KeyError: continue  # object absent from clipB
                    try:
                        # lerp between the two clip values by pct
                        valueB = attrDictB[ a ]
                        blendedValue = (valueA * (1-pct)) + (valueB * pct)
                        cmdQueue.append( 'setAttr -clamp %s %f' % (attrpath, blendedValue) )
                    except KeyError:
                        # attr exists only in clipA - apply its value unblended
                        cmdQueue.append( 'setAttr -clamp %s %f' % (attrpath, valueA) )
                    except: pass  # NOTE(review): bare except silently swallows all errors here
        # execute all the queued setAttr commands at once
        cmdQueue()
class AnimBlender(BaseBlender):
    '''
    Blends two anim clips by building AnimCurve objects for every attribute in
    each clip and interpolating values/tangents between matching keys.
    '''
    def __init__( self, clipA, clipB, mapping=None ):
        BaseBlender.__init__(self, clipA, clipB, mapping)
        #so now we need to generate a dict to represent the curves for both of the clips
        animCurveDictA = self.animCurveDictA = {}
        animCurveDictB = self.animCurveDictB = {}
        #the curvePairs attribute contains a dict - indexed by attrpath - containing a tuple of (curveA, curveB)
        self.curvePairs = {}
        # NOTE(review): animCurve isn't imported by name in this module - presumably
        # provided by one of the star imports; confirm it resolves at runtime
        for clip, curveDict in zip([self.clipA, self.clipB], [animCurveDictA, animCurveDictB]):
            for o, attrDict in clip.iteritems():
                curveDict[o] = {}
                for a, keyData in attrDict.iteritems():
                    curve = curveDict[o][a] = animCurve.AnimCurve()
                    #unpack the key data
                    weightedTangents, keyList = keyData
                    #generate the curves
                    for time, value, itt, ott, ix, iy, ox, oy, isLocked, isWeighted in keyList:
                        curve.m_bWeighted = weightedTangents
                        curve.AddKey( time, value, ix, iy, ox, oy )
                    attrPath = '%s.%s' % (o, a)
                    # collect the curve under its attrpath - second clip appends
                    try: self.curvePairs[attrPath].append( curve )
                    except KeyError: self.curvePairs[attrPath] = [ curve ]
        #now iterate over each curve pair and make sure they both have keys on the same frames...
        for attrPath, curves in self.curvePairs.iteritems():
            try:
                curveA, curveB = curves
            except ValueError: continue  # attr present in only one clip - nothing to pair
            curveTimes = set( curveA.m_keys.keys() + curveB.m_keys.keys() )
            for t in curveTimes:
                curveA.InsertKey( t )
                curveB.InsertKey( t )
            # NOTE(review): debug print left in - consider removing
            print 'keys on', attrPath, 'at times', curveTimes
    def __call__( self, pct, mapping=None ):
        BaseBlender.__call__(self, pct, mapping)
        cmdQueue = api.CmdQueue()
        # at the extremes just apply the appropriate clip directly
        if pct == 0:
            self.clipA.apply( self.getMapping() )
        elif pct == 1:
            self.clipB.apply( self.getMapping() )
        else:
            for attrPath, curves in self.curvePairs.iteritems():
                try:
                    curveA, curveB = curves
                except ValueError: continue
                #because we know both curves have the same timings (ie if curveA has a key at time x, curveB is guaranteed to also have a key
                #at time x) then we just need to iterate over the keys of one curve, and blend them with the values of the other
                for time, keyA in curveA.m_keys.iteritems():
                    keyB = curveB.m_keys[ time ]
                    # lerp the value and all four tangent handles by pct
                    blendedValue = (keyA.m_flValue * (1-pct)) + (keyB.m_flValue * pct)
                    blendedIX = (keyA.m_flInTanX * (1-pct)) + (keyB.m_flInTanX * pct)
                    blendedIY = (keyA.m_flInTanY * (1-pct)) + (keyB.m_flInTanY * pct)
                    blendedOX = (keyA.m_flOutTanX * (1-pct)) + (keyB.m_flOutTanX * pct)
                    blendedOY = (keyA.m_flOutTanY * (1-pct)) + (keyB.m_flOutTanY * pct)
                    cmdQueue.append( 'setKeyframe -t %s -v %s %s' % (time, blendedValue, attrPath) )
                    cmdQueue.append( 'keyTangent -e -t %s -ix %s -iy %s -ox %s -oy %s %s' % (time, blendedIX, blendedIY, blendedOX, blendedOY, attrPath) )
            # NOTE(review): cmdQueue is populated but never executed in this method -
            # confirm whether api.CmdQueue self-executes or a cmdQueue() call is missing
class BaseClip(dict):
'''
baseclass for clips
'''
blender = None
OPTIONS =\
kOPT_ADDITIVE, kOPT_ADDITIVE_WORLD, kOPT_OFFSET, kOPT_CLEAR, kMULT, kOPT_ATTRSELECTION =\
'additive', 'additiveWorld', 'offset', 'clear', 'mult', 'attrSelection'
kOPT_DEFAULTS = { kOPT_ADDITIVE: False,
kOPT_ADDITIVE_WORLD: False,
kOPT_OFFSET: 0,
kOPT_CLEAR: True,
kMULT: 1,
kOPT_ATTRSELECTION: False }
def __init__( self, objects=None ):
if objects is not None:
self.generate( objects )
def generate( self, objects ):
pass
def apply( self, mapping, attributes=None, **kwargs ):
'''
valid kwargs are
additive [False] applys the animation additively
| return self.keys()
def generatePreArgs( self ):
return tuple()
def getObjAttrNames(obj, attrNamesToSkip=()):
    '''
    Returns the keyable, visible, scalar attribute names on obj plus any
    attribute aliases, with any names matched by attrNamesToSkip removed.
    '''
    #grab attributes
    objAttrs = cmd.listAttr(obj, keyable=True, visible=True, scalar=True) or []
    #also grab alias' - its possible to pass in an alias name, so we need to test against them as well
    aliass = cmd.aliasAttr(obj, q=True) or []
    # aliasAttr returns a flat [alias, attr, alias, attr, ...] list - keep only
    # the alias entries (every second element starting at index 0)
    objAttrs.extend(aliass[::2])
    filteredAttrs = []
    for attr in objAttrs:
        skipThis = any(
            attr == skipName
            or attr.startswith(skipName + '[')
            or attr.startswith(skipName + '.')
            for skipName in attrNamesToSkip)
        if not skipThis:
            filteredAttrs.append(attr)
    return filteredAttrs
#defines a mapping between node type, and the function used to get a list of attributes from that node to save to the clip. by default getObjAttrNames( obj ) is called
# NOTE(review): blendShape skips envelope/weight/inputTarget - presumably because target weights are exposed via aliases; confirm
GET_ATTR_BY_NODE_TYPE = { 'blendShape': lambda obj: getObjAttrNames( obj, ['envelope', 'weight', 'inputTarget'] ) }
class PoseClip(BaseClip):
    '''
    A single-frame pose clip: a dict mapping object names to
    {attrName: value} dictionaries.
    '''
    blender = PoseBlender
    @classmethod
    def FromObjects( cls, objects ):
        '''constructs a new clip instance and generates it from the given objects'''
        new = cls()
        cls.generate(new, objects)
        return new
    def __add__( self, other ):
        '''
        for adding multiple pose clips together - returns a new PoseClip instance
        '''
        pass
    def __mul__( self, other ):
        '''
        for multiplying a clip by a scalar value - returns a new, scaled
        PoseClip and leaves self untouched
        '''
        assert isinstance(other, (int, long, float))
        # BUGFIX: this previously forgot to instantiate the new clip (new = PoseClip)
        # and scaled this clip's values in place instead of returning a scaled copy
        new = PoseClip()
        for obj, attrDict in self.iteritems():
            new[obj] = dict( (attr, value * other) for attr, value in attrDict.iteritems() )
        return new
    # backward-compatible alias for the old, misspelt operator hook
    __mult__ = __mul__
    def generate( self, objects, attrs=None ):
        '''
        generates a pose dictionary - its basically just dict with node names for keys. key values
        are dictionaries with attribute name keys and attribute value keys
        '''
        self.clear()
        if attrs:
            attrs = set( attrs )
        for obj in objects:
            # node-type specific attr listing (eg blendShape), default getObjAttrNames
            objType = cmd.nodeType( obj )
            attrGetFunction = GET_ATTR_BY_NODE_TYPE.get( objType, getObjAttrNames )
            objAttrs = set( attrGetFunction( obj ) )
            if attrs:
                objAttrs = objAttrs.intersection( attrs )
            # BUGFIX: was "if objAttrs is None" - set.intersection never returns
            # None, so empty attr sets used to create useless empty entries
            if not objAttrs:
                continue
            self[ obj ] = objDict = {}
            for attr in objAttrs:
                objDict[ attr ] = cmd.getAttr( '%s.%s' % (obj, attr) )
        return True
    @d_unifyUndo
    def apply( self, mapping, attributes=None, **kwargs ):
        '''
        construct a mel string to pass to eval - so it can be contained in a single undo...

        valid kwargs: additive [False] - adds pose values to current values
        '''
        cmdQueue = api.CmdQueue()
        #gather options...
        additive = kwargs.get( self.kOPT_ADDITIVE,
                               self.kOPT_DEFAULTS[ self.kOPT_ADDITIVE ] )
        #convert the attribute list to a set for fast lookup
        if attributes:
            attributes = set( attributes )
        for clipObj, tgtObj in mapping.iteritems():
            try:
                attrDict = self[ clipObj ]
            except KeyError: continue  # object not stored in this clip
            for attr, value in attrDict.iteritems():
                if attributes:
                    if attr not in attributes:
                        continue
                if not tgtObj:
                    continue
                attrpath = '%s.%s' % (tgtObj, attr)
                try:
                    # skip unsettable (locked/connected) attrs
                    if not cmd.getAttr( attrpath, settable=True ): continue
                except TypeError: continue  # attr doesn't exist on the target
                if additive: value += cmd.getAttr( attrpath )
                cmdQueue.append( 'setAttr -clamp %s %f;' % (attrpath, value) )
        cmdQueue()
class AnimClip(BaseClip):
    '''
    A keyframed animation clip: a dict mapping object names to per-attribute
    key data.  Each attr maps to (weightedTangents, keyList) where keyList
    holds (time, value, itt, ott, ix, iy, ox, oy, isLocked, isWeighted) tuples;
    a time of None means the entry is a static pose value, not a key.
    '''
    blender = AnimBlender
    def __init__( self, objects=None ):
        # offset is the clip's start frame - key times are stored relative to it
        self.offset = 0
        BaseClip.__init__(self, objects)
    def __add__( self, other ):
        # stub - adding anim clips together is not implemented
        pass
    def __mult__( self, other ):
        # NOTE(review): presumably should be __mul__ to hook the * operator; also
        # "value *= other" only rebinds the loop local - the clip is never scaled
        assert isinstance(other, (int, long, float))
        for obj, attrDict in self.iteritems():
            for attr, value in attrDict.iteritems():
                value *= other
    def generate( self, objects, attrs=None, startFrame=None, endFrame=None ):
        '''
        generates an anim dictionary - its basically just dict with node names for keys. key values
        are lists of tuples with the form: (keyTime, attrDict) where attrDict is a dictionary with
        attribute name keys and attribute value keys
        '''
        defaultWeightedTangentOpt = bool(cmd.keyTangent(q=True, g=True, wt=True))
        self.clear()
        if attrs:
            attrs = set( attrs )
        # default the frame range to the current playback range
        if startFrame is None:
            startFrame = cmd.playbackOptions( q=True, min=True )
        if endFrame is None:
            endFrame = cmd.playbackOptions( q=True, max=True )
        startFrame, endFrame = list( sorted( [startFrame, endFrame] ) )
        self.offset = startFrame
        #list all keys on the objects - so we can determine the start frame, and range. all times are stored relative to this time
        allKeys = cmd.keyframe( objects, q=True ) or []
        allKeys.sort()
        allKeys = [ k for k in allKeys if startFrame <= k <= endFrame ]
        if not allKeys:
            return False
        # first key in range becomes the clip offset; range is last-first
        self.offset = offset = allKeys[ 0 ]
        self.__range = allKeys[ -1 ] - offset
        for obj in objects:
            objAttrs = set( cmd.listAttr( obj, keyable=True, visible=True, scalar=True ) or [] )
            if attrs:
                objAttrs = objAttrs.intersection( attrs )
            if not objAttrs:
                continue
            objDict = {}
            self[ obj ] = objDict
            for attr in objAttrs:
                timeTuple = startFrame, endFrame
                #so the attr value dict contains a big fat list containing tuples of the form:
                #(time, value, itt, ott, ita, ota, iw, ow, isLockedTangents, isWeightLock)
                attrpath = '%s.%s' % (obj, attr)
                times = cmd.keyframe( attrpath, q=True, t=timeTuple )
                weightedTangents = defaultWeightedTangentOpt
                #if there is an animCurve this will return its "weighted tangent" state - otherwise it will return None and a TypeError will be raised
                try: weightedTangents = bool(cmd.keyTangent(attrpath, q=True, weightedTangents=True)[0])
                except TypeError: pass
                if times is None:
                    #in this case the attr has no animation, so simply record the pose for this attr
                    objDict[attr] = (False, [(None, cmd.getAttr(attrpath), None, None, None, None, None, None, None, None)])
                    continue
                else:
                    # store key times relative to the clip offset
                    times = [ t-offset for t in times ]
                values = cmd.keyframe(attrpath, q=True, t=timeTuple, vc=True)
                itts = cmd.keyTangent(attrpath, q=True, t=timeTuple, itt=True)
                otts = cmd.keyTangent(attrpath, q=True, t=timeTuple, ott=True)
                ixs = cmd.keyTangent(attrpath, q=True, t=timeTuple, ix=True)
                iys = cmd.keyTangent(attrpath, q=True, t=timeTuple, iy=True)
                oxs = cmd.keyTangent(attrpath, q=True, t=timeTuple, ox=True)
                oys = cmd.keyTangent(attrpath, q=True, t=timeTuple, oy=True)
                isLocked = cmd.keyTangent(attrpath, q=True, t=timeTuple, weightLock=True)
                # NOTE(review): isWeighted queries weightLock again, same as isLocked -
                # looks like a copy/paste bug (probably meant the weighted-tangent state)
                isWeighted = cmd.keyTangent(attrpath, q=True, t=timeTuple, weightLock=True)
                objDict[ attr ] = weightedTangents, zip(times, values, itts, otts, ixs, iys, oxs, oys, isLocked, isWeighted)
        return True
    @d_unifyUndo
    def apply( self, mapping, attributes=None, **kwargs ):
        '''
        valid kwargs are:
        mult		[1.0]  apply a mutiplier when applying curve values
        additive	[False]
        clear		[True]
        '''
        beginningWeightedTanState = cmd.keyTangent( q=True, g=True, wt=True )
        ### gather options...
        additive = kwargs.get( self.kOPT_ADDITIVE,
                               self.kOPT_DEFAULTS[self.kOPT_ADDITIVE] )
        worldAdditive = kwargs.get( self.kOPT_ADDITIVE_WORLD,
                                    self.kOPT_DEFAULTS[self.kOPT_ADDITIVE_WORLD] )
        clear = kwargs.get( self.kOPT_CLEAR,
                            self.kOPT_DEFAULTS[self.kOPT_CLEAR] )
        mult = kwargs.get( self.kMULT,
                           self.kOPT_DEFAULTS[self.kMULT] )
        timeOffset = kwargs.get( self.kOPT_OFFSET, self.offset )
        #if worldAdditive is turned on, then additive is implied
        if worldAdditive:
            additive = worldAdditive
        #determine the time range to clear
        clearStart = timeOffset
        clearEnd = clearStart + self.range
        #convert the attribute list to a set for fast lookup
        if attributes:
            attributes = set( attributes )
        for obj, tgtObj in mapping.iteritems():
            if not tgtObj:
                continue
            try:
                attrDict = self[ obj ]
            except KeyError: continue  # object not in this clip
            for attr, (weightedTangents, keyList) in attrDict.iteritems():
                if attributes:
                    if attr not in attributes:
                        continue
                attrpath = '%s.%s' % (tgtObj, attr)
                try:
                    if not cmd.getAttr(attrpath, settable=True):
                        continue
                except TypeError: continue  # attr missing on the target
                except RuntimeError:
                    # NOTE(review): debug print before re-raising
                    print obj, tgtObj, attrpath
                    raise
                #do the clear... maya doesn't complain if we try to do a cutKey on an attrpath with no
                #animation - and this is good to do before we determine whether the attrpath has a curve or not...
                if clear:
                    cmd.cutKey( attrpath, t=(clearStart, clearEnd), cl=True )
                #is there an anim curve on the target attrpath already?
                # NOTE(review): curveExists is computed but never used below
                curveExists = cmd.keyframe(attrpath, index=(0,), q=True) is not None
                preValue = 0
                if additive:
                    if worldAdditive:
                        isWorld = True
                        #if the control has space switching setup, see if its value is set to "world" - if its not, we're don't treat the control's animation as additive
                        try: isWorld = cmd.getAttr('%s.parent' % obj, asString=True) == 'world'
                        except TypeError: pass
                        #only treat translation as additive
                        if isWorld and attr.startswith('translate'):
                            preValue = cmd.getAttr(attrpath)
                    else:
                        preValue = cmd.getAttr(attrpath)
                for time, value, itt, ott, ix, iy, ox, oy, isLocked, isWeighted in keyList:
                    value *= mult
                    value += preValue
                    if time is None:
                        #in this case the attr value was just a pose...
                        cmd.setAttr( attrpath, value )
                    else:
                        time += timeOffset
                        cmd.setKeyframe( attrpath, t=(time,), v=value )
                        if weightedTangents:
                            #this needs to be done as two separate commands - because setting the tangent types in the same cmd as setting tangent weights can result
                            #in the tangent types being ignored (for the case of stepped mainly, but subtle weirdness with flat happens too)
                            cmd.keyTangent( attrpath, t=(time,), ix=ix, iy=iy, ox=ox, oy=oy, l=isLocked, wl=isWeighted )
                            cmd.keyTangent( attrpath, t=(time,), itt=itt, ott=ott )
                        else:
                            cmd.keyTangent( attrpath, t=(time,), ix=ix, iy=iy, ox=ox, oy=oy )
        #cmd.keyTangent( e=True, g=True, wt=beginningWeightedTanState )
    def getKeyTimes( self ):
        '''
        returns an ordered list of key times
        '''
        keyTimesSet = set()
        for obj, attrDict in self.iteritems():
            for attr, (weightedTangents, keyList) in attrDict.iteritems():
                # a leading None time means the attr stored a static pose - no keys
                if keyList[0][0] is None:
                    continue
                for tup in keyList:
                    keyTimesSet.add( tup[0] )
        keyTimes = list( keyTimesSet )
        keyTimes.sort()
        return keyTimes
    def getRange( self ):
        '''
        returns a tuple of (start, end)
        '''
        times = self.getKeyTimes()
        try:
            start, end = times[0], times[-1]
            self.offset = start
            # NOTE(review): self.__range is not refreshed on this path - callers rely
            # on generate() having set it; confirm that is intentional
        except IndexError:
            start, end = 0, 0
            self.__range = 0
        return start, end
    def getRangeValue( self ):
        # lazily fall back to getRange() when the cached range hasn't been set
        try:
            return self.__range
        except AttributeError:
            self.getRange()
            return self.__range
    range = property(getRangeValue)
    def generatePreArgs( self ):
        return tuple()
# keys of the pickled preset dict written by ClipPreset.write / read by apply
kEXPORT_DICT_THE_CLIP = 'clip'        # the clip object itself
kEXPORT_DICT_CLIP_TYPE = 'clip_type'  # one of kPOSE/kANIM/kDELTA
kEXPORT_DICT_OBJECTS = 'objects'      # object names the clip was generated from
kEXPORT_DICT_WORLDSPACE = 'worldspace'
class ClipPreset(Preset):
    '''
    a clip preset is different from a normal preset because it is actually two separate files - a
    pickled animation data file, and an icon
    '''
    # clip type -> clip class / file label
    TYPE_CLASSES = {kPOSE: PoseClip,
                    kANIM: AnimClip,
                    kDELTA: None}
    TYPE_LABELS = {kPOSE: 'pose',
                   kANIM: 'anim',
                   kDELTA: 'delta'}
    ### auto generate a label types
    LABEL_TYPES = {}
    for t, l in TYPE_LABELS.iteritems():
        LABEL_TYPES[l] = t
    def __new__( cls, locale, library, name, type=kPOSE ):
        # presets live under <TOOL_NAME>/<library>, named <name>.<typeLbl>.<kEXT>
        tool = '%s/%s' % (TOOL_NAME, library)
        typeLbl = cls.TYPE_LABELS[type]
        ext = '%s.%s' % (typeLbl, kEXT)
        self = Preset.__new__( cls, locale, tool, name, ext )
        # companion icon file alongside the clip data
        self.icon = Preset( locale, tool, name, '%s.bmp' % typeLbl )
        return self
    def asClip( self ):
        '''unpickles the preset file and returns the stored clip object'''
        presetDict = self.unpickle()
        return presetDict[ kEXPORT_DICT_THE_CLIP ]
    def niceName( self ):
        # base name sans the '.<type>' suffix
        return self.name().split('.')[0]
    def getLibrary( self ):
        return self[-2]
    def setLibrary( self, library ):
        self[-2] = library
    def getTypeName( self ):
        # the trailing '.<type>' suffix of the file name, eg 'pose'
        return self.name().split('.')[ -1 ]
    def getType( self ):
        typeLbl = self.getTypeName()
        return self.LABEL_TYPES[typeLbl]
    def move( self, library=None ):
        '''moves both clip file and icon to the other locale; returns the new preset'''
        if library is None:
            library = self.getLibrary()
        newLoc = ClipPreset(self.other(), library, self.niceName(), self.getType())
        #perform the move...
        Path.move(self, newLoc)
        Path.move(self.icon, newLoc.icon)
        return newLoc
    def copy( self, library=None ):
        '''copies both clip file and icon to the other locale; returns the new preset'''
        if library is None:
            # NOTE(review): move() uses self.getLibrary() but this uses self.library -
            # confirm Preset exposes a library attribute, otherwise this is a bug
            library = self.library
        newLoc = ClipPreset(self.other(), library, self.niceName(), self.getType())
        #perform the copy...
        Path.copy(self, newLoc)
        Path.copy(self.icon, newLoc.icon)
        return newLoc
    def rename( self, newName ):
        '''
        newName should be the base name - sans any clip type id or extension...
        '''
        # NOTE(review): scrubName isn't defined in this module - presumably from a star import
        newName = '%s.%s' % (scrubName(newName), self.getTypeName())
        Preset.rename(self, newName)
        self.icon.rename(newName)
    def delete( self ):
        # delete both the clip file and its icon
        Path.delete(self)
        self.icon.delete()
    @api.d_noAutoKey
    def apply( self, objects, attributes=None, **kwargs ):
        '''unpickles the clip and applies it to the given objects via name matching'''
        presetDict = self.unpickle()
        srcObjs = presetDict[ kEXPORT_DICT_OBJECTS ]
        clip = presetDict[ kEXPORT_DICT_THE_CLIP ]
        #do a version check - if older version clip is being used - perhaps we can write conversion functionality?
        try:
            # NOTE(review): kEXPORT_DICT_TOOL_VER isn't defined in this module -
            # presumably from a star import; confirm
            ver = presetDict[ kEXPORT_DICT_TOOL_VER ]
            if ver != VER:
                api.melWarning("the anim clip version don't match!")
        except KeyError:
            api.melWarning("this is an old VER 1 pose clip - I don't know how to load them anymore...")
            return
        #generate the name mapping
        slamApply = kwargs.get( 'slam', False )
        if slamApply:
            # slam mode matches against every transform in the scene
            objects = cmd.ls( typ='transform' )
            tgts = names.matchNames( srcObjs, objects, threshold=kDEFAULT_MAPPING_THRESHOLD )
            mapping = Mapping( srcObjs, tgts )
        else:
            tgts = names.matchNames( srcObjs, objects, threshold=kDEFAULT_MAPPING_THRESHOLD )
            mapping = Mapping( srcObjs, tgts )
        #run the clip's apply method
        clip.apply( mapping, attributes, **kwargs )
    def getClipObjects( self ):
        '''
        returns a list of all the object names contained in the clip
        '''
        presetDict = self.unpickle()
        srcObjs = presetDict[ kEXPORT_DICT_OBJECTS ]
        return srcObjs
    def write( self, objects, **kwargs ):
        '''generates a clip from the objects, pickles it to disk and playblasts its icon'''
        type = self.getType()
        clipDict = api.writeExportDict( TOOL_NAME, VER )
        clipDict[ kEXPORT_DICT_CLIP_TYPE ] = type
        clipDict[ kEXPORT_DICT_OBJECTS ] = objects
        clipDict[ kEXPORT_DICT_WORLDSPACE ] = False
        theClip = self.TYPE_CLASSES[ type ]()
        success = theClip.generate( objects, **kwargs )
        if not success:
            printErrorStr( "Failed to generate clip!" )
            return
        clipDict[ kEXPORT_DICT_THE_CLIP ] = theClip
        #write the preset file to disk
        self.pickle( clipDict )
        #generate the icon for the clip and add it to perforce if appropriate
        icon = generateIcon( self )
        #icon.asP4().add()
        printInfoStr( "Generated clip!" )
class ClipManager(PresetManager):
    '''
    an abstraction for listing libraries and library clips for clip presets - there are two
    main differences between clip presets and other presets - clips have a library which is
    a subdir of the main preset dir, and there are also multiple types of clips both with
    the same extension.
    '''
    def __init__( self ):
        PresetManager.__init__(self, TOOL_NAME, kEXT)
    def getLibraryNames( self ):
        '''
        returns the names of all libraries under the current mod
        '''
        libraries = set()
        for locale, paths in self.getLibraryPaths().iteritems():
            for p in paths:
                libName = p.name()
                libraries.add(libName)
        # return a sorted, de-duplicated list
        libraries = list(libraries)
        libraries.sort()
        return libraries
    def getLibraryPaths( self ):
        '''
        returns a dictionary of library paths keyed using locale. ie:
        {LOCAL: [path1, path2, ...], GLOBAL: etc...}
        '''
        localeDict = {}
        for locale in LOCAL, GLOBAL:
            localeDict[locale] = libraries = []
            dirs = self.getPresetDirs(locale)
            # track names already seen so each library appears once per locale
            libraryNames = set()
            for d in dirs:
                dLibs = d.dirs()
                for dLib in dLibs:
                    dLibName = dLib[-1]
                    if dLibName not in libraryNames:
                        libraries.append(dLib)
                        libraryNames.add(dLibName)
        return localeDict
    def createLibrary( self, name ):
        '''creates a new (empty) library dir under the LOCAL preset dir'''
        newLibraryPath = Preset(LOCAL, TOOL_NAME, name, '')
        newLibraryPath.create()
    def getLibraryClips( self, library ):
        '''returns {LOCAL: [ClipPreset, ...], GLOBAL: [...]} for the given library'''
        global kEXT  # NOTE(review): kEXT is only read here - the global statement is needless
        clips = {LOCAL: [], GLOBAL: []}
        for locale in LOCAL, GLOBAL:
            localeClips = clips[locale]
            for dir in getPresetDirs(locale, TOOL_NAME):
                dir += library
                if not dir.exists:
                    continue
                for f in dir.files():
                    if f.hasExtension( kEXT ):
                        # strip the .clip extension, then split '<name>.<type>'
                        f = f.setExtension()
                        # NOTE(review): raises ValueError if the base name contains extra dots - confirm
                        name, type = f[ -1 ].split('.')
                        f = f[ :-1 ]
                        type = ClipPreset.LABEL_TYPES[ type ]
                        localeClips.append( ClipPreset(locale, library, name, type) )
        return clips
    def getPathToLibrary( self, library, locale=LOCAL ):
        '''returns the first preset dir for the locale joined with the library name'''
        return getPresetDirs(locale, TOOL_NAME)[0] / library
    def reload( self ):
        # no cached state to refresh
        pass
#end | '''
pass
def getObjects( self ):
| random_line_split |
animLib.py |
from filesystem import *
from common import printInfoStr, printErrorStr
from mayaDecorators import d_unifyUndo
import maya.cmds as cmd
import names
import api
import apiExtensions
__author__ = 'mel@macaronikazoo.com'
# Tool identity and preset-file metadata.
TOOL_NAME = 'animLib'
kEXT = 'clip'  # file extension used for pickled clip presets
VER = 3 #version
### clip types
kPOSE = 0   # single-frame pose snapshot
kANIM = 1   # keyframed animation clip
kDELTA = 2  # delta clip - no clip class is implemented for this type yet
kDEFAULT_MAPPING_THRESHOLD = 1  # name-match threshold passed to names.matchNames
kICON_W_H = 60, 60  # width, height in pixels of playblasted icon thumbnails
# convenience aliases into the api/names modules
mel = api.mel
Mapping = names.Mapping
class AnimLibException(Exception):
    '''Tool-specific exception raised for animLib errors.'''
    def __init__(self, *args):
        super(AnimLibException, self).__init__(*args)
def getMostLikelyModelView():
    '''
    Returns the panel name of the most likely active model panel, or None.

    The "current" panel is ambiguous if the user last interacted with the
    outliner, graph editor etc - so when the focused panel isn't a modelPanel
    this falls back to the first visible modelPanel it can find.
    '''
    focused = cmd.getPanel(wf=True)
    if cmd.getPanel(to=focused) == "modelPanel":
        return focused
    # fall back to the first visible model panel
    for panel in cmd.getPanel(vis=True):
        if cmd.getPanel(to=panel) == "modelPanel":
            return panel
    return None
def generateIcon( preset ):
    '''
    given a preset object, this method will generate an icon using the currently active viewport. the
    path to the icon is returned
    '''
    # remember the selection - restored near the end
    sel = cmd.ls(sl=True)
    cmd.select(cl=True)
    panel = getMostLikelyModelView()
    if panel is None:
        raise AnimLibException('cannot determine which panel to use for icon generation')
    #store some initial settings, change them to what is required, and then restored at the very end
    settings = ["-df", "-cv", "-ca", "-nurbsCurves", "-nurbsSurfaces", "-lt", "-ha", "-dim", "-pv", "-ikh", "-j", "-dy"]
    imgFormat = cmd.getAttr("defaultRenderGlobals.imageFormat")
    states = []
    cmd.setAttr("defaultRenderGlobals.imageFormat", 20)
    # record each display option's current state so it can be restored below
    for setting in settings:
        states.append( mel.eval("modelEditor -q %s %s;" % (setting, panel)) )
    # BUGFIX: this statement was missing - turn every listed display option off
    # so the playblast captures geometry only
    for setting in settings:
        mel.eval("modelEditor -e %s 0 %s;" % (setting, panel))
    time = cmd.currentTime(q=True)
    #make sure the icon is open for edit if its a global clip
    if preset.locale == GLOBAL and preset.icon.exists:
        preset.edit()
    # playblast a single frame at icon size directly over the icon path
    icon = cmd.playblast(st=time, et=time, w=kICON_W_H[0], h=kICON_W_H[1], fo=True, fmt="image", v=0, p=100, orn=0, cf=str(preset.icon.resolve()))
    icon = Path(icon)
    if icon.exists:
        icon = icon.setExtension('bmp', True)
    cmd.setAttr("defaultRenderGlobals.imageFormat", imgFormat)
    #restore viewport settings
    try: cmd.select(sel)
    except TypeError: pass  # nothing was selected originally
    for setting, initialState in zip(settings, states):
        mel.eval("modelEditor -e %s %s %s;" % (setting, initialState, panel))
    return icon
class BaseBlender(object):
    '''
    Callable that, when invoked with a percentage (0-1), applies that
    proportion of a blend between two clips onto the stored name mapping.
    '''
    def __init__(self, clipA, clipB, mapping=None, attributes=None):
        self.clipA = clipA
        self.clipB = clipB
        self.__mapping = mapping
        # normalize any attribute filter to a set for fast membership tests;
        # falsy values (None, empty) are stored untouched
        self.attributes = set(attributes) if attributes else attributes

    def setMapping(self, mapping):
        self.__mapping = mapping

    def getMapping(self):
        return self.__mapping

    def __call__(self, pct, mapping=None):
        # an explicitly passed mapping replaces the stored one
        if mapping is not None:
            self.setMapping(mapping)
        assert self.getMapping() is not None
class PoseBlender(BaseBlender):
    '''Blends two pose clips by linearly interpolating attribute values by pct.'''
    def __call__( self, pct, mapping=None, attributes=None ):
        # base call stores the mapping and asserts one exists
        BaseBlender.__call__(self, pct, mapping)
        cmdQueue = api.CmdQueue()
        if mapping is None:
            mapping = self.getMapping()
        if attributes is None:
            attributes = self.attributes
        mappingDict = mapping.asDict()
        for clipAObj, attrDictA in self.clipA.iteritems():
            #if the object isn't in the mapping dict, skip it
            if clipAObj not in mappingDict:
                continue
            clipBObjs = mapping[ clipAObj ]
            for a, valueA in attrDictA.iteritems():
                # honour the optional attribute filter
                if attributes:
                    if a not in attributes:
                        continue
                if not clipAObj:
                    continue
                attrpath = '%s.%s' % (clipAObj, a)
                # skip attributes that can't be set (locked/connected)
                if not cmd.getAttr( attrpath, settable=True ):
                    continue
                for clipBObj in clipBObjs:
                    try:
                        attrDictB = self.clipB[ clipBObj ]
                    except KeyError: continue  # object absent from clipB
                    try:
                        # lerp between the two clip values by pct
                        valueB = attrDictB[ a ]
                        blendedValue = (valueA * (1-pct)) + (valueB * pct)
                        cmdQueue.append( 'setAttr -clamp %s %f' % (attrpath, blendedValue) )
                    except KeyError:
                        # attr exists only in clipA - apply its value unblended
                        cmdQueue.append( 'setAttr -clamp %s %f' % (attrpath, valueA) )
                    except: pass  # NOTE(review): bare except silently swallows all errors here
        # execute all the queued setAttr commands at once
        cmdQueue()
class AnimBlender(BaseBlender):
    '''
    Blends two anim clips by building AnimCurve objects for every attribute in
    each clip and interpolating values/tangents between matching keys.
    '''
    def __init__( self, clipA, clipB, mapping=None ):
        BaseBlender.__init__(self, clipA, clipB, mapping)
        #so now we need to generate a dict to represent the curves for both of the clips
        animCurveDictA = self.animCurveDictA = {}
        animCurveDictB = self.animCurveDictB = {}
        #the curvePairs attribute contains a dict - indexed by attrpath - containing a tuple of (curveA, curveB)
        self.curvePairs = {}
        # NOTE(review): animCurve isn't imported by name in this module - presumably
        # provided by one of the star imports; confirm it resolves at runtime
        for clip, curveDict in zip([self.clipA, self.clipB], [animCurveDictA, animCurveDictB]):
            for o, attrDict in clip.iteritems():
                curveDict[o] = {}
                for a, keyData in attrDict.iteritems():
                    curve = curveDict[o][a] = animCurve.AnimCurve()
                    #unpack the key data
                    weightedTangents, keyList = keyData
                    #generate the curves
                    for time, value, itt, ott, ix, iy, ox, oy, isLocked, isWeighted in keyList:
                        curve.m_bWeighted = weightedTangents
                        curve.AddKey( time, value, ix, iy, ox, oy )
                    attrPath = '%s.%s' % (o, a)
                    # collect the curve under its attrpath - second clip appends
                    try: self.curvePairs[attrPath].append( curve )
                    except KeyError: self.curvePairs[attrPath] = [ curve ]
        #now iterate over each curve pair and make sure they both have keys on the same frames...
        for attrPath, curves in self.curvePairs.iteritems():
            try:
                curveA, curveB = curves
            except ValueError: continue  # attr present in only one clip - nothing to pair
            curveTimes = set( curveA.m_keys.keys() + curveB.m_keys.keys() )
            for t in curveTimes:
                curveA.InsertKey( t )
                curveB.InsertKey( t )
            # NOTE(review): debug print left in - consider removing
            print 'keys on', attrPath, 'at times', curveTimes
    def __call__( self, pct, mapping=None ):
        BaseBlender.__call__(self, pct, mapping)
        cmdQueue = api.CmdQueue()
        # at the extremes just apply the appropriate clip directly
        if pct == 0:
            self.clipA.apply( self.getMapping() )
        elif pct == 1:
            self.clipB.apply( self.getMapping() )
        else:
            for attrPath, curves in self.curvePairs.iteritems():
                try:
                    curveA, curveB = curves
                except ValueError: continue
                #because we know both curves have the same timings (ie if curveA has a key at time x, curveB is guaranteed to also have a key
                #at time x) then we just need to iterate over the keys of one curve, and blend them with the values of the other
                for time, keyA in curveA.m_keys.iteritems():
                    keyB = curveB.m_keys[ time ]
                    # lerp the value and all four tangent handles by pct
                    blendedValue = (keyA.m_flValue * (1-pct)) + (keyB.m_flValue * pct)
                    blendedIX = (keyA.m_flInTanX * (1-pct)) + (keyB.m_flInTanX * pct)
                    blendedIY = (keyA.m_flInTanY * (1-pct)) + (keyB.m_flInTanY * pct)
                    blendedOX = (keyA.m_flOutTanX * (1-pct)) + (keyB.m_flOutTanX * pct)
                    blendedOY = (keyA.m_flOutTanY * (1-pct)) + (keyB.m_flOutTanY * pct)
                    cmdQueue.append( 'setKeyframe -t %s -v %s %s' % (time, blendedValue, attrPath) )
                    cmdQueue.append( 'keyTangent -e -t %s -ix %s -iy %s -ox %s -oy %s %s' % (time, blendedIX, blendedIY, blendedOX, blendedOY, attrPath) )
            # NOTE(review): cmdQueue is populated but never executed in this method -
            # confirm whether api.CmdQueue self-executes or a cmdQueue() call is missing
class BaseClip(dict):
    '''
    Common base for pose/anim clips.  A clip is a dict keyed by object name;
    subclasses define what the values look like and how they get applied.
    '''
    blender = None  # subclasses point this at their BaseBlender subclass

    # keyword names recognised by apply() implementations
    kOPT_ADDITIVE = 'additive'
    kOPT_ADDITIVE_WORLD = 'additiveWorld'
    kOPT_OFFSET = 'offset'
    kOPT_CLEAR = 'clear'
    kMULT = 'mult'
    kOPT_ATTRSELECTION = 'attrSelection'
    OPTIONS = (kOPT_ADDITIVE, kOPT_ADDITIVE_WORLD, kOPT_OFFSET,
               kOPT_CLEAR, kMULT, kOPT_ATTRSELECTION)
    kOPT_DEFAULTS = { kOPT_ADDITIVE: False,
                      kOPT_ADDITIVE_WORLD: False,
                      kOPT_OFFSET: 0,
                      kOPT_CLEAR: True,
                      kMULT: 1,
                      kOPT_ATTRSELECTION: False }

    def __init__(self, objects=None):
        # when objects are given the clip is generated from them immediately
        if objects is not None:
            self.generate(objects)

    def generate(self, objects):
        '''populate the clip from the given objects - overridden by subclasses'''
        pass

    def apply(self, mapping, attributes=None, **kwargs):
        '''
        valid kwargs are
        additive	[False]	applys the animation additively
        '''
        pass

    def getObjects(self):
        '''returns the object names stored in the clip'''
        return self.keys()

    def generatePreArgs(self):
        '''extra args used when re-generating - none by default'''
        return tuple()
def getObjAttrNames(obj, attrNamesToSkip=()):
    '''
    Returns the keyable, visible, scalar attribute names on obj plus any
    attribute aliases, with any names matched by attrNamesToSkip removed.
    '''
    #grab attributes
    objAttrs = cmd.listAttr(obj, keyable=True, visible=True, scalar=True) or []
    #also grab alias' - its possible to pass in an alias name, so we need to test against them as well
    aliass = cmd.aliasAttr(obj, q=True) or []
    # aliasAttr returns a flat [alias, attr, alias, attr, ...] list - keep only
    # the alias entries (every second element starting at index 0)
    objAttrs.extend(aliass[::2])
    filteredAttrs = []
    for attr in objAttrs:
        skipThis = any(
            attr == skipName
            or attr.startswith(skipName + '[')
            or attr.startswith(skipName + '.')
            for skipName in attrNamesToSkip)
        if not skipThis:
            filteredAttrs.append(attr)
    return filteredAttrs
#defines a mapping between node type, and the function used to get a list of attributes from that node to save to the clip. by default getObjAttrNames( obj ) is called
# NOTE(review): blendShape skips envelope/weight/inputTarget - presumably because target weights are exposed via aliases; confirm
GET_ATTR_BY_NODE_TYPE = { 'blendShape': lambda obj: getObjAttrNames( obj, ['envelope', 'weight', 'inputTarget'] ) }
class PoseClip(BaseClip):
    '''
    A single-frame pose clip: a dict mapping object names to
    {attrName: value} dictionaries.
    '''
    blender = PoseBlender
    @classmethod
    def FromObjects( cls, objects ):
        '''constructs a new clip instance and generates it from the given objects'''
        new = cls()
        cls.generate(new, objects)
        return new
    def __add__( self, other ):
        '''
        for adding multiple pose clips together - returns a new PoseClip instance
        '''
        pass
    def __mul__( self, other ):
        '''
        for multiplying a clip by a scalar value - returns a new, scaled
        PoseClip and leaves self untouched
        '''
        assert isinstance(other, (int, long, float))
        # BUGFIX: this previously forgot to instantiate the new clip (new = PoseClip)
        # and scaled this clip's values in place instead of returning a scaled copy
        new = PoseClip()
        for obj, attrDict in self.iteritems():
            new[obj] = dict( (attr, value * other) for attr, value in attrDict.iteritems() )
        return new
    # backward-compatible alias for the old, misspelt operator hook
    __mult__ = __mul__
    def generate( self, objects, attrs=None ):
        '''
        generates a pose dictionary - its basically just dict with node names for keys. key values
        are dictionaries with attribute name keys and attribute value keys
        '''
        self.clear()
        if attrs:
            attrs = set( attrs )
        for obj in objects:
            # node-type specific attr listing (eg blendShape), default getObjAttrNames
            objType = cmd.nodeType( obj )
            attrGetFunction = GET_ATTR_BY_NODE_TYPE.get( objType, getObjAttrNames )
            objAttrs = set( attrGetFunction( obj ) )
            if attrs:
                objAttrs = objAttrs.intersection( attrs )
            # BUGFIX: was "if objAttrs is None" - set.intersection never returns
            # None, so empty attr sets used to create useless empty entries
            if not objAttrs:
                continue
            self[ obj ] = objDict = {}
            for attr in objAttrs:
                objDict[ attr ] = cmd.getAttr( '%s.%s' % (obj, attr) )
        return True
    @d_unifyUndo
    def apply( self, mapping, attributes=None, **kwargs ):
        '''
        construct a mel string to pass to eval - so it can be contained in a single undo...

        valid kwargs: additive [False] - adds pose values to current values
        '''
        cmdQueue = api.CmdQueue()
        #gather options...
        additive = kwargs.get( self.kOPT_ADDITIVE,
                               self.kOPT_DEFAULTS[ self.kOPT_ADDITIVE ] )
        #convert the attribute list to a set for fast lookup
        if attributes:
            attributes = set( attributes )
        for clipObj, tgtObj in mapping.iteritems():
            try:
                attrDict = self[ clipObj ]
            except KeyError: continue  # object not stored in this clip
            for attr, value in attrDict.iteritems():
                if attributes:
                    if attr not in attributes:
                        continue
                if not tgtObj:
                    continue
                attrpath = '%s.%s' % (tgtObj, attr)
                try:
                    # skip unsettable (locked/connected) attrs
                    if not cmd.getAttr( attrpath, settable=True ): continue
                except TypeError: continue  # attr doesn't exist on the target
                if additive: value += cmd.getAttr( attrpath )
                cmdQueue.append( 'setAttr -clamp %s %f;' % (attrpath, value) )
        cmdQueue()
class AnimClip(BaseClip):
blender = AnimBlender
def __init__( self, objects=None ):
self.offset = 0
BaseClip.__init__(self, objects)
def __add__( self, other ):
pass
def __mult__( self, other ):
assert isinstance(other, (int, long, float))
for obj, attrDict in self.iteritems():
for attr, value in attrDict.iteritems():
value *= other
def generate( self, objects, attrs=None, startFrame=None, endFrame=None ):
'''
generates an anim dictionary - its basically just dict with node names for keys. key values
are lists of tuples with the form: (keyTime, attrDict) where attrDict is a dictionary with
attribute name keys and attribute value keys
'''
defaultWeightedTangentOpt = bool(cmd.keyTangent(q=True, g=True, wt=True))
self.clear()
if attrs:
attrs = set( attrs )
if startFrame is None:
startFrame = cmd.playbackOptions( q=True, min=True )
if endFrame is None:
endFrame = cmd.playbackOptions( q=True, max=True )
startFrame, endFrame = list( sorted( [startFrame, endFrame] ) )
self.offset = startFrame
#list all keys on the objects - so we can determine the start frame, and range. all times are stored relative to this time
allKeys = cmd.keyframe( objects, q=True ) or []
allKeys.sort()
allKeys = [ k for k in allKeys if startFrame <= k <= endFrame ]
if not allKeys:
return False
self.offset = offset = allKeys[ 0 ]
self.__range = allKeys[ -1 ] - offset
for obj in objects:
objAttrs = set( cmd.listAttr( obj, keyable=True, visible=True, scalar=True ) or [] )
if attrs:
objAttrs = objAttrs.intersection( attrs )
if not objAttrs:
continue
objDict = {}
self[ obj ] = objDict
for attr in objAttrs:
timeTuple = startFrame, endFrame
#so the attr value dict contains a big fat list containing tuples of the form:
#(time, value, itt, ott, ita, ota, iw, ow, isLockedTangents, isWeightLock)
attrpath = '%s.%s' % (obj, attr)
times = cmd.keyframe( attrpath, q=True, t=timeTuple )
weightedTangents = defaultWeightedTangentOpt
#if there is an animCurve this will return its "weighted tangent" state - otherwise it will return None and a TypeError will be raised
try: weightedTangents = bool(cmd.keyTangent(attrpath, q=True, weightedTangents=True)[0])
except TypeError: pass
if times is None:
#in this case the attr has no animation, so simply record the pose for this attr
objDict[attr] = (False, [(None, cmd.getAttr(attrpath), None, None, None, None, None, None, None, None)])
continue
else:
times = [ t-offset for t in times ]
values = cmd.keyframe(attrpath, q=True, t=timeTuple, vc=True)
itts = cmd.keyTangent(attrpath, q=True, t=timeTuple, itt=True)
otts = cmd.keyTangent(attrpath, q=True, t=timeTuple, ott=True)
ixs = cmd.keyTangent(attrpath, q=True, t=timeTuple, ix=True)
iys = cmd.keyTangent(attrpath, q=True, t=timeTuple, iy=True)
oxs = cmd.keyTangent(attrpath, q=True, t=timeTuple, ox=True)
oys = cmd.keyTangent(attrpath, q=True, t=timeTuple, oy=True)
isLocked = cmd.keyTangent(attrpath, q=True, t=timeTuple, weightLock=True)
isWeighted = cmd.keyTangent(attrpath, q=True, t=timeTuple, weightLock=True)
objDict[ attr ] = weightedTangents, zip(times, values, itts, otts, ixs, iys, oxs, oys, isLocked, isWeighted)
return True
@d_unifyUndo
def apply( self, mapping, attributes=None, **kwargs ):
'''
valid kwargs are:
mult [1.0] apply a mutiplier when applying curve values
additive [False]
clear [True]
'''
beginningWeightedTanState = cmd.keyTangent( q=True, g=True, wt=True )
### gather options...
additive = kwargs.get( self.kOPT_ADDITIVE,
self.kOPT_DEFAULTS[self.kOPT_ADDITIVE] )
worldAdditive = kwargs.get( self.kOPT_ADDITIVE_WORLD,
self.kOPT_DEFAULTS[self.kOPT_ADDITIVE_WORLD] )
clear = kwargs.get( self.kOPT_CLEAR,
self.kOPT_DEFAULTS[self.kOPT_CLEAR] )
mult = kwargs.get( self.kMULT,
self.kOPT_DEFAULTS[self.kMULT] )
timeOffset = kwargs.get( self.kOPT_OFFSET, self.offset )
#if worldAdditive is turned on, then additive is implied
if worldAdditive:
additive = worldAdditive
#determine the time range to clear
clearStart = timeOffset
clearEnd = clearStart + self.range
#convert the attribute list to a set for fast lookup
if attributes:
attributes = set( attributes )
for obj, tgtObj in mapping.iteritems():
if not tgtObj:
continue
try:
attrDict = self[ obj ]
except KeyError: continue
for attr, (weightedTangents, keyList) in attrDict.iteritems():
if attributes:
if attr not in attributes:
continue
attrpath = '%s.%s' % (tgtObj, attr)
try:
if not cmd.getAttr(attrpath, settable=True):
continue
except TypeError: continue
except RuntimeError:
print obj, tgtObj, attrpath
raise
#do the clear... maya doesn't complain if we try to do a cutKey on an attrpath with no
#animation - and this is good to do before we determine whether the attrpath has a curve or not...
if clear:
cmd.cutKey( attrpath, t=(clearStart, clearEnd), cl=True )
#is there an anim curve on the target attrpath already?
curveExists = cmd.keyframe(attrpath, index=(0,), q=True) is not None
preValue = 0
if additive:
if worldAdditive:
isWorld = True
#if the control has space switching setup, see if its value is set to "world" - if its not, we're don't treat the control's animation as additive
try: isWorld = cmd.getAttr('%s.parent' % obj, asString=True) == 'world'
except TypeError: pass
#only treat translation as additive
if isWorld and attr.startswith('translate'):
preValue = cmd.getAttr(attrpath)
else:
preValue = cmd.getAttr(attrpath)
for time, value, itt, ott, ix, iy, ox, oy, isLocked, isWeighted in keyList:
value *= mult
value += preValue
if time is None:
#in this case the attr value was just a pose...
cmd.setAttr( attrpath, value )
else:
time += timeOffset
cmd.setKeyframe( attrpath, t=(time,), v=value )
if weightedTangents:
#this needs to be done as two separate commands - because setting the tangent types in the same cmd as setting tangent weights can result
#in the tangent types being ignored (for the case of stepped mainly, but subtle weirdness with flat happens too)
cmd.keyTangent( attrpath, t=(time,), ix=ix, iy=iy, ox=ox, oy=oy, l=isLocked, wl=isWeighted )
cmd.keyTangent( attrpath, t=(time,), itt=itt, ott=ott )
else:
cmd.keyTangent( attrpath, t=(time,), ix=ix, iy=iy, ox=ox, oy=oy )
#cmd.keyTangent( e=True, g=True, wt=beginningWeightedTanState )
def getKeyTimes( self ):
'''
returns an ordered list of key times
'''
keyTimesSet = set()
for obj, attrDict in self.iteritems():
for attr, (weightedTangents, keyList) in attrDict.iteritems():
if keyList[0][0] is None:
continue
for tup in keyList:
keyTimesSet.add( tup[0] )
keyTimes = list( keyTimesSet )
keyTimes.sort()
return keyTimes
def getRange( self ):
'''
returns a tuple of (start, end)
'''
times = self.getKeyTimes()
try:
start, end = times[0], times[-1]
self.offset = start
except IndexError:
start, end = 0, 0
self.__range = 0
return start, end
def getRangeValue( self ):
try:
return self.__range
except AttributeError:
self.getRange()
return self.__range
range = property(getRangeValue)
def generatePreArgs( self ):
return tuple()
kEXPORT_DICT_THE_CLIP = 'clip'
kEXPORT_DICT_CLIP_TYPE = 'clip_type'
kEXPORT_DICT_OBJECTS = 'objects'
kEXPORT_DICT_WORLDSPACE = 'worldspace'
class ClipPreset(Preset):
'''
a clip preset is different from a normal preset because it is actually two separate files - a
pickled animation data file, and an icon
'''
TYPE_CLASSES = {kPOSE: PoseClip,
kANIM: AnimClip,
kDELTA: None}
TYPE_LABELS = {kPOSE: 'pose',
kANIM: 'anim',
kDELTA: 'delta'}
### auto generate a label types
LABEL_TYPES = {}
for t, l in TYPE_LABELS.iteritems():
LABEL_TYPES[l] = t
def __new__( cls, locale, library, name, type=kPOSE ):
tool = '%s/%s' % (TOOL_NAME, library)
typeLbl = cls.TYPE_LABELS[type]
ext = '%s.%s' % (typeLbl, kEXT)
self = Preset.__new__( cls, locale, tool, name, ext )
self.icon = Preset( locale, tool, name, '%s.bmp' % typeLbl )
return self
def asClip( self ):
presetDict = self.unpickle()
return presetDict[ kEXPORT_DICT_THE_CLIP ]
def niceName( self ):
return self.name().split('.')[0]
def getLibrary( self ):
return self[-2]
def setLibrary( self, library ):
self[-2] = library
def getTypeName( self ):
return self.name().split('.')[ -1 ]
def getType( self ):
typeLbl = self.getTypeName()
return self.LABEL_TYPES[typeLbl]
def move( self, library=None ):
if library is None:
library = self.getLibrary()
newLoc = ClipPreset(self.other(), library, self.niceName(), self.getType())
#perform the move...
Path.move(self, newLoc)
Path.move(self.icon, newLoc.icon)
return newLoc
def copy( self, library=None ):
if library is None:
library = self.library
newLoc = ClipPreset(self.other(), library, self.niceName(), self.getType())
#perform the copy...
Path.copy(self, newLoc)
Path.copy(self.icon, newLoc.icon)
return newLoc
def rename( self, newName ):
'''
newName should be the base name - sans any clip type id or extension...
'''
newName = '%s.%s' % (scrubName(newName), self.getTypeName())
Preset.rename(self, newName)
self.icon.rename(newName)
def delete( self ):
Path.delete(self)
self.icon.delete()
@api.d_noAutoKey
def apply( self, objects, attributes=None, **kwargs ):
presetDict = self.unpickle()
srcObjs = presetDict[ kEXPORT_DICT_OBJECTS ]
clip = presetDict[ kEXPORT_DICT_THE_CLIP ]
#do a version check - if older version clip is being used - perhaps we can write conversion functionality?
try:
ver = presetDict[ kEXPORT_DICT_TOOL_VER ]
if ver != VER:
api.melWarning("the anim clip version don't match!")
except KeyError:
api.melWarning("this is an old VER 1 pose clip - I don't know how to load them anymore...")
return
#generate the name mapping
slamApply = kwargs.get( 'slam', False )
if slamApply:
objects = cmd.ls( typ='transform' )
tgts = names.matchNames( srcObjs, objects, threshold=kDEFAULT_MAPPING_THRESHOLD )
mapping = Mapping( srcObjs, tgts )
else:
tgts = names.matchNames( srcObjs, objects, threshold=kDEFAULT_MAPPING_THRESHOLD )
mapping = Mapping( srcObjs, tgts )
#run the clip's apply method
clip.apply( mapping, attributes, **kwargs )
def getClipObjects( self ):
'''
returns a list of all the object names contained in the clip
'''
presetDict = self.unpickle()
srcObjs = presetDict[ kEXPORT_DICT_OBJECTS ]
return srcObjs
def write( self, objects, **kwargs ):
type = self.getType()
clipDict = api.writeExportDict( TOOL_NAME, VER )
clipDict[ kEXPORT_DICT_CLIP_TYPE ] = type
clipDict[ kEXPORT_DICT_OBJECTS ] = objects
clipDict[ kEXPORT_DICT_WORLDSPACE ] = False
theClip = self.TYPE_CLASSES[ type ]()
success = theClip.generate( objects, **kwargs )
if not success:
printErrorStr( "Failed to generate clip!" )
return
clipDict[ kEXPORT_DICT_THE_CLIP ] = theClip
#write the preset file to disk
self.pickle( clipDict )
#generate the icon for the clip and add it to perforce if appropriate
icon = generateIcon( self )
#icon.asP4().add()
printInfoStr( "Generated clip!" )
class ClipManager(PresetManager):
'''
an abstraction for listing libraries and library clips for clip presets - there are two
main differences between clip presets and other presets - clips have a library which is
a subdir of the main preset dir, and there are also multiple types of clips both with
the same extension.
'''
def __init__( self ):
PresetManager.__init__(self, TOOL_NAME, kEXT)
def getLibraryNames( self ):
'''
returns the names of all libraries under the current mod
'''
libraries = set()
for locale, paths in self.getLibraryPaths().iteritems():
for p in paths:
libName = p.name()
libraries.add(libName)
libraries = list(libraries)
libraries.sort()
return libraries
def getLibraryPaths( self ):
'''
returns a dictionary of library paths keyed using locale. ie:
{LOCAL: [path1, path2, ...], GLOBAL: etc...}
'''
localeDict = {}
for locale in LOCAL, GLOBAL:
localeDict[locale] = libraries = []
dirs = self.getPresetDirs(locale)
libraryNames = set()
for d in dirs:
dLibs = d.dirs()
for dLib in dLibs:
dLibName = dLib[-1]
if dLibName not in libraryNames:
libraries.append(dLib)
libraryNames.add(dLibName)
return localeDict
def createLibrary( self, name ):
newLibraryPath = Preset(LOCAL, TOOL_NAME, name, '')
newLibraryPath.create()
def getLibraryClips( self, library ):
global kEXT
clips = {LOCAL: [], GLOBAL: []}
for locale in LOCAL, GLOBAL:
localeClips = clips[locale]
for dir in getPresetDirs(locale, TOOL_NAME):
dir += library
if not dir.exists:
continue
for f in dir.files():
if f.hasExtension( kEXT ):
f = f.setExtension()
name, type = f[ -1 ].split('.')
f = f[ :-1 ]
type = ClipPreset.LABEL_TYPES[ type ]
localeClips.append( ClipPreset(locale, library, name, type) )
return clips
def getPathToLibrary( self, library, locale=LOCAL ):
return getPresetDirs(locale, TOOL_NAME)[0] / library
def reload( self ):
pass
#end
| mel.eval("modelEditor -e %s 0 %s;" % (setting, panel)) | conditional_block |
animLib.py |
from filesystem import *
from common import printInfoStr, printErrorStr
from mayaDecorators import d_unifyUndo
import maya.cmds as cmd
import names
import api
import apiExtensions
__author__ = 'mel@macaronikazoo.com'
TOOL_NAME = 'animLib'
kEXT = 'clip'
VER = 3 #version
### clip types
kPOSE = 0
kANIM = 1
kDELTA = 2
kDEFAULT_MAPPING_THRESHOLD = 1
kICON_W_H = 60, 60
mel = api.mel
Mapping = names.Mapping
class AnimLibException(Exception):
def __init__( self, *args ):
Exception.__init__(self, *args)
def getMostLikelyModelView():
'''
returns the panel name for the most likely active panel - the currently active panel can be
ambiguous if the user has been using the outliner, or graph editor or something after viewport
usage... this method simply looks at the currently active panel and if its not a modelPanel,
then it returns the first visible model panel. if no panels are found, returns None
'''
cur = cmd.getPanel(wf=True)
curType = cmd.getPanel(to=cur)
if curType == "modelPanel":
return cur
visPanels = cmd.getPanel(vis=True)
for p in visPanels:
if cmd.getPanel(to=p) == "modelPanel":
return p
return None
def generateIcon( preset ):
'''
given a preset object, this method will generate an icon using the currently active viewport. the
path to the icon is returned
'''
sel = cmd.ls(sl=True)
cmd.select(cl=True)
panel = getMostLikelyModelView()
if panel is None:
raise AnimLibException('cannot determine which panel to use for icon generation')
#store some initial settings, change them to what is required, and then restored at the very end
settings = ["-df", "-cv", "-ca", "-nurbsCurves", "-nurbsSurfaces", "-lt", "-ha", "-dim", "-pv", "-ikh", "-j", "-dy"]
imgFormat = cmd.getAttr("defaultRenderGlobals.imageFormat")
states = []
cmd.setAttr("defaultRenderGlobals.imageFormat", 20)
for setting in settings:
states.append( mel.eval("modelEditor -q %s %s;" % (setting, panel)) )
for setting in settings:
mel.eval("modelEditor -e %s 0 %s;" % (setting, panel))
time = cmd.currentTime(q=True)
#make sure the icon is open for edit if its a global clip
if preset.locale == GLOBAL and preset.icon.exists:
preset.edit()
icon = cmd.playblast(st=time, et=time, w=kICON_W_H[0], h=kICON_W_H[1], fo=True, fmt="image", v=0, p=100, orn=0, cf=str(preset.icon.resolve()))
icon = Path(icon)
if icon.exists:
icon = icon.setExtension('bmp', True)
cmd.setAttr("defaultRenderGlobals.imageFormat", imgFormat)
#restore viewport settings
try: cmd.select(sel)
except TypeError: pass
for setting, initialState in zip(settings, states):
mel.eval("modelEditor -e %s %s %s;" % (setting, initialState, panel))
return icon
class BaseBlender(object):
'''
a blender object is simply a callable object that when called with a percentage arg (0-1) will
apply said percentage of the given clips to the given mapping
'''
def __init__( self, clipA, clipB, mapping=None, attributes=None ):
self.clipA = clipA
self.clipB = clipB
self.__mapping = mapping
if attributes:
attributes = set( attributes )
self.attributes = attributes
def setMapping( self, mapping ):
self.__mapping = mapping
def getMapping( self ):
return self.__mapping
def __call__( self, pct, mapping=None ):
if mapping is not None:
self.setMapping( mapping )
assert self.getMapping() is not None
class PoseBlender(BaseBlender):
def | ( self, pct, mapping=None, attributes=None ):
BaseBlender.__call__(self, pct, mapping)
cmdQueue = api.CmdQueue()
if mapping is None:
mapping = self.getMapping()
if attributes is None:
attributes = self.attributes
mappingDict = mapping.asDict()
for clipAObj, attrDictA in self.clipA.iteritems():
#if the object isn't in the mapping dict, skip it
if clipAObj not in mappingDict:
continue
clipBObjs = mapping[ clipAObj ]
for a, valueA in attrDictA.iteritems():
if attributes:
if a not in attributes:
continue
if not clipAObj:
continue
attrpath = '%s.%s' % (clipAObj, a)
if not cmd.getAttr( attrpath, settable=True ):
continue
for clipBObj in clipBObjs:
try:
attrDictB = self.clipB[ clipBObj ]
except KeyError: continue
try:
valueB = attrDictB[ a ]
blendedValue = (valueA * (1-pct)) + (valueB * pct)
cmdQueue.append( 'setAttr -clamp %s %f' % (attrpath, blendedValue) )
except KeyError:
cmdQueue.append( 'setAttr -clamp %s %f' % (attrpath, valueA) )
except: pass
cmdQueue()
class AnimBlender(BaseBlender):
def __init__( self, clipA, clipB, mapping=None ):
BaseBlender.__init__(self, clipA, clipB, mapping)
#so now we need to generate a dict to represent the curves for both of the clips
animCurveDictA = self.animCurveDictA = {}
animCurveDictB = self.animCurveDictB = {}
#the curvePairs attribute contains a dict - indexed by attrpath - containing a tuple of (curveA, curveB)
self.curvePairs = {}
for clip, curveDict in zip([self.clipA, self.clipB], [animCurveDictA, animCurveDictB]):
for o, attrDict in clip.iteritems():
curveDict[o] = {}
for a, keyData in attrDict.iteritems():
curve = curveDict[o][a] = animCurve.AnimCurve()
#unpack the key data
weightedTangents, keyList = keyData
#generate the curves
for time, value, itt, ott, ix, iy, ox, oy, isLocked, isWeighted in keyList:
curve.m_bWeighted = weightedTangents
curve.AddKey( time, value, ix, iy, ox, oy )
attrPath = '%s.%s' % (o, a)
try: self.curvePairs[attrPath].append( curve )
except KeyError: self.curvePairs[attrPath] = [ curve ]
#now iterate over each curve pair and make sure they both have keys on the same frames...
for attrPath, curves in self.curvePairs.iteritems():
try:
curveA, curveB = curves
except ValueError: continue
curveTimes = set( curveA.m_keys.keys() + curveB.m_keys.keys() )
for t in curveTimes:
curveA.InsertKey( t )
curveB.InsertKey( t )
print 'keys on', attrPath, 'at times', curveTimes
def __call__( self, pct, mapping=None ):
BaseBlender.__call__(self, pct, mapping)
cmdQueue = api.CmdQueue()
if pct == 0:
self.clipA.apply( self.getMapping() )
elif pct == 1:
self.clipB.apply( self.getMapping() )
else:
for attrPath, curves in self.curvePairs.iteritems():
try:
curveA, curveB = curves
except ValueError: continue
#because we know both curves have the same timings (ie if curveA has a key at time x, curveB is guaranteed to also have a key
#at time x) then we just need to iterate over the keys of one curve, and blend them with the values of the other
for time, keyA in curveA.m_keys.iteritems():
keyB = curveB.m_keys[ time ]
blendedValue = (keyA.m_flValue * (1-pct)) + (keyB.m_flValue * pct)
blendedIX = (keyA.m_flInTanX * (1-pct)) + (keyB.m_flInTanX * pct)
blendedIY = (keyA.m_flInTanY * (1-pct)) + (keyB.m_flInTanY * pct)
blendedOX = (keyA.m_flOutTanX * (1-pct)) + (keyB.m_flOutTanX * pct)
blendedOY = (keyA.m_flOutTanY * (1-pct)) + (keyB.m_flOutTanY * pct)
cmdQueue.append( 'setKeyframe -t %s -v %s %s' % (time, blendedValue, attrPath) )
cmdQueue.append( 'keyTangent -e -t %s -ix %s -iy %s -ox %s -oy %s %s' % (time, blendedIX, blendedIY, blendedOX, blendedOY, attrPath) )
class BaseClip(dict):
'''
baseclass for clips
'''
blender = None
OPTIONS =\
kOPT_ADDITIVE, kOPT_ADDITIVE_WORLD, kOPT_OFFSET, kOPT_CLEAR, kMULT, kOPT_ATTRSELECTION =\
'additive', 'additiveWorld', 'offset', 'clear', 'mult', 'attrSelection'
kOPT_DEFAULTS = { kOPT_ADDITIVE: False,
kOPT_ADDITIVE_WORLD: False,
kOPT_OFFSET: 0,
kOPT_CLEAR: True,
kMULT: 1,
kOPT_ATTRSELECTION: False }
def __init__( self, objects=None ):
if objects is not None:
self.generate( objects )
def generate( self, objects ):
pass
def apply( self, mapping, attributes=None, **kwargs ):
'''
valid kwargs are
additive [False] applys the animation additively
'''
pass
def getObjects( self ):
return self.keys()
def generatePreArgs( self ):
return tuple()
def getObjAttrNames( obj, attrNamesToSkip=() ):
#grab attributes
objAttrs = cmd.listAttr( obj, keyable=True, visible=True, scalar=True ) or []
#also grab alias' - its possible to pass in an alias name, so we need to test against them as well
aliass = cmd.aliasAttr( obj, q=True ) or []
#because the aliasAttr cmd returns a list with the alias, attr pairs in a flat list, we need to iterate over the list, skipping every second entry
itAliass = iter( aliass )
for attr in itAliass:
objAttrs.append( attr )
itAliass.next()
filteredAttrs = []
for attr in objAttrs:
skipAttr = False
for skipName in attrNamesToSkip:
if attr == skipName:
skipAttr = True
elif attr.startswith( skipName +'[' ) or attr.startswith( skipName +'.' ):
skipAttr = True
if skipAttr:
continue
filteredAttrs.append( attr )
return filteredAttrs
#defines a mapping between node type, and the function used to get a list of attributes from that node to save to the clip. by default getObjAttrNames( obj ) is called
GET_ATTR_BY_NODE_TYPE = { 'blendShape': lambda obj: getObjAttrNames( obj, ['envelope', 'weight', 'inputTarget'] ) }
class PoseClip(BaseClip):
blender = PoseBlender
@classmethod
def FromObjects( cls, objects ):
new = cls()
cls.generate(new, objects)
return new
def __add__( self, other ):
'''
for adding multiple pose clips together - returns a new PoseClip instance
'''
pass
def __mult__( self, other ):
'''
for multiplying a clip by a scalar value
'''
assert isinstance(other, (int, long, float))
new = PoseClip
for obj, attrDict in self.iteritems():
for attr, value in attrDict.iteritems():
attrDict[attr] = value * other
def generate( self, objects, attrs=None ):
'''
generates a pose dictionary - its basically just dict with node names for keys. key values
are dictionaries with attribute name keys and attribute value keys
'''
self.clear()
if attrs:
attrs = set( attrs )
for obj in objects:
objType = cmd.nodeType( obj )
attrGetFunction = GET_ATTR_BY_NODE_TYPE.get( objType, getObjAttrNames )
objAttrs = set( attrGetFunction( obj ) )
if attrs:
objAttrs = objAttrs.intersection( attrs )
if objAttrs is None:
continue
self[ obj ] = objDict = {}
for attr in objAttrs:
objDict[ attr ] = cmd.getAttr( '%s.%s' % (obj, attr) )
return True
@d_unifyUndo
def apply( self, mapping, attributes=None, **kwargs ):
'''
construct a mel string to pass to eval - so it can be contained in a single undo...
'''
cmdQueue = api.CmdQueue()
#gather options...
additive = kwargs.get( self.kOPT_ADDITIVE,
self.kOPT_DEFAULTS[ self.kOPT_ADDITIVE ] )
#convert the attribute list to a set for fast lookup
if attributes:
attributes = set( attributes )
for clipObj, tgtObj in mapping.iteritems():
try:
attrDict = self[ clipObj ]
except KeyError: continue
for attr, value in attrDict.iteritems():
if attributes:
if attr not in attributes:
continue
if not tgtObj:
continue
attrpath = '%s.%s' % (tgtObj, attr)
try:
if not cmd.getAttr( attrpath, settable=True ): continue
except TypeError: continue
if additive: value += cmd.getAttr( attrpath )
cmdQueue.append( 'setAttr -clamp %s %f;' % (attrpath, value) )
cmdQueue()
class AnimClip(BaseClip):
blender = AnimBlender
def __init__( self, objects=None ):
self.offset = 0
BaseClip.__init__(self, objects)
def __add__( self, other ):
pass
def __mult__( self, other ):
assert isinstance(other, (int, long, float))
for obj, attrDict in self.iteritems():
for attr, value in attrDict.iteritems():
value *= other
def generate( self, objects, attrs=None, startFrame=None, endFrame=None ):
'''
generates an anim dictionary - its basically just dict with node names for keys. key values
are lists of tuples with the form: (keyTime, attrDict) where attrDict is a dictionary with
attribute name keys and attribute value keys
'''
defaultWeightedTangentOpt = bool(cmd.keyTangent(q=True, g=True, wt=True))
self.clear()
if attrs:
attrs = set( attrs )
if startFrame is None:
startFrame = cmd.playbackOptions( q=True, min=True )
if endFrame is None:
endFrame = cmd.playbackOptions( q=True, max=True )
startFrame, endFrame = list( sorted( [startFrame, endFrame] ) )
self.offset = startFrame
#list all keys on the objects - so we can determine the start frame, and range. all times are stored relative to this time
allKeys = cmd.keyframe( objects, q=True ) or []
allKeys.sort()
allKeys = [ k for k in allKeys if startFrame <= k <= endFrame ]
if not allKeys:
return False
self.offset = offset = allKeys[ 0 ]
self.__range = allKeys[ -1 ] - offset
for obj in objects:
objAttrs = set( cmd.listAttr( obj, keyable=True, visible=True, scalar=True ) or [] )
if attrs:
objAttrs = objAttrs.intersection( attrs )
if not objAttrs:
continue
objDict = {}
self[ obj ] = objDict
for attr in objAttrs:
timeTuple = startFrame, endFrame
#so the attr value dict contains a big fat list containing tuples of the form:
#(time, value, itt, ott, ita, ota, iw, ow, isLockedTangents, isWeightLock)
attrpath = '%s.%s' % (obj, attr)
times = cmd.keyframe( attrpath, q=True, t=timeTuple )
weightedTangents = defaultWeightedTangentOpt
#if there is an animCurve this will return its "weighted tangent" state - otherwise it will return None and a TypeError will be raised
try: weightedTangents = bool(cmd.keyTangent(attrpath, q=True, weightedTangents=True)[0])
except TypeError: pass
if times is None:
#in this case the attr has no animation, so simply record the pose for this attr
objDict[attr] = (False, [(None, cmd.getAttr(attrpath), None, None, None, None, None, None, None, None)])
continue
else:
times = [ t-offset for t in times ]
values = cmd.keyframe(attrpath, q=True, t=timeTuple, vc=True)
itts = cmd.keyTangent(attrpath, q=True, t=timeTuple, itt=True)
otts = cmd.keyTangent(attrpath, q=True, t=timeTuple, ott=True)
ixs = cmd.keyTangent(attrpath, q=True, t=timeTuple, ix=True)
iys = cmd.keyTangent(attrpath, q=True, t=timeTuple, iy=True)
oxs = cmd.keyTangent(attrpath, q=True, t=timeTuple, ox=True)
oys = cmd.keyTangent(attrpath, q=True, t=timeTuple, oy=True)
isLocked = cmd.keyTangent(attrpath, q=True, t=timeTuple, weightLock=True)
isWeighted = cmd.keyTangent(attrpath, q=True, t=timeTuple, weightLock=True)
objDict[ attr ] = weightedTangents, zip(times, values, itts, otts, ixs, iys, oxs, oys, isLocked, isWeighted)
return True
@d_unifyUndo
def apply( self, mapping, attributes=None, **kwargs ):
'''
valid kwargs are:
mult [1.0] apply a mutiplier when applying curve values
additive [False]
clear [True]
'''
beginningWeightedTanState = cmd.keyTangent( q=True, g=True, wt=True )
### gather options...
additive = kwargs.get( self.kOPT_ADDITIVE,
self.kOPT_DEFAULTS[self.kOPT_ADDITIVE] )
worldAdditive = kwargs.get( self.kOPT_ADDITIVE_WORLD,
self.kOPT_DEFAULTS[self.kOPT_ADDITIVE_WORLD] )
clear = kwargs.get( self.kOPT_CLEAR,
self.kOPT_DEFAULTS[self.kOPT_CLEAR] )
mult = kwargs.get( self.kMULT,
self.kOPT_DEFAULTS[self.kMULT] )
timeOffset = kwargs.get( self.kOPT_OFFSET, self.offset )
#if worldAdditive is turned on, then additive is implied
if worldAdditive:
additive = worldAdditive
#determine the time range to clear
clearStart = timeOffset
clearEnd = clearStart + self.range
#convert the attribute list to a set for fast lookup
if attributes:
attributes = set( attributes )
for obj, tgtObj in mapping.iteritems():
if not tgtObj:
continue
try:
attrDict = self[ obj ]
except KeyError: continue
for attr, (weightedTangents, keyList) in attrDict.iteritems():
if attributes:
if attr not in attributes:
continue
attrpath = '%s.%s' % (tgtObj, attr)
try:
if not cmd.getAttr(attrpath, settable=True):
continue
except TypeError: continue
except RuntimeError:
print obj, tgtObj, attrpath
raise
#do the clear... maya doesn't complain if we try to do a cutKey on an attrpath with no
#animation - and this is good to do before we determine whether the attrpath has a curve or not...
if clear:
cmd.cutKey( attrpath, t=(clearStart, clearEnd), cl=True )
#is there an anim curve on the target attrpath already?
curveExists = cmd.keyframe(attrpath, index=(0,), q=True) is not None
preValue = 0
if additive:
if worldAdditive:
isWorld = True
#if the control has space switching setup, see if its value is set to "world" - if its not, we're don't treat the control's animation as additive
try: isWorld = cmd.getAttr('%s.parent' % obj, asString=True) == 'world'
except TypeError: pass
#only treat translation as additive
if isWorld and attr.startswith('translate'):
preValue = cmd.getAttr(attrpath)
else:
preValue = cmd.getAttr(attrpath)
for time, value, itt, ott, ix, iy, ox, oy, isLocked, isWeighted in keyList:
value *= mult
value += preValue
if time is None:
#in this case the attr value was just a pose...
cmd.setAttr( attrpath, value )
else:
time += timeOffset
cmd.setKeyframe( attrpath, t=(time,), v=value )
if weightedTangents:
#this needs to be done as two separate commands - because setting the tangent types in the same cmd as setting tangent weights can result
#in the tangent types being ignored (for the case of stepped mainly, but subtle weirdness with flat happens too)
cmd.keyTangent( attrpath, t=(time,), ix=ix, iy=iy, ox=ox, oy=oy, l=isLocked, wl=isWeighted )
cmd.keyTangent( attrpath, t=(time,), itt=itt, ott=ott )
else:
cmd.keyTangent( attrpath, t=(time,), ix=ix, iy=iy, ox=ox, oy=oy )
#cmd.keyTangent( e=True, g=True, wt=beginningWeightedTanState )
def getKeyTimes( self ):
'''
returns an ordered list of key times
'''
keyTimesSet = set()
for obj, attrDict in self.iteritems():
for attr, (weightedTangents, keyList) in attrDict.iteritems():
if keyList[0][0] is None:
continue
for tup in keyList:
keyTimesSet.add( tup[0] )
keyTimes = list( keyTimesSet )
keyTimes.sort()
return keyTimes
def getRange( self ):
'''
returns a tuple of (start, end)
'''
times = self.getKeyTimes()
try:
start, end = times[0], times[-1]
self.offset = start
except IndexError:
start, end = 0, 0
self.__range = 0
return start, end
def getRangeValue( self ):
try:
return self.__range
except AttributeError:
self.getRange()
return self.__range
range = property(getRangeValue)
def generatePreArgs( self ):
return tuple()
kEXPORT_DICT_THE_CLIP = 'clip'
kEXPORT_DICT_CLIP_TYPE = 'clip_type'
kEXPORT_DICT_OBJECTS = 'objects'
kEXPORT_DICT_WORLDSPACE = 'worldspace'
class ClipPreset(Preset):
'''
a clip preset is different from a normal preset because it is actually two separate files - a
pickled animation data file, and an icon
'''
TYPE_CLASSES = {kPOSE: PoseClip,
kANIM: AnimClip,
kDELTA: None}
TYPE_LABELS = {kPOSE: 'pose',
kANIM: 'anim',
kDELTA: 'delta'}
### auto generate a label types
LABEL_TYPES = {}
for t, l in TYPE_LABELS.iteritems():
LABEL_TYPES[l] = t
def __new__( cls, locale, library, name, type=kPOSE ):
tool = '%s/%s' % (TOOL_NAME, library)
typeLbl = cls.TYPE_LABELS[type]
ext = '%s.%s' % (typeLbl, kEXT)
self = Preset.__new__( cls, locale, tool, name, ext )
self.icon = Preset( locale, tool, name, '%s.bmp' % typeLbl )
return self
def asClip( self ):
presetDict = self.unpickle()
return presetDict[ kEXPORT_DICT_THE_CLIP ]
def niceName( self ):
return self.name().split('.')[0]
def getLibrary( self ):
return self[-2]
def setLibrary( self, library ):
self[-2] = library
def getTypeName( self ):
return self.name().split('.')[ -1 ]
def getType( self ):
typeLbl = self.getTypeName()
return self.LABEL_TYPES[typeLbl]
def move( self, library=None ):
if library is None:
library = self.getLibrary()
newLoc = ClipPreset(self.other(), library, self.niceName(), self.getType())
#perform the move...
Path.move(self, newLoc)
Path.move(self.icon, newLoc.icon)
return newLoc
def copy( self, library=None ):
if library is None:
library = self.library
newLoc = ClipPreset(self.other(), library, self.niceName(), self.getType())
#perform the copy...
Path.copy(self, newLoc)
Path.copy(self.icon, newLoc.icon)
return newLoc
def rename( self, newName ):
'''
newName should be the base name - sans any clip type id or extension...
'''
newName = '%s.%s' % (scrubName(newName), self.getTypeName())
Preset.rename(self, newName)
self.icon.rename(newName)
def delete( self ):
Path.delete(self)
self.icon.delete()
@api.d_noAutoKey
def apply( self, objects, attributes=None, **kwargs ):
presetDict = self.unpickle()
srcObjs = presetDict[ kEXPORT_DICT_OBJECTS ]
clip = presetDict[ kEXPORT_DICT_THE_CLIP ]
#do a version check - if older version clip is being used - perhaps we can write conversion functionality?
try:
ver = presetDict[ kEXPORT_DICT_TOOL_VER ]
if ver != VER:
api.melWarning("the anim clip version don't match!")
except KeyError:
api.melWarning("this is an old VER 1 pose clip - I don't know how to load them anymore...")
return
#generate the name mapping
slamApply = kwargs.get( 'slam', False )
if slamApply:
objects = cmd.ls( typ='transform' )
tgts = names.matchNames( srcObjs, objects, threshold=kDEFAULT_MAPPING_THRESHOLD )
mapping = Mapping( srcObjs, tgts )
else:
tgts = names.matchNames( srcObjs, objects, threshold=kDEFAULT_MAPPING_THRESHOLD )
mapping = Mapping( srcObjs, tgts )
#run the clip's apply method
clip.apply( mapping, attributes, **kwargs )
def getClipObjects( self ):
'''
returns a list of all the object names contained in the clip
'''
presetDict = self.unpickle()
srcObjs = presetDict[ kEXPORT_DICT_OBJECTS ]
return srcObjs
def write( self, objects, **kwargs ):
type = self.getType()
clipDict = api.writeExportDict( TOOL_NAME, VER )
clipDict[ kEXPORT_DICT_CLIP_TYPE ] = type
clipDict[ kEXPORT_DICT_OBJECTS ] = objects
clipDict[ kEXPORT_DICT_WORLDSPACE ] = False
theClip = self.TYPE_CLASSES[ type ]()
success = theClip.generate( objects, **kwargs )
if not success:
printErrorStr( "Failed to generate clip!" )
return
clipDict[ kEXPORT_DICT_THE_CLIP ] = theClip
#write the preset file to disk
self.pickle( clipDict )
#generate the icon for the clip and add it to perforce if appropriate
icon = generateIcon( self )
#icon.asP4().add()
printInfoStr( "Generated clip!" )
class ClipManager(PresetManager):
'''
an abstraction for listing libraries and library clips for clip presets - there are two
main differences between clip presets and other presets - clips have a library which is
a subdir of the main preset dir, and there are also multiple types of clips both with
the same extension.
'''
def __init__( self ):
PresetManager.__init__(self, TOOL_NAME, kEXT)
def getLibraryNames( self ):
'''
returns the names of all libraries under the current mod
'''
libraries = set()
for locale, paths in self.getLibraryPaths().iteritems():
for p in paths:
libName = p.name()
libraries.add(libName)
libraries = list(libraries)
libraries.sort()
return libraries
def getLibraryPaths( self ):
'''
returns a dictionary of library paths keyed using locale. ie:
{LOCAL: [path1, path2, ...], GLOBAL: etc...}
'''
localeDict = {}
for locale in LOCAL, GLOBAL:
localeDict[locale] = libraries = []
dirs = self.getPresetDirs(locale)
libraryNames = set()
for d in dirs:
dLibs = d.dirs()
for dLib in dLibs:
dLibName = dLib[-1]
if dLibName not in libraryNames:
libraries.append(dLib)
libraryNames.add(dLibName)
return localeDict
def createLibrary( self, name ):
newLibraryPath = Preset(LOCAL, TOOL_NAME, name, '')
newLibraryPath.create()
def getLibraryClips( self, library ):
global kEXT
clips = {LOCAL: [], GLOBAL: []}
for locale in LOCAL, GLOBAL:
localeClips = clips[locale]
for dir in getPresetDirs(locale, TOOL_NAME):
dir += library
if not dir.exists:
continue
for f in dir.files():
if f.hasExtension( kEXT ):
f = f.setExtension()
name, type = f[ -1 ].split('.')
f = f[ :-1 ]
type = ClipPreset.LABEL_TYPES[ type ]
localeClips.append( ClipPreset(locale, library, name, type) )
return clips
def getPathToLibrary( self, library, locale=LOCAL ):
return getPresetDirs(locale, TOOL_NAME)[0] / library
def reload( self ):
pass
#end
| __call__ | identifier_name |
textboardcomponent.js | CARNIVAL.registerComponent('net.meta4vr.textboard', function () {
/*
Params:
textScale
backgroundColor
width
height
textLines
transparentBackground
*/
/*
There are some subtleties when using transparent backgrounds.
The shader discards fragments with alpha <0.9, so you need to set the alpha value of the bg to something less than that.
The canvas rendering uses a blend function that dithers between the foreground color and the background color, including the relative alpha values.
By setting the background alpha slightly below 0.9 you should get a slight outline around the text of the background color. This is quite visually
pleasing and has the side effect of antialiasing the text somewhat.
If you set the alpha too low, your outline will end up black or nonexistent which can make the text look pretty choppy.
*/
/*
Note also that there seems to be something of a performance cost to using a shader that discards a lot of fragments, as in the case of transparent
backgrounds. Not sure why this is :-|
*/
/*
Known issues:
reset() is kinda quirky when dealing with transparent backgrounds
Transparent backgrounds generally have a few quirks. Best to use them sparingly.
*/
/* would be good to standardise this - make it so that we don't have to manually pass in rotationQuaternon etc. Ideally the entire p from params should
be passed to the constructor and also mined for */
var drawableclass = CARNIVAL.shape.SegmentedRectangle;
var meta = {
ident: 'net.meta4vr.textboard'
};
var TextBoard = function (params) {
CARNIVAL.component.Component.call(this, params, drawableclass);
//
// this._paramsOrig = params;
// var p = params || {};
// this.size = {maxX: p.width || 2, maxY: p.height || 2};
// superclass.call(this,
// p.position || {x:0, y:0, z:0},
// this.size,
// p.orientation || {x:0, y:0, z:0},
// {segmentsX:1, segmentsY:1, textureLabel:'orange', materialLabel:'matteplastic', label:p.label || null, rotationQuaternion:p.rotationQuaternion}
// );
var cfg = (params || {}).config || {};
var input = (params || {}).input || {};
this.textScale = cfg.textScale || 1; // Use this to scale the text
this.canvasScale = cfg.canvasScale || 200; // generally better not to mess with this
this.canvas = document.createElement('canvas');
// this.canvas.width = this.canvasScale * this.size.maxX;
// this.canvas.height = this.canvasScale * this.size.maxY;
this.canvas.width = this.canvasScale * this.drawParams.size.width;
this.canvas.height = this.canvasScale * this.drawParams.size.height;
this.ctx = this.canvas.getContext('2d');
this.currentTextLines = input.textLines || [];
this.currentText = input.text || null;
this.transparentBackground = cfg.transparentBackground || false;
if (this.transparentBackground) {
this.shaderLabel = 'basic'; /* TODO make a special shader for this */
}
this.backgroundColor = cfg.backgroundColor || (this.transparentBackground && 'rgba(0,0,255,0.89)' || 'rgba(0,0,255,1)');
var fslh = this.calculateFontSizeAndLineHeight(this.canvasScale, this.textScale);
this.boardRenderState = {
font: cfg.font || 'Arial',
fontSize: cfg.fontSize || fslh.fontSize,
lineHeight: cfg.lineHeight || fslh.lineHeight,
textColor: cfg.textColor || 'white',
// backgroundColor: p.backgroundColor || 'blue',
leftMargin: cfg.leftMargin || 4,
topMargin: cfg.topMargin || 4,
style: cfg.style || ''
};
this.cursor = null;
this.tex = null;
};
TextBoard.prototype = Object.create(CARNIVAL.component.Component.prototype);
// TextBoard.prototype.serialize = function () {
// var component = this;
// var mat = component.transformationMatrix();
//
// var getPos = function () {
// var trans = vec3.create();
// mat4.getTranslation(trans, mat);
// return trans;
// // return component.position; /* This should extract trans and rot from the transform matrix */
// }
// var getRot = function () {
// var rot = quat.create();
// mat4.getRotation(rot, mat);
// return rot;
// // return component.orientation;
// }
//
// return {
// component: this.meta.ident,
// parameters: {
// textLines: this._paramsOrig.textLines,
// position: getPos(),
// rotationQuaternion: getRot(),
// orientation: this._paramsOrig.orientation,
// width: this._paramsOrig.width,
// height: this._paramsOrig.height
// }
// }
// }
TextBoard.prototype.calculateFontSizeAndLineHeight = function (canvasScale, textScale) {
return {
fontSize: textScale * (canvasScale/5),
lineHeight: textScale * (canvasScale/4.8)
}
}
TextBoard.prototype.clear = function (color, suppressUpdate) {
color = color || this.backgroundColor;
// this.ctx.fillStyle = 'black';
// this.ctx.fillRect(0, 0, this.canvas.width, this.canvas.height);
this.ctx.fillStyle = color;
this.ctx.fillRect(0, 0, this.canvas.width, this.canvas.height);
if (!suppressUpdate) this.updateTexture();
}
TextBoard.prototype.reset = function () {
/* Just clearing the canvas doesn't work properly when we're using transparent backgrounds */
var newCanvas = document.createElement('canvas');
newCanvas.width = this.canvas.width;
newCanvas.height = this.canvas.height;
this.canvas = newCanvas;
this.ctx = this.canvas.getContext('2d');
this.cursor = null;
this.clear();
// this.textLines = [];
}
TextBoard.prototype.getTexture = function () {
var gl = CARNIVAL.engine.gl;
this.tex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, this.tex);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
return this.tex;
}
TextBoard.prototype.updateTexture = function () {
var gl = CARNIVAL.engine.gl;
gl.bindTexture(gl.TEXTURE_2D, this.tex);
// gl.pixelStorei(gl.UNPACK_FLIP_Y_WEBGL, true);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, this.canvas);
}
TextBoard.prototype.renderTextLines = function (textLines) {
var board = this;
var rstate = board.boardRenderState;
var text = null;
if (!board.cursor) board.cursor = {x:rstate.leftMargin, y:rstate.topMargin};
for (var i = 0; i < textLines.length; i++) {
var line = textLines[i];
rstate.font = line.font || rstate.font;
rstate.fontSize = line.fontSize || rstate.fontSize;
rstate.textColor = line.textColor || rstate.textColor;
rstate.backgroundColor = line.backgroundColor || rstate.backgroundColor;
rstate.lineHeight = line.lineHeight || rstate.lineHeight;
rstate.leftMargin = line.leftMargin || rstate.leftMargin;
rstate.topMargin = line.topMargin || rstate.topMargin;
rstate.style = line.style || rstate.style;
text = line.text || null;
if (text) {
// var ctx = board.canvas.getContext('2d');
board.ctx.fillStyle = rstate.textColor;
board.ctx.font = "@ST@ @SZ@px @F@".replace('@SZ@', rstate.fontSize).replace('@F@', rstate.font).replace('@ST@', rstate.style);
// console.log('drawing text', text, board.cursor.x, board.cursor.y+rstate.fontSize)
board.ctx.fillText(text, board.cursor.x, board.cursor.y+rstate.fontSize);
board.cursor.y += rstate.lineHeight;
}
}
board.updateTexture();
}
TextBoard.prototype.addTextLines = function (lines) {
for (var i = 0; i < lines.length; i++) {
this.currentTextLines.push(lines[i]); |
TextBoard.prototype.setText = function (lines) {
this.currentTextLines = lines;
this.reset();
this.renderTextLines(this.currentTextLines);
}
TextBoard.prototype.prepare = function () {
var board = this;
board.drawable.texture = board.getTexture();
board.clear(board.backgroundColor, true);
if (board.currentTextLines.length) {
board.renderTextLines(board.currentTextLines);
}
else if (board.currentText) {
board.addTextLines([{text:board.currentText}]);
}
// board.updateTexture();
return new Promise(function (resolve, reject) {
resolve(board);
});
}
TextBoard.prototype.meta = {
ident: 'net.meta4vr.textboard',
config: [
{ident:'textScale', title:'Text Scale', type:'float'}
],
input: [
{ident:'text', title:'Text', type:'text'}
// {ident:'textLines', title:'Text Lines', type:'array,text'}
]
};
return TextBoard;
}()); | }
// this.currentTextLines.push(line);
this.reset();
this.renderTextLines(this.currentTextLines);
} | random_line_split |
textboardcomponent.js | CARNIVAL.registerComponent('net.meta4vr.textboard', function () {
/*
Params:
textScale
backgroundColor
width
height
textLines
transparentBackground
*/
/*
There are some subtleties when using transparent backgrounds.
The shader discards fragments with alpha <0.9, so you need to set the alpha value of the bg to something less than that.
The canvas rendering uses a blend function that dithers between the foreground color and the background color, including the relative alpha values.
By setting the background alpha slightly below 0.9 you should get a slight outline around the text of the background color. This is quite visually
pleasing and has the side effect of antialiasing the text somewhat.
If you set the alpha too low, your outline will end up black or nonexistent which can make the text look pretty choppy.
*/
/*
Note also that there seems to be something of a performance cost to using a shader that discards a lot of fragments, as in the case of transparent
backgrounds. Not sure why this is :-|
*/
/*
Known issues:
reset() is kinda quirky when dealing with transparent backgrounds
Transparent backgrounds generally have a few quirks. Best to use them sparingly.
*/
/* would be good to standardise this - make it so that we don't have to manually pass in rotationQuaternon etc. Ideally the entire p from params should
be passed to the constructor and also mined for */
var drawableclass = CARNIVAL.shape.SegmentedRectangle;
var meta = {
ident: 'net.meta4vr.textboard'
};
var TextBoard = function (params) {
CARNIVAL.component.Component.call(this, params, drawableclass);
//
// this._paramsOrig = params;
// var p = params || {};
// this.size = {maxX: p.width || 2, maxY: p.height || 2};
// superclass.call(this,
// p.position || {x:0, y:0, z:0},
// this.size,
// p.orientation || {x:0, y:0, z:0},
// {segmentsX:1, segmentsY:1, textureLabel:'orange', materialLabel:'matteplastic', label:p.label || null, rotationQuaternion:p.rotationQuaternion}
// );
var cfg = (params || {}).config || {};
var input = (params || {}).input || {};
this.textScale = cfg.textScale || 1; // Use this to scale the text
this.canvasScale = cfg.canvasScale || 200; // generally better not to mess with this
this.canvas = document.createElement('canvas');
// this.canvas.width = this.canvasScale * this.size.maxX;
// this.canvas.height = this.canvasScale * this.size.maxY;
this.canvas.width = this.canvasScale * this.drawParams.size.width;
this.canvas.height = this.canvasScale * this.drawParams.size.height;
this.ctx = this.canvas.getContext('2d');
this.currentTextLines = input.textLines || [];
this.currentText = input.text || null;
this.transparentBackground = cfg.transparentBackground || false;
if (this.transparentBackground) {
this.shaderLabel = 'basic'; /* TODO make a special shader for this */
}
this.backgroundColor = cfg.backgroundColor || (this.transparentBackground && 'rgba(0,0,255,0.89)' || 'rgba(0,0,255,1)');
var fslh = this.calculateFontSizeAndLineHeight(this.canvasScale, this.textScale);
this.boardRenderState = {
font: cfg.font || 'Arial',
fontSize: cfg.fontSize || fslh.fontSize,
lineHeight: cfg.lineHeight || fslh.lineHeight,
textColor: cfg.textColor || 'white',
// backgroundColor: p.backgroundColor || 'blue',
leftMargin: cfg.leftMargin || 4,
topMargin: cfg.topMargin || 4,
style: cfg.style || ''
};
this.cursor = null;
this.tex = null;
};
TextBoard.prototype = Object.create(CARNIVAL.component.Component.prototype);
// TextBoard.prototype.serialize = function () {
// var component = this;
// var mat = component.transformationMatrix();
//
// var getPos = function () {
// var trans = vec3.create();
// mat4.getTranslation(trans, mat);
// return trans;
// // return component.position; /* This should extract trans and rot from the transform matrix */
// }
// var getRot = function () {
// var rot = quat.create();
// mat4.getRotation(rot, mat);
// return rot;
// // return component.orientation;
// }
//
// return {
// component: this.meta.ident,
// parameters: {
// textLines: this._paramsOrig.textLines,
// position: getPos(),
// rotationQuaternion: getRot(),
// orientation: this._paramsOrig.orientation,
// width: this._paramsOrig.width,
// height: this._paramsOrig.height
// }
// }
// }
TextBoard.prototype.calculateFontSizeAndLineHeight = function (canvasScale, textScale) {
return {
fontSize: textScale * (canvasScale/5),
lineHeight: textScale * (canvasScale/4.8)
}
}
TextBoard.prototype.clear = function (color, suppressUpdate) {
color = color || this.backgroundColor;
// this.ctx.fillStyle = 'black';
// this.ctx.fillRect(0, 0, this.canvas.width, this.canvas.height);
this.ctx.fillStyle = color;
this.ctx.fillRect(0, 0, this.canvas.width, this.canvas.height);
if (!suppressUpdate) this.updateTexture();
}
TextBoard.prototype.reset = function () {
/* Just clearing the canvas doesn't work properly when we're using transparent backgrounds */
var newCanvas = document.createElement('canvas');
newCanvas.width = this.canvas.width;
newCanvas.height = this.canvas.height;
this.canvas = newCanvas;
this.ctx = this.canvas.getContext('2d');
this.cursor = null;
this.clear();
// this.textLines = [];
}
TextBoard.prototype.getTexture = function () {
var gl = CARNIVAL.engine.gl;
this.tex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, this.tex);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
return this.tex;
}
TextBoard.prototype.updateTexture = function () {
var gl = CARNIVAL.engine.gl;
gl.bindTexture(gl.TEXTURE_2D, this.tex);
// gl.pixelStorei(gl.UNPACK_FLIP_Y_WEBGL, true);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, this.canvas);
}
TextBoard.prototype.renderTextLines = function (textLines) {
var board = this;
var rstate = board.boardRenderState;
var text = null;
if (!board.cursor) board.cursor = {x:rstate.leftMargin, y:rstate.topMargin};
for (var i = 0; i < textLines.length; i++) |
board.updateTexture();
}
TextBoard.prototype.addTextLines = function (lines) {
for (var i = 0; i < lines.length; i++) {
this.currentTextLines.push(lines[i]);
}
// this.currentTextLines.push(line);
this.reset();
this.renderTextLines(this.currentTextLines);
}
TextBoard.prototype.setText = function (lines) {
this.currentTextLines = lines;
this.reset();
this.renderTextLines(this.currentTextLines);
}
TextBoard.prototype.prepare = function () {
var board = this;
board.drawable.texture = board.getTexture();
board.clear(board.backgroundColor, true);
if (board.currentTextLines.length) {
board.renderTextLines(board.currentTextLines);
}
else if (board.currentText) {
board.addTextLines([{text:board.currentText}]);
}
// board.updateTexture();
return new Promise(function (resolve, reject) {
resolve(board);
});
}
TextBoard.prototype.meta = {
ident: 'net.meta4vr.textboard',
config: [
{ident:'textScale', title:'Text Scale', type:'float'}
],
input: [
{ident:'text', title:'Text', type:'text'}
// {ident:'textLines', title:'Text Lines', type:'array,text'}
]
};
return TextBoard;
}()); | {
var line = textLines[i];
rstate.font = line.font || rstate.font;
rstate.fontSize = line.fontSize || rstate.fontSize;
rstate.textColor = line.textColor || rstate.textColor;
rstate.backgroundColor = line.backgroundColor || rstate.backgroundColor;
rstate.lineHeight = line.lineHeight || rstate.lineHeight;
rstate.leftMargin = line.leftMargin || rstate.leftMargin;
rstate.topMargin = line.topMargin || rstate.topMargin;
rstate.style = line.style || rstate.style;
text = line.text || null;
if (text) {
// var ctx = board.canvas.getContext('2d');
board.ctx.fillStyle = rstate.textColor;
board.ctx.font = "@ST@ @SZ@px @F@".replace('@SZ@', rstate.fontSize).replace('@F@', rstate.font).replace('@ST@', rstate.style);
// console.log('drawing text', text, board.cursor.x, board.cursor.y+rstate.fontSize)
board.ctx.fillText(text, board.cursor.x, board.cursor.y+rstate.fontSize);
board.cursor.y += rstate.lineHeight;
}
} | conditional_block |
section_0771_to_0788.rs | //! @ The |align_state| and |preamble| variables are initialized elsewhere.
//!
//! @<Set init...@>=
//! align_ptr:=null; cur_align:=null; cur_span:=null; cur_loop:=null;
//! cur_head:=null; cur_tail:=null;
//!
//! @ Alignment stack maintenance is handled by a pair of trivial routines
//! called |push_alignment| and |pop_alignment|.
//!
//! @p procedure push_alignment;
//! var p:pointer; {the new alignment stack node}
//! begin p:=get_node(align_stack_node_size);
//! link(p):=align_ptr; info(p):=cur_align;
//! llink(p):=preamble; rlink(p):=cur_span;
//! mem[p+2].int:=cur_loop; mem[p+3].int:=align_state;
//! info(p+4):=cur_head; link(p+4):=cur_tail;
//! align_ptr:=p;
//! cur_head:=get_avail;
//! end;
//! @#
//! procedure pop_alignment;
//! var p:pointer; {the top alignment stack node}
//! begin free_avail(cur_head);
//! p:=align_ptr;
//! cur_tail:=link(p+4); cur_head:=info(p+4);
//! align_state:=mem[p+3].int; cur_loop:=mem[p+2].int;
//! cur_span:=rlink(p); preamble:=llink(p);
//! cur_align:=info(p); align_ptr:=link(p);
//! free_node(p,align_stack_node_size);
//! end;
//!
//! @ \TeX\ has eight procedures that govern alignments: |init_align| and
//! |fin_align| are used at the very beginning and the very end; |init_row| and
//! |fin_row| are used at the beginning and end of individual rows; |init_span|
//! is used at the beginning of a sequence of spanned columns (possibly involving
//! only one column); |init_col| and |fin_col| are used at the beginning and
//! end of individual columns; and |align_peek| is used after \.{\\cr} to see
//! whether the next item is \.{\\noalign}.
//!
//! We shall consider these routines in the order they are first used during
//! the course of a complete \.{\\halign}, namely |init_align|, |align_peek|,
//! |init_row|, |init_span|, |init_col|, |fin_col|, |fin_row|, |fin_align|.
//!
//! @ When \.{\\halign} or \.{\\valign} has been scanned in an appropriate
//! mode, \TeX\ calls |init_align|, whose task is to get everything off to a
//! good start. This mostly involves scanning the preamble and putting its
//! information into the preamble list.
//! @^preamble@>
//! | //! procedure@?align_peek; forward;@t\2@>@/
//! procedure@?normal_paragraph; forward;@t\2@>@/
//! procedure init_align;
//! label done, done1, done2, continue;
//! var save_cs_ptr:pointer; {|warning_index| value for error messages}
//! @!p:pointer; {for short-term temporary use}
//! begin save_cs_ptr:=cur_cs; {\.{\\halign} or \.{\\valign}, usually}
//! push_alignment; align_state:=-1000000; {enter a new alignment level}
//! @<Check for improper alignment in displayed math@>;
//! push_nest; {enter a new semantic level}
//! @<Change current mode to |-vmode| for \.{\\halign}, |-hmode| for \.{\\valign}@>;
//! scan_spec(align_group,false);@/
//! @<Scan the preamble and record it in the |preamble| list@>;
//! new_save_level(align_group);
//! if every_cr<>null then begin_token_list(every_cr,every_cr_text);
//! align_peek; {look for \.{\\noalign} or \.{\\omit}}
//! end;
//!
//! @ In vertical modes, |prev_depth| already has the correct value. But
//! if we are in |mmode| (displayed formula mode), we reach out to the
//! enclosing vertical mode for the |prev_depth| value that produces the
//! correct baseline calculations.
//!
//! @<Change current mode...@>=
//! if mode=mmode then
//! begin mode:=-vmode; prev_depth:=nest[nest_ptr-2].aux_field.sc;
//! end
//! else if mode>0 then negate(mode)
//!
//! @ When \.{\\halign} is used as a displayed formula, there should be
//! no other pieces of mlists present.
//!
//! @<Check for improper alignment in displayed math@>=
//! if (mode=mmode)and((tail<>head)or(incompleat_noad<>null)) then
//! begin print_err("Improper "); print_esc("halign"); print(" inside $$'s");
//! @.Improper \\halign...@>
//! help3("Displays can use special alignments (like \eqalignno)")@/
//! ("only if nothing but the alignment itself is between $$'s.")@/
//! ("So I've deleted the formulas that preceded this alignment.");
//! error; flush_math;
//! end
//!
//! @ @<Scan the preamble and record it in the |preamble| list@>=
//! preamble:=null; cur_align:=align_head; cur_loop:=null; scanner_status:=aligning;
//! warning_index:=save_cs_ptr; align_state:=-1000000;
//! {at this point, |cur_cmd=left_brace|}
//! loop@+ begin @<Append the current tabskip glue to the preamble list@>;
//! if cur_cmd=car_ret then goto done; {\.{\\cr} ends the preamble}
//! @<Scan preamble text until |cur_cmd| is |tab_mark| or |car_ret|,
//! looking for changes in the tabskip glue; append an
//! alignrecord to the preamble list@>;
//! end;
//! done: scanner_status:=normal
//!
//! @ @<Append the current tabskip glue to the preamble list@>=
//! link(cur_align):=new_param_glue(tab_skip_code);
//! cur_align:=link(cur_align)
//!
//! @ @<Scan preamble text until |cur_cmd| is |tab_mark| or |car_ret|...@>=
//! @<Scan the template \<u_j>, putting the resulting token list in |hold_head|@>;
//! link(cur_align):=new_null_box; cur_align:=link(cur_align); {a new alignrecord}
//! info(cur_align):=end_span; width(cur_align):=null_flag;
//! u_part(cur_align):=link(hold_head);
//! @<Scan the template \<v_j>, putting the resulting token list in |hold_head|@>;
//! v_part(cur_align):=link(hold_head)
//!
//! @ We enter `\.{\\span}' into |eqtb| with |tab_mark| as its command code,
//! and with |span_code| as the command modifier. This makes \TeX\ interpret it
//! essentially the same as an alignment delimiter like `\.\&', yet it is
//! recognizably different when we need to distinguish it from a normal delimiter.
//! It also turns out to be useful to give a special |cr_code| to `\.{\\cr}',
//! and an even larger |cr_cr_code| to `\.{\\crcr}'.
//!
//! The end of a template is represented by two ``frozen'' control sequences
//! called \.{\\endtemplate}. The first has the command code |end_template|, which
//! is |>outer_call|, so it will not easily disappear in the presence of errors.
//! The |get_x_token| routine converts the first into the second, which has |endv|
//! as its command code.
//!
//! @d span_code=256 {distinct from any character}
//! @d cr_code=257 {distinct from |span_code| and from any character}
//! @d cr_cr_code=cr_code+1 {this distinguishes \.{\\crcr} from \.{\\cr}}
//! @d end_template_token==cs_token_flag+frozen_end_template
//!
//! @<Put each of \TeX's primitives into the hash table@>=
//! primitive("span",tab_mark,span_code);@/
//! @!@:span_}{\.{\\span} primitive@>
//! primitive("cr",car_ret,cr_code);
//! @!@:cr_}{\.{\\cr} primitive@>
//! text(frozen_cr):="cr"; eqtb[frozen_cr]:=eqtb[cur_val];@/
//! primitive("crcr",car_ret,cr_cr_code);
//! @!@:cr_cr_}{\.{\\crcr} primitive@>
//! text(frozen_end_template):="endtemplate"; text(frozen_endv):="endtemplate";
//! eq_type(frozen_endv):=endv; equiv(frozen_endv):=null_list;
//! eq_level(frozen_endv):=level_one;@/
//! eqtb[frozen_end_template]:=eqtb[frozen_endv];
//! eq_type(frozen_end_template):=end_template;
//!
//! @ @<Cases of |print_cmd_chr|...@>=
//! tab_mark: if chr_code=span_code then print_esc("span")
//! else chr_cmd("alignment tab character ");
//! car_ret: if chr_code=cr_code then print_esc("cr")
//! else print_esc("crcr");
//!
//! @ The preamble is copied directly, except that \.{\\tabskip} causes a change
//! to the tabskip glue, thereby possibly expanding macros that immediately
//! follow it. An appearance of \.{\\span} also causes such an expansion.
//!
//! Note that if the preamble contains `\.{\\global\\tabskip}', the `\.{\\global}'
//! token survives in the preamble and the `\.{\\tabskip}' defines new
//! tabskip glue (locally).
//!
//! @<Declare the procedure called |get_preamble_token|@>=
//! procedure get_preamble_token;
//! label restart;
//! begin restart: get_token;
//! while (cur_chr=span_code)and(cur_cmd=tab_mark) do
//! begin get_token; {this token will be expanded once}
//! if cur_cmd>max_command then
//! begin expand; get_token;
//! end;
//! end;
//! if cur_cmd=endv then
//! fatal_error("(interwoven alignment preambles are not allowed)");
//! @.interwoven alignment preambles...@>
//! if (cur_cmd=assign_glue)and(cur_chr=glue_base+tab_skip_code) then
//! begin scan_optional_equals; scan_glue(glue_val);
//! if global_defs>0 then geq_define(glue_base+tab_skip_code,glue_ref,cur_val)
//! else eq_define(glue_base+tab_skip_code,glue_ref,cur_val);
//! goto restart;
//! end;
//! end;
//!
//! @ Spaces are eliminated from the beginning of a template.
//!
//! @<Scan the template \<u_j>...@>=
//! p:=hold_head; link(p):=null;
//! loop@+ begin get_preamble_token;
//! if cur_cmd=mac_param then goto done1;
//! if (cur_cmd<=car_ret)and(cur_cmd>=tab_mark)and(align_state=-1000000) then
//! if (p=hold_head)and(cur_loop=null)and(cur_cmd=tab_mark)
//! then cur_loop:=cur_align
//! else begin print_err("Missing # inserted in alignment preamble");
//! @.Missing \# inserted...@>
//! help3("There should be exactly one # between &'s, when an")@/
//! ("\halign or \valign is being set up. In this case you had")@/
//! ("none, so I've put one in; maybe that will work.");
//! back_error; goto done1;
//! end
//! else if (cur_cmd<>spacer)or(p<>hold_head) then
//! begin link(p):=get_avail; p:=link(p); info(p):=cur_tok;
//! end;
//! end;
//! done1:
//!
//! @ @<Scan the template \<v_j>...@>=
//! p:=hold_head; link(p):=null;
//! loop@+ begin continue: get_preamble_token;
//! if (cur_cmd<=car_ret)and(cur_cmd>=tab_mark)and(align_state=-1000000) then
//! goto done2;
//! if cur_cmd=mac_param then
//! begin print_err("Only one # is allowed per tab");
//! @.Only one \# is allowed...@>
//! help3("There should be exactly one # between &'s, when an")@/
//! ("\halign or \valign is being set up. In this case you had")@/
//! ("more than one, so I'm ignoring all but the first.");
//! error; goto continue;
//! end;
//! link(p):=get_avail; p:=link(p); info(p):=cur_tok;
//! end;
//! done2: link(p):=get_avail; p:=link(p);
//! info(p):=end_template_token {put \.{\\endtemplate} at the end}
//!
//! @ The tricky part about alignments is getting the templates into the
//! scanner at the right time, and recovering control when a row or column
//! is finished.
//!
//! We usually begin a row after each \.{\\cr} has been sensed, unless that
//! \.{\\cr} is followed by \.{\\noalign} or by the right brace that terminates
//! the alignment. The |align_peek| routine is used to look ahead and do
//! the right thing; it either gets a new row started, or gets a \.{\\noalign}
//! started, or finishes off the alignment.
//!
//! @<Declare the procedure called |align_peek|@>=
//! procedure align_peek;
//! label restart;
//! begin restart: align_state:=1000000; @<Get the next non-blank non-call token@>;
//! if cur_cmd=no_align then
//! begin scan_left_brace; new_save_level(no_align_group);
//! if mode=-vmode then normal_paragraph;
//! end
//! else if cur_cmd=right_brace then fin_align
//! else if (cur_cmd=car_ret)and(cur_chr=cr_cr_code) then
//! goto restart {ignore \.{\\crcr}}
//! else begin init_row; {start a new row}
//! init_col; {start a new column and replace what we peeked at}
//! end;
//! end;
//!
//! @ To start a row (i.e., a `row' that rhymes with `dough' but not with `bough'),
//! we enter a new semantic level, copy the first tabskip glue, and change
//! from internal vertical mode to restricted horizontal mode or vice versa.
//! The |space_factor| and |prev_depth| are not used on this semantic level,
//! but we clear them to zero just to be tidy.
//!
//! @p @t\4@>@<Declare the procedure called |init_span|@>@t@>@/
//! procedure init_row;
//! begin push_nest; mode:=(-hmode-vmode)-mode;
//! if mode=-hmode then space_factor:=0 @+else prev_depth:=0;
//! tail_append(new_glue(glue_ptr(preamble)));
//! subtype(tail):=tab_skip_code+1;@/
//! cur_align:=link(preamble); cur_tail:=cur_head; init_span(cur_align);
//! end;
//!
//! @ The parameter to |init_span| is a pointer to the alignrecord where the
//! next column or group of columns will begin. A new semantic level is
//! entered, so that the columns will generate a list for subsequent packaging.
//!
//! @<Declare the procedure called |init_span|@>=
//! procedure init_span(@!p:pointer);
//! begin push_nest;
//! if mode=-hmode then space_factor:=1000
//! else begin prev_depth:=ignore_depth; normal_paragraph;
//! end;
//! cur_span:=p;
//! end;
//!
//! @ When a column begins, we assume that |cur_cmd| is either |omit| or else
//! the current token should be put back into the input until the \<u_j>
//! template has been scanned. (Note that |cur_cmd| might be |tab_mark| or
//! |car_ret|.) We also assume that |align_state| is approximately 1000000 at
//! this time. We remain in the same mode, and start the template if it is
//! called for.
//!
//! @p procedure init_col;
//! begin extra_info(cur_align):=cur_cmd;
//! if cur_cmd=omit then align_state:=0
//! else begin back_input; begin_token_list(u_part(cur_align),u_template);
//! end; {now |align_state=1000000|}
//! end;
//! | //! @p @t\4@>@<Declare the procedure called |get_preamble_token|@>@t@>@/ | random_line_split |
http.rs | //! Simple HTTP implementation which supports both async and traditional execution environments
//! with minimal dependencies. This is used as the basis for REST and RPC clients.
use chunked_transfer;
use serde_json;
use std::convert::TryFrom;
use std::fmt;
#[cfg(not(feature = "tokio"))]
use std::io::Write;
use std::net::{SocketAddr, ToSocketAddrs};
use std::time::Duration;
#[cfg(feature = "tokio")]
use tokio::io::{AsyncBufReadExt, AsyncReadExt, AsyncWriteExt};
#[cfg(feature = "tokio")]
use tokio::net::TcpStream;
#[cfg(not(feature = "tokio"))]
use std::io::BufRead;
use std::io::Read;
#[cfg(not(feature = "tokio"))]
use std::net::TcpStream;
/// Timeout for operations on TCP streams.
const TCP_STREAM_TIMEOUT: Duration = Duration::from_secs(5);
/// Timeout for reading the first byte of a response. This is separate from the general read
/// timeout as it is not uncommon for Bitcoin Core to be blocked waiting on UTXO cache flushes for
/// upwards of 10 minutes on slow devices (e.g. RPis with SSDs over USB). Note that we always retry
/// once when we time out, so the maximum time we allow Bitcoin Core to block for is twice this
/// value.
const TCP_STREAM_RESPONSE_TIMEOUT: Duration = Duration::from_secs(300);
/// Maximum HTTP message header size in bytes.
const MAX_HTTP_MESSAGE_HEADER_SIZE: usize = 8192;
/// Maximum HTTP message body size in bytes. Enough for a hex-encoded block in JSON format and any
/// overhead for HTTP chunked transfer encoding.
const MAX_HTTP_MESSAGE_BODY_SIZE: usize = 2 * 4_000_000 + 32_000;
/// Endpoint for interacting with an HTTP-based API.
#[derive(Debug)]
pub struct HttpEndpoint {
host: String,
port: Option<u16>,
path: String,
}
impl HttpEndpoint {
/// Creates an endpoint for the given host and default HTTP port.
pub fn for_host(host: String) -> Self {
Self {
host,
port: None,
path: String::from("/"),
}
}
/// Specifies a port to use with the endpoint.
pub fn with_port(mut self, port: u16) -> Self {
self.port = Some(port);
self
}
/// Specifies a path to use with the endpoint.
pub fn with_path(mut self, path: String) -> Self {
self.path = path;
self
}
/// Returns the endpoint host.
pub fn host(&self) -> &str {
&self.host
}
/// Returns the endpoint port.
pub fn port(&self) -> u16 {
match self.port {
None => 80,
Some(port) => port,
}
}
/// Returns the endpoint path.
pub fn path(&self) -> &str {
&self.path
}
}
impl<'a> std::net::ToSocketAddrs for &'a HttpEndpoint {
type Iter = <(&'a str, u16) as std::net::ToSocketAddrs>::Iter;
fn to_socket_addrs(&self) -> std::io::Result<Self::Iter> {
(self.host(), self.port()).to_socket_addrs()
}
}
/// Client for making HTTP requests.
pub(crate) struct HttpClient {
address: SocketAddr,
stream: TcpStream,
}
impl HttpClient {
/// Opens a connection to an HTTP endpoint.
pub fn connect<E: ToSocketAddrs>(endpoint: E) -> std::io::Result<Self> {
let address = match endpoint.to_socket_addrs()?.next() {
None => {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, "could not resolve to any addresses"));
},
Some(address) => address,
};
let stream = std::net::TcpStream::connect_timeout(&address, TCP_STREAM_TIMEOUT)?;
stream.set_read_timeout(Some(TCP_STREAM_TIMEOUT))?;
stream.set_write_timeout(Some(TCP_STREAM_TIMEOUT))?;
#[cfg(feature = "tokio")]
let stream = {
stream.set_nonblocking(true)?;
TcpStream::from_std(stream)?
};
Ok(Self { address, stream })
}
/// Sends a `GET` request for a resource identified by `uri` at the `host`.
///
/// Returns the response body in `F` format.
#[allow(dead_code)]
pub async fn get<F>(&mut self, uri: &str, host: &str) -> std::io::Result<F>
where F: TryFrom<Vec<u8>, Error = std::io::Error> {
let request = format!(
"GET {} HTTP/1.1\r\n\
Host: {}\r\n\
Connection: keep-alive\r\n\
\r\n", uri, host);
let response_body = self.send_request_with_retry(&request).await?;
F::try_from(response_body)
}
/// Sends a `POST` request for a resource identified by `uri` at the `host` using the given HTTP
/// authentication credentials.
///
/// The request body consists of the provided JSON `content`. Returns the response body in `F`
/// format.
#[allow(dead_code)]
pub async fn post<F>(&mut self, uri: &str, host: &str, auth: &str, content: serde_json::Value) -> std::io::Result<F>
where F: TryFrom<Vec<u8>, Error = std::io::Error> {
let content = content.to_string();
let request = format!(
"POST {} HTTP/1.1\r\n\
Host: {}\r\n\
Authorization: {}\r\n\
Connection: keep-alive\r\n\
Content-Type: application/json\r\n\
Content-Length: {}\r\n\
\r\n\
{}", uri, host, auth, content.len(), content);
let response_body = self.send_request_with_retry(&request).await?;
F::try_from(response_body)
}
/// Sends an HTTP request message and reads the response, returning its body. Attempts to
/// reconnect and retry if the connection has been closed.
async fn send_request_with_retry(&mut self, request: &str) -> std::io::Result<Vec<u8>> {
match self.send_request(request).await {
Ok(bytes) => Ok(bytes),
Err(_) => {
// Reconnect and retry on fail. This can happen if the connection was closed after
// the keep-alive limits are reached, or generally if the request timed out due to
// Bitcoin Core being stuck on a long-running operation or its RPC queue being
// full.
// Block 100ms before retrying the request as in many cases the source of the error
// may be persistent for some time.
#[cfg(feature = "tokio")]
tokio::time::sleep(Duration::from_millis(100)).await;
#[cfg(not(feature = "tokio"))]
std::thread::sleep(Duration::from_millis(100));
*self = Self::connect(self.address)?;
self.send_request(request).await
},
}
}
/// Sends an HTTP request message and reads the response, returning its body.
async fn send_request(&mut self, request: &str) -> std::io::Result<Vec<u8>> {
self.write_request(request).await?;
self.read_response().await
}
/// Writes an HTTP request message.
async fn write_request(&mut self, request: &str) -> std::io::Result<()> {
#[cfg(feature = "tokio")]
{
self.stream.write_all(request.as_bytes()).await?;
self.stream.flush().await
}
#[cfg(not(feature = "tokio"))]
{
self.stream.write_all(request.as_bytes())?;
self.stream.flush()
}
}
/// Reads an HTTP response message.
async fn read_response(&mut self) -> std::io::Result<Vec<u8>> {
#[cfg(feature = "tokio")]
let stream = self.stream.split().0;
#[cfg(not(feature = "tokio"))]
let stream = std::io::Read::by_ref(&mut self.stream);
let limited_stream = stream.take(MAX_HTTP_MESSAGE_HEADER_SIZE as u64);
#[cfg(feature = "tokio")]
let mut reader = tokio::io::BufReader::new(limited_stream);
#[cfg(not(feature = "tokio"))]
let mut reader = std::io::BufReader::new(limited_stream);
macro_rules! read_line {
() => { read_line!(0) };
($retry_count: expr) => { {
let mut line = String::new();
let mut timeout_count: u64 = 0;
let bytes_read = loop {
#[cfg(feature = "tokio")]
let read_res = reader.read_line(&mut line).await;
#[cfg(not(feature = "tokio"))]
let read_res = reader.read_line(&mut line);
match read_res {
Ok(bytes_read) => break bytes_read,
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
timeout_count += 1;
if timeout_count > $retry_count {
return Err(e);
} else {
continue;
}
}
Err(e) => return Err(e),
}
};
match bytes_read {
0 => None,
_ => {
// Remove trailing CRLF
if line.ends_with('\n') { line.pop(); if line.ends_with('\r') { line.pop(); } }
Some(line)
},
}
} }
}
// Read and parse status line
// Note that we allow retrying a few times to reach TCP_STREAM_RESPONSE_TIMEOUT.
let status_line = read_line!(TCP_STREAM_RESPONSE_TIMEOUT.as_secs() / TCP_STREAM_TIMEOUT.as_secs())
.ok_or(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "no status line"))?;
let status = HttpStatus::parse(&status_line)?;
// Read and parse relevant headers
let mut message_length = HttpMessageLength::Empty;
loop {
let line = read_line!()
.ok_or(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "no headers"))?;
if line.is_empty() { break; }
let header = HttpHeader::parse(&line)?;
if header.has_name("Content-Length") {
let length = header.value.parse()
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
if let HttpMessageLength::Empty = message_length {
message_length = HttpMessageLength::ContentLength(length);
}
continue;
}
if header.has_name("Transfer-Encoding") {
message_length = HttpMessageLength::TransferEncoding(header.value.into());
continue;
}
}
// Read message body
let read_limit = MAX_HTTP_MESSAGE_BODY_SIZE - reader.buffer().len();
reader.get_mut().set_limit(read_limit as u64);
let contents = match message_length {
HttpMessageLength::Empty => { Vec::new() },
HttpMessageLength::ContentLength(length) => {
if length == 0 || length > MAX_HTTP_MESSAGE_BODY_SIZE {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "out of range"))
} else {
let mut content = vec![0; length];
#[cfg(feature = "tokio")]
reader.read_exact(&mut content[..]).await?;
#[cfg(not(feature = "tokio"))]
reader.read_exact(&mut content[..])?;
content
}
},
HttpMessageLength::TransferEncoding(coding) => {
if !coding.eq_ignore_ascii_case("chunked") {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput, "unsupported transfer coding"))
} else {
let mut content = Vec::new();
#[cfg(feature = "tokio")]
{
// Since chunked_transfer doesn't have an async interface, only use it to
// determine the size of each chunk to read.
//
// TODO: Replace with an async interface when available.
// https://github.com/frewsxcv/rust-chunked-transfer/issues/7
loop {
// Read the chunk header which contains the chunk size.
let mut chunk_header = String::new();
reader.read_line(&mut chunk_header).await?;
if chunk_header == "0\r\n" |
// Decode the chunk header to obtain the chunk size.
let mut buffer = Vec::new();
let mut decoder = chunked_transfer::Decoder::new(chunk_header.as_bytes());
decoder.read_to_end(&mut buffer)?;
// Read the chunk body.
let chunk_size = match decoder.remaining_chunks_size() {
None => break,
Some(chunk_size) => chunk_size,
};
let chunk_offset = content.len();
content.resize(chunk_offset + chunk_size + "\r\n".len(), 0);
reader.read_exact(&mut content[chunk_offset..]).await?;
content.resize(chunk_offset + chunk_size, 0);
}
content
}
#[cfg(not(feature = "tokio"))]
{
let mut decoder = chunked_transfer::Decoder::new(reader);
decoder.read_to_end(&mut content)?;
content
}
}
},
};
if !status.is_ok() {
// TODO: Handle 3xx redirection responses.
let error = HttpError {
status_code: status.code.to_string(),
contents,
};
return Err(std::io::Error::new(std::io::ErrorKind::Other, error));
}
Ok(contents)
}
}
/// HTTP error consisting of a status code and body contents.
#[derive(Debug)]
pub(crate) struct HttpError {
pub(crate) status_code: String,
pub(crate) contents: Vec<u8>,
}
impl std::error::Error for HttpError {}
impl fmt::Display for HttpError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let contents = String::from_utf8_lossy(&self.contents);
write!(f, "status_code: {}, contents: {}", self.status_code, contents)
}
}
/// HTTP response status code as defined by [RFC 7231].
///
/// [RFC 7231]: https://tools.ietf.org/html/rfc7231#section-6
struct HttpStatus<'a> {
code: &'a str,
}
impl<'a> HttpStatus<'a> {
/// Parses an HTTP status line as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.1.2
fn parse(line: &'a String) -> std::io::Result<HttpStatus<'a>> {
let mut tokens = line.splitn(3, ' ');
let http_version = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no HTTP-Version"))?;
if !http_version.eq_ignore_ascii_case("HTTP/1.1") &&
!http_version.eq_ignore_ascii_case("HTTP/1.0") {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid HTTP-Version"));
}
let code = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no Status-Code"))?;
if code.len() != 3 || !code.chars().all(|c| c.is_ascii_digit()) {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid Status-Code"));
}
let _reason = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no Reason-Phrase"))?;
Ok(Self { code })
}
/// Returns whether the status is successful (i.e., 2xx status class).
fn is_ok(&self) -> bool {
self.code.starts_with('2')
}
}
/// HTTP response header as defined by [RFC 7231].
///
/// [RFC 7231]: https://tools.ietf.org/html/rfc7231#section-7
struct HttpHeader<'a> {
name: &'a str,
value: &'a str,
}
impl<'a> HttpHeader<'a> {
/// Parses an HTTP header field as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.2
fn parse(line: &'a String) -> std::io::Result<HttpHeader<'a>> {
let mut tokens = line.splitn(2, ':');
let name = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no header name"))?;
let value = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no header value"))?
.trim_start();
Ok(Self { name, value })
}
/// Returns whether the header field has the given name.
fn has_name(&self, name: &str) -> bool {
self.name.eq_ignore_ascii_case(name)
}
}
/// HTTP message body length as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.3.3
enum HttpMessageLength {
Empty,
ContentLength(usize),
TransferEncoding(String),
}
/// An HTTP response body in binary format.
pub struct BinaryResponse(pub Vec<u8>);
/// An HTTP response body in JSON format.
pub struct JsonResponse(pub serde_json::Value);
/// Interprets bytes from an HTTP response body as binary data.
impl TryFrom<Vec<u8>> for BinaryResponse {
type Error = std::io::Error;
fn try_from(bytes: Vec<u8>) -> std::io::Result<Self> {
Ok(BinaryResponse(bytes))
}
}
/// Interprets bytes from an HTTP response body as a JSON value.
impl TryFrom<Vec<u8>> for JsonResponse {
type Error = std::io::Error;
fn try_from(bytes: Vec<u8>) -> std::io::Result<Self> {
Ok(JsonResponse(serde_json::from_slice(&bytes)?))
}
}
#[cfg(test)]
mod endpoint_tests {
use super::HttpEndpoint;
#[test]
fn with_default_port() {
let endpoint = HttpEndpoint::for_host("foo.com".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.port(), 80);
}
#[test]
fn with_custom_port() {
let endpoint = HttpEndpoint::for_host("foo.com".into()).with_port(8080);
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.port(), 8080);
}
#[test]
fn with_uri_path() {
let endpoint = HttpEndpoint::for_host("foo.com".into()).with_path("/path".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.path(), "/path");
}
#[test]
fn without_uri_path() {
let endpoint = HttpEndpoint::for_host("foo.com".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.path(), "/");
}
#[test]
fn convert_to_socket_addrs() {
let endpoint = HttpEndpoint::for_host("foo.com".into());
let host = endpoint.host();
let port = endpoint.port();
use std::net::ToSocketAddrs;
match (&endpoint).to_socket_addrs() {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(mut socket_addrs) => {
match socket_addrs.next() {
None => panic!("Expected socket address"),
Some(addr) => {
assert_eq!(addr, (host, port).to_socket_addrs().unwrap().next().unwrap());
assert!(socket_addrs.next().is_none());
}
}
}
}
}
}
#[cfg(test)]
pub(crate) mod client_tests {
use super::*;
use std::io::BufRead;
use std::io::Write;
/// Server for handling HTTP client requests with a stock response.
pub struct HttpServer {
address: std::net::SocketAddr,
handler: std::thread::JoinHandle<()>,
shutdown: std::sync::Arc<std::sync::atomic::AtomicBool>,
}
/// Body of HTTP response messages.
pub enum MessageBody<T: ToString> {
Empty,
Content(T),
ChunkedContent(T),
}
impl HttpServer {
fn responding_with_body<T: ToString>(status: &str, body: MessageBody<T>) -> Self {
let response = match body {
MessageBody::Empty => format!("{}\r\n\r\n", status),
MessageBody::Content(body) => {
let body = body.to_string();
format!(
"{}\r\n\
Content-Length: {}\r\n\
\r\n\
{}", status, body.len(), body)
},
MessageBody::ChunkedContent(body) => {
let mut chuncked_body = Vec::new();
{
use chunked_transfer::Encoder;
let mut encoder = Encoder::with_chunks_size(&mut chuncked_body, 8);
encoder.write_all(body.to_string().as_bytes()).unwrap();
}
format!(
"{}\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
{}", status, String::from_utf8(chuncked_body).unwrap())
},
};
HttpServer::responding_with(response)
}
pub fn responding_with_ok<T: ToString>(body: MessageBody<T>) -> Self {
HttpServer::responding_with_body("HTTP/1.1 200 OK", body)
}
pub fn responding_with_not_found() -> Self {
HttpServer::responding_with_body::<String>("HTTP/1.1 404 Not Found", MessageBody::Empty)
}
pub fn responding_with_server_error<T: ToString>(content: T) -> Self {
let body = MessageBody::Content(content);
HttpServer::responding_with_body("HTTP/1.1 500 Internal Server Error", body)
}
fn responding_with(response: String) -> Self {
let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
let address = listener.local_addr().unwrap();
let shutdown = std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false));
let shutdown_signaled = std::sync::Arc::clone(&shutdown);
let handler = std::thread::spawn(move || {
for stream in listener.incoming() {
let mut stream = stream.unwrap();
stream.set_write_timeout(Some(TCP_STREAM_TIMEOUT)).unwrap();
let lines_read = std::io::BufReader::new(&stream)
.lines()
.take_while(|line| !line.as_ref().unwrap().is_empty())
.count();
if lines_read == 0 { continue; }
for chunk in response.as_bytes().chunks(16) {
if shutdown_signaled.load(std::sync::atomic::Ordering::SeqCst) {
return;
} else {
if let Err(_) = stream.write(chunk) { break; }
if let Err(_) = stream.flush() { break; }
}
}
}
});
Self { address, handler, shutdown }
}
fn shutdown(self) {
self.shutdown.store(true, std::sync::atomic::Ordering::SeqCst);
self.handler.join().unwrap();
}
pub fn endpoint(&self) -> HttpEndpoint {
HttpEndpoint::for_host(self.address.ip().to_string()).with_port(self.address.port())
}
}
#[test]
fn connect_to_unresolvable_host() {
match HttpClient::connect(("example.invalid", 80)) {
Err(e) => {
assert!(e.to_string().contains("failed to lookup address information") ||
e.to_string().contains("No such host"), "{:?}", e);
},
Ok(_) => panic!("Expected error"),
}
}
#[test]
fn connect_with_no_socket_address() {
match HttpClient::connect(&vec![][..]) {
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::InvalidInput),
Ok(_) => panic!("Expected error"),
}
}
#[test]
fn connect_with_unknown_server() {
match HttpClient::connect(("::", 80)) {
#[cfg(target_os = "windows")]
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::AddrNotAvailable),
#[cfg(not(target_os = "windows"))]
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::ConnectionRefused),
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn connect_with_valid_endpoint() {
let server = HttpServer::responding_with_ok::<String>(MessageBody::Empty);
match HttpClient::connect(&server.endpoint()) {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(_) => {},
}
}
#[tokio::test]
async fn read_empty_message() {
let server = HttpServer::responding_with("".to_string());
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof);
assert_eq!(e.get_ref().unwrap().to_string(), "no status line");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_incomplete_message() {
let server = HttpServer::responding_with("HTTP/1.1 200 OK".to_string());
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof);
assert_eq!(e.get_ref().unwrap().to_string(), "no headers");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_too_large_message_headers() {
let response = format!(
"HTTP/1.1 302 Found\r\n\
Location: {}\r\n\
\r\n", "Z".repeat(MAX_HTTP_MESSAGE_HEADER_SIZE));
let server = HttpServer::responding_with(response);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof);
assert_eq!(e.get_ref().unwrap().to_string(), "no headers");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_too_large_message_body() {
let body = "Z".repeat(MAX_HTTP_MESSAGE_BODY_SIZE + 1);
let server = HttpServer::responding_with_ok::<String>(MessageBody::Content(body));
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::InvalidData);
assert_eq!(e.get_ref().unwrap().to_string(), "out of range");
},
Ok(_) => panic!("Expected error"),
}
server.shutdown();
}
#[tokio::test]
async fn read_message_with_unsupported_transfer_coding() {
let response = String::from(
"HTTP/1.1 200 OK\r\n\
Transfer-Encoding: gzip\r\n\
\r\n\
foobar");
let server = HttpServer::responding_with(response);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::InvalidInput);
assert_eq!(e.get_ref().unwrap().to_string(), "unsupported transfer coding");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_error() {
let server = HttpServer::responding_with_server_error("foo");
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<JsonResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::Other);
let http_error = e.into_inner().unwrap().downcast::<HttpError>().unwrap();
assert_eq!(http_error.status_code, "500");
assert_eq!(http_error.contents, "foo".as_bytes());
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_empty_message_body() {
let server = HttpServer::responding_with_ok::<String>(MessageBody::Empty);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(bytes) => assert_eq!(bytes.0, Vec::<u8>::new()),
}
}
#[tokio::test]
async fn read_message_body_with_length() {
let body = "foo bar baz qux".repeat(32);
let content = MessageBody::Content(body.clone());
let server = HttpServer::responding_with_ok::<String>(content);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(bytes) => assert_eq!(bytes.0, body.as_bytes()),
}
}
#[tokio::test]
async fn read_chunked_message_body() {
let body = "foo bar baz qux".repeat(32);
let chunked_content = MessageBody::ChunkedContent(body.clone());
let server = HttpServer::responding_with_ok::<String>(chunked_content);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(bytes) => assert_eq!(bytes.0, body.as_bytes()),
}
}
#[tokio::test]
async fn reconnect_closed_connection() {
let server = HttpServer::responding_with_ok::<String>(MessageBody::Empty);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
assert!(client.get::<BinaryResponse>("/foo", "foo.com").await.is_ok());
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(bytes) => assert_eq!(bytes.0, Vec::<u8>::new()),
}
}
#[test]
fn from_bytes_into_binary_response() {
let bytes = b"foo";
match BinaryResponse::try_from(bytes.to_vec()) {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(response) => assert_eq!(&response.0, bytes),
}
}
#[test]
fn from_invalid_bytes_into_json_response() {
let json = serde_json::json!({ "result": 42 });
match JsonResponse::try_from(json.to_string().as_bytes()[..5].to_vec()) {
Err(_) => {},
Ok(_) => panic!("Expected error"),
}
}
#[test]
fn from_valid_bytes_into_json_response() {
let json = serde_json::json!({ "result": 42 });
match JsonResponse::try_from(json.to_string().as_bytes().to_vec()) {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(response) => assert_eq!(response.0, json),
}
}
}
| {
// Read the terminator chunk since the decoder consumes the CRLF
// immediately when this chunk is encountered.
reader.read_line(&mut chunk_header).await?;
} | conditional_block |
http.rs | //! Simple HTTP implementation which supports both async and traditional execution environments
//! with minimal dependencies. This is used as the basis for REST and RPC clients.
use chunked_transfer;
use serde_json;
use std::convert::TryFrom;
use std::fmt;
#[cfg(not(feature = "tokio"))]
use std::io::Write;
use std::net::{SocketAddr, ToSocketAddrs};
use std::time::Duration;
#[cfg(feature = "tokio")]
use tokio::io::{AsyncBufReadExt, AsyncReadExt, AsyncWriteExt};
#[cfg(feature = "tokio")]
use tokio::net::TcpStream;
#[cfg(not(feature = "tokio"))]
use std::io::BufRead;
use std::io::Read;
#[cfg(not(feature = "tokio"))]
use std::net::TcpStream;
/// Timeout for operations on TCP streams.
const TCP_STREAM_TIMEOUT: Duration = Duration::from_secs(5);
/// Timeout for reading the first byte of a response. This is separate from the general read
/// timeout as it is not uncommon for Bitcoin Core to be blocked waiting on UTXO cache flushes for
/// upwards of 10 minutes on slow devices (e.g. RPis with SSDs over USB). Note that we always retry
/// once when we time out, so the maximum time we allow Bitcoin Core to block for is twice this
/// value.
const TCP_STREAM_RESPONSE_TIMEOUT: Duration = Duration::from_secs(300);
/// Maximum HTTP message header size in bytes.
const MAX_HTTP_MESSAGE_HEADER_SIZE: usize = 8192;
/// Maximum HTTP message body size in bytes. Enough for a hex-encoded block in JSON format and any
/// overhead for HTTP chunked transfer encoding.
const MAX_HTTP_MESSAGE_BODY_SIZE: usize = 2 * 4_000_000 + 32_000;
/// Endpoint for interacting with an HTTP-based API.
#[derive(Debug)]
pub struct HttpEndpoint {
host: String,
port: Option<u16>,
path: String,
}
impl HttpEndpoint {
/// Creates an endpoint for the given host and default HTTP port.
pub fn for_host(host: String) -> Self {
Self {
host,
port: None,
path: String::from("/"),
}
}
/// Specifies a port to use with the endpoint.
pub fn with_port(mut self, port: u16) -> Self {
self.port = Some(port);
self
}
/// Specifies a path to use with the endpoint.
pub fn with_path(mut self, path: String) -> Self {
self.path = path;
self
}
/// Returns the endpoint host.
pub fn host(&self) -> &str {
&self.host
}
/// Returns the endpoint port.
pub fn port(&self) -> u16 {
match self.port {
None => 80,
Some(port) => port,
}
}
/// Returns the endpoint path.
pub fn path(&self) -> &str {
&self.path
}
}
impl<'a> std::net::ToSocketAddrs for &'a HttpEndpoint {
type Iter = <(&'a str, u16) as std::net::ToSocketAddrs>::Iter;
fn to_socket_addrs(&self) -> std::io::Result<Self::Iter> {
(self.host(), self.port()).to_socket_addrs()
}
}
/// Client for making HTTP requests.
pub(crate) struct HttpClient {
address: SocketAddr,
stream: TcpStream,
}
impl HttpClient {
/// Opens a connection to an HTTP endpoint.
pub fn connect<E: ToSocketAddrs>(endpoint: E) -> std::io::Result<Self> {
let address = match endpoint.to_socket_addrs()?.next() {
None => {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, "could not resolve to any addresses"));
},
Some(address) => address,
};
let stream = std::net::TcpStream::connect_timeout(&address, TCP_STREAM_TIMEOUT)?;
stream.set_read_timeout(Some(TCP_STREAM_TIMEOUT))?;
stream.set_write_timeout(Some(TCP_STREAM_TIMEOUT))?;
#[cfg(feature = "tokio")]
let stream = {
stream.set_nonblocking(true)?;
TcpStream::from_std(stream)?
};
Ok(Self { address, stream })
}
/// Sends a `GET` request for a resource identified by `uri` at the `host`.
///
/// Returns the response body in `F` format.
#[allow(dead_code)]
pub async fn get<F>(&mut self, uri: &str, host: &str) -> std::io::Result<F>
where F: TryFrom<Vec<u8>, Error = std::io::Error> {
let request = format!(
"GET {} HTTP/1.1\r\n\
Host: {}\r\n\
Connection: keep-alive\r\n\
\r\n", uri, host);
let response_body = self.send_request_with_retry(&request).await?;
F::try_from(response_body)
}
/// Sends a `POST` request for a resource identified by `uri` at the `host` using the given HTTP
/// authentication credentials.
///
/// The request body consists of the provided JSON `content`. Returns the response body in `F`
/// format.
#[allow(dead_code)]
pub async fn post<F>(&mut self, uri: &str, host: &str, auth: &str, content: serde_json::Value) -> std::io::Result<F>
where F: TryFrom<Vec<u8>, Error = std::io::Error> {
let content = content.to_string();
let request = format!(
"POST {} HTTP/1.1\r\n\
Host: {}\r\n\
Authorization: {}\r\n\
Connection: keep-alive\r\n\
Content-Type: application/json\r\n\
Content-Length: {}\r\n\
\r\n\
{}", uri, host, auth, content.len(), content);
let response_body = self.send_request_with_retry(&request).await?;
F::try_from(response_body)
}
/// Sends an HTTP request message and reads the response, returning its body. Attempts to
/// reconnect and retry if the connection has been closed.
async fn send_request_with_retry(&mut self, request: &str) -> std::io::Result<Vec<u8>> {
match self.send_request(request).await {
Ok(bytes) => Ok(bytes),
Err(_) => {
// Reconnect and retry on fail. This can happen if the connection was closed after
// the keep-alive limits are reached, or generally if the request timed out due to
// Bitcoin Core being stuck on a long-running operation or its RPC queue being
// full.
// Block 100ms before retrying the request as in many cases the source of the error
// may be persistent for some time.
#[cfg(feature = "tokio")]
tokio::time::sleep(Duration::from_millis(100)).await;
#[cfg(not(feature = "tokio"))]
std::thread::sleep(Duration::from_millis(100));
*self = Self::connect(self.address)?;
self.send_request(request).await
},
}
}
/// Sends an HTTP request message and reads the response, returning its body.
async fn send_request(&mut self, request: &str) -> std::io::Result<Vec<u8>> {
self.write_request(request).await?;
self.read_response().await
}
/// Writes an HTTP request message.
async fn write_request(&mut self, request: &str) -> std::io::Result<()> {
#[cfg(feature = "tokio")]
{
self.stream.write_all(request.as_bytes()).await?;
self.stream.flush().await
}
#[cfg(not(feature = "tokio"))]
{
self.stream.write_all(request.as_bytes())?;
self.stream.flush()
}
}
/// Reads an HTTP response message.
async fn read_response(&mut self) -> std::io::Result<Vec<u8>> {
#[cfg(feature = "tokio")]
let stream = self.stream.split().0;
#[cfg(not(feature = "tokio"))]
let stream = std::io::Read::by_ref(&mut self.stream);
let limited_stream = stream.take(MAX_HTTP_MESSAGE_HEADER_SIZE as u64);
#[cfg(feature = "tokio")]
let mut reader = tokio::io::BufReader::new(limited_stream);
#[cfg(not(feature = "tokio"))]
let mut reader = std::io::BufReader::new(limited_stream);
macro_rules! read_line {
() => { read_line!(0) };
($retry_count: expr) => { {
let mut line = String::new();
let mut timeout_count: u64 = 0;
let bytes_read = loop {
#[cfg(feature = "tokio")]
let read_res = reader.read_line(&mut line).await;
#[cfg(not(feature = "tokio"))]
let read_res = reader.read_line(&mut line);
match read_res {
Ok(bytes_read) => break bytes_read,
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
timeout_count += 1;
if timeout_count > $retry_count {
return Err(e);
} else {
continue;
}
}
Err(e) => return Err(e),
}
};
match bytes_read {
0 => None,
_ => {
// Remove trailing CRLF
if line.ends_with('\n') { line.pop(); if line.ends_with('\r') { line.pop(); } }
Some(line)
},
}
} }
}
// Read and parse status line
// Note that we allow retrying a few times to reach TCP_STREAM_RESPONSE_TIMEOUT.
let status_line = read_line!(TCP_STREAM_RESPONSE_TIMEOUT.as_secs() / TCP_STREAM_TIMEOUT.as_secs())
.ok_or(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "no status line"))?;
let status = HttpStatus::parse(&status_line)?;
// Read and parse relevant headers
let mut message_length = HttpMessageLength::Empty;
loop {
let line = read_line!()
.ok_or(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "no headers"))?;
if line.is_empty() { break; }
let header = HttpHeader::parse(&line)?;
if header.has_name("Content-Length") {
let length = header.value.parse()
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
if let HttpMessageLength::Empty = message_length {
message_length = HttpMessageLength::ContentLength(length);
}
continue;
}
if header.has_name("Transfer-Encoding") {
message_length = HttpMessageLength::TransferEncoding(header.value.into());
continue;
}
}
// Read message body
let read_limit = MAX_HTTP_MESSAGE_BODY_SIZE - reader.buffer().len();
reader.get_mut().set_limit(read_limit as u64);
let contents = match message_length {
HttpMessageLength::Empty => { Vec::new() },
HttpMessageLength::ContentLength(length) => {
if length == 0 || length > MAX_HTTP_MESSAGE_BODY_SIZE {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "out of range"))
} else {
let mut content = vec![0; length];
#[cfg(feature = "tokio")]
reader.read_exact(&mut content[..]).await?;
#[cfg(not(feature = "tokio"))]
reader.read_exact(&mut content[..])?;
content
}
},
HttpMessageLength::TransferEncoding(coding) => {
if !coding.eq_ignore_ascii_case("chunked") {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput, "unsupported transfer coding"))
} else {
let mut content = Vec::new();
#[cfg(feature = "tokio")]
{
// Since chunked_transfer doesn't have an async interface, only use it to
// determine the size of each chunk to read.
//
// TODO: Replace with an async interface when available.
// https://github.com/frewsxcv/rust-chunked-transfer/issues/7
loop {
// Read the chunk header which contains the chunk size.
let mut chunk_header = String::new();
reader.read_line(&mut chunk_header).await?; | if chunk_header == "0\r\n" {
// Read the terminator chunk since the decoder consumes the CRLF
// immediately when this chunk is encountered.
reader.read_line(&mut chunk_header).await?;
}
// Decode the chunk header to obtain the chunk size.
let mut buffer = Vec::new();
let mut decoder = chunked_transfer::Decoder::new(chunk_header.as_bytes());
decoder.read_to_end(&mut buffer)?;
// Read the chunk body.
let chunk_size = match decoder.remaining_chunks_size() {
None => break,
Some(chunk_size) => chunk_size,
};
let chunk_offset = content.len();
content.resize(chunk_offset + chunk_size + "\r\n".len(), 0);
reader.read_exact(&mut content[chunk_offset..]).await?;
content.resize(chunk_offset + chunk_size, 0);
}
content
}
#[cfg(not(feature = "tokio"))]
{
let mut decoder = chunked_transfer::Decoder::new(reader);
decoder.read_to_end(&mut content)?;
content
}
}
},
};
if !status.is_ok() {
// TODO: Handle 3xx redirection responses.
let error = HttpError {
status_code: status.code.to_string(),
contents,
};
return Err(std::io::Error::new(std::io::ErrorKind::Other, error));
}
Ok(contents)
}
}
/// HTTP error consisting of a status code and body contents.
#[derive(Debug)]
pub(crate) struct HttpError {
pub(crate) status_code: String,
pub(crate) contents: Vec<u8>,
}
impl std::error::Error for HttpError {}
impl fmt::Display for HttpError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let contents = String::from_utf8_lossy(&self.contents);
write!(f, "status_code: {}, contents: {}", self.status_code, contents)
}
}
/// HTTP response status code as defined by [RFC 7231].
///
/// [RFC 7231]: https://tools.ietf.org/html/rfc7231#section-6
struct HttpStatus<'a> {
code: &'a str,
}
impl<'a> HttpStatus<'a> {
/// Parses an HTTP status line as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.1.2
fn parse(line: &'a String) -> std::io::Result<HttpStatus<'a>> {
let mut tokens = line.splitn(3, ' ');
let http_version = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no HTTP-Version"))?;
if !http_version.eq_ignore_ascii_case("HTTP/1.1") &&
!http_version.eq_ignore_ascii_case("HTTP/1.0") {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid HTTP-Version"));
}
let code = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no Status-Code"))?;
if code.len() != 3 || !code.chars().all(|c| c.is_ascii_digit()) {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid Status-Code"));
}
let _reason = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no Reason-Phrase"))?;
Ok(Self { code })
}
/// Returns whether the status is successful (i.e., 2xx status class).
fn is_ok(&self) -> bool {
self.code.starts_with('2')
}
}
/// HTTP response header as defined by [RFC 7231].
///
/// [RFC 7231]: https://tools.ietf.org/html/rfc7231#section-7
struct HttpHeader<'a> {
name: &'a str,
value: &'a str,
}
impl<'a> HttpHeader<'a> {
/// Parses an HTTP header field as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.2
fn parse(line: &'a String) -> std::io::Result<HttpHeader<'a>> {
let mut tokens = line.splitn(2, ':');
let name = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no header name"))?;
let value = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no header value"))?
.trim_start();
Ok(Self { name, value })
}
/// Returns whether the header field has the given name.
fn has_name(&self, name: &str) -> bool {
self.name.eq_ignore_ascii_case(name)
}
}
/// HTTP message body length as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.3.3
enum HttpMessageLength {
Empty,
ContentLength(usize),
TransferEncoding(String),
}
/// An HTTP response body in binary format.
pub struct BinaryResponse(pub Vec<u8>);
/// An HTTP response body in JSON format.
pub struct JsonResponse(pub serde_json::Value);
/// Interprets bytes from an HTTP response body as binary data.
impl TryFrom<Vec<u8>> for BinaryResponse {
type Error = std::io::Error;
fn try_from(bytes: Vec<u8>) -> std::io::Result<Self> {
Ok(BinaryResponse(bytes))
}
}
/// Interprets bytes from an HTTP response body as a JSON value.
impl TryFrom<Vec<u8>> for JsonResponse {
type Error = std::io::Error;
fn try_from(bytes: Vec<u8>) -> std::io::Result<Self> {
Ok(JsonResponse(serde_json::from_slice(&bytes)?))
}
}
#[cfg(test)]
mod endpoint_tests {
use super::HttpEndpoint;
#[test]
fn with_default_port() {
let endpoint = HttpEndpoint::for_host("foo.com".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.port(), 80);
}
#[test]
fn with_custom_port() {
let endpoint = HttpEndpoint::for_host("foo.com".into()).with_port(8080);
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.port(), 8080);
}
#[test]
fn with_uri_path() {
let endpoint = HttpEndpoint::for_host("foo.com".into()).with_path("/path".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.path(), "/path");
}
#[test]
fn without_uri_path() {
let endpoint = HttpEndpoint::for_host("foo.com".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.path(), "/");
}
#[test]
fn convert_to_socket_addrs() {
let endpoint = HttpEndpoint::for_host("foo.com".into());
let host = endpoint.host();
let port = endpoint.port();
use std::net::ToSocketAddrs;
match (&endpoint).to_socket_addrs() {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(mut socket_addrs) => {
match socket_addrs.next() {
None => panic!("Expected socket address"),
Some(addr) => {
assert_eq!(addr, (host, port).to_socket_addrs().unwrap().next().unwrap());
assert!(socket_addrs.next().is_none());
}
}
}
}
}
}
#[cfg(test)]
pub(crate) mod client_tests {
use super::*;
use std::io::BufRead;
use std::io::Write;
/// Server for handling HTTP client requests with a stock response.
pub struct HttpServer {
address: std::net::SocketAddr,
handler: std::thread::JoinHandle<()>,
shutdown: std::sync::Arc<std::sync::atomic::AtomicBool>,
}
/// Body of HTTP response messages.
pub enum MessageBody<T: ToString> {
Empty,
Content(T),
ChunkedContent(T),
}
impl HttpServer {
fn responding_with_body<T: ToString>(status: &str, body: MessageBody<T>) -> Self {
let response = match body {
MessageBody::Empty => format!("{}\r\n\r\n", status),
MessageBody::Content(body) => {
let body = body.to_string();
format!(
"{}\r\n\
Content-Length: {}\r\n\
\r\n\
{}", status, body.len(), body)
},
MessageBody::ChunkedContent(body) => {
let mut chuncked_body = Vec::new();
{
use chunked_transfer::Encoder;
let mut encoder = Encoder::with_chunks_size(&mut chuncked_body, 8);
encoder.write_all(body.to_string().as_bytes()).unwrap();
}
format!(
"{}\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
{}", status, String::from_utf8(chuncked_body).unwrap())
},
};
HttpServer::responding_with(response)
}
pub fn responding_with_ok<T: ToString>(body: MessageBody<T>) -> Self {
HttpServer::responding_with_body("HTTP/1.1 200 OK", body)
}
pub fn responding_with_not_found() -> Self {
HttpServer::responding_with_body::<String>("HTTP/1.1 404 Not Found", MessageBody::Empty)
}
pub fn responding_with_server_error<T: ToString>(content: T) -> Self {
let body = MessageBody::Content(content);
HttpServer::responding_with_body("HTTP/1.1 500 Internal Server Error", body)
}
fn responding_with(response: String) -> Self {
let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
let address = listener.local_addr().unwrap();
let shutdown = std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false));
let shutdown_signaled = std::sync::Arc::clone(&shutdown);
let handler = std::thread::spawn(move || {
for stream in listener.incoming() {
let mut stream = stream.unwrap();
stream.set_write_timeout(Some(TCP_STREAM_TIMEOUT)).unwrap();
let lines_read = std::io::BufReader::new(&stream)
.lines()
.take_while(|line| !line.as_ref().unwrap().is_empty())
.count();
if lines_read == 0 { continue; }
for chunk in response.as_bytes().chunks(16) {
if shutdown_signaled.load(std::sync::atomic::Ordering::SeqCst) {
return;
} else {
if let Err(_) = stream.write(chunk) { break; }
if let Err(_) = stream.flush() { break; }
}
}
}
});
Self { address, handler, shutdown }
}
fn shutdown(self) {
self.shutdown.store(true, std::sync::atomic::Ordering::SeqCst);
self.handler.join().unwrap();
}
pub fn endpoint(&self) -> HttpEndpoint {
HttpEndpoint::for_host(self.address.ip().to_string()).with_port(self.address.port())
}
}
#[test]
fn connect_to_unresolvable_host() {
match HttpClient::connect(("example.invalid", 80)) {
Err(e) => {
assert!(e.to_string().contains("failed to lookup address information") ||
e.to_string().contains("No such host"), "{:?}", e);
},
Ok(_) => panic!("Expected error"),
}
}
#[test]
fn connect_with_no_socket_address() {
match HttpClient::connect(&vec![][..]) {
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::InvalidInput),
Ok(_) => panic!("Expected error"),
}
}
#[test]
fn connect_with_unknown_server() {
match HttpClient::connect(("::", 80)) {
#[cfg(target_os = "windows")]
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::AddrNotAvailable),
#[cfg(not(target_os = "windows"))]
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::ConnectionRefused),
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn connect_with_valid_endpoint() {
let server = HttpServer::responding_with_ok::<String>(MessageBody::Empty);
match HttpClient::connect(&server.endpoint()) {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(_) => {},
}
}
#[tokio::test]
async fn read_empty_message() {
let server = HttpServer::responding_with("".to_string());
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof);
assert_eq!(e.get_ref().unwrap().to_string(), "no status line");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_incomplete_message() {
let server = HttpServer::responding_with("HTTP/1.1 200 OK".to_string());
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof);
assert_eq!(e.get_ref().unwrap().to_string(), "no headers");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_too_large_message_headers() {
let response = format!(
"HTTP/1.1 302 Found\r\n\
Location: {}\r\n\
\r\n", "Z".repeat(MAX_HTTP_MESSAGE_HEADER_SIZE));
let server = HttpServer::responding_with(response);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof);
assert_eq!(e.get_ref().unwrap().to_string(), "no headers");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_too_large_message_body() {
let body = "Z".repeat(MAX_HTTP_MESSAGE_BODY_SIZE + 1);
let server = HttpServer::responding_with_ok::<String>(MessageBody::Content(body));
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::InvalidData);
assert_eq!(e.get_ref().unwrap().to_string(), "out of range");
},
Ok(_) => panic!("Expected error"),
}
server.shutdown();
}
#[tokio::test]
async fn read_message_with_unsupported_transfer_coding() {
let response = String::from(
"HTTP/1.1 200 OK\r\n\
Transfer-Encoding: gzip\r\n\
\r\n\
foobar");
let server = HttpServer::responding_with(response);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::InvalidInput);
assert_eq!(e.get_ref().unwrap().to_string(), "unsupported transfer coding");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_error() {
let server = HttpServer::responding_with_server_error("foo");
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<JsonResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::Other);
let http_error = e.into_inner().unwrap().downcast::<HttpError>().unwrap();
assert_eq!(http_error.status_code, "500");
assert_eq!(http_error.contents, "foo".as_bytes());
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_empty_message_body() {
let server = HttpServer::responding_with_ok::<String>(MessageBody::Empty);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(bytes) => assert_eq!(bytes.0, Vec::<u8>::new()),
}
}
#[tokio::test]
async fn read_message_body_with_length() {
let body = "foo bar baz qux".repeat(32);
let content = MessageBody::Content(body.clone());
let server = HttpServer::responding_with_ok::<String>(content);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(bytes) => assert_eq!(bytes.0, body.as_bytes()),
}
}
#[tokio::test]
async fn read_chunked_message_body() {
let body = "foo bar baz qux".repeat(32);
let chunked_content = MessageBody::ChunkedContent(body.clone());
let server = HttpServer::responding_with_ok::<String>(chunked_content);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(bytes) => assert_eq!(bytes.0, body.as_bytes()),
}
}
#[tokio::test]
async fn reconnect_closed_connection() {
let server = HttpServer::responding_with_ok::<String>(MessageBody::Empty);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
assert!(client.get::<BinaryResponse>("/foo", "foo.com").await.is_ok());
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(bytes) => assert_eq!(bytes.0, Vec::<u8>::new()),
}
}
#[test]
fn from_bytes_into_binary_response() {
let bytes = b"foo";
match BinaryResponse::try_from(bytes.to_vec()) {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(response) => assert_eq!(&response.0, bytes),
}
}
#[test]
fn from_invalid_bytes_into_json_response() {
let json = serde_json::json!({ "result": 42 });
match JsonResponse::try_from(json.to_string().as_bytes()[..5].to_vec()) {
Err(_) => {},
Ok(_) => panic!("Expected error"),
}
}
#[test]
fn from_valid_bytes_into_json_response() {
let json = serde_json::json!({ "result": 42 });
match JsonResponse::try_from(json.to_string().as_bytes().to_vec()) {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(response) => assert_eq!(response.0, json),
}
}
} | random_line_split | |
http.rs | //! Simple HTTP implementation which supports both async and traditional execution environments
//! with minimal dependencies. This is used as the basis for REST and RPC clients.
use chunked_transfer;
use serde_json;
use std::convert::TryFrom;
use std::fmt;
#[cfg(not(feature = "tokio"))]
use std::io::Write;
use std::net::{SocketAddr, ToSocketAddrs};
use std::time::Duration;
#[cfg(feature = "tokio")]
use tokio::io::{AsyncBufReadExt, AsyncReadExt, AsyncWriteExt};
#[cfg(feature = "tokio")]
use tokio::net::TcpStream;
#[cfg(not(feature = "tokio"))]
use std::io::BufRead;
use std::io::Read;
#[cfg(not(feature = "tokio"))]
use std::net::TcpStream;
/// Timeout for operations on TCP streams.
const TCP_STREAM_TIMEOUT: Duration = Duration::from_secs(5);
/// Timeout for reading the first byte of a response. This is separate from the general read
/// timeout as it is not uncommon for Bitcoin Core to be blocked waiting on UTXO cache flushes for
/// upwards of 10 minutes on slow devices (e.g. RPis with SSDs over USB). Note that we always retry
/// once when we time out, so the maximum time we allow Bitcoin Core to block for is twice this
/// value.
const TCP_STREAM_RESPONSE_TIMEOUT: Duration = Duration::from_secs(300);
/// Maximum HTTP message header size in bytes.
const MAX_HTTP_MESSAGE_HEADER_SIZE: usize = 8192;
/// Maximum HTTP message body size in bytes. Enough for a hex-encoded block in JSON format and any
/// overhead for HTTP chunked transfer encoding.
const MAX_HTTP_MESSAGE_BODY_SIZE: usize = 2 * 4_000_000 + 32_000;
/// Endpoint for interacting with an HTTP-based API.
#[derive(Debug)]
pub struct HttpEndpoint {
host: String,
port: Option<u16>,
path: String,
}
impl HttpEndpoint {
/// Creates an endpoint for the given host and default HTTP port.
pub fn for_host(host: String) -> Self {
Self {
host,
port: None,
path: String::from("/"),
}
}
/// Specifies a port to use with the endpoint.
pub fn with_port(mut self, port: u16) -> Self {
self.port = Some(port);
self
}
/// Specifies a path to use with the endpoint.
pub fn with_path(mut self, path: String) -> Self {
self.path = path;
self
}
/// Returns the endpoint host.
pub fn host(&self) -> &str {
&self.host
}
/// Returns the endpoint port.
pub fn port(&self) -> u16 {
match self.port {
None => 80,
Some(port) => port,
}
}
/// Returns the endpoint path.
pub fn path(&self) -> &str {
&self.path
}
}
impl<'a> std::net::ToSocketAddrs for &'a HttpEndpoint {
type Iter = <(&'a str, u16) as std::net::ToSocketAddrs>::Iter;
fn to_socket_addrs(&self) -> std::io::Result<Self::Iter> {
(self.host(), self.port()).to_socket_addrs()
}
}
/// Client for making HTTP requests.
pub(crate) struct HttpClient {
address: SocketAddr,
stream: TcpStream,
}
impl HttpClient {
/// Opens a connection to an HTTP endpoint.
pub fn connect<E: ToSocketAddrs>(endpoint: E) -> std::io::Result<Self> {
let address = match endpoint.to_socket_addrs()?.next() {
None => {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, "could not resolve to any addresses"));
},
Some(address) => address,
};
let stream = std::net::TcpStream::connect_timeout(&address, TCP_STREAM_TIMEOUT)?;
stream.set_read_timeout(Some(TCP_STREAM_TIMEOUT))?;
stream.set_write_timeout(Some(TCP_STREAM_TIMEOUT))?;
#[cfg(feature = "tokio")]
let stream = {
stream.set_nonblocking(true)?;
TcpStream::from_std(stream)?
};
Ok(Self { address, stream })
}
/// Sends a `GET` request for a resource identified by `uri` at the `host`.
///
/// Returns the response body in `F` format.
#[allow(dead_code)]
pub async fn get<F>(&mut self, uri: &str, host: &str) -> std::io::Result<F>
where F: TryFrom<Vec<u8>, Error = std::io::Error> {
let request = format!(
"GET {} HTTP/1.1\r\n\
Host: {}\r\n\
Connection: keep-alive\r\n\
\r\n", uri, host);
let response_body = self.send_request_with_retry(&request).await?;
F::try_from(response_body)
}
/// Sends a `POST` request for a resource identified by `uri` at the `host` using the given HTTP
/// authentication credentials.
///
/// The request body consists of the provided JSON `content`. Returns the response body in `F`
/// format.
#[allow(dead_code)]
pub async fn | <F>(&mut self, uri: &str, host: &str, auth: &str, content: serde_json::Value) -> std::io::Result<F>
where F: TryFrom<Vec<u8>, Error = std::io::Error> {
let content = content.to_string();
let request = format!(
"POST {} HTTP/1.1\r\n\
Host: {}\r\n\
Authorization: {}\r\n\
Connection: keep-alive\r\n\
Content-Type: application/json\r\n\
Content-Length: {}\r\n\
\r\n\
{}", uri, host, auth, content.len(), content);
let response_body = self.send_request_with_retry(&request).await?;
F::try_from(response_body)
}
/// Sends an HTTP request message and reads the response, returning its body. Attempts to
/// reconnect and retry if the connection has been closed.
async fn send_request_with_retry(&mut self, request: &str) -> std::io::Result<Vec<u8>> {
match self.send_request(request).await {
Ok(bytes) => Ok(bytes),
Err(_) => {
// Reconnect and retry on fail. This can happen if the connection was closed after
// the keep-alive limits are reached, or generally if the request timed out due to
// Bitcoin Core being stuck on a long-running operation or its RPC queue being
// full.
// Block 100ms before retrying the request as in many cases the source of the error
// may be persistent for some time.
#[cfg(feature = "tokio")]
tokio::time::sleep(Duration::from_millis(100)).await;
#[cfg(not(feature = "tokio"))]
std::thread::sleep(Duration::from_millis(100));
*self = Self::connect(self.address)?;
self.send_request(request).await
},
}
}
/// Sends an HTTP request message and reads the response, returning its body.
async fn send_request(&mut self, request: &str) -> std::io::Result<Vec<u8>> {
self.write_request(request).await?;
self.read_response().await
}
/// Writes an HTTP request message.
async fn write_request(&mut self, request: &str) -> std::io::Result<()> {
#[cfg(feature = "tokio")]
{
self.stream.write_all(request.as_bytes()).await?;
self.stream.flush().await
}
#[cfg(not(feature = "tokio"))]
{
self.stream.write_all(request.as_bytes())?;
self.stream.flush()
}
}
/// Reads an HTTP response message.
async fn read_response(&mut self) -> std::io::Result<Vec<u8>> {
#[cfg(feature = "tokio")]
let stream = self.stream.split().0;
#[cfg(not(feature = "tokio"))]
let stream = std::io::Read::by_ref(&mut self.stream);
let limited_stream = stream.take(MAX_HTTP_MESSAGE_HEADER_SIZE as u64);
#[cfg(feature = "tokio")]
let mut reader = tokio::io::BufReader::new(limited_stream);
#[cfg(not(feature = "tokio"))]
let mut reader = std::io::BufReader::new(limited_stream);
macro_rules! read_line {
() => { read_line!(0) };
($retry_count: expr) => { {
let mut line = String::new();
let mut timeout_count: u64 = 0;
let bytes_read = loop {
#[cfg(feature = "tokio")]
let read_res = reader.read_line(&mut line).await;
#[cfg(not(feature = "tokio"))]
let read_res = reader.read_line(&mut line);
match read_res {
Ok(bytes_read) => break bytes_read,
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
timeout_count += 1;
if timeout_count > $retry_count {
return Err(e);
} else {
continue;
}
}
Err(e) => return Err(e),
}
};
match bytes_read {
0 => None,
_ => {
// Remove trailing CRLF
if line.ends_with('\n') { line.pop(); if line.ends_with('\r') { line.pop(); } }
Some(line)
},
}
} }
}
// Read and parse status line
// Note that we allow retrying a few times to reach TCP_STREAM_RESPONSE_TIMEOUT.
let status_line = read_line!(TCP_STREAM_RESPONSE_TIMEOUT.as_secs() / TCP_STREAM_TIMEOUT.as_secs())
.ok_or(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "no status line"))?;
let status = HttpStatus::parse(&status_line)?;
// Read and parse relevant headers
let mut message_length = HttpMessageLength::Empty;
loop {
let line = read_line!()
.ok_or(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "no headers"))?;
if line.is_empty() { break; }
let header = HttpHeader::parse(&line)?;
if header.has_name("Content-Length") {
let length = header.value.parse()
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
if let HttpMessageLength::Empty = message_length {
message_length = HttpMessageLength::ContentLength(length);
}
continue;
}
if header.has_name("Transfer-Encoding") {
message_length = HttpMessageLength::TransferEncoding(header.value.into());
continue;
}
}
// Read message body
let read_limit = MAX_HTTP_MESSAGE_BODY_SIZE - reader.buffer().len();
reader.get_mut().set_limit(read_limit as u64);
let contents = match message_length {
HttpMessageLength::Empty => { Vec::new() },
HttpMessageLength::ContentLength(length) => {
if length == 0 || length > MAX_HTTP_MESSAGE_BODY_SIZE {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "out of range"))
} else {
let mut content = vec![0; length];
#[cfg(feature = "tokio")]
reader.read_exact(&mut content[..]).await?;
#[cfg(not(feature = "tokio"))]
reader.read_exact(&mut content[..])?;
content
}
},
HttpMessageLength::TransferEncoding(coding) => {
if !coding.eq_ignore_ascii_case("chunked") {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput, "unsupported transfer coding"))
} else {
let mut content = Vec::new();
#[cfg(feature = "tokio")]
{
// Since chunked_transfer doesn't have an async interface, only use it to
// determine the size of each chunk to read.
//
// TODO: Replace with an async interface when available.
// https://github.com/frewsxcv/rust-chunked-transfer/issues/7
loop {
// Read the chunk header which contains the chunk size.
let mut chunk_header = String::new();
reader.read_line(&mut chunk_header).await?;
if chunk_header == "0\r\n" {
// Read the terminator chunk since the decoder consumes the CRLF
// immediately when this chunk is encountered.
reader.read_line(&mut chunk_header).await?;
}
// Decode the chunk header to obtain the chunk size.
let mut buffer = Vec::new();
let mut decoder = chunked_transfer::Decoder::new(chunk_header.as_bytes());
decoder.read_to_end(&mut buffer)?;
// Read the chunk body.
let chunk_size = match decoder.remaining_chunks_size() {
None => break,
Some(chunk_size) => chunk_size,
};
let chunk_offset = content.len();
content.resize(chunk_offset + chunk_size + "\r\n".len(), 0);
reader.read_exact(&mut content[chunk_offset..]).await?;
content.resize(chunk_offset + chunk_size, 0);
}
content
}
#[cfg(not(feature = "tokio"))]
{
let mut decoder = chunked_transfer::Decoder::new(reader);
decoder.read_to_end(&mut content)?;
content
}
}
},
};
if !status.is_ok() {
// TODO: Handle 3xx redirection responses.
let error = HttpError {
status_code: status.code.to_string(),
contents,
};
return Err(std::io::Error::new(std::io::ErrorKind::Other, error));
}
Ok(contents)
}
}
/// HTTP error consisting of a status code and body contents.
#[derive(Debug)]
pub(crate) struct HttpError {
pub(crate) status_code: String,
pub(crate) contents: Vec<u8>,
}
impl std::error::Error for HttpError {}
impl fmt::Display for HttpError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let contents = String::from_utf8_lossy(&self.contents);
write!(f, "status_code: {}, contents: {}", self.status_code, contents)
}
}
/// HTTP response status code as defined by [RFC 7231].
///
/// [RFC 7231]: https://tools.ietf.org/html/rfc7231#section-6
struct HttpStatus<'a> {
code: &'a str,
}
impl<'a> HttpStatus<'a> {
/// Parses an HTTP status line as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.1.2
fn parse(line: &'a String) -> std::io::Result<HttpStatus<'a>> {
let mut tokens = line.splitn(3, ' ');
let http_version = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no HTTP-Version"))?;
if !http_version.eq_ignore_ascii_case("HTTP/1.1") &&
!http_version.eq_ignore_ascii_case("HTTP/1.0") {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid HTTP-Version"));
}
let code = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no Status-Code"))?;
if code.len() != 3 || !code.chars().all(|c| c.is_ascii_digit()) {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid Status-Code"));
}
let _reason = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no Reason-Phrase"))?;
Ok(Self { code })
}
/// Returns whether the status is successful (i.e., 2xx status class).
fn is_ok(&self) -> bool {
self.code.starts_with('2')
}
}
/// HTTP response header as defined by [RFC 7231].
///
/// [RFC 7231]: https://tools.ietf.org/html/rfc7231#section-7
struct HttpHeader<'a> {
name: &'a str,
value: &'a str,
}
impl<'a> HttpHeader<'a> {
/// Parses an HTTP header field as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.2
fn parse(line: &'a String) -> std::io::Result<HttpHeader<'a>> {
let mut tokens = line.splitn(2, ':');
let name = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no header name"))?;
let value = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no header value"))?
.trim_start();
Ok(Self { name, value })
}
/// Returns whether the header field has the given name.
fn has_name(&self, name: &str) -> bool {
self.name.eq_ignore_ascii_case(name)
}
}
/// HTTP message body length as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.3.3
enum HttpMessageLength {
Empty,
ContentLength(usize),
TransferEncoding(String),
}
/// An HTTP response body in binary format.
pub struct BinaryResponse(pub Vec<u8>);
/// An HTTP response body in JSON format.
pub struct JsonResponse(pub serde_json::Value);
/// Interprets bytes from an HTTP response body as binary data.
impl TryFrom<Vec<u8>> for BinaryResponse {
type Error = std::io::Error;
fn try_from(bytes: Vec<u8>) -> std::io::Result<Self> {
Ok(BinaryResponse(bytes))
}
}
/// Interprets bytes from an HTTP response body as a JSON value.
impl TryFrom<Vec<u8>> for JsonResponse {
type Error = std::io::Error;
fn try_from(bytes: Vec<u8>) -> std::io::Result<Self> {
Ok(JsonResponse(serde_json::from_slice(&bytes)?))
}
}
#[cfg(test)]
mod endpoint_tests {
use super::HttpEndpoint;
#[test]
fn with_default_port() {
let endpoint = HttpEndpoint::for_host("foo.com".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.port(), 80);
}
#[test]
fn with_custom_port() {
let endpoint = HttpEndpoint::for_host("foo.com".into()).with_port(8080);
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.port(), 8080);
}
#[test]
fn with_uri_path() {
let endpoint = HttpEndpoint::for_host("foo.com".into()).with_path("/path".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.path(), "/path");
}
#[test]
fn without_uri_path() {
let endpoint = HttpEndpoint::for_host("foo.com".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.path(), "/");
}
#[test]
fn convert_to_socket_addrs() {
let endpoint = HttpEndpoint::for_host("foo.com".into());
let host = endpoint.host();
let port = endpoint.port();
use std::net::ToSocketAddrs;
match (&endpoint).to_socket_addrs() {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(mut socket_addrs) => {
match socket_addrs.next() {
None => panic!("Expected socket address"),
Some(addr) => {
assert_eq!(addr, (host, port).to_socket_addrs().unwrap().next().unwrap());
assert!(socket_addrs.next().is_none());
}
}
}
}
}
}
#[cfg(test)]
pub(crate) mod client_tests {
use super::*;
use std::io::BufRead;
use std::io::Write;
/// Server for handling HTTP client requests with a stock response.
pub struct HttpServer {
address: std::net::SocketAddr,
handler: std::thread::JoinHandle<()>,
shutdown: std::sync::Arc<std::sync::atomic::AtomicBool>,
}
/// Body of HTTP response messages.
pub enum MessageBody<T: ToString> {
Empty,
Content(T),
ChunkedContent(T),
}
impl HttpServer {
fn responding_with_body<T: ToString>(status: &str, body: MessageBody<T>) -> Self {
let response = match body {
MessageBody::Empty => format!("{}\r\n\r\n", status),
MessageBody::Content(body) => {
let body = body.to_string();
format!(
"{}\r\n\
Content-Length: {}\r\n\
\r\n\
{}", status, body.len(), body)
},
MessageBody::ChunkedContent(body) => {
let mut chuncked_body = Vec::new();
{
use chunked_transfer::Encoder;
let mut encoder = Encoder::with_chunks_size(&mut chuncked_body, 8);
encoder.write_all(body.to_string().as_bytes()).unwrap();
}
format!(
"{}\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
{}", status, String::from_utf8(chuncked_body).unwrap())
},
};
HttpServer::responding_with(response)
}
pub fn responding_with_ok<T: ToString>(body: MessageBody<T>) -> Self {
HttpServer::responding_with_body("HTTP/1.1 200 OK", body)
}
pub fn responding_with_not_found() -> Self {
HttpServer::responding_with_body::<String>("HTTP/1.1 404 Not Found", MessageBody::Empty)
}
pub fn responding_with_server_error<T: ToString>(content: T) -> Self {
let body = MessageBody::Content(content);
HttpServer::responding_with_body("HTTP/1.1 500 Internal Server Error", body)
}
fn responding_with(response: String) -> Self {
let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
let address = listener.local_addr().unwrap();
let shutdown = std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false));
let shutdown_signaled = std::sync::Arc::clone(&shutdown);
let handler = std::thread::spawn(move || {
for stream in listener.incoming() {
let mut stream = stream.unwrap();
stream.set_write_timeout(Some(TCP_STREAM_TIMEOUT)).unwrap();
let lines_read = std::io::BufReader::new(&stream)
.lines()
.take_while(|line| !line.as_ref().unwrap().is_empty())
.count();
if lines_read == 0 { continue; }
for chunk in response.as_bytes().chunks(16) {
if shutdown_signaled.load(std::sync::atomic::Ordering::SeqCst) {
return;
} else {
if let Err(_) = stream.write(chunk) { break; }
if let Err(_) = stream.flush() { break; }
}
}
}
});
Self { address, handler, shutdown }
}
fn shutdown(self) {
self.shutdown.store(true, std::sync::atomic::Ordering::SeqCst);
self.handler.join().unwrap();
}
pub fn endpoint(&self) -> HttpEndpoint {
HttpEndpoint::for_host(self.address.ip().to_string()).with_port(self.address.port())
}
}
#[test]
fn connect_to_unresolvable_host() {
match HttpClient::connect(("example.invalid", 80)) {
Err(e) => {
assert!(e.to_string().contains("failed to lookup address information") ||
e.to_string().contains("No such host"), "{:?}", e);
},
Ok(_) => panic!("Expected error"),
}
}
#[test]
fn connect_with_no_socket_address() {
match HttpClient::connect(&vec![][..]) {
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::InvalidInput),
Ok(_) => panic!("Expected error"),
}
}
#[test]
fn connect_with_unknown_server() {
match HttpClient::connect(("::", 80)) {
#[cfg(target_os = "windows")]
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::AddrNotAvailable),
#[cfg(not(target_os = "windows"))]
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::ConnectionRefused),
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn connect_with_valid_endpoint() {
let server = HttpServer::responding_with_ok::<String>(MessageBody::Empty);
match HttpClient::connect(&server.endpoint()) {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(_) => {},
}
}
#[tokio::test]
async fn read_empty_message() {
let server = HttpServer::responding_with("".to_string());
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof);
assert_eq!(e.get_ref().unwrap().to_string(), "no status line");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_incomplete_message() {
let server = HttpServer::responding_with("HTTP/1.1 200 OK".to_string());
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof);
assert_eq!(e.get_ref().unwrap().to_string(), "no headers");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_too_large_message_headers() {
let response = format!(
"HTTP/1.1 302 Found\r\n\
Location: {}\r\n\
\r\n", "Z".repeat(MAX_HTTP_MESSAGE_HEADER_SIZE));
let server = HttpServer::responding_with(response);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof);
assert_eq!(e.get_ref().unwrap().to_string(), "no headers");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_too_large_message_body() {
let body = "Z".repeat(MAX_HTTP_MESSAGE_BODY_SIZE + 1);
let server = HttpServer::responding_with_ok::<String>(MessageBody::Content(body));
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::InvalidData);
assert_eq!(e.get_ref().unwrap().to_string(), "out of range");
},
Ok(_) => panic!("Expected error"),
}
server.shutdown();
}
#[tokio::test]
async fn read_message_with_unsupported_transfer_coding() {
let response = String::from(
"HTTP/1.1 200 OK\r\n\
Transfer-Encoding: gzip\r\n\
\r\n\
foobar");
let server = HttpServer::responding_with(response);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::InvalidInput);
assert_eq!(e.get_ref().unwrap().to_string(), "unsupported transfer coding");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_error() {
let server = HttpServer::responding_with_server_error("foo");
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<JsonResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::Other);
let http_error = e.into_inner().unwrap().downcast::<HttpError>().unwrap();
assert_eq!(http_error.status_code, "500");
assert_eq!(http_error.contents, "foo".as_bytes());
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_empty_message_body() {
let server = HttpServer::responding_with_ok::<String>(MessageBody::Empty);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(bytes) => assert_eq!(bytes.0, Vec::<u8>::new()),
}
}
#[tokio::test]
async fn read_message_body_with_length() {
let body = "foo bar baz qux".repeat(32);
let content = MessageBody::Content(body.clone());
let server = HttpServer::responding_with_ok::<String>(content);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(bytes) => assert_eq!(bytes.0, body.as_bytes()),
}
}
#[tokio::test]
async fn read_chunked_message_body() {
let body = "foo bar baz qux".repeat(32);
let chunked_content = MessageBody::ChunkedContent(body.clone());
let server = HttpServer::responding_with_ok::<String>(chunked_content);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(bytes) => assert_eq!(bytes.0, body.as_bytes()),
}
}
#[tokio::test]
async fn reconnect_closed_connection() {
let server = HttpServer::responding_with_ok::<String>(MessageBody::Empty);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
assert!(client.get::<BinaryResponse>("/foo", "foo.com").await.is_ok());
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(bytes) => assert_eq!(bytes.0, Vec::<u8>::new()),
}
}
#[test]
fn from_bytes_into_binary_response() {
let bytes = b"foo";
match BinaryResponse::try_from(bytes.to_vec()) {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(response) => assert_eq!(&response.0, bytes),
}
}
#[test]
fn from_invalid_bytes_into_json_response() {
let json = serde_json::json!({ "result": 42 });
match JsonResponse::try_from(json.to_string().as_bytes()[..5].to_vec()) {
Err(_) => {},
Ok(_) => panic!("Expected error"),
}
}
#[test]
fn from_valid_bytes_into_json_response() {
let json = serde_json::json!({ "result": 42 });
match JsonResponse::try_from(json.to_string().as_bytes().to_vec()) {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(response) => assert_eq!(response.0, json),
}
}
}
| post | identifier_name |
coref.py | import sys
import shelve
import numpy as np
import tensorflow as tf
import time
import random
import math
import os
import glob
from bs4 import BeautifulSoup
starttime = time.time()
BATCH_SIZE = 50
def load_dir(Dir):
r = Dir + '/'
cors = []
cons = []
data = []
genre = []
dirs = [x for x in glob.glob(r + '*')]
for d in dirs:
for _r, _d, files in os.walk(d):
for f in files:
if f.endswith('_cors'):
cors.append([os.path.join(_r, f),d])
elif f.endswith('_cons'):
cons.append(os.path.join(_r,f))
cors.sort()
cons.sort()i
count = 0
for r,n in zip(cors,cons):
data.append([r[0],n])
genre.append(r[1].split('/')[-1])
if count == 50:
break
count = count + 1
return data,genre
def file_to_word(conllfile):
wordList = []
for line in open(conllfile).readlines():
sp = line.split()
if len(sp) > 6:
wordList.append([sp[3],sp[4],sp[9]])
return wordList
def file_to_coref(corefFile,conllFile):
index = 0
corefno = 0
sentno = 0
f = open(corefFile) | soup = BeautifulSoup(f.read(),'lxml')
f.close()
soup_all = []
stack = []
coreList = []
core_all = []
coref = []
wrapped = False
for siru in soup.findAll('coref'):
soupList = []
soupList.append(siru.attrs.get('id'))
soupList.append(siru.attrs.get('type'))
soupList.append(''.join(list(siru.strings)))
soup_all.append(soupList)
for line in open(conllFile):
param = line.split()
if len(param) < 6:
sentno = sentno + 1
continue
info_coref = param[-1].split('|')
for co in info_coref:
if co.startswith('('):
stack.append([co,corefno])
core_all.append([index,0,0,sentno])
if wrapped:
core_all[corefno][2]
corefno = corefno + 1
if len(stack) == 0:
wrapped = False
elif len(stack) == 1:
wrapped = True
elif len(stack) > 1:
wrapped = True
for j in range(len(stack) - 1):
core_all[stack[j][1]][1] = 1
if co.endswith(')'):
stack.pop()
index = index + 1
for (cor,miso) in zip(core_all,soup_all):
coref.append(cor + miso)
return coref
def file_to_List(data):
return file_to_coref(data[0],data[1]),file_to_word(data[1])
def getGenreFeature(genre):
if genre == 'bc':
param = 0
elif genre == 'bn':
param = 1
elif genre == 'mz':
param = 2
elif genre == 'nw':
param = 3
elif genre == 'pt':
param = 4
elif genre == 'tc':
param = 5
elif genre == 'wb':
param = 6
return [param]
def getWordEmbedding(info_words):
dic = shelve.open('wordembedding.db')
embeddingList = []
for words in info_words:
if words[0] in dic:
embeddingList.append(np.array(dic[words[0].lower()], dtype = np.float32))
else:
embeddingList.append(np.random.randn(50))
ave_all = sum(embeddingList)/len(embeddingList)
dic.close()
return (embeddingList,ave_all)
def getEmbeddingFeature(mention, mpos, word, Wembed,ave_all_d):
smention = mention.split()
ret = []
len_m = len(smention)
len_w = len(Wembed)
vacant = ave_pre_5 = ave_fol_5 = ave_all_m = ave_all_s = np.zeros(50)
#mentionの最初の単語
ret.append(Wembed[mpos])
#mentionの最後の単語
ret.append(Wembed[mpos + len_m - 1])
#mentionの2つ前の単語
if mpos / 2 > 0:
ret.append(Wembed[mpos - 2])
else:
ret.append(vacant)
#mentionの1つ前の単語
if mpos != 0:
ret.append(Wembed[mpos - 1])
else:
ret.append(vacant)
#mentionの1つ後の単語
pos_f = len_w - (mpos + len_m - 1)
if pos_f > 1:
ret.append(Wembed[mpos + len_m])
else:
ret.append(vacant)
#mentionの2つ後の単語
if pos_f > 2:
ret.append(Wembed[mpos + len_m + 1])
else:
ret.append(vacant)
#前5つの単語の平均
if mpos / 5 > 0:
for i in range(5):
ave_pre_5 += Wembed[mpos - i - 1]
else:
for i in range(mpos):
ave_pre_5 += Wembed[mpos - i - 1]
ret.append(ave_pre_5/5)
#後5つの単語の平均
pos_f5 = len_w - (mpos + len_m - 1)
if pos_f5 > 5:
for j in range(5):
ave_fol_5 += Wembed[mpos + len_m + j - 1]
else:
for j in range(pos_f5):
ave_fol_5 += Wembed[mpos + len_m + j - 1]
ret.append(ave_fol_5/5)
#mentionの単語の平均
for k in range(len_m):
ave_all_m += Wembed[mpos + k]
ret.append(ave_all_m/len_m)
#文書の全単語の平均
ret.append(ave_all_d)
ret = [flatten for inner in ret for flatten in inner]
return ret
def getDistance(aPos, mPos):
dis = mPos - aPos
if dis == 0:
ret = [1,0,0,0,0,0,0,0,0,0]
elif dis == 1:
ret = [0,1,0,0,0,0,0,0,0,0]
elif dis == 2:
ret = [0,0,1,0,0,0,0,0,0,0]
elif dis == 3:
ret = [0,0,0,1,0,0,0,0,0,0]
elif dis == 4:
ret = [0,0,0,0,1,0,0,0,0,0]
elif dis > 4 and dis < 8:
ret = [0,0,0,0,0,1,0,0,0,0]
elif dis > 7 and dis < 16:
ret = [0,0,0,0,0,0,1,0,0,0]
elif dis > 15 and dis < 32:
ret = [0,0,0,0,0,0,0,1,0,0]
elif dis > 31 and dis < 64:
ret = [0,0,0,0,0,0,0,0,1,0]
elif dis > 63:
ret = [0,0,0,0,0,0,0,0,0,1]
return ret
def getSpeaker(aSpeaker, mSpeaker):
if (aSpeaker == mSpeaker):
return [1]
else:
return [0]
def stringMatch(a, m):
if (a == m):
return [1]
else:
return [0]
def getLength(mention):
length = len(mention.split())
if length == 0:
ret = [1,0,0,0,0,0,0,0,0,0]
elif length == 1:
ret = [0,1,0,0,0,0,0,0,0,0]
elif length == 2:
ret = [0,0,1,0,0,0,0,0,0,0]
elif length == 3:
ret = [0,0,0,1,0,0,0,0,0,0]
elif length == 4:
ret = [0,0,0,0,1,0,0,0,0,0]
elif length > 4 and length < 8:
ret = [0,0,0,0,0,1,0,0,0,0]
elif length > 7 and length < 16:
ret = [0,0,0,0,0,0,1,0,0,0]
elif length > 15 and length < 32:
ret = [0,0,0,0,0,0,0,1,0,0]
elif length > 31 and length < 64:
ret = [0,0,0,0,0,0,0,0,1,0]
elif length > 63:
ret = [0,0,0,0,0,0,0,0,0,1]
return ret
def getPosition(mpos,total):
return [float(mpos)/float(total)]
def particalMatch(a, m):
awords = a.split()
mwords = m.split()
pMatch = 0
for a in awords:
for m in mwords:
if (a == m):
pMatch = 1
break
return [pMatch]
def getInclude(include):
if include:
return [1]
else:
return [0]
def getVectors(mentions,words,Wembedave,genre):
print('begin')
vector = []
vectors = []
labels = []
costs = []
antecedents = ['NA']
total = len(mentions)
print(total)
Wembed = Wembedave[0]
ave_all = Wembedave[1]
for m in mentions:
for a in antecedents:
if a == 'NA':
tmp = [0 for i in range(512)]
tmp.extend(getEmbeddingFeature(m[6],m[0],words,Wembed,ave_all))
for i in range(36):
tmp.append(0)
vectors.append(tmp)
labels.append([0.])
continue
elif(m[4] == a[4]):
labels.append([1.])
else:
labels.append([0.])
vector.extend(getEmbeddingFeature(a[6],a[0],words,Wembed,ave_all))
vector.extend(getPosition(a[0],total))
vector.extend(getInclude(a[1]))
vector.extend(getLength(m[6]))
vector.extend(getEmbeddingFeature(m[6],m[0],words,Wembed,ave_all))
vector.extend(getPosition(m[0],total))
vector.extend(getInclude(a[1]))
vector.extend(getLength(a[6]))
vector.extend(getGenreFeature(genre))
vector.extend(getDistance(a[0],m[0]))
vector.extend(getDistance(a[3],m[3]))
vector.extend(getSpeaker(words[a[0]][2],words[m[0]][2]))
vector.extend(stringMatch(a[6],m[6]))
vector.extend(particalMatch(a[6],m[6]))
if len(vector) != 1048:
exit()
vectors.append(vector)
vector = npvec = []
antecedents.append(m)
return vectors, labels
def placeholder_inputs():
input_placeholder = tf.placeholder(tf.float32,shape=[None,1048])
label_placeholder = tf.placeholder(tf.float32,shape=[None,1])
return input_placeholder,label_placeholder
def mention_encoder(x):
#hidden1
with tf.name_scope(u'hidden_layer1') as scope:
weight1 = tf.nn.l2_normalize(tf.Variable(tf.truncated_normal([1048,1000],stddev=0.2)),0)
bias1 = tf.Variable(tf.zeros([1000]))
hidden1 = tf.matmul(x,weight1) + bias1
#hidden2
with tf.name_scope(u'hidden_layer2') as scope:
weight2 = tf.nn.l2_normalize(tf.Variable(tf.truncated_normal([1000,500], stddev=0.2)),0)
bias2 = tf.Variable(tf.zeros([500]))
hidden2 = tf.matmul(hidden1,weight2) + bias2
#representation
with tf.name_scope(u'representaton') as scope:
weight3 = tf.nn.l2_normalize(tf.Variable(tf.truncated_normal([500,500], stddev=0.2)),0)
bias3 = tf.Variable(tf.zeros([500]))
representation = tf.matmul(hidden2,weight3) + bias3
return representation,weight1,bias1
def scoring_func(rep):
with tf.name_scope(u'scoring') as scope:
weight_s = tf.Variable(tf.truncated_normal([500,1], stddev=0.2))
bias_s = tf.Variable(tf.zeros([1]))
score = tf.matmul(rep, weight_s) + bias_s
return score
def prob_func(score):
return tf.nn.sigmoid(score)
def pre_loss_func(prob,y):
return -tf.reduce_mean(y*tf.log(prob) + (1-y)*tf.log(1-prob))
def fill_feed_dict(vectors,labels,input_pl,label_pl):
len_v = len(vector)
random.seed(time.time())
rand = [n for n in range(len_v)]
random.shuffle(rand)
if len_v >= BATCH_SIZE:
b_vectors = np.array([vectors[rand[x]] for x in range(BATCH_SIZE)], dtype=np.float32)
b_labels = np.array([labels[rand[x]] for x in range(BATCH_SIZE)], dtype=np.float32)
else:
b_vectors = np.array([vectors[rand[x]] for x in range(len_v)], dtype=np.float32)
b_labels = np.array([labels[rand[x]] for x in range(len_v)], dtype=np.float32)
feed_dict={
input_pl: b_vectors,
label_pl: b_labels
}
return feed_dict
def make_allvector(data,genre):
allvector = []
alllabel = []
for d, g in zip(data,genre):
print(d)
mentions, words = file_to_List(d)
Wembed = getWordEmbedding(words)
vectors,labels = getVectors(mentions,words,Wembed,g)
allvector.extend(vectors)
alllabel.extend(labels)
return allvector,allllabel
if __name__ == '__main__':
path = '/home/kadomae.13029/conll2012/conll-2012/v4/data/train/data/english/annotations'
dev_path = '/home/kadomae.13029/conll2012/conll-2012/v4/data/development/data/english/annotations'
data,genre = load_dir(path)
allvector, alllabel = make_allvector(data,genre)
print('-----pretraining start-----')
input_vector, label = placeholder_inputs()
rep,w1,b2 = mention_encoder(input_vector)
score = scoring_func(rep)
prob = prob_func(score)
loss = pre_loss_func(prob,label)
train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss)
sess = tf.Session()
init = tf.initialize_all_variables()
sess.run(init)
for i in range(200):
feed_dict = fill_feed_dict(allvector,alllabel,input_vector,label)
_, l = sess.run([train_step, loss], feed_dict=feed_dict)
print(l)
print(time.time()-starttime)
print('-----pretraining finish-----') | random_line_split | |
coref.py | import sys
import shelve
import numpy as np
import tensorflow as tf
import time
import random
import math
import os
import glob
from bs4 import BeautifulSoup
starttime = time.time()
BATCH_SIZE = 50
def | (Dir):
r = Dir + '/'
cors = []
cons = []
data = []
genre = []
dirs = [x for x in glob.glob(r + '*')]
for d in dirs:
for _r, _d, files in os.walk(d):
for f in files:
if f.endswith('_cors'):
cors.append([os.path.join(_r, f),d])
elif f.endswith('_cons'):
cons.append(os.path.join(_r,f))
cors.sort()
cons.sort()i
count = 0
for r,n in zip(cors,cons):
data.append([r[0],n])
genre.append(r[1].split('/')[-1])
if count == 50:
break
count = count + 1
return data,genre
def file_to_word(conllfile):
wordList = []
for line in open(conllfile).readlines():
sp = line.split()
if len(sp) > 6:
wordList.append([sp[3],sp[4],sp[9]])
return wordList
def file_to_coref(corefFile,conllFile):
index = 0
corefno = 0
sentno = 0
f = open(corefFile)
soup = BeautifulSoup(f.read(),'lxml')
f.close()
soup_all = []
stack = []
coreList = []
core_all = []
coref = []
wrapped = False
for siru in soup.findAll('coref'):
soupList = []
soupList.append(siru.attrs.get('id'))
soupList.append(siru.attrs.get('type'))
soupList.append(''.join(list(siru.strings)))
soup_all.append(soupList)
for line in open(conllFile):
param = line.split()
if len(param) < 6:
sentno = sentno + 1
continue
info_coref = param[-1].split('|')
for co in info_coref:
if co.startswith('('):
stack.append([co,corefno])
core_all.append([index,0,0,sentno])
if wrapped:
core_all[corefno][2]
corefno = corefno + 1
if len(stack) == 0:
wrapped = False
elif len(stack) == 1:
wrapped = True
elif len(stack) > 1:
wrapped = True
for j in range(len(stack) - 1):
core_all[stack[j][1]][1] = 1
if co.endswith(')'):
stack.pop()
index = index + 1
for (cor,miso) in zip(core_all,soup_all):
coref.append(cor + miso)
return coref
def file_to_List(data):
return file_to_coref(data[0],data[1]),file_to_word(data[1])
def getGenreFeature(genre):
if genre == 'bc':
param = 0
elif genre == 'bn':
param = 1
elif genre == 'mz':
param = 2
elif genre == 'nw':
param = 3
elif genre == 'pt':
param = 4
elif genre == 'tc':
param = 5
elif genre == 'wb':
param = 6
return [param]
def getWordEmbedding(info_words):
dic = shelve.open('wordembedding.db')
embeddingList = []
for words in info_words:
if words[0] in dic:
embeddingList.append(np.array(dic[words[0].lower()], dtype = np.float32))
else:
embeddingList.append(np.random.randn(50))
ave_all = sum(embeddingList)/len(embeddingList)
dic.close()
return (embeddingList,ave_all)
def getEmbeddingFeature(mention, mpos, word, Wembed,ave_all_d):
smention = mention.split()
ret = []
len_m = len(smention)
len_w = len(Wembed)
vacant = ave_pre_5 = ave_fol_5 = ave_all_m = ave_all_s = np.zeros(50)
#mentionの最初の単語
ret.append(Wembed[mpos])
#mentionの最後の単語
ret.append(Wembed[mpos + len_m - 1])
#mentionの2つ前の単語
if mpos / 2 > 0:
ret.append(Wembed[mpos - 2])
else:
ret.append(vacant)
#mentionの1つ前の単語
if mpos != 0:
ret.append(Wembed[mpos - 1])
else:
ret.append(vacant)
#mentionの1つ後の単語
pos_f = len_w - (mpos + len_m - 1)
if pos_f > 1:
ret.append(Wembed[mpos + len_m])
else:
ret.append(vacant)
#mentionの2つ後の単語
if pos_f > 2:
ret.append(Wembed[mpos + len_m + 1])
else:
ret.append(vacant)
#前5つの単語の平均
if mpos / 5 > 0:
for i in range(5):
ave_pre_5 += Wembed[mpos - i - 1]
else:
for i in range(mpos):
ave_pre_5 += Wembed[mpos - i - 1]
ret.append(ave_pre_5/5)
#後5つの単語の平均
pos_f5 = len_w - (mpos + len_m - 1)
if pos_f5 > 5:
for j in range(5):
ave_fol_5 += Wembed[mpos + len_m + j - 1]
else:
for j in range(pos_f5):
ave_fol_5 += Wembed[mpos + len_m + j - 1]
ret.append(ave_fol_5/5)
#mentionの単語の平均
for k in range(len_m):
ave_all_m += Wembed[mpos + k]
ret.append(ave_all_m/len_m)
#文書の全単語の平均
ret.append(ave_all_d)
ret = [flatten for inner in ret for flatten in inner]
return ret
def getDistance(aPos, mPos):
dis = mPos - aPos
if dis == 0:
ret = [1,0,0,0,0,0,0,0,0,0]
elif dis == 1:
ret = [0,1,0,0,0,0,0,0,0,0]
elif dis == 2:
ret = [0,0,1,0,0,0,0,0,0,0]
elif dis == 3:
ret = [0,0,0,1,0,0,0,0,0,0]
elif dis == 4:
ret = [0,0,0,0,1,0,0,0,0,0]
elif dis > 4 and dis < 8:
ret = [0,0,0,0,0,1,0,0,0,0]
elif dis > 7 and dis < 16:
ret = [0,0,0,0,0,0,1,0,0,0]
elif dis > 15 and dis < 32:
ret = [0,0,0,0,0,0,0,1,0,0]
elif dis > 31 and dis < 64:
ret = [0,0,0,0,0,0,0,0,1,0]
elif dis > 63:
ret = [0,0,0,0,0,0,0,0,0,1]
return ret
def getSpeaker(aSpeaker, mSpeaker):
if (aSpeaker == mSpeaker):
return [1]
else:
return [0]
def stringMatch(a, m):
if (a == m):
return [1]
else:
return [0]
def getLength(mention):
length = len(mention.split())
if length == 0:
ret = [1,0,0,0,0,0,0,0,0,0]
elif length == 1:
ret = [0,1,0,0,0,0,0,0,0,0]
elif length == 2:
ret = [0,0,1,0,0,0,0,0,0,0]
elif length == 3:
ret = [0,0,0,1,0,0,0,0,0,0]
elif length == 4:
ret = [0,0,0,0,1,0,0,0,0,0]
elif length > 4 and length < 8:
ret = [0,0,0,0,0,1,0,0,0,0]
elif length > 7 and length < 16:
ret = [0,0,0,0,0,0,1,0,0,0]
elif length > 15 and length < 32:
ret = [0,0,0,0,0,0,0,1,0,0]
elif length > 31 and length < 64:
ret = [0,0,0,0,0,0,0,0,1,0]
elif length > 63:
ret = [0,0,0,0,0,0,0,0,0,1]
return ret
def getPosition(mpos,total):
return [float(mpos)/float(total)]
def particalMatch(a, m):
awords = a.split()
mwords = m.split()
pMatch = 0
for a in awords:
for m in mwords:
if (a == m):
pMatch = 1
break
return [pMatch]
def getInclude(include):
if include:
return [1]
else:
return [0]
def getVectors(mentions,words,Wembedave,genre):
print('begin')
vector = []
vectors = []
labels = []
costs = []
antecedents = ['NA']
total = len(mentions)
print(total)
Wembed = Wembedave[0]
ave_all = Wembedave[1]
for m in mentions:
for a in antecedents:
if a == 'NA':
tmp = [0 for i in range(512)]
tmp.extend(getEmbeddingFeature(m[6],m[0],words,Wembed,ave_all))
for i in range(36):
tmp.append(0)
vectors.append(tmp)
labels.append([0.])
continue
elif(m[4] == a[4]):
labels.append([1.])
else:
labels.append([0.])
vector.extend(getEmbeddingFeature(a[6],a[0],words,Wembed,ave_all))
vector.extend(getPosition(a[0],total))
vector.extend(getInclude(a[1]))
vector.extend(getLength(m[6]))
vector.extend(getEmbeddingFeature(m[6],m[0],words,Wembed,ave_all))
vector.extend(getPosition(m[0],total))
vector.extend(getInclude(a[1]))
vector.extend(getLength(a[6]))
vector.extend(getGenreFeature(genre))
vector.extend(getDistance(a[0],m[0]))
vector.extend(getDistance(a[3],m[3]))
vector.extend(getSpeaker(words[a[0]][2],words[m[0]][2]))
vector.extend(stringMatch(a[6],m[6]))
vector.extend(particalMatch(a[6],m[6]))
if len(vector) != 1048:
exit()
vectors.append(vector)
vector = npvec = []
antecedents.append(m)
return vectors, labels
def placeholder_inputs():
input_placeholder = tf.placeholder(tf.float32,shape=[None,1048])
label_placeholder = tf.placeholder(tf.float32,shape=[None,1])
return input_placeholder,label_placeholder
def mention_encoder(x):
#hidden1
with tf.name_scope(u'hidden_layer1') as scope:
weight1 = tf.nn.l2_normalize(tf.Variable(tf.truncated_normal([1048,1000],stddev=0.2)),0)
bias1 = tf.Variable(tf.zeros([1000]))
hidden1 = tf.matmul(x,weight1) + bias1
#hidden2
with tf.name_scope(u'hidden_layer2') as scope:
weight2 = tf.nn.l2_normalize(tf.Variable(tf.truncated_normal([1000,500], stddev=0.2)),0)
bias2 = tf.Variable(tf.zeros([500]))
hidden2 = tf.matmul(hidden1,weight2) + bias2
#representation
with tf.name_scope(u'representaton') as scope:
weight3 = tf.nn.l2_normalize(tf.Variable(tf.truncated_normal([500,500], stddev=0.2)),0)
bias3 = tf.Variable(tf.zeros([500]))
representation = tf.matmul(hidden2,weight3) + bias3
return representation,weight1,bias1
def scoring_func(rep):
with tf.name_scope(u'scoring') as scope:
weight_s = tf.Variable(tf.truncated_normal([500,1], stddev=0.2))
bias_s = tf.Variable(tf.zeros([1]))
score = tf.matmul(rep, weight_s) + bias_s
return score
def prob_func(score):
return tf.nn.sigmoid(score)
def pre_loss_func(prob,y):
return -tf.reduce_mean(y*tf.log(prob) + (1-y)*tf.log(1-prob))
def fill_feed_dict(vectors,labels,input_pl,label_pl):
len_v = len(vector)
random.seed(time.time())
rand = [n for n in range(len_v)]
random.shuffle(rand)
if len_v >= BATCH_SIZE:
b_vectors = np.array([vectors[rand[x]] for x in range(BATCH_SIZE)], dtype=np.float32)
b_labels = np.array([labels[rand[x]] for x in range(BATCH_SIZE)], dtype=np.float32)
else:
b_vectors = np.array([vectors[rand[x]] for x in range(len_v)], dtype=np.float32)
b_labels = np.array([labels[rand[x]] for x in range(len_v)], dtype=np.float32)
feed_dict={
input_pl: b_vectors,
label_pl: b_labels
}
return feed_dict
def make_allvector(data,genre):
allvector = []
alllabel = []
for d, g in zip(data,genre):
print(d)
mentions, words = file_to_List(d)
Wembed = getWordEmbedding(words)
vectors,labels = getVectors(mentions,words,Wembed,g)
allvector.extend(vectors)
alllabel.extend(labels)
return allvector,allllabel
if __name__ == '__main__':
path = '/home/kadomae.13029/conll2012/conll-2012/v4/data/train/data/english/annotations'
dev_path = '/home/kadomae.13029/conll2012/conll-2012/v4/data/development/data/english/annotations'
data,genre = load_dir(path)
allvector, alllabel = make_allvector(data,genre)
print('-----pretraining start-----')
input_vector, label = placeholder_inputs()
rep,w1,b2 = mention_encoder(input_vector)
score = scoring_func(rep)
prob = prob_func(score)
loss = pre_loss_func(prob,label)
train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss)
sess = tf.Session()
init = tf.initialize_all_variables()
sess.run(init)
for i in range(200):
feed_dict = fill_feed_dict(allvector,alllabel,input_vector,label)
_, l = sess.run([train_step, loss], feed_dict=feed_dict)
print(l)
print(time.time()-starttime)
print('-----pretraining finish-----')
| load_dir | identifier_name |
coref.py | import sys
import shelve
import numpy as np
import tensorflow as tf
import time
import random
import math
import os
import glob
from bs4 import BeautifulSoup
starttime = time.time()
BATCH_SIZE = 50
def load_dir(Dir):
r = Dir + '/'
cors = []
cons = []
data = []
genre = []
dirs = [x for x in glob.glob(r + '*')]
for d in dirs:
for _r, _d, files in os.walk(d):
for f in files:
if f.endswith('_cors'):
cors.append([os.path.join(_r, f),d])
elif f.endswith('_cons'):
cons.append(os.path.join(_r,f))
cors.sort()
cons.sort()i
count = 0
for r,n in zip(cors,cons):
data.append([r[0],n])
genre.append(r[1].split('/')[-1])
if count == 50:
break
count = count + 1
return data,genre
def file_to_word(conllfile):
wordList = []
for line in open(conllfile).readlines():
sp = line.split()
if len(sp) > 6:
wordList.append([sp[3],sp[4],sp[9]])
return wordList
def file_to_coref(corefFile,conllFile):
index = 0
corefno = 0
sentno = 0
f = open(corefFile)
soup = BeautifulSoup(f.read(),'lxml')
f.close()
soup_all = []
stack = []
coreList = []
core_all = []
coref = []
wrapped = False
for siru in soup.findAll('coref'):
soupList = []
soupList.append(siru.attrs.get('id'))
soupList.append(siru.attrs.get('type'))
soupList.append(''.join(list(siru.strings)))
soup_all.append(soupList)
for line in open(conllFile):
param = line.split()
if len(param) < 6:
sentno = sentno + 1
continue
info_coref = param[-1].split('|')
for co in info_coref:
if co.startswith('('):
stack.append([co,corefno])
core_all.append([index,0,0,sentno])
if wrapped:
core_all[corefno][2]
corefno = corefno + 1
if len(stack) == 0:
wrapped = False
elif len(stack) == 1:
wrapped = True
elif len(stack) > 1:
wrapped = True
for j in range(len(stack) - 1):
core_all[stack[j][1]][1] = 1
if co.endswith(')'):
stack.pop()
index = index + 1
for (cor,miso) in zip(core_all,soup_all):
coref.append(cor + miso)
return coref
def file_to_List(data):
return file_to_coref(data[0],data[1]),file_to_word(data[1])
def getGenreFeature(genre):
if genre == 'bc':
|
elif genre == 'bn':
param = 1
elif genre == 'mz':
param = 2
elif genre == 'nw':
param = 3
elif genre == 'pt':
param = 4
elif genre == 'tc':
param = 5
elif genre == 'wb':
param = 6
return [param]
def getWordEmbedding(info_words):
dic = shelve.open('wordembedding.db')
embeddingList = []
for words in info_words:
if words[0] in dic:
embeddingList.append(np.array(dic[words[0].lower()], dtype = np.float32))
else:
embeddingList.append(np.random.randn(50))
ave_all = sum(embeddingList)/len(embeddingList)
dic.close()
return (embeddingList,ave_all)
def getEmbeddingFeature(mention, mpos, word, Wembed, ave_all_d):
    """Concatenated embedding features for one mention.

    mention:   surface string of the mention
    mpos:      index of the mention's first token in the document
    word:      token metadata list (unused here, kept for interface parity)
    Wembed:    per-token 50-d embeddings for the whole document
    ave_all_d: document-wide average embedding
    Returns a flat list of 10 x 50 = 500 floats: first/last mention word,
    two preceding words, two following words, averages of the previous 5,
    the following 5, the mention itself, and the whole document.
    """
    smention = mention.split()
    ret = []
    len_m = len(smention)
    len_w = len(Wembed)
    # Fix: the original bound vacant/ave_pre_5/ave_fol_5/ave_all_m to ONE
    # shared np.zeros(50) object; the in-place `+=` below then mutated every
    # previously appended "vacant" padding slot too.  Give each its own buffer.
    vacant = np.zeros(50)
    ave_pre_5 = np.zeros(50)
    ave_fol_5 = np.zeros(50)
    ave_all_m = np.zeros(50)
    # first word of the mention
    ret.append(Wembed[mpos])
    # last word of the mention
    ret.append(Wembed[mpos + len_m - 1])
    # word two before the mention
    # Fix: `mpos / 2 > 0` only matched the intended `mpos >= 2` under
    # Python 2 integer division; under Python 3 it let mpos == 1 through
    # and Wembed[-1] silently wrapped to the document's last token.
    if mpos >= 2:
        ret.append(Wembed[mpos - 2])
    else:
        ret.append(vacant)
    # word immediately before the mention
    if mpos != 0:
        ret.append(Wembed[mpos - 1])
    else:
        ret.append(vacant)
    # word immediately after the mention
    pos_f = len_w - (mpos + len_m - 1)
    if pos_f > 1:
        ret.append(Wembed[mpos + len_m])
    else:
        ret.append(vacant)
    # word two after the mention
    if pos_f > 2:
        ret.append(Wembed[mpos + len_m + 1])
    else:
        ret.append(vacant)
    # average of the 5 preceding words (fewer near the document start);
    # the divisor stays 5 either way, as in the original.
    if mpos >= 5:
        for i in range(5):
            ave_pre_5 += Wembed[mpos - i - 1]
    else:
        for i in range(mpos):
            ave_pre_5 += Wembed[mpos - i - 1]
    ret.append(ave_pre_5 / 5)
    # average of the 5 following words (fewer near the document end).
    # NOTE(review): the window starts at mpos + len_m - 1, i.e. it includes
    # the mention's last word -- kept as-is, but confirm that is intended.
    pos_f5 = len_w - (mpos + len_m - 1)
    if pos_f5 > 5:
        for j in range(5):
            ave_fol_5 += Wembed[mpos + len_m + j - 1]
    else:
        for j in range(pos_f5):
            ave_fol_5 += Wembed[mpos + len_m + j - 1]
    ret.append(ave_fol_5 / 5)
    # average over the mention's own words
    for k in range(len_m):
        ave_all_m += Wembed[mpos + k]
    ret.append(ave_all_m / len_m)
    # document-wide average embedding
    ret.append(ave_all_d)
    # flatten the 10 vectors into one 500-element list
    ret = [flatten for inner in ret for flatten in inner]
    return ret
def getDistance(aPos, mPos):
    """One-hot (10 buckets) encoding of the token distance mPos - aPos.

    Buckets: 0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+.
    Assumes the antecedent precedes the mention (non-negative distance).
    """
    dis = mPos - aPos
    uppers = (1, 2, 3, 4, 5, 8, 16, 32, 64)
    slot = 9
    for idx, upper in enumerate(uppers):
        if dis < upper:
            slot = idx
            break
    ret = [0] * 10
    ret[slot] = 1
    return ret
def getSpeaker(aSpeaker, mSpeaker):
    """[1] if antecedent and mention share a speaker, else [0]."""
    return [int(aSpeaker == mSpeaker)]
def stringMatch(a, m):
    """[1] on exact string equality of the two mention strings, else [0]."""
    return [1] if a == m else [0]
def getLength(mention):
    """One-hot (10 buckets) encoding of the mention's word count.

    Buckets: 0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+.
    """
    length = len(mention.split())
    uppers = (1, 2, 3, 4, 5, 8, 16, 32, 64)
    slot = 9
    for idx, upper in enumerate(uppers):
        if length < upper:
            slot = idx
            break
    ret = [0] * 10
    ret[slot] = 1
    return ret
def getPosition(mpos, total):
    """Mention index normalized by the total mention count, as a 1-elem list."""
    return [mpos / float(total)]
def particalMatch(a, m):
    """[1] if the two mention strings share at least one word, else [0]."""
    shared = set(a.split()) & set(m.split())
    return [1] if shared else [0]
def getInclude(include):
    """Truthiness of the nested-mention flag as a 1-element 0/1 list."""
    return [1] if include else [0]
def getVectors(mentions,words,Wembedave,genre):
    # Build one 1048-dim feature vector per (antecedent, mention) pair, plus
    # a dummy "NA" row (no antecedent) for every mention.  A label is [1.]
    # when the pair shares a coreference id (m[4] == a[4]), else [0.].
    # Layout: 512 antecedent dims + 512 mention dims + 24 pair dims.
    print('begin')
    vector = []
    vectors = []
    labels = []
    costs = []            # NOTE(review): never used
    antecedents = ['NA']  # sentinel first, then every mention seen so far
    total = len(mentions)
    print(total)
    Wembed = Wembedave[0]   # per-token embeddings
    ave_all = Wembedave[1]  # document-average embedding
    for m in mentions:
        for a in antecedents:
            if a == 'NA':
                # No-antecedent row: zeroed antecedent half (512), the real
                # mention embedding (500), then 36 zeros standing in for the
                # mention's position/include/length dims and the pair dims.
                tmp = [0 for i in range(512)]
                tmp.extend(getEmbeddingFeature(m[6],m[0],words,Wembed,ave_all))
                for i in range(36):
                    tmp.append(0)
                vectors.append(tmp)
                labels.append([0.])
                continue
            elif(m[4] == a[4]):
                labels.append([1.])
            else:
                labels.append([0.])
            # --- antecedent half (500 + 1 + 1 + 10 = 512 dims) ---
            vector.extend(getEmbeddingFeature(a[6],a[0],words,Wembed,ave_all))
            vector.extend(getPosition(a[0],total))
            vector.extend(getInclude(a[1]))
            # NOTE(review): getLength(m[6]) here but getLength(a[6]) in the
            # mention half below -- the two look swapped; confirm intent.
            vector.extend(getLength(m[6]))
            # --- mention half (512 dims) ---
            vector.extend(getEmbeddingFeature(m[6],m[0],words,Wembed,ave_all))
            vector.extend(getPosition(m[0],total))
            # NOTE(review): a[1] again -- m[1] was probably intended here.
            vector.extend(getInclude(a[1]))
            vector.extend(getLength(a[6]))
            # --- pair features (1 + 10 + 10 + 1 + 1 + 1 = 24 dims) ---
            vector.extend(getGenreFeature(genre))
            vector.extend(getDistance(a[0],m[0]))   # token distance
            vector.extend(getDistance(a[3],m[3]))   # sentence distance
            vector.extend(getSpeaker(words[a[0]][2],words[m[0]][2]))
            vector.extend(stringMatch(a[6],m[6]))
            vector.extend(particalMatch(a[6],m[6]))
            # Hard guard on the expected dimensionality.
            if len(vector) != 1048:
                exit()
            vectors.append(vector)
            vector = npvec = []   # NOTE(review): npvec is never read
        antecedents.append(m)
    return vectors, labels
def placeholder_inputs():
    # TF1-style graph inputs: a batch of 1048-dim pair-feature vectors and
    # their 0/1 coreference labels with shape [batch, 1].
    input_placeholder = tf.placeholder(tf.float32,shape=[None,1048])
    label_placeholder = tf.placeholder(tf.float32,shape=[None,1])
    return input_placeholder,label_placeholder
def mention_encoder(x):
    """Three affine layers (1048 -> 1000 -> 500 -> 500) over the input batch.

    Returns the final representation plus the first layer's weight and bias
    (the extra returns are only used for inspection by the caller).
    NOTE(review): there is no activation between layers, so the stack
    collapses to a single linear map -- confirm that is intended.
    """
    layer_specs = [
        (u'hidden_layer1', 1048, 1000),
        (u'hidden_layer2', 1000, 500),
        (u'representaton', 500, 500),  # scope-name typo kept for graph-name stability
    ]
    out = x
    first_params = None
    for scope_name, fan_in, fan_out in layer_specs:
        with tf.name_scope(scope_name):
            w = tf.nn.l2_normalize(tf.Variable(tf.truncated_normal([fan_in, fan_out], stddev=0.2)), 0)
            b = tf.Variable(tf.zeros([fan_out]))
            out = tf.matmul(out, w) + b
            if first_params is None:
                first_params = (w, b)
    return out, first_params[0], first_params[1]
def scoring_func(rep):
    # Single affine output head: 500-d representation -> scalar pair score.
    with tf.name_scope(u'scoring') as scope:
        weight_s = tf.Variable(tf.truncated_normal([500,1], stddev=0.2))
        bias_s = tf.Variable(tf.zeros([1]))
        score = tf.matmul(rep, weight_s) + bias_s
    return score
def prob_func(score):
    # Squash the raw pair score into a (0, 1) coreference probability.
    return tf.nn.sigmoid(score)
def pre_loss_func(prob, y):
    """Mean binary cross-entropy between predicted probabilities and labels.

    Fix: clip the probability away from exactly 0 and 1 before the log;
    otherwise a saturated sigmoid makes the loss NaN and poisons training.
    """
    eps = 1e-7
    prob = tf.clip_by_value(prob, eps, 1 - eps)
    return -tf.reduce_mean(y*tf.log(prob) + (1-y)*tf.log(1-prob))
def fill_feed_dict(vectors, labels, input_pl, label_pl, batch_size=None):
    """Sample a random mini-batch and map it onto the two placeholders.

    Fix: the original read `len(vector)` -- an undefined name -- instead of
    `len(vectors)`, so every call raised NameError.
    batch_size defaults to the module-level BATCH_SIZE to stay
    backward-compatible with existing callers.
    Returns {input_pl: batch_vectors, label_pl: batch_labels}.
    """
    if batch_size is None:
        batch_size = BATCH_SIZE
    len_v = len(vectors)
    random.seed(time.time())
    rand = [n for n in range(len_v)]
    random.shuffle(rand)
    # Draw min(len_v, batch_size) examples without replacement.
    take = batch_size if len_v >= batch_size else len_v
    b_vectors = np.array([vectors[rand[x]] for x in range(take)], dtype=np.float32)
    b_labels = np.array([labels[rand[x]] for x in range(take)], dtype=np.float32)
    feed_dict = {
        input_pl: b_vectors,
        label_pl: b_labels
    }
    return feed_dict
def make_allvector(data, genre):
    """Concatenate feature vectors and labels across every document.

    data: list of [coref path, conll path] pairs; genre: parallel genre codes.
    Fix: the original returned `allllabel` (typo, four l's) -- an undefined
    name -- instead of the `alllabel` accumulator, raising NameError.
    """
    allvector = []
    alllabel = []
    for d, g in zip(data, genre):
        print(d)
        mentions, words = file_to_List(d)
        Wembed = getWordEmbedding(words)
        vectors, labels = getVectors(mentions, words, Wembed, g)
        allvector.extend(vectors)
        alllabel.extend(labels)
    return allvector, alllabel
if __name__ == '__main__':
    # Pretraining driver: build pair vectors from the CoNLL-2012 training
    # split, then run 200 SGD steps of the pairwise coreference scorer.
    path = '/home/kadomae.13029/conll2012/conll-2012/v4/data/train/data/english/annotations'
    dev_path = '/home/kadomae.13029/conll2012/conll-2012/v4/data/development/data/english/annotations'
    data,genre = load_dir(path)
    allvector, alllabel = make_allvector(data,genre)
    print('-----pretraining start-----')
    input_vector, label = placeholder_inputs()
    # NOTE(review): the third return of mention_encoder is the first layer's
    # *bias*, so the name b2 is misleading (b1 would match the encoder).
    rep,w1,b2 = mention_encoder(input_vector)
    score = scoring_func(rep)
    prob = prob_func(score)
    loss = pre_loss_func(prob,label)
    train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss)
    sess = tf.Session()
    init = tf.initialize_all_variables()
    sess.run(init)
    for i in range(200):
        # One freshly shuffled mini-batch per step.
        feed_dict = fill_feed_dict(allvector,alllabel,input_vector,label)
        _, l = sess.run([train_step, loss], feed_dict=feed_dict)
        print(l)
        print(time.time()-starttime)
    print('-----pretraining finish-----')
| param = 0 | conditional_block |
coref.py | import sys
import shelve
import numpy as np
import tensorflow as tf
import time
import random
import math
import os
import glob
from bs4 import BeautifulSoup
starttime = time.time()
BATCH_SIZE = 50
def load_dir(Dir):
    """Collect (coref file, conll file) pairs under Dir, one subdir per genre.

    Walks every genre directory below Dir, pairs files ending in '_cors'
    (coreference markup) with files ending in '_cons' (CoNLL annotations)
    by sorted order, and returns (data, genre) where data[i] is
    [cors_path, cons_path] and genre[i] is the genre directory name.

    Fix: the original line `cons.sort()i` had a stray trailing `i`, which is
    a SyntaxError and made the whole module unimportable.
    """
    r = Dir + '/'
    cors = []
    cons = []
    data = []
    genre = []
    dirs = [x for x in glob.glob(r + '*')]
    for d in dirs:
        for _r, _d, files in os.walk(d):
            for f in files:
                if f.endswith('_cors'):
                    cors.append([os.path.join(_r, f), d])
                elif f.endswith('_cons'):
                    cons.append(os.path.join(_r, f))
    # Pair by sorted order; assumes the two listings line up one-to-one.
    cors.sort()
    cons.sort()
    count = 0
    for r, n in zip(cors, cons):
        data.append([r[0], n])
        genre.append(r[1].split('/')[-1])
        # NOTE(review): this breaks after appending the 51st pair
        # (count 0..50) -- confirm whether exactly 50 was intended.
        if count == 50:
            break
        count = count + 1
    return data, genre
def file_to_word(conllfile):
    """Read per-token info from a CoNLL file.

    Returns one [word, POS, speaker] triple (columns 3, 4 and 9) per token
    line; short lines (sentence breaks etc.) are skipped.
    Fixes: (1) open the file with a context manager so the handle is closed
    promptly, and iterate lazily instead of materializing readlines();
    (2) the old guard `len(sp) > 6` still allowed sp[9] to raise IndexError
    on 7-9 field lines -- require enough fields for every column we read.
    """
    wordList = []
    with open(conllfile) as f:
        for line in f:
            sp = line.split()
            if len(sp) > 9:
                wordList.append([sp[3], sp[4], sp[9]])
    return wordList
def file_to_coref(corefFile,conllFile):
    # Build one record per coreference mention by aligning two views of the
    # same document:
    #   * corefFile - markup whose <coref> tags carry id/type/surface string
    #   * conllFile - CoNLL-format lines whose last column holds the
    #     open/close coreference brackets, e.g. "(12" ... "12)"
    # Each returned record is [token_index, nested_flag, third_flag,
    # sentence_no, coref_id, coref_type, mention_text]; the final zip assumes
    # the <coref> tags appear in the same order as the '(' brackets in the
    # CoNLL file -- TODO confirm that assumption holds for this corpus.
    index = 0            # running token index within the document
    corefno = 0          # running mention counter (position in core_all)
    sentno = 0           # running sentence counter
    f = open(corefFile)
    soup = BeautifulSoup(f.read(),'lxml')
    f.close()
    soup_all = []        # [id, type, surface text] per <coref> tag
    stack = []           # currently-open '(' brackets: [bracket, mention no.]
    coreList = []        # NOTE(review): never used -- candidate for removal
    core_all = []        # [token idx, nested flag, 0, sentence no.] per mention
    coref = []
    wrapped = False      # True while at least one bracket is still open
    for siru in soup.findAll('coref'):
        soupList = []
        soupList.append(siru.attrs.get('id'))
        soupList.append(siru.attrs.get('type'))
        soupList.append(''.join(list(siru.strings)))
        soup_all.append(soupList)
    for line in open(conllFile):
        param = line.split()
        if len(param) < 6:
            # Blank/short line = sentence boundary in CoNLL format.
            sentno = sentno + 1
            continue
        info_coref = param[-1].split('|')
        for co in info_coref:
            if co.startswith('('):
                stack.append([co,corefno])
                core_all.append([index,0,0,sentno])
                if wrapped:
                    # NOTE(review): this expression has no effect -- it was
                    # probably meant to assign core_all[corefno][2] = 1
                    # (mark the mention as opened inside another); confirm.
                    core_all[corefno][2]
                corefno = corefno + 1
            if len(stack) == 0:
                wrapped = False
            elif len(stack) == 1:
                wrapped = True
            elif len(stack) > 1:
                wrapped = True
                # Flag every enclosing (still-open) mention as nested.
                for j in range(len(stack) - 1):
                    core_all[stack[j][1]][1] = 1
            if co.endswith(')'):
                stack.pop()
        index = index + 1
    # Pair positional info with markup info, mention by mention.
    for (cor,miso) in zip(core_all,soup_all):
        coref.append(cor + miso)
    return coref
def file_to_List(data):
    # data = [coref markup path, conll path]; returns (mention records,
    # per-token word info) for one document.
    return file_to_coref(data[0],data[1]),file_to_word(data[1])
def getGenreFeature(genre):
    """Integer id of the OntoNotes genre code, as a 1-element feature list."""
    genre_ids = {'bc': 0, 'bn': 1, 'mz': 2, 'nw': 3, 'pt': 4, 'tc': 5, 'wb': 6}
    return [genre_ids[genre]]
def getWordEmbedding(info_words):
    # Look up a 50-d embedding for each token; unknown tokens fall back to a
    # random vector (results therefore vary run to run).
    # Returns (per-token embedding list, document-average embedding).
    dic = shelve.open('wordembedding.db')
    embeddingList = []
    for words in info_words:
        # NOTE(review): membership is tested with words[0] but the lookup
        # uses words[0].lower() -- this raises KeyError whenever only the
        # cased form is a key; the two should use the same key.
        if words[0] in dic:
            embeddingList.append(np.array(dic[words[0].lower()], dtype = np.float32))
        else:
            embeddingList.append(np.random.randn(50))
    # ZeroDivisionError if info_words is empty.
    ave_all = sum(embeddingList)/len(embeddingList)
    dic.close()
    return (embeddingList,ave_all)
def getEmbeddingFeature(mention, mpos, word, Wembed,ave_all_d):
    # Concatenate 10 x 50-d embedding features for one mention: first/last
    # word, two neighbours on each side, and averages of the previous 5,
    # following 5, mention words, and the whole document (500 floats total).
    # `word` is unused; `mpos` is the mention's first-token index.
    smention = mention.split()
    ret = []
    len_m = len(smention)
    len_w = len(Wembed)
    # NOTE(review): this binds all the accumulators to ONE shared
    # np.zeros(50) object; the in-place `+=` below mutates that shared
    # buffer, corrupting any "vacant" padding already appended to ret.
    # Each name should get its own np.zeros(50).  ave_all_s is never used.
    vacant = ave_pre_5 = ave_fol_5 = ave_all_m = ave_all_s = np.zeros(50)
    # first word of the mention
    ret.append(Wembed[mpos])
    # last word of the mention
    ret.append(Wembed[mpos + len_m - 1])
    # word two before the mention
    # NOTE(review): `mpos / 2 > 0` equals the intended `mpos >= 2` only
    # under Python 2 integer division; on Python 3, mpos == 1 passes and
    # Wembed[-1] silently wraps to the document's last token.
    if mpos / 2 > 0:
        ret.append(Wembed[mpos - 2])
    else:
        ret.append(vacant)
    # word immediately before the mention
    if mpos != 0:
        ret.append(Wembed[mpos - 1])
    else:
        ret.append(vacant)
    # word immediately after the mention
    pos_f = len_w - (mpos + len_m - 1)
    if pos_f > 1:
        ret.append(Wembed[mpos + len_m])
    else:
        ret.append(vacant)
    # word two after the mention
    if pos_f > 2:
        ret.append(Wembed[mpos + len_m + 1])
    else:
        ret.append(vacant)
    # average of the 5 preceding words (same Python-2 division caveat as
    # above; divisor stays 5 even when fewer words are available)
    if mpos / 5 > 0:
        for i in range(5):
            ave_pre_5 += Wembed[mpos - i - 1]
    else:
        for i in range(mpos):
            ave_pre_5 += Wembed[mpos - i - 1]
    ret.append(ave_pre_5/5)
    # average of the 5 following words; NOTE(review): the window starts at
    # mpos + len_m - 1, i.e. it includes the mention's last word -- confirm.
    pos_f5 = len_w - (mpos + len_m - 1)
    if pos_f5 > 5:
        for j in range(5):
            ave_fol_5 += Wembed[mpos + len_m + j - 1]
    else:
        for j in range(pos_f5):
            ave_fol_5 += Wembed[mpos + len_m + j - 1]
    ret.append(ave_fol_5/5)
    # average over the mention's own words
    for k in range(len_m):
        ave_all_m += Wembed[mpos + k]
    ret.append(ave_all_m/len_m)
    # document-wide average embedding
    ret.append(ave_all_d)
    # flatten the 10 vectors into one 500-element list
    ret = [flatten for inner in ret for flatten in inner]
    return ret
def getDistance(aPos, mPos):
    # One-hot bucket (10 bins) of the distance mPos - aPos:
    # 0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+.
    # A negative distance matches no branch and raises UnboundLocalError.
    dis = mPos - aPos
    if dis == 0:
        ret = [1,0,0,0,0,0,0,0,0,0]
    elif dis == 1:
        ret = [0,1,0,0,0,0,0,0,0,0]
    elif dis == 2:
        ret = [0,0,1,0,0,0,0,0,0,0]
    elif dis == 3:
        ret = [0,0,0,1,0,0,0,0,0,0]
    elif dis == 4:
        ret = [0,0,0,0,1,0,0,0,0,0]
    elif dis > 4 and dis < 8:
        ret = [0,0,0,0,0,1,0,0,0,0]
    elif dis > 7 and dis < 16:
        ret = [0,0,0,0,0,0,1,0,0,0]
    elif dis > 15 and dis < 32:
        ret = [0,0,0,0,0,0,0,1,0,0]
    elif dis > 31 and dis < 64:
        ret = [0,0,0,0,0,0,0,0,1,0]
    elif dis > 63:
        ret = [0,0,0,0,0,0,0,0,0,1]
    return ret
def getSpeaker(aSpeaker, mSpeaker):
    # [1] if antecedent and mention share a speaker, else [0].
    if (aSpeaker == mSpeaker):
        return [1]
    else:
        return [0]
def stringMatch(a, m):
    # [1] on exact string equality of the two mention strings, else [0].
    if (a == m):
        return [1]
    else:
        return [0]
def getLength(mention):
    # One-hot bucket (10 bins) of the mention's word count:
    # 0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+.
    length = len(mention.split())
    if length == 0:
        ret = [1,0,0,0,0,0,0,0,0,0]
    elif length == 1:
        ret = [0,1,0,0,0,0,0,0,0,0]
    elif length == 2:
        ret = [0,0,1,0,0,0,0,0,0,0]
    elif length == 3:
        ret = [0,0,0,1,0,0,0,0,0,0]
    elif length == 4:
        ret = [0,0,0,0,1,0,0,0,0,0]
    elif length > 4 and length < 8:
        ret = [0,0,0,0,0,1,0,0,0,0]
    elif length > 7 and length < 16:
        ret = [0,0,0,0,0,0,1,0,0,0]
    elif length > 15 and length < 32:
        ret = [0,0,0,0,0,0,0,1,0,0]
    elif length > 31 and length < 64:
        ret = [0,0,0,0,0,0,0,0,1,0]
    elif length > 63:
        ret = [0,0,0,0,0,0,0,0,0,1]
    return ret
def getPosition(mpos,total):
    # Mention index normalized by the total mention count, as a 1-elem list.
    return [float(mpos)/float(total)]
def particalMatch(a, m):
awords = a.split()
mwords = m.split()
pMatch = 0
for a in awords:
for m in mwords:
if (a == m):
| nre):
print('begin')
vector = []
vectors = []
labels = []
costs = []
antecedents = ['NA']
total = len(mentions)
print(total)
Wembed = Wembedave[0]
ave_all = Wembedave[1]
for m in mentions:
for a in antecedents:
if a == 'NA':
tmp = [0 for i in range(512)]
tmp.extend(getEmbeddingFeature(m[6],m[0],words,Wembed,ave_all))
for i in range(36):
tmp.append(0)
vectors.append(tmp)
labels.append([0.])
continue
elif(m[4] == a[4]):
labels.append([1.])
else:
labels.append([0.])
vector.extend(getEmbeddingFeature(a[6],a[0],words,Wembed,ave_all))
vector.extend(getPosition(a[0],total))
vector.extend(getInclude(a[1]))
vector.extend(getLength(m[6]))
vector.extend(getEmbeddingFeature(m[6],m[0],words,Wembed,ave_all))
vector.extend(getPosition(m[0],total))
vector.extend(getInclude(a[1]))
vector.extend(getLength(a[6]))
vector.extend(getGenreFeature(genre))
vector.extend(getDistance(a[0],m[0]))
vector.extend(getDistance(a[3],m[3]))
vector.extend(getSpeaker(words[a[0]][2],words[m[0]][2]))
vector.extend(stringMatch(a[6],m[6]))
vector.extend(particalMatch(a[6],m[6]))
if len(vector) != 1048:
exit()
vectors.append(vector)
vector = npvec = []
antecedents.append(m)
return vectors, labels
def placeholder_inputs():
    # TF1-style graph inputs: a batch of 1048-dim pair-feature vectors and
    # their 0/1 coreference labels with shape [batch, 1].
    input_placeholder = tf.placeholder(tf.float32,shape=[None,1048])
    label_placeholder = tf.placeholder(tf.float32,shape=[None,1])
    return input_placeholder,label_placeholder
def mention_encoder(x):
    # Three affine layers 1048 -> 1000 -> 500 -> 500 with l2-normalized
    # weight columns; returns the final representation plus the FIRST
    # layer's weight and bias (used for inspection by the caller).
    # NOTE(review): no activation between layers, so the stack collapses to
    # a single linear map -- confirm that is intended.
    #hidden1
    with tf.name_scope(u'hidden_layer1') as scope:
        weight1 = tf.nn.l2_normalize(tf.Variable(tf.truncated_normal([1048,1000],stddev=0.2)),0)
        bias1 = tf.Variable(tf.zeros([1000]))
        hidden1 = tf.matmul(x,weight1) + bias1
    #hidden2
    with tf.name_scope(u'hidden_layer2') as scope:
        weight2 = tf.nn.l2_normalize(tf.Variable(tf.truncated_normal([1000,500], stddev=0.2)),0)
        bias2 = tf.Variable(tf.zeros([500]))
        hidden2 = tf.matmul(hidden1,weight2) + bias2
    #representation
    with tf.name_scope(u'representaton') as scope:
        weight3 = tf.nn.l2_normalize(tf.Variable(tf.truncated_normal([500,500], stddev=0.2)),0)
        bias3 = tf.Variable(tf.zeros([500]))
        representation = tf.matmul(hidden2,weight3) + bias3
    return representation,weight1,bias1
def scoring_func(rep):
    # Single affine output head: 500-d representation -> scalar pair score.
    with tf.name_scope(u'scoring') as scope:
        weight_s = tf.Variable(tf.truncated_normal([500,1], stddev=0.2))
        bias_s = tf.Variable(tf.zeros([1]))
        score = tf.matmul(rep, weight_s) + bias_s
    return score
def prob_func(score):
    # Squash the raw pair score into a (0, 1) coreference probability.
    return tf.nn.sigmoid(score)
def pre_loss_func(prob,y):
    # Mean binary cross-entropy between predicted probs and 0/1 labels.
    # NOTE(review): log(0) yields NaN when the sigmoid saturates; consider
    # clipping prob away from exactly 0 and 1.
    return -tf.reduce_mean(y*tf.log(prob) + (1-y)*tf.log(1-prob))
def fill_feed_dict(vectors, labels, input_pl, label_pl, batch_size=None):
    """Sample a random mini-batch and map it onto the two placeholders.

    Fix: the original read `len(vector)` -- an undefined name -- instead of
    `len(vectors)`, so every call raised NameError.
    batch_size defaults to the module-level BATCH_SIZE to stay
    backward-compatible with existing callers.
    Returns {input_pl: batch_vectors, label_pl: batch_labels}.
    """
    if batch_size is None:
        batch_size = BATCH_SIZE
    len_v = len(vectors)
    random.seed(time.time())
    rand = [n for n in range(len_v)]
    random.shuffle(rand)
    # Draw min(len_v, batch_size) examples without replacement.
    take = batch_size if len_v >= batch_size else len_v
    b_vectors = np.array([vectors[rand[x]] for x in range(take)], dtype=np.float32)
    b_labels = np.array([labels[rand[x]] for x in range(take)], dtype=np.float32)
    feed_dict = {
        input_pl: b_vectors,
        label_pl: b_labels
    }
    return feed_dict
def make_allvector(data, genre):
    """Concatenate feature vectors and labels across every document.

    data: list of [coref path, conll path] pairs; genre: parallel genre codes.
    Fix: the original returned `allllabel` (typo, four l's) -- an undefined
    name -- instead of the `alllabel` accumulator, raising NameError.
    """
    allvector = []
    alllabel = []
    for d, g in zip(data, genre):
        print(d)
        mentions, words = file_to_List(d)
        Wembed = getWordEmbedding(words)
        vectors, labels = getVectors(mentions, words, Wembed, g)
        allvector.extend(vectors)
        alllabel.extend(labels)
    return allvector, alllabel
if __name__ == '__main__':
    # Pretraining driver: build pair vectors from the CoNLL-2012 training
    # split, then run 200 SGD steps of the pairwise coreference scorer.
    path = '/home/kadomae.13029/conll2012/conll-2012/v4/data/train/data/english/annotations'
    dev_path = '/home/kadomae.13029/conll2012/conll-2012/v4/data/development/data/english/annotations'
    data,genre = load_dir(path)
    allvector, alllabel = make_allvector(data,genre)
    print('-----pretraining start-----')
    input_vector, label = placeholder_inputs()
    # NOTE(review): the third return of mention_encoder is the first layer's
    # *bias*, so the name b2 is misleading (b1 would match the encoder).
    rep,w1,b2 = mention_encoder(input_vector)
    score = scoring_func(rep)
    prob = prob_func(score)
    loss = pre_loss_func(prob,label)
    train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss)
    sess = tf.Session()
    init = tf.initialize_all_variables()
    sess.run(init)
    for i in range(200):
        # One freshly shuffled mini-batch per step.
        feed_dict = fill_feed_dict(allvector,alllabel,input_vector,label)
        _, l = sess.run([train_step, loss], feed_dict=feed_dict)
        print(l)
        print(time.time()-starttime)
    print('-----pretraining finish-----')
| pMatch = 1
break
return [pMatch]
def getInclude(include):
if include:
return [1]
else:
return [0]
def getVectors(mentions,words,Wembedave,ge | identifier_body |
avito-lightgbm-with-ridge-feature-1.py | #Initially forked from Bojan's kernel here: https://www.kaggle.com/tunguz/bow-meta-text-and-dense-features-lb-0-2242/code
#improvement using kernel from Nick Brook's kernel here: https://www.kaggle.com/nicapotato/bow-meta-text-and-dense-features-lgbm
#Used oof method from Faron's kernel here: https://www.kaggle.com/mmueller/stacking-starter?scriptVersionId=390867
#Used some text cleaning method from Muhammad Alfiansyah's kernel here: https://www.kaggle.com/muhammadalfiansyah/push-the-lgbm-v19
import time
notebookstart= time.time()
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import gc
print("Data:\n",os.listdir("../input"))
# Models Packages
from sklearn import metrics
from sklearn.metrics import mean_squared_error
from sklearn import feature_selection
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
# Gradient Boosting
import lightgbm as lgb
from sklearn.linear_model import Ridge
from sklearn.cross_validation import KFold
# Tf-Idf
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.pipeline import FeatureUnion
from scipy.sparse import hstack, csr_matrix
from nltk.corpus import stopwords
# Viz
import seaborn as sns
import matplotlib.pyplot as plt
import re
import string
NFOLDS = 5
SEED = 42
VALID = True
class SklearnWrapper(object):
    """Thin adapter giving any sklearn-style estimator a train/predict API.

    Fix: the constructor used to write 'random_state' straight into the
    caller's params dict (a visible side effect on the caller); it now works
    on a copy.  It also tolerates params=None instead of crashing on
    clf(**None).
    """

    def __init__(self, clf, seed=0, params=None, seed_bool=True):
        # Copy so the caller's dict is never mutated.
        params = dict(params) if params else {}
        if seed_bool == True:
            params['random_state'] = seed
        self.clf = clf(**params)

    def train(self, x_train, y_train):
        """Fit the wrapped estimator."""
        self.clf.fit(x_train, y_train)

    def predict(self, x):
        """Return the wrapped estimator's predictions for x."""
        return self.clf.predict(x)
def get_oof(clf, x_train, y, x_test):
    """Out-of-fold predictions (Faron's stacking helper).

    Fix: the per-fold loop body was missing (the line after `enumerate(kf)`
    was empty), so the function did nothing but return zeros; restored the
    standard fit-on-train-folds / predict-held-out-fold loop.
    Uses the module-level ntrain/ntest/NFOLDS/kf.
    Returns (oof_train, oof_test) as column vectors; oof_test is the mean
    of the per-fold test predictions.
    """
    oof_train = np.zeros((ntrain,))
    oof_test = np.zeros((ntest,))
    oof_test_skf = np.empty((NFOLDS, ntest))
    for i, (train_index, test_index) in enumerate(kf):
        print('\nFold {}'.format(i))
        x_tr = x_train[train_index]
        y_tr = y[train_index]
        x_te = x_train[test_index]
        clf.train(x_tr, y_tr)
        # Held-out-fold predictions fill the OOF train vector...
        oof_train[test_index] = clf.predict(x_te)
        # ...and each fold's model also predicts the full test set.
        oof_test_skf[i, :] = clf.predict(x_test)
    oof_test[:] = oof_test_skf.mean(axis=0)
    return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)
def cleanName(text):
    """Lowercase, strip decorative punctuation and collapse whitespace.

    Non-string input (e.g. NaN, None) yields the sentinel "name error".
    """
    try:
        cleaned = text.lower()
        cleaned = re.sub('[!@#$_“”¨«»®´·º½¾¿¡§£₤‘’]', '', cleaned)
        return " ".join(cleaned.split())
    except:
        # bare except kept: mirrors the original's catch-everything fallback
        return "name error"
def rmse(y, y0):
    """Root-mean-squared error between two equal-length sequences.

    Generalized to accept plain Python sequences as well as ndarrays
    (the original's `y - y0` only worked on array-likes); ndarray inputs
    behave exactly as before.
    """
    assert len(y) == len(y0)
    y = np.asarray(y)
    y0 = np.asarray(y0)
    return np.sqrt(np.mean(np.power((y - y0), 2)))
# --- Data load: train/test indexed by item_id, target split off ---
print("\nData Load Stage")
training = pd.read_csv('../input/train.csv', index_col = "item_id", parse_dates = ["activation_date"])
traindex = training.index
testing = pd.read_csv('../input/test.csv', index_col = "item_id", parse_dates = ["activation_date"])
testdex = testing.index
ntrain = training.shape[0]
ntest = testing.shape[0]
kf = KFold(ntrain, n_folds=NFOLDS, shuffle=True, random_state=SEED)
y = training.deal_probability.copy()
training.drop("deal_probability",axis=1, inplace=True)
print('Train shape: {} Rows, {} Columns'.format(*training.shape))
print('Test shape: {} Rows, {} Columns'.format(*testing.shape))
print("Combine Train and Test")
df = pd.concat([training,testing],axis=0)
del training, testing
gc.collect()
print('\nAll Data shape: {} Rows, {} Columns'.format(*df.shape))
# --- Basic feature engineering on the combined frame ---
print("Feature Engineering")
# log-price first, then fill missing with the mean of the *logged* prices
df["price"] = np.log(df["price"]+0.001)
df["price"].fillna(df.price.mean(),inplace=True)
df["image_top_1"].fillna(-999,inplace=True)
print("\nCreate Time Variables")
df["Weekday"] = df['activation_date'].dt.weekday
df["Weekd of Year"] = df['activation_date'].dt.week
df["Day of Month"] = df['activation_date'].dt.day
# Create Validation Index and Remove Dead Variables
training_index = df.loc[df.activation_date<=pd.to_datetime('2017-04-07')].index
validation_index = df.loc[df.activation_date>=pd.to_datetime('2017-04-08')].index
df.drop(["activation_date","image"],axis=1,inplace=True)
# --- Label-encode the categoricals ---
print("\nEncode Variables")
categorical = ["user_id","region","city","parent_category_name","category_name","user_type","image_top_1","param_1","param_2","param_3"]
print("Encoding :",categorical)
# Encoder:
lbl = preprocessing.LabelEncoder()
for col in categorical:
    # NOTE(review): fillna without inplace/assignment is a no-op here; the
    # NaNs are actually handled by astype(str) on the next line.
    df[col].fillna('Unknown')
    df[col] = lbl.fit_transform(df[col].astype(str))
# --- Meta text features + TF-IDF / bag-of-words vectorization ---
print("\nText Features")
# Feature Engineering
# Meta Text Features
textfeats = ["description", "title"]
# punctuation count computed BEFORE cleanName strips some of it
df['desc_punc'] = df['description'].apply(lambda x: len([c for c in str(x) if c in string.punctuation]))
df['title'] = df['title'].apply(lambda x: cleanName(x))
df["description"] = df["description"].apply(lambda x: cleanName(x))
for cols in textfeats:
    df[cols] = df[cols].astype(str)
    df[cols] = df[cols].astype(str).fillna('missing') # FILL NA
    df[cols] = df[cols].str.lower() # Lowercase all text, so that capitalized words dont get treated differently
    df[cols + '_num_words'] = df[cols].apply(lambda comment: len(comment.split())) # Count number of Words
    df[cols + '_num_unique_words'] = df[cols].apply(lambda comment: len(set(w for w in comment.split())))
    df[cols + '_words_vs_unique'] = df[cols+'_num_unique_words'] / df[cols+'_num_words'] * 100 # Count Unique Words
print("\n[TF-IDF] Term Frequency Inverse Document Frequency Stage")
russian_stop = set(stopwords.words('russian'))
tfidf_para = {
    "stop_words": russian_stop,
    "analyzer": 'word',
    "token_pattern": r'\w{1,}',
    "sublinear_tf": True,
    "dtype": np.float32,
    "norm": 'l2',
    #"min_df":5,
    #"max_df":.9,
    "smooth_idf":False
}
# record accessor for FeatureUnion's per-field preprocessors
def get_col(col_name): return lambda x: x[col_name]
##I added to the max_features of the description. It did not change my score much but it may be worth investigating
vectorizer = FeatureUnion([
    ('description',TfidfVectorizer(
        ngram_range=(1, 2),
        max_features=17000,
        **tfidf_para,
        preprocessor=get_col('description'))),
    ('title',CountVectorizer(
        ngram_range=(1, 2),
        stop_words = russian_stop,
        #max_features=7000,
        preprocessor=get_col('title')))
])
start_vect=time.time()
#Fit my vectorizer on the entire dataset instead of the training rows
#Score improved by .0001
vectorizer.fit(df.to_dict('records'))
ready_df = vectorizer.transform(df.to_dict('records'))
tfvocab = vectorizer.get_feature_names()
print("Vectorization Runtime: %0.2f Minutes"%((time.time() - start_vect)/60))
# Drop Text Cols
textfeats = ["description", "title"]
df.drop(textfeats, axis=1,inplace=True)
# --- Ridge out-of-fold meta-feature, then dense + sparse matrix assembly ---
from sklearn.metrics import mean_squared_error
from math import sqrt
ridge_params = {'alpha':20.0, 'fit_intercept':True, 'normalize':False, 'copy_X':True,
                'max_iter':None, 'tol':0.001, 'solver':'auto', 'random_state':SEED}
#Ridge oof method from Faron's kernel
#I was using this to analyze my vectorization, but figured it would be interesting to add the results back into the dataset
#It doesn't really add much to the score, but it does help lightgbm converge faster
ridge = SklearnWrapper(clf=Ridge, seed = SEED, params = ridge_params)
ridge_oof_train, ridge_oof_test = get_oof(ridge, ready_df[:ntrain], y, ready_df[ntrain:])
rms = sqrt(mean_squared_error(y, ridge_oof_train))
print('Ridge OOF RMSE: {}'.format(rms))
print("Modeling Stage")
# ridge predictions become one extra dense column for LightGBM
ridge_preds = np.concatenate([ridge_oof_train, ridge_oof_test])
df['ridge_preds'] = ridge_preds
# Combine Dense Features with Sparse Text Bag of Words Features
X = hstack([csr_matrix(df.loc[traindex,:].values),ready_df[0:traindex.shape[0]]]) # Sparse Matrix
testing = hstack([csr_matrix(df.loc[testdex,:].values),ready_df[traindex.shape[0]:]])
tfvocab = df.columns.tolist() + tfvocab
for shape in [X,testing]:
    print("{} Rows and {} Cols".format(*shape.shape))
print("Feature Names Length: ",len(tfvocab))
del df
gc.collect();
print("\nModeling Stage")
# NOTE(review): this split is repeated inside the VALID branch below, so
# this first call is redundant work -- confirm before removing either one.
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.10, random_state=23)
del ridge_preds,vectorizer,ready_df
gc.collect();
# --- LightGBM training (with or without a holdout) and submission ---
print("Light Gradient Boosting Regressor")
lgbm_params = {
    'task': 'train',
    'boosting_type': 'gbdt',
    'objective': 'regression',
    'metric': 'rmse',
    # 'max_depth': 15,
    'num_leaves': 270,
    'feature_fraction': 0.5,
    'bagging_fraction': 0.75,
    # 'bagging_freq': 5,
    'learning_rate': 0.018,
    'verbose': 0
}
if VALID == True:
    # 90/10 holdout with early stopping on the validation RMSE.
    X_train, X_valid, y_train, y_valid = train_test_split(
        X, y, test_size=0.10, random_state=23)
    # LGBM Dataset Formatting
    lgtrain = lgb.Dataset(X_train, y_train,
                    feature_name=tfvocab,
                    categorical_feature = categorical)
    lgvalid = lgb.Dataset(X_valid, y_valid,
                    feature_name=tfvocab,
                    categorical_feature = categorical)
    del X, X_train; gc.collect()
    # Go Go Go
    lgb_clf = lgb.train(
        lgbm_params,
        lgtrain,
        num_boost_round=20000,
        valid_sets=[lgtrain, lgvalid],
        valid_names=['train','valid'],
        early_stopping_rounds=50,
        verbose_eval=100
    )
    print("Model Evaluation Stage")
    print('RMSE:', np.sqrt(metrics.mean_squared_error(y_valid, lgb_clf.predict(X_valid))))
    del X_valid ; gc.collect()
else:
    # Train on everything with a fixed round count (no early stopping).
    # LGBM Dataset Formatting
    lgtrain = lgb.Dataset(X, y,
                    feature_name=tfvocab,
                    categorical_feature = categorical)
    del X; gc.collect()
    # Go Go Go
    lgb_clf = lgb.train(
        lgbm_params,
        lgtrain,
        num_boost_round=1380,
        verbose_eval=100
    )
# Feature Importance Plot
f, ax = plt.subplots(figsize=[7,10])
lgb.plot_importance(lgb_clf, max_num_features=50, ax=ax)
plt.title("Light GBM Feature Importance")
plt.savefig('feature_import.png')
print("Model Evaluation Stage")
lgpred = lgb_clf.predict(testing)
#Mixing lightgbm with ridge. I haven't really tested if this improves the score or not
#blend = 0.95*lgpred + 0.05*ridge_oof_test[:,0]
lgsub = pd.DataFrame(lgpred,columns=["deal_probability"],index=testdex)
lgsub['deal_probability'].clip(0.0, 1.0, inplace=True) # Between 0 and 1
lgsub.to_csv("lgsub.csv",index=True,header=True)
#print("Model Runtime: %0.2f Minutes"%((time.time() - modelstart)/60))
print("Notebook Runtime: %0.2f Minutes"%((time.time() - notebookstart)/60)) | print('\nFold {}'.format(i))
x_tr = x_train[train_index]
y_tr = y[train_index]
x_te = x_train[test_index]
clf.train(x_tr, y_tr)
oof_train[test_index] = clf.predict(x_te)
oof_test_skf[i, :] = clf.predict(x_test) | conditional_block |
avito-lightgbm-with-ridge-feature-1.py | #Initially forked from Bojan's kernel here: https://www.kaggle.com/tunguz/bow-meta-text-and-dense-features-lb-0-2242/code
#improvement using kernel from Nick Brook's kernel here: https://www.kaggle.com/nicapotato/bow-meta-text-and-dense-features-lgbm
#Used oof method from Faron's kernel here: https://www.kaggle.com/mmueller/stacking-starter?scriptVersionId=390867
#Used some text cleaning method from Muhammad Alfiansyah's kernel here: https://www.kaggle.com/muhammadalfiansyah/push-the-lgbm-v19
import time
notebookstart= time.time()
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import gc
print("Data:\n",os.listdir("../input"))
# Models Packages
from sklearn import metrics
from sklearn.metrics import mean_squared_error
from sklearn import feature_selection
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
# Gradient Boosting
import lightgbm as lgb
from sklearn.linear_model import Ridge
from sklearn.cross_validation import KFold
# Tf-Idf
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.pipeline import FeatureUnion
from scipy.sparse import hstack, csr_matrix
from nltk.corpus import stopwords
# Viz
import seaborn as sns
import matplotlib.pyplot as plt
import re
import string
NFOLDS = 5
SEED = 42
VALID = True
class SklearnWrapper(object):
    # Minimal train/predict adapter around an sklearn-style estimator class.
    def __init__(self, clf, seed=0, params=None, seed_bool = True):
        # NOTE(review): this writes 'random_state' into the CALLER's params
        # dict (visible side effect) and crashes with TypeError when
        # params is None -- consider copying the dict.
        if(seed_bool == True):
            params['random_state'] = seed
        self.clf = clf(**params)
    def train(self, x_train, y_train):
        # Fit the wrapped estimator.
        self.clf.fit(x_train, y_train)
    def predict(self, x):
        # Return the wrapped estimator's predictions for x.
        return self.clf.predict(x)
def get_oof(clf, x_train, y, x_test):
    """Out-of-fold train predictions + fold-averaged test predictions.

    Fits one model per fold of the module-level `kf`; each held-out fold's
    predictions fill the OOF train vector, and every fold's model also
    predicts the full test set.  Returns both as (n, 1) column vectors.
    """
    train_preds = np.zeros((ntrain,))
    fold_test_preds = np.empty((NFOLDS, ntest))
    for fold, (fit_idx, hold_idx) in enumerate(kf):
        print('\nFold {}'.format(fold))
        clf.train(x_train[fit_idx], y[fit_idx])
        train_preds[hold_idx] = clf.predict(x_train[hold_idx])
        fold_test_preds[fold, :] = clf.predict(x_test)
    test_preds = np.zeros((ntest,))
    test_preds[:] = fold_test_preds.mean(axis=0)
    return train_preds.reshape(-1, 1), test_preds.reshape(-1, 1)
def cleanName(text):
    # Lowercase, strip decorative punctuation/symbols and collapse runs of
    # whitespace.  Non-string input (e.g. NaN) falls through to the bare
    # except and yields the sentinel "name error".
    try:
        textProc = text.lower()
        # textProc = " ".join(map(str.strip, re.split('(\d+)',textProc)))
        #regex = re.compile(u'[^[:alpha:]]')
        #textProc = regex.sub(" ", textProc)
        textProc = re.sub('[!@#$_“”¨«»®´·º½¾¿¡§£₤‘’]', '', textProc)
        textProc = " ".join(textProc.split())
        return textProc
    except:
        return "name error"
def rmse(y, y0):
    """Root-mean-squared error between two equal-length arrays."""
    assert len(y) == len(y0)
    squared_err = np.power(y - y0, 2)
    return np.sqrt(np.mean(squared_err))
print("\nData Load Stage")
training = pd.read_csv('../input/train.csv', index_col = "item_id", parse_dates = ["activation_date"])
traindex = training.index
testing = pd.read_csv('../input/test.csv', index_col = "item_id", parse_dates = ["activation_date"])
testdex = testing.index
ntrain = training.shape[0]
ntest = testing.shape[0]
kf = KFold(ntrain, n_folds=NFOLDS, shuffle=True, random_state=SEED)
y = training.deal_probability.copy()
training.drop("deal_probability",axis=1, inplace=True)
print('Train shape: {} Rows, {} Columns'.format(*training.shape))
print('Test shape: {} Rows, {} Columns'.format(*testing.shape))
print("Combine Train and Test")
df = pd.concat([training,testing],axis=0)
del training, testing
gc.collect()
print('\nAll Data shape: {} Rows, {} Columns'.format(*df.shape))
print("Feature Engineering")
df["price"] = np.log(df["price"]+0.001)
df["price"].fillna(df.price.mean(),inplace=True)
df["image_top_1"].fillna(-999,inplace=True)
print("\nCreate Time Variables")
df["Weekday"] = df['activation_date'].dt.weekday
df["Weekd of Year"] = df['activation_date'].dt.week
df["Day of Month"] = df['activation_date'].dt.day
# Create Validation Index and Remove Dead Variables
training_index = df.loc[df.activation_date<=pd.to_datetime('2017-04-07')].index
validation_index = df.loc[df.activation_date>=pd.to_datetime('2017-04-08')].index
df.drop(["activation_date","image"],axis=1,inplace=True)
print("\nEncode Variables")
| lbl = preprocessing.LabelEncoder()
for col in categorical:
df[col].fillna('Unknown')
df[col] = lbl.fit_transform(df[col].astype(str))
print("\nText Features")
# Feature Engineering
# Meta Text Features
textfeats = ["description", "title"]
df['desc_punc'] = df['description'].apply(lambda x: len([c for c in str(x) if c in string.punctuation]))
df['title'] = df['title'].apply(lambda x: cleanName(x))
df["description"] = df["description"].apply(lambda x: cleanName(x))
for cols in textfeats:
df[cols] = df[cols].astype(str)
df[cols] = df[cols].astype(str).fillna('missing') # FILL NA
df[cols] = df[cols].str.lower() # Lowercase all text, so that capitalized words dont get treated differently
df[cols + '_num_words'] = df[cols].apply(lambda comment: len(comment.split())) # Count number of Words
df[cols + '_num_unique_words'] = df[cols].apply(lambda comment: len(set(w for w in comment.split())))
df[cols + '_words_vs_unique'] = df[cols+'_num_unique_words'] / df[cols+'_num_words'] * 100 # Count Unique Words
print("\n[TF-IDF] Term Frequency Inverse Document Frequency Stage")
russian_stop = set(stopwords.words('russian'))
tfidf_para = {
"stop_words": russian_stop,
"analyzer": 'word',
"token_pattern": r'\w{1,}',
"sublinear_tf": True,
"dtype": np.float32,
"norm": 'l2',
#"min_df":5,
#"max_df":.9,
"smooth_idf":False
}
def get_col(col_name): return lambda x: x[col_name]
##I added to the max_features of the description. It did not change my score much but it may be worth investigating
vectorizer = FeatureUnion([
('description',TfidfVectorizer(
ngram_range=(1, 2),
max_features=17000,
**tfidf_para,
preprocessor=get_col('description'))),
('title',CountVectorizer(
ngram_range=(1, 2),
stop_words = russian_stop,
#max_features=7000,
preprocessor=get_col('title')))
])
start_vect=time.time()
#Fit my vectorizer on the entire dataset instead of the training rows
#Score improved by .0001
vectorizer.fit(df.to_dict('records'))
ready_df = vectorizer.transform(df.to_dict('records'))
tfvocab = vectorizer.get_feature_names()
print("Vectorization Runtime: %0.2f Minutes"%((time.time() - start_vect)/60))
# Drop Text Cols
textfeats = ["description", "title"]
df.drop(textfeats, axis=1,inplace=True)
from sklearn.metrics import mean_squared_error
from math import sqrt
ridge_params = {'alpha':20.0, 'fit_intercept':True, 'normalize':False, 'copy_X':True,
'max_iter':None, 'tol':0.001, 'solver':'auto', 'random_state':SEED}
#Ridge oof method from Faron's kernel
#I was using this to analyze my vectorization, but figured it would be interesting to add the results back into the dataset
#It doesn't really add much to the score, but it does help lightgbm converge faster
ridge = SklearnWrapper(clf=Ridge, seed = SEED, params = ridge_params)
ridge_oof_train, ridge_oof_test = get_oof(ridge, ready_df[:ntrain], y, ready_df[ntrain:])
rms = sqrt(mean_squared_error(y, ridge_oof_train))
print('Ridge OOF RMSE: {}'.format(rms))
print("Modeling Stage")
ridge_preds = np.concatenate([ridge_oof_train, ridge_oof_test])
df['ridge_preds'] = ridge_preds
# Combine Dense Features with Sparse Text Bag of Words Features
X = hstack([csr_matrix(df.loc[traindex,:].values),ready_df[0:traindex.shape[0]]]) # Sparse Matrix
testing = hstack([csr_matrix(df.loc[testdex,:].values),ready_df[traindex.shape[0]:]])
tfvocab = df.columns.tolist() + tfvocab
for shape in [X,testing]:
print("{} Rows and {} Cols".format(*shape.shape))
print("Feature Names Length: ",len(tfvocab))
del df
gc.collect();
print("\nModeling Stage")
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.10, random_state=23)
del ridge_preds,vectorizer,ready_df
gc.collect();
print("Light Gradient Boosting Regressor")
lgbm_params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': 'rmse',
# 'max_depth': 15,
'num_leaves': 270,
'feature_fraction': 0.5,
'bagging_fraction': 0.75,
# 'bagging_freq': 5,
'learning_rate': 0.018,
'verbose': 0
}
if VALID == True:
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, test_size=0.10, random_state=23)
# LGBM Dataset Formatting
lgtrain = lgb.Dataset(X_train, y_train,
feature_name=tfvocab,
categorical_feature = categorical)
lgvalid = lgb.Dataset(X_valid, y_valid,
feature_name=tfvocab,
categorical_feature = categorical)
del X, X_train; gc.collect()
# Go Go Go
lgb_clf = lgb.train(
lgbm_params,
lgtrain,
num_boost_round=20000,
valid_sets=[lgtrain, lgvalid],
valid_names=['train','valid'],
early_stopping_rounds=50,
verbose_eval=100
)
print("Model Evaluation Stage")
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_valid, lgb_clf.predict(X_valid))))
del X_valid ; gc.collect()
else:
# LGBM Dataset Formatting
lgtrain = lgb.Dataset(X, y,
feature_name=tfvocab,
categorical_feature = categorical)
del X; gc.collect()
# Go Go Go
lgb_clf = lgb.train(
lgbm_params,
lgtrain,
num_boost_round=1380,
verbose_eval=100
)
# Feature Importance Plot
f, ax = plt.subplots(figsize=[7,10])
lgb.plot_importance(lgb_clf, max_num_features=50, ax=ax)
plt.title("Light GBM Feature Importance")
plt.savefig('feature_import.png')
print("Model Evaluation Stage")
lgpred = lgb_clf.predict(testing)
#Mixing lightgbm with ridge. I haven't really tested if this improves the score or not
#blend = 0.95*lgpred + 0.05*ridge_oof_test[:,0]
lgsub = pd.DataFrame(lgpred,columns=["deal_probability"],index=testdex)
lgsub['deal_probability'].clip(0.0, 1.0, inplace=True) # Between 0 and 1
lgsub.to_csv("lgsub.csv",index=True,header=True)
#print("Model Runtime: %0.2f Minutes"%((time.time() - modelstart)/60))
print("Notebook Runtime: %0.2f Minutes"%((time.time() - notebookstart)/60)) | categorical = ["user_id","region","city","parent_category_name","category_name","user_type","image_top_1","param_1","param_2","param_3"]
print("Encoding :",categorical)
# Encoder:
| random_line_split |
avito-lightgbm-with-ridge-feature-1.py | #Initially forked from Bojan's kernel here: https://www.kaggle.com/tunguz/bow-meta-text-and-dense-features-lb-0-2242/code
#improvement using kernel from Nick Brook's kernel here: https://www.kaggle.com/nicapotato/bow-meta-text-and-dense-features-lgbm
#Used oof method from Faron's kernel here: https://www.kaggle.com/mmueller/stacking-starter?scriptVersionId=390867
#Used some text cleaning method from Muhammad Alfiansyah's kernel here: https://www.kaggle.com/muhammadalfiansyah/push-the-lgbm-v19
import time
notebookstart= time.time()
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import gc
print("Data:\n",os.listdir("../input"))
# Models Packages
from sklearn import metrics
from sklearn.metrics import mean_squared_error
from sklearn import feature_selection
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
# Gradient Boosting
import lightgbm as lgb
from sklearn.linear_model import Ridge
from sklearn.cross_validation import KFold
# Tf-Idf
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.pipeline import FeatureUnion
from scipy.sparse import hstack, csr_matrix
from nltk.corpus import stopwords
# Viz
import seaborn as sns
import matplotlib.pyplot as plt
import re
import string
NFOLDS = 5
SEED = 42
VALID = True
class SklearnWrapper(object):
def __init__(self, clf, seed=0, params=None, seed_bool = True):
if(seed_bool == True):
params['random_state'] = seed
self.clf = clf(**params)
def train(self, x_train, y_train):
self.clf.fit(x_train, y_train)
def predict(self, x):
return self.clf.predict(x)
def get_oof(clf, x_train, y, x_test):
oof_train = np.zeros((ntrain,))
oof_test = np.zeros((ntest,))
oof_test_skf = np.empty((NFOLDS, ntest))
for i, (train_index, test_index) in enumerate(kf):
print('\nFold {}'.format(i))
x_tr = x_train[train_index]
y_tr = y[train_index]
x_te = x_train[test_index]
clf.train(x_tr, y_tr)
oof_train[test_index] = clf.predict(x_te)
oof_test_skf[i, :] = clf.predict(x_test)
oof_test[:] = oof_test_skf.mean(axis=0)
return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)
def cleanName(text):
try:
textProc = text.lower()
# textProc = " ".join(map(str.strip, re.split('(\d+)',textProc)))
#regex = re.compile(u'[^[:alpha:]]')
#textProc = regex.sub(" ", textProc)
textProc = re.sub('[!@#$_“”¨«»®´·º½¾¿¡§£₤‘’]', '', textProc)
textProc = " ".join(textProc.split())
return textProc
except:
return "name error"
def rmse(y, y0):
assert len(y) == len(y0)
return np.sqrt(np.mean(np.power((y - y0), 2)))
print("\nData Load Stage")
training = pd.read_csv('../input/train.csv', index_col = "item_id", parse_dates = ["activation_date"])
traindex = training.index
testing = pd.read_csv('../input/test.csv', index_col = "item_id", parse_dates = ["activation_date"])
testdex = testing.index
ntrain = training.shape[0]
ntest = testing.shape[0]
kf = KFold(ntrain, n_folds=NFOLDS, shuffle=True, random_state=SEED)
y = training.deal_probability.copy()
training.drop("deal_probability",axis=1, inplace=True)
print('Train shape: {} Rows, {} Columns'.format(*training.shape))
print('Test shape: {} Rows, {} Columns'.format(*testing.shape))
print("Combine Train and Test")
df = pd.concat([training,testing],axis=0)
del training, testing
gc.collect()
print('\nAll Data shape: {} Rows, {} Columns'.format(*df.shape))
print("Feature Engineering")
df["price"] = np.log(df["price"]+0.001)
df["price"].fillna(df.price.mean(),inplace=True)
df["image_top_1"].fillna(-999,inplace=True)
print("\nCreate Time Variables")
df["Weekday"] = df['activation_date'].dt.weekday
df["Weekd of Year"] = df['activation_date'].dt.week
df["Day of Month"] = df['activation_date'].dt.day
# Create Validation Index and Remove Dead Variables
training_index = df.loc[df.activation_date<=pd.to_datetime('2017-04-07')].index
validation_index = df.loc[df.activation_date>=pd.to_datetime('2017-04-08')].index
df.drop(["activation_date","image"],axis=1,inplace=True)
print("\nEncode Variables")
categorical = ["user_id","region","city","parent_category_name","category_name","user_type","image_top_1","param_1","param_2","param_3"]
print("Encoding :",categorical)
# Encoder:
lbl = preprocessing.LabelEncoder()
for col in categorical:
df[col].fillna('Unknown')
df[col] = lbl.fit_transform(df[col].astype(str))
print("\nText Features")
# Feature Engineering
# Meta Text Features
textfeats = ["description", "title"]
df['desc_punc'] = df['description'].apply(lambda x: len([c for c in str(x) if c in string.punctuation]))
df['title'] = df['title'].apply(lambda x: cleanName(x))
df["description"] = df["description"].apply(lambda x: cleanName(x))
for cols in textfeats:
df[cols] = df[cols].astype(str)
df[cols] = df[cols].astype(str).fillna('missing') # FILL NA
df[cols] = df[cols].str.lower() # Lowercase all text, so that capitalized words dont get treated differently
df[cols + '_num_words'] = df[cols].apply(lambda comment: len(comment.split())) # Count number of Words
df[cols + '_num_unique_words'] = df[cols].apply(lambda comment: len(set(w for w in comment.split())))
df[cols + '_words_vs_unique'] = df[cols+'_num_unique_words'] / df[cols+'_num_words'] * 100 # Count Unique Words
print("\n[TF-IDF] Term Frequency Inverse Document Frequency Stage")
russian_stop = set(stopwords.words('russian'))
tfidf_para = {
"stop_words": russian_stop,
"analyzer": 'word',
"token_pattern": r'\w{1,}',
"sublinear_tf": True,
"dtype": np.float32,
"norm": 'l2',
#"min_df":5,
#"max_df":.9,
"smooth_idf":False
}
def get_col(col_name): retu | da x: x[col_name]
##I added to the max_features of the description. It did not change my score much but it may be worth investigating
vectorizer = FeatureUnion([
('description',TfidfVectorizer(
ngram_range=(1, 2),
max_features=17000,
**tfidf_para,
preprocessor=get_col('description'))),
('title',CountVectorizer(
ngram_range=(1, 2),
stop_words = russian_stop,
#max_features=7000,
preprocessor=get_col('title')))
])
start_vect=time.time()
#Fit my vectorizer on the entire dataset instead of the training rows
#Score improved by .0001
vectorizer.fit(df.to_dict('records'))
ready_df = vectorizer.transform(df.to_dict('records'))
tfvocab = vectorizer.get_feature_names()
print("Vectorization Runtime: %0.2f Minutes"%((time.time() - start_vect)/60))
# Drop Text Cols
textfeats = ["description", "title"]
df.drop(textfeats, axis=1,inplace=True)
from sklearn.metrics import mean_squared_error
from math import sqrt
ridge_params = {'alpha':20.0, 'fit_intercept':True, 'normalize':False, 'copy_X':True,
'max_iter':None, 'tol':0.001, 'solver':'auto', 'random_state':SEED}
#Ridge oof method from Faron's kernel
#I was using this to analyze my vectorization, but figured it would be interesting to add the results back into the dataset
#It doesn't really add much to the score, but it does help lightgbm converge faster
ridge = SklearnWrapper(clf=Ridge, seed = SEED, params = ridge_params)
ridge_oof_train, ridge_oof_test = get_oof(ridge, ready_df[:ntrain], y, ready_df[ntrain:])
rms = sqrt(mean_squared_error(y, ridge_oof_train))
print('Ridge OOF RMSE: {}'.format(rms))
print("Modeling Stage")
ridge_preds = np.concatenate([ridge_oof_train, ridge_oof_test])
df['ridge_preds'] = ridge_preds
# Combine Dense Features with Sparse Text Bag of Words Features
X = hstack([csr_matrix(df.loc[traindex,:].values),ready_df[0:traindex.shape[0]]]) # Sparse Matrix
testing = hstack([csr_matrix(df.loc[testdex,:].values),ready_df[traindex.shape[0]:]])
tfvocab = df.columns.tolist() + tfvocab
for shape in [X,testing]:
print("{} Rows and {} Cols".format(*shape.shape))
print("Feature Names Length: ",len(tfvocab))
del df
gc.collect();
print("\nModeling Stage")
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.10, random_state=23)
del ridge_preds,vectorizer,ready_df
gc.collect();
print("Light Gradient Boosting Regressor")
lgbm_params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': 'rmse',
# 'max_depth': 15,
'num_leaves': 270,
'feature_fraction': 0.5,
'bagging_fraction': 0.75,
# 'bagging_freq': 5,
'learning_rate': 0.018,
'verbose': 0
}
if VALID == True:
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, test_size=0.10, random_state=23)
# LGBM Dataset Formatting
lgtrain = lgb.Dataset(X_train, y_train,
feature_name=tfvocab,
categorical_feature = categorical)
lgvalid = lgb.Dataset(X_valid, y_valid,
feature_name=tfvocab,
categorical_feature = categorical)
del X, X_train; gc.collect()
# Go Go Go
lgb_clf = lgb.train(
lgbm_params,
lgtrain,
num_boost_round=20000,
valid_sets=[lgtrain, lgvalid],
valid_names=['train','valid'],
early_stopping_rounds=50,
verbose_eval=100
)
print("Model Evaluation Stage")
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_valid, lgb_clf.predict(X_valid))))
del X_valid ; gc.collect()
else:
# LGBM Dataset Formatting
lgtrain = lgb.Dataset(X, y,
feature_name=tfvocab,
categorical_feature = categorical)
del X; gc.collect()
# Go Go Go
lgb_clf = lgb.train(
lgbm_params,
lgtrain,
num_boost_round=1380,
verbose_eval=100
)
# Feature Importance Plot
f, ax = plt.subplots(figsize=[7,10])
lgb.plot_importance(lgb_clf, max_num_features=50, ax=ax)
plt.title("Light GBM Feature Importance")
plt.savefig('feature_import.png')
print("Model Evaluation Stage")
lgpred = lgb_clf.predict(testing)
#Mixing lightgbm with ridge. I haven't really tested if this improves the score or not
#blend = 0.95*lgpred + 0.05*ridge_oof_test[:,0]
lgsub = pd.DataFrame(lgpred,columns=["deal_probability"],index=testdex)
lgsub['deal_probability'].clip(0.0, 1.0, inplace=True) # Between 0 and 1
lgsub.to_csv("lgsub.csv",index=True,header=True)
#print("Model Runtime: %0.2f Minutes"%((time.time() - modelstart)/60))
print("Notebook Runtime: %0.2f Minutes"%((time.time() - notebookstart)/60)) | rn lamb | identifier_name |
avito-lightgbm-with-ridge-feature-1.py | #Initially forked from Bojan's kernel here: https://www.kaggle.com/tunguz/bow-meta-text-and-dense-features-lb-0-2242/code
#improvement using kernel from Nick Brook's kernel here: https://www.kaggle.com/nicapotato/bow-meta-text-and-dense-features-lgbm
#Used oof method from Faron's kernel here: https://www.kaggle.com/mmueller/stacking-starter?scriptVersionId=390867
#Used some text cleaning method from Muhammad Alfiansyah's kernel here: https://www.kaggle.com/muhammadalfiansyah/push-the-lgbm-v19
import time
notebookstart= time.time()
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import gc
print("Data:\n",os.listdir("../input"))
# Models Packages
from sklearn import metrics
from sklearn.metrics import mean_squared_error
from sklearn import feature_selection
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
# Gradient Boosting
import lightgbm as lgb
from sklearn.linear_model import Ridge
from sklearn.cross_validation import KFold
# Tf-Idf
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.pipeline import FeatureUnion
from scipy.sparse import hstack, csr_matrix
from nltk.corpus import stopwords
# Viz
import seaborn as sns
import matplotlib.pyplot as plt
import re
import string
NFOLDS = 5
SEED = 42
VALID = True
class SklearnWrapper(object):
def __init__(self, clf, seed=0, params=None, seed_bool = True):
if(seed_bool == True):
params['random_state'] = seed
self.clf = clf(**params)
def train(self, x_train, y_train):
self.clf.fit(x_train, y_train)
def predict(self, x):
return self.clf.predict(x)
def get_oof(clf, x_train, y, x_test):
oof_train = np.zeros((ntrain,))
oof_test = np.zeros((ntest,))
oof_test_skf = np.empty((NFOLDS, ntest))
for i, (train_index, test_index) in enumerate(kf):
print('\nFold {}'.format(i))
x_tr = x_train[train_index]
y_tr = y[train_index]
x_te = x_train[test_index]
clf.train(x_tr, y_tr)
oof_train[test_index] = clf.predict(x_te)
oof_test_skf[i, :] = clf.predict(x_test)
oof_test[:] = oof_test_skf.mean(axis=0)
return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)
def cleanName(text):
try:
textProc = text.lower()
# textProc = " ".join(map(str.strip, re.split('(\d+)',textProc)))
#regex = re.compile(u'[^[:alpha:]]')
#textProc = regex.sub(" ", textProc)
textProc = re.sub('[!@#$_“”¨«»®´·º½¾¿¡§£₤‘’]', '', textProc)
textProc = " ".join(textProc.split())
return textProc
except:
return "name error"
def rmse(y, y0):
assert len(y) == len(y0 | Stage")
training = pd.read_csv('../input/train.csv', index_col = "item_id", parse_dates = ["activation_date"])
traindex = training.index
testing = pd.read_csv('../input/test.csv', index_col = "item_id", parse_dates = ["activation_date"])
testdex = testing.index
ntrain = training.shape[0]
ntest = testing.shape[0]
kf = KFold(ntrain, n_folds=NFOLDS, shuffle=True, random_state=SEED)
y = training.deal_probability.copy()
training.drop("deal_probability",axis=1, inplace=True)
print('Train shape: {} Rows, {} Columns'.format(*training.shape))
print('Test shape: {} Rows, {} Columns'.format(*testing.shape))
print("Combine Train and Test")
df = pd.concat([training,testing],axis=0)
del training, testing
gc.collect()
print('\nAll Data shape: {} Rows, {} Columns'.format(*df.shape))
print("Feature Engineering")
df["price"] = np.log(df["price"]+0.001)
df["price"].fillna(df.price.mean(),inplace=True)
df["image_top_1"].fillna(-999,inplace=True)
print("\nCreate Time Variables")
df["Weekday"] = df['activation_date'].dt.weekday
df["Weekd of Year"] = df['activation_date'].dt.week
df["Day of Month"] = df['activation_date'].dt.day
# Create Validation Index and Remove Dead Variables
training_index = df.loc[df.activation_date<=pd.to_datetime('2017-04-07')].index
validation_index = df.loc[df.activation_date>=pd.to_datetime('2017-04-08')].index
df.drop(["activation_date","image"],axis=1,inplace=True)
print("\nEncode Variables")
categorical = ["user_id","region","city","parent_category_name","category_name","user_type","image_top_1","param_1","param_2","param_3"]
print("Encoding :",categorical)
# Encoder:
lbl = preprocessing.LabelEncoder()
for col in categorical:
df[col].fillna('Unknown')
df[col] = lbl.fit_transform(df[col].astype(str))
print("\nText Features")
# Feature Engineering
# Meta Text Features
textfeats = ["description", "title"]
df['desc_punc'] = df['description'].apply(lambda x: len([c for c in str(x) if c in string.punctuation]))
df['title'] = df['title'].apply(lambda x: cleanName(x))
df["description"] = df["description"].apply(lambda x: cleanName(x))
for cols in textfeats:
df[cols] = df[cols].astype(str)
df[cols] = df[cols].astype(str).fillna('missing') # FILL NA
df[cols] = df[cols].str.lower() # Lowercase all text, so that capitalized words dont get treated differently
df[cols + '_num_words'] = df[cols].apply(lambda comment: len(comment.split())) # Count number of Words
df[cols + '_num_unique_words'] = df[cols].apply(lambda comment: len(set(w for w in comment.split())))
df[cols + '_words_vs_unique'] = df[cols+'_num_unique_words'] / df[cols+'_num_words'] * 100 # Count Unique Words
print("\n[TF-IDF] Term Frequency Inverse Document Frequency Stage")
russian_stop = set(stopwords.words('russian'))
tfidf_para = {
"stop_words": russian_stop,
"analyzer": 'word',
"token_pattern": r'\w{1,}',
"sublinear_tf": True,
"dtype": np.float32,
"norm": 'l2',
#"min_df":5,
#"max_df":.9,
"smooth_idf":False
}
def get_col(col_name): return lambda x: x[col_name]
##I added to the max_features of the description. It did not change my score much but it may be worth investigating
vectorizer = FeatureUnion([
('description',TfidfVectorizer(
ngram_range=(1, 2),
max_features=17000,
**tfidf_para,
preprocessor=get_col('description'))),
('title',CountVectorizer(
ngram_range=(1, 2),
stop_words = russian_stop,
#max_features=7000,
preprocessor=get_col('title')))
])
start_vect=time.time()
#Fit my vectorizer on the entire dataset instead of the training rows
#Score improved by .0001
vectorizer.fit(df.to_dict('records'))
ready_df = vectorizer.transform(df.to_dict('records'))
tfvocab = vectorizer.get_feature_names()
print("Vectorization Runtime: %0.2f Minutes"%((time.time() - start_vect)/60))
# Drop Text Cols
textfeats = ["description", "title"]
df.drop(textfeats, axis=1,inplace=True)
from sklearn.metrics import mean_squared_error
from math import sqrt
ridge_params = {'alpha':20.0, 'fit_intercept':True, 'normalize':False, 'copy_X':True,
'max_iter':None, 'tol':0.001, 'solver':'auto', 'random_state':SEED}
#Ridge oof method from Faron's kernel
#I was using this to analyze my vectorization, but figured it would be interesting to add the results back into the dataset
#It doesn't really add much to the score, but it does help lightgbm converge faster
ridge = SklearnWrapper(clf=Ridge, seed = SEED, params = ridge_params)
ridge_oof_train, ridge_oof_test = get_oof(ridge, ready_df[:ntrain], y, ready_df[ntrain:])
rms = sqrt(mean_squared_error(y, ridge_oof_train))
print('Ridge OOF RMSE: {}'.format(rms))
print("Modeling Stage")
ridge_preds = np.concatenate([ridge_oof_train, ridge_oof_test])
df['ridge_preds'] = ridge_preds
# Combine Dense Features with Sparse Text Bag of Words Features
X = hstack([csr_matrix(df.loc[traindex,:].values),ready_df[0:traindex.shape[0]]]) # Sparse Matrix
testing = hstack([csr_matrix(df.loc[testdex,:].values),ready_df[traindex.shape[0]:]])
tfvocab = df.columns.tolist() + tfvocab
for shape in [X,testing]:
print("{} Rows and {} Cols".format(*shape.shape))
print("Feature Names Length: ",len(tfvocab))
del df
gc.collect();
print("\nModeling Stage")
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.10, random_state=23)
del ridge_preds,vectorizer,ready_df
gc.collect();
print("Light Gradient Boosting Regressor")
lgbm_params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': 'rmse',
# 'max_depth': 15,
'num_leaves': 270,
'feature_fraction': 0.5,
'bagging_fraction': 0.75,
# 'bagging_freq': 5,
'learning_rate': 0.018,
'verbose': 0
}
if VALID == True:
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, test_size=0.10, random_state=23)
# LGBM Dataset Formatting
lgtrain = lgb.Dataset(X_train, y_train,
feature_name=tfvocab,
categorical_feature = categorical)
lgvalid = lgb.Dataset(X_valid, y_valid,
feature_name=tfvocab,
categorical_feature = categorical)
del X, X_train; gc.collect()
# Go Go Go
lgb_clf = lgb.train(
lgbm_params,
lgtrain,
num_boost_round=20000,
valid_sets=[lgtrain, lgvalid],
valid_names=['train','valid'],
early_stopping_rounds=50,
verbose_eval=100
)
print("Model Evaluation Stage")
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_valid, lgb_clf.predict(X_valid))))
del X_valid ; gc.collect()
else:
# LGBM Dataset Formatting
lgtrain = lgb.Dataset(X, y,
feature_name=tfvocab,
categorical_feature = categorical)
del X; gc.collect()
# Go Go Go
lgb_clf = lgb.train(
lgbm_params,
lgtrain,
num_boost_round=1380,
verbose_eval=100
)
# Feature Importance Plot
f, ax = plt.subplots(figsize=[7,10])
lgb.plot_importance(lgb_clf, max_num_features=50, ax=ax)
plt.title("Light GBM Feature Importance")
plt.savefig('feature_import.png')
print("Model Evaluation Stage")
lgpred = lgb_clf.predict(testing)
#Mixing lightgbm with ridge. I haven't really tested if this improves the score or not
#blend = 0.95*lgpred + 0.05*ridge_oof_test[:,0]
lgsub = pd.DataFrame(lgpred,columns=["deal_probability"],index=testdex)
lgsub['deal_probability'].clip(0.0, 1.0, inplace=True) # Between 0 and 1
lgsub.to_csv("lgsub.csv",index=True,header=True)
#print("Model Runtime: %0.2f Minutes"%((time.time() - modelstart)/60))
print("Notebook Runtime: %0.2f Minutes"%((time.time() - notebookstart)/60)) | )
return np.sqrt(np.mean(np.power((y - y0), 2)))
print("\nData Load | identifier_body |
uiLibFlexbox.go | package framework
import (
"fmt"
"sort"
"github.com/CCDirectLink/CCUpdaterUI/frenyard"
)
// Implements a highly limited subset of flexbox to be extended to full support as-needed.
// FlexboxWrapMode describes a type of wrapping mode for Flexbox containers.
type FlexboxWrapMode uint8
// FlexboxWrapModeNone disallows wrapping for items, they are all on one line.
const FlexboxWrapModeNone FlexboxWrapMode = 0
// FlexboxWrapModeWrap allows items to wrap between lines.
const FlexboxWrapModeWrap FlexboxWrapMode = 1
// FlexboxContainer describes a UIFlexboxContainer's contents.
type FlexboxContainer struct {
DirVertical bool
WrapMode FlexboxWrapMode
// Ignored when used by the line solver; it uses fyFlexboxSlotlike instead
Slots []FlexboxSlot
// This is NOT part of the public API. It is provided for debugging use only.
// Do not even mention this field in releases.
Debug bool
}
// FlexboxSlot describes an element within a Flexbox container.
type FlexboxSlot struct {
// Can be nil.
Element UILayoutElement
// If there is a surplus, these are used to distribute it.
Grow int32
// If there is a deficit, these are used to distribute it (along with minimum sizes)
// DEFAULTS TO 1 IN CSS
Shrink int32
// If *non-zero*, then this specifies the "initial share size" of this element.
// Useful when Element is nil.
Basis int32
// Slightly non-standard extension (or is it?) for cases where Basis would be used to pad a problematic element
MinBasis int32
// Used to order the flexboxes visually. The Z-Order remains the index order.
Order int
// If the container should respect the minimum size of this slot.
// DEFAULTS TO TRUE IN CSS (settable to false by overriding min w/h), BUT THIS IS REALLY STUPID B/C IT IGNORES SIZE CONSTRAINTS, SO LET'S NOT DO THAT
RespectMinimumSize bool
}
type fyFlexboxSlotlike interface {
fyMainCrossSizeForMainCrossLimits(limits frenyard.Vec2i, vertical bool, debug bool) frenyard.Vec2i
fyGrowShrink() (int32, int32)
fyCalcBasis(cross int32, vertical bool) int32
fyGetOrder() int
fyRespectMinimumSize() bool
}
func (slot FlexboxSlot) fyMainCrossSizeForMainCrossLimits(limits frenyard.Vec2i, vertical bool, debug bool) frenyard.Vec2i {
if slot.Element == nil {
return frenyard.Vec2i{}
}
if debug {
fmt.Print("?")
}
mainCrossSize := slot.Element.FyLSizeForLimits(limits.ConditionalTranspose(vertical)).ConditionalTranspose(vertical)
mainCrossSize.X = frenyard.Max(mainCrossSize.X, slot.MinBasis)
return mainCrossSize
}
func (slot FlexboxSlot) fyGrowShrink() (int32, int32) {
return slot.Grow, slot.Shrink
}
func (slot FlexboxSlot) fyCalcBasis(cross int32, vertical bool) int32 {
if slot.Basis != 0 {
return slot.Basis
}
return slot.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2i{frenyard.SizeUnlimited, cross}, vertical, false).X
}
func (slot FlexboxSlot) fyGetOrder() int {
return slot.Order
}
func (slot FlexboxSlot) fyRespectMinimumSize() bool {
return slot.RespectMinimumSize
}
// -- Solver --
func fyFlexboxGetPreferredSize(details FlexboxContainer) frenyard.Vec2i {
// Do note, this is in main/cross format.
mainCrossSize := frenyard.Vec2i{}
for _, v := range details.Slots {
sz := v.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2iUnlimited(), details.DirVertical, false)
mainCrossSize.X += sz.X
mainCrossSize.Y = frenyard.Max(mainCrossSize.Y, sz.Y)
}
return mainCrossSize.ConditionalTranspose(details.DirVertical)
}
type fyFlexboxRow struct {
elem []fyFlexboxSlotlike
area []frenyard.Area2i
fullArea frenyard.Area2i
}
func (slot fyFlexboxRow) fyGrowShrink() (int32, int32) {
return 1, 1
}
// Critical to the whole thing and it's full of guesswork due to the vertical flags and axis juggling.
func (slot fyFlexboxRow) fyMainCrossSizeForMainCrossLimits(limits frenyard.Vec2i, vertical bool, debug bool) frenyard.Vec2i {
if debug {
fmt.Print("R{")
}
// Main & Cross in here refer to in the row flexbox, not the outer flexbox.
maximumMain := int32(0)
presentAreaCross := slot.fullArea.Size().ConditionalTranspose(vertical).Y
for _, v := range slot.elem {
lim := frenyard.Vec2i{X: limits.X, Y: presentAreaCross}
if debug {
fmt.Print(" ", limits.X, "x", presentAreaCross)
}
rcs := v.fyMainCrossSizeForMainCrossLimits(lim, vertical, false)
maximumMain = frenyard.Max(maximumMain, rcs.X)
if debug {
fmt.Print(":", rcs.X, "x", rcs.Y)
}
}
if debug {
fmt.Print(" }")
}
return frenyard.Vec2i{X: maximumMain, Y: presentAreaCross}
} | func (slot fyFlexboxRow) fyCalcBasis(cross int32, vertical bool) int32 {
return slot.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2i{X: frenyard.SizeUnlimited, Y: cross}, vertical, false).X
}
func (slot fyFlexboxRow) fyGetOrder() int {
return 0
}
func (slot fyFlexboxRow) fyRespectMinimumSize() bool {
return false
}
// Do be aware, this only handles the one relevant axis.
func (slot *fyFlexboxRow) Fill(area frenyard.Area2i, vertical bool) {
for k := range slot.area {
if !vertical {
// Rows perpendicular to X
slot.area[k].X = area.X
} else {
// Rows perpendicular to Y
slot.area[k].Y = area.Y
}
}
slot.fullArea = area
}
type fyFlexboxSortingCollection struct {
// The collection being sorted.
slots []fyFlexboxSlotlike
// Given a SOURCE slot index, what is the RESULTING slot index?
originalToDisplayIndices []int
// Given a RESULTING slot index, what is the SOURCE slot index?
displayToOriginalIndices []int
}
func (sc fyFlexboxSortingCollection) Len() int {
return len(sc.slots)
}
func (sc fyFlexboxSortingCollection) Less(i int, j int) bool {
order1 := sc.slots[i].fyGetOrder()
order2 := sc.slots[j].fyGetOrder()
// Order1 != order2?
if order1 < order2 {
return true
}
if order1 > order2 {
return false
}
// No, they're equal. Sort by original index.
if sc.displayToOriginalIndices[i] < sc.displayToOriginalIndices[j] {
return true
}
return false
}
func (sc fyFlexboxSortingCollection) Swap(i int, j int) {
backup := sc.slots[i]
backup2 := sc.originalToDisplayIndices[i]
backup3 := sc.displayToOriginalIndices[i]
sc.slots[i] = sc.slots[j]
sc.originalToDisplayIndices[i] = sc.originalToDisplayIndices[j]
sc.displayToOriginalIndices[i] = sc.displayToOriginalIndices[j]
sc.slots[j] = backup
sc.originalToDisplayIndices[j] = backup2
sc.displayToOriginalIndices[j] = backup3
}
func fyFlexboxSolveLayout(details FlexboxContainer, limits frenyard.Vec2i) []frenyard.Area2i {
// Stage 1. Element order pre-processing (DirReverse)
slots := make([]fyFlexboxSlotlike, len(details.Slots))
originalToDisplayIndices := make([]int, len(details.Slots))
displayToOriginalIndices := make([]int, len(details.Slots))
for k, v := range details.Slots {
originalToDisplayIndices[k] = k
displayToOriginalIndices[k] = k
slots[k] = v
}
sort.Sort(fyFlexboxSortingCollection{
slots: slots,
originalToDisplayIndices: originalToDisplayIndices,
displayToOriginalIndices: displayToOriginalIndices,
})
// Stage 2. Wrapping (if relevant)
out := make([]frenyard.Area2i, len(slots))
mainCrossLimits := limits.ConditionalTranspose(details.DirVertical)
shouldWrap := fyFlexboxSolveLine(details, slots, out, mainCrossLimits, details.Debug)
// One row, so this is simple
rows := []fyFlexboxRow{{slots, out, frenyard.UnionArea2i(out)}}
if shouldWrap && details.WrapMode != FlexboxWrapModeNone {
// Wrapping has to start. Oh no...
// Do note, lines is implicitly limited because of the "one slot cannot wrap" rule.
lines := int32(2)
for {
rows = make([]fyFlexboxRow, lines)
lineStartSlot := 0
consumedSlots := 0
currentLine := int32(0)
for consumedSlots < len(slots) {
// If it wraps...
if fyFlexboxSolveLine(details, slots[lineStartSlot:consumedSlots+1], out[lineStartSlot:consumedSlots+1], mainCrossLimits, false) {
// Revert it & finish the line.
rows[currentLine] = fyFlexboxRow{
slots[lineStartSlot:consumedSlots],
out[lineStartSlot:consumedSlots],
frenyard.UnionArea2i(out),
}
fyFlexboxSolveLine(details, rows[currentLine].elem, rows[currentLine].area, mainCrossLimits, false)
// Now setup the new line.
currentLine++
lineStartSlot = consumedSlots
if currentLine == lines {
// Out of range, cancel before rows[currentLine] brings it to a halt
break
}
// Retry the same slot (slot not consumed)
} else {
// Success! Advance.
consumedSlots++
}
}
if currentLine < lines {
// Finish last line
rows[currentLine] = fyFlexboxRow{
slots[lineStartSlot:consumedSlots],
out[lineStartSlot:consumedSlots],
frenyard.UnionArea2i(out),
}
break
}
lines++
}
}
if details.WrapMode != FlexboxWrapModeNone {
// Stage 3. Row compression
rowAreas := make([]frenyard.Area2i, len(rows))
rowSlots := make([]fyFlexboxSlotlike, len(rows))
for rk, row := range rows {
rowSlots[rk] = row
}
fyFlexboxSolveLine(FlexboxContainer{
DirVertical: !details.DirVertical,
WrapMode: FlexboxWrapModeNone,
}, rowSlots, rowAreas, frenyard.Vec2i{mainCrossLimits.Y, mainCrossLimits.X}, false)
for rk, row := range rows {
row.Fill(rowAreas[rk], !details.DirVertical)
}
} else {
// Stage 3. Row setup
if mainCrossLimits.Y != frenyard.SizeUnlimited {
rows[0].Fill(frenyard.Area2iOfSize(mainCrossLimits.ConditionalTranspose(details.DirVertical)), !details.DirVertical)
}
}
// Stage 4. Element order post-processing (DirReverse)
realOutput := make([]frenyard.Area2i, len(out))
for k, v := range originalToDisplayIndices {
realOutput[k] = out[v]
}
return realOutput
}
// Returns true if should wrap. Will not return true ever for only one slot as this cannot wrap.
func fyFlexboxSolveLine(details FlexboxContainer, slots []fyFlexboxSlotlike, out []frenyard.Area2i, mainCrossLimits frenyard.Vec2i, debug bool) bool {
if len(slots) == 0 {
// Nowhere to output. Also, some calculations rely on at least one slot existing.
return false
}
if debug {
if details.DirVertical {
fmt.Print("VERTICAL ")
}
fmt.Println("AREA", mainCrossLimits.X, "x", mainCrossLimits.Y)
}
// Substage 1. Input basis values & create total
shares := make([]int32, len(slots))
totalMainAccumulator := int32(0)
totalGrowAccumulator := int32(0)
totalShrinkAccumulator := int32(0)
for idx, slot := range slots {
shares[idx] = slot.fyCalcBasis(mainCrossLimits.Y, details.DirVertical)
totalMainAccumulator += shares[idx]
slotGrow, slotShrink := slot.fyGrowShrink()
totalGrowAccumulator += slotGrow
totalShrinkAccumulator += slotShrink
}
// Notably, totalMainAccumulator must not change after this point.
// It's the 'reference' for if we ought to wrap.
// Substage 2. Determine expansion or contraction
if mainCrossLimits.X != frenyard.SizeUnlimited && totalMainAccumulator != mainCrossLimits.X {
additionalSpaceAvailable := mainCrossLimits.X - totalMainAccumulator
if debug {
fmt.Println("COMPRESSOR II: ", additionalSpaceAvailable)
}
// Determine which accumulator to use.
totalFactorAccumulator := totalGrowAccumulator
if additionalSpaceAvailable < 0 {
totalFactorAccumulator = totalShrinkAccumulator
}
// Actually redistribute space. This may require multiple passes as a factor may not always be fully appliable.
// This is because the Flexbox system respects minimum size.
// When set to true, the relevant factor must be subtracted from the Accumulator.
slotsHitMinimumSize := make([]bool, len(slots))
needAnotherPass := true
for needAnotherPass && totalFactorAccumulator != 0 {
needAnotherPass = false
totalAlloc := int32(0)
for idx, slot := range slots {
if slotsHitMinimumSize[idx] {
continue
}
grow, shrink := slot.fyGrowShrink()
factor := grow
smallestAlloc := -shares[idx] // Cannot shrink below 0
largestAlloc := frenyard.SizeUnlimited
// There is no 'largest alloc'; if the element is told to grow, that is what it will do
if additionalSpaceAvailable < 0 {
factor = shrink
}
if factor == 0 {
// has no effect, and means totalFactorAccumulator could be 0
continue
}
if additionalSpaceAvailable < 0 && shrink > 0 && slot.fyRespectMinimumSize() {
// Smallest possible alloc: maximum amount that can be shrunk
smallestAlloc = slot.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2i{X: 0, Y: mainCrossLimits.Y}, details.DirVertical, false).X - shares[idx]
}
alloc := (additionalSpaceAvailable * factor) / totalFactorAccumulator
// Limit allocation.
clamped := false
if alloc <= smallestAlloc {
alloc = smallestAlloc
clamped = true
}
if alloc >= largestAlloc {
alloc = largestAlloc
clamped = true
}
// If the limit is hit, remove from processing for the next loop.
if clamped {
slotsHitMinimumSize[idx] = true
needAnotherPass = true
totalFactorAccumulator -= factor
}
// Confirm allocation
shares[idx] += alloc
totalAlloc += alloc
}
additionalSpaceAvailable -= totalAlloc
}
// additionalSpaceAvailable non-zero: justify-content implementation goes here
}
// Substage 3. With horizontal sizes established, calculate crossLimit
crossLimit := int32(0)
for idx := 0; idx < len(slots); idx++ {
crossLimit = frenyard.Max(crossLimit, slots[idx].fyMainCrossSizeForMainCrossLimits(frenyard.Vec2i{X: shares[idx], Y: mainCrossLimits.Y}, details.DirVertical, false).Y)
}
// -- Actual layout! For real this time! --
mainPosition := int32(0)
if debug {
fmt.Println(" CROSS ", crossLimit)
}
for idx := 0; idx < len(slots); idx++ {
out[idx] = frenyard.Area2iOfSize(frenyard.Vec2i{shares[idx], crossLimit}.ConditionalTranspose(details.DirVertical)).Translate(frenyard.Vec2i{mainPosition, 0}.ConditionalTranspose(details.DirVertical))
if debug {
fmt.Println(" SHARE ", shares[idx])
}
mainPosition += shares[idx]
}
if debug {
fmt.Println("END AREA")
}
// If len(slots) <= 1 then wrapping would inf. loop, so only wrap for >1.
return (len(slots) > 1) && (totalMainAccumulator > mainCrossLimits.X)
}
// -- UI element --
// UIFlexboxContainer lays out UILayoutElements using a partial implementation of Flexbox.
type UIFlexboxContainer struct {
UIPanel
UILayoutElementComponent
_state FlexboxContainer
_preferredSize frenyard.Vec2i
}
// NewUIFlexboxContainerPtr creates a UIFlexboxContainer from the FlexboxContainer details
func NewUIFlexboxContainerPtr(setup FlexboxContainer) *UIFlexboxContainer {
container := &UIFlexboxContainer{
UIPanel: NewPanel(frenyard.Vec2i{}),
}
InitUILayoutElementComponent(container)
container.SetContent(setup)
container.FyEResize(container._preferredSize)
return container
}
// FyLSubelementChanged implements UILayoutElement.FyLSubelementChanged
func (ufc *UIFlexboxContainer) FyLSubelementChanged() {
ufc._preferredSize = fyFlexboxGetPreferredSize(ufc._state)
ufc.ThisUILayoutElementComponentDetails.ContentChanged()
}
// FyLSizeForLimits implements UILayoutElement.FyLSizeForLimits
func (ufc *UIFlexboxContainer) FyLSizeForLimits(limits frenyard.Vec2i) frenyard.Vec2i {
if limits.Ge(ufc._preferredSize) {
return ufc._preferredSize
}
solved := fyFlexboxSolveLayout(ufc._state, limits)
max := frenyard.Vec2i{}
for _, v := range solved {
max = max.Max(v.Pos().Add(v.Size()))
}
return max
}
// SetContent changes the contents of the UIFlexboxContainer.
func (ufc *UIFlexboxContainer) SetContent(setup FlexboxContainer) {
if ufc._state.Slots != nil {
for _, v := range ufc._state.Slots {
if v.Element != nil {
ufc.ThisUILayoutElementComponentDetails.Detach(v.Element)
}
}
}
ufc._state = setup
for _, v := range setup.Slots {
if v.Element != nil {
ufc.ThisUILayoutElementComponentDetails.Attach(v.Element)
}
}
ufc.FyLSubelementChanged()
}
// FyEResize overrides UIPanel.FyEResize
func (ufc *UIFlexboxContainer) FyEResize(size frenyard.Vec2i) {
ufc.UIPanel.FyEResize(size)
areas := fyFlexboxSolveLayout(ufc._state, size)
fixes := make([]PanelFixedElement, len(areas))
fixesCount := 0
for idx, slot := range ufc._state.Slots {
if slot.Element != nil {
fixes[fixesCount].Pos = areas[idx].Pos()
fixes[fixesCount].Visible = true
fixes[fixesCount].Element = slot.Element
slot.Element.FyEResize(areas[idx].Size())
fixesCount++
}
}
ufc.ThisUIPanelDetails.SetContent(fixes[:fixesCount])
} | random_line_split | |
uiLibFlexbox.go | package framework
import (
"fmt"
"sort"
"github.com/CCDirectLink/CCUpdaterUI/frenyard"
)
// Implements a highly limited subset of flexbox to be extended to full support as-needed.
// FlexboxWrapMode describes a type of wrapping mode for Flexbox containers.
type FlexboxWrapMode uint8
// FlexboxWrapModeNone disallows wrapping for items, they are all on one line.
const FlexboxWrapModeNone FlexboxWrapMode = 0
// FlexboxWrapModeWrap allows items to wrap between lines.
const FlexboxWrapModeWrap FlexboxWrapMode = 1
// FlexboxContainer describes a UIFlexboxContainer's contents.
type FlexboxContainer struct {
DirVertical bool
WrapMode FlexboxWrapMode
// Ignored when used by the line solver; it uses fyFlexboxSlotlike instead
Slots []FlexboxSlot
// This is NOT part of the public API. It is provided for debugging use only.
// Do not even mention this field in releases.
Debug bool
}
// FlexboxSlot describes an element within a Flexbox container.
type FlexboxSlot struct {
// Can be nil.
Element UILayoutElement
// If there is a surplus, these are used to distribute it.
Grow int32
// If there is a deficit, these are used to distribute it (along with minimum sizes)
// DEFAULTS TO 1 IN CSS
Shrink int32
// If *non-zero*, then this specifies the "initial share size" of this element.
// Useful when Element is nil.
Basis int32
// Slightly non-standard extension (or is it?) for cases where Basis would be used to pad a problematic element
MinBasis int32
// Used to order the flexboxes visually. The Z-Order remains the index order.
Order int
// If the container should respect the minimum size of this slot.
// DEFAULTS TO TRUE IN CSS (settable to false by overriding min w/h), BUT THIS IS REALLY STUPID B/C IT IGNORES SIZE CONSTRAINTS, SO LET'S NOT DO THAT
RespectMinimumSize bool
}
type fyFlexboxSlotlike interface {
fyMainCrossSizeForMainCrossLimits(limits frenyard.Vec2i, vertical bool, debug bool) frenyard.Vec2i
fyGrowShrink() (int32, int32)
fyCalcBasis(cross int32, vertical bool) int32
fyGetOrder() int
fyRespectMinimumSize() bool
}
func (slot FlexboxSlot) fyMainCrossSizeForMainCrossLimits(limits frenyard.Vec2i, vertical bool, debug bool) frenyard.Vec2i {
if slot.Element == nil {
return frenyard.Vec2i{}
}
if debug {
fmt.Print("?")
}
mainCrossSize := slot.Element.FyLSizeForLimits(limits.ConditionalTranspose(vertical)).ConditionalTranspose(vertical)
mainCrossSize.X = frenyard.Max(mainCrossSize.X, slot.MinBasis)
return mainCrossSize
}
func (slot FlexboxSlot) fyGrowShrink() (int32, int32) {
return slot.Grow, slot.Shrink
}
func (slot FlexboxSlot) fyCalcBasis(cross int32, vertical bool) int32 {
if slot.Basis != 0 {
return slot.Basis
}
return slot.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2i{frenyard.SizeUnlimited, cross}, vertical, false).X
}
func (slot FlexboxSlot) fyGetOrder() int {
return slot.Order
}
func (slot FlexboxSlot) fyRespectMinimumSize() bool {
return slot.RespectMinimumSize
}
// -- Solver --
func fyFlexboxGetPreferredSize(details FlexboxContainer) frenyard.Vec2i {
// Do note, this is in main/cross format.
mainCrossSize := frenyard.Vec2i{}
for _, v := range details.Slots {
sz := v.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2iUnlimited(), details.DirVertical, false)
mainCrossSize.X += sz.X
mainCrossSize.Y = frenyard.Max(mainCrossSize.Y, sz.Y)
}
return mainCrossSize.ConditionalTranspose(details.DirVertical)
}
type fyFlexboxRow struct {
elem []fyFlexboxSlotlike
area []frenyard.Area2i
fullArea frenyard.Area2i
}
func (slot fyFlexboxRow) fyGrowShrink() (int32, int32) {
return 1, 1
}
// Critical to the whole thing and it's full of guesswork due to the vertical flags and axis juggling.
func (slot fyFlexboxRow) fyMainCrossSizeForMainCrossLimits(limits frenyard.Vec2i, vertical bool, debug bool) frenyard.Vec2i {
if debug {
fmt.Print("R{")
}
// Main & Cross in here refer to in the row flexbox, not the outer flexbox.
maximumMain := int32(0)
presentAreaCross := slot.fullArea.Size().ConditionalTranspose(vertical).Y
for _, v := range slot.elem {
lim := frenyard.Vec2i{X: limits.X, Y: presentAreaCross}
if debug {
fmt.Print(" ", limits.X, "x", presentAreaCross)
}
rcs := v.fyMainCrossSizeForMainCrossLimits(lim, vertical, false)
maximumMain = frenyard.Max(maximumMain, rcs.X)
if debug {
fmt.Print(":", rcs.X, "x", rcs.Y)
}
}
if debug |
return frenyard.Vec2i{X: maximumMain, Y: presentAreaCross}
}
func (slot fyFlexboxRow) fyCalcBasis(cross int32, vertical bool) int32 {
return slot.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2i{X: frenyard.SizeUnlimited, Y: cross}, vertical, false).X
}
func (slot fyFlexboxRow) fyGetOrder() int {
return 0
}
func (slot fyFlexboxRow) fyRespectMinimumSize() bool {
return false
}
// Do be aware, this only handles the one relevant axis.
func (slot *fyFlexboxRow) Fill(area frenyard.Area2i, vertical bool) {
for k := range slot.area {
if !vertical {
// Rows perpendicular to X
slot.area[k].X = area.X
} else {
// Rows perpendicular to Y
slot.area[k].Y = area.Y
}
}
slot.fullArea = area
}
type fyFlexboxSortingCollection struct {
// The collection being sorted.
slots []fyFlexboxSlotlike
// Given a SOURCE slot index, what is the RESULTING slot index?
originalToDisplayIndices []int
// Given a RESULTING slot index, what is the SOURCE slot index?
displayToOriginalIndices []int
}
func (sc fyFlexboxSortingCollection) Len() int {
return len(sc.slots)
}
func (sc fyFlexboxSortingCollection) Less(i int, j int) bool {
order1 := sc.slots[i].fyGetOrder()
order2 := sc.slots[j].fyGetOrder()
// Order1 != order2?
if order1 < order2 {
return true
}
if order1 > order2 {
return false
}
// No, they're equal. Sort by original index.
if sc.displayToOriginalIndices[i] < sc.displayToOriginalIndices[j] {
return true
}
return false
}
func (sc fyFlexboxSortingCollection) Swap(i int, j int) {
backup := sc.slots[i]
backup2 := sc.originalToDisplayIndices[i]
backup3 := sc.displayToOriginalIndices[i]
sc.slots[i] = sc.slots[j]
sc.originalToDisplayIndices[i] = sc.originalToDisplayIndices[j]
sc.displayToOriginalIndices[i] = sc.displayToOriginalIndices[j]
sc.slots[j] = backup
sc.originalToDisplayIndices[j] = backup2
sc.displayToOriginalIndices[j] = backup3
}
func fyFlexboxSolveLayout(details FlexboxContainer, limits frenyard.Vec2i) []frenyard.Area2i {
// Stage 1. Element order pre-processing (DirReverse)
slots := make([]fyFlexboxSlotlike, len(details.Slots))
originalToDisplayIndices := make([]int, len(details.Slots))
displayToOriginalIndices := make([]int, len(details.Slots))
for k, v := range details.Slots {
originalToDisplayIndices[k] = k
displayToOriginalIndices[k] = k
slots[k] = v
}
sort.Sort(fyFlexboxSortingCollection{
slots: slots,
originalToDisplayIndices: originalToDisplayIndices,
displayToOriginalIndices: displayToOriginalIndices,
})
// Stage 2. Wrapping (if relevant)
out := make([]frenyard.Area2i, len(slots))
mainCrossLimits := limits.ConditionalTranspose(details.DirVertical)
shouldWrap := fyFlexboxSolveLine(details, slots, out, mainCrossLimits, details.Debug)
// One row, so this is simple
rows := []fyFlexboxRow{{slots, out, frenyard.UnionArea2i(out)}}
if shouldWrap && details.WrapMode != FlexboxWrapModeNone {
// Wrapping has to start. Oh no...
// Do note, lines is implicitly limited because of the "one slot cannot wrap" rule.
lines := int32(2)
for {
rows = make([]fyFlexboxRow, lines)
lineStartSlot := 0
consumedSlots := 0
currentLine := int32(0)
for consumedSlots < len(slots) {
// If it wraps...
if fyFlexboxSolveLine(details, slots[lineStartSlot:consumedSlots+1], out[lineStartSlot:consumedSlots+1], mainCrossLimits, false) {
// Revert it & finish the line.
rows[currentLine] = fyFlexboxRow{
slots[lineStartSlot:consumedSlots],
out[lineStartSlot:consumedSlots],
frenyard.UnionArea2i(out),
}
fyFlexboxSolveLine(details, rows[currentLine].elem, rows[currentLine].area, mainCrossLimits, false)
// Now setup the new line.
currentLine++
lineStartSlot = consumedSlots
if currentLine == lines {
// Out of range, cancel before rows[currentLine] brings it to a halt
break
}
// Retry the same slot (slot not consumed)
} else {
// Success! Advance.
consumedSlots++
}
}
if currentLine < lines {
// Finish last line
rows[currentLine] = fyFlexboxRow{
slots[lineStartSlot:consumedSlots],
out[lineStartSlot:consumedSlots],
frenyard.UnionArea2i(out),
}
break
}
lines++
}
}
if details.WrapMode != FlexboxWrapModeNone {
// Stage 3. Row compression
rowAreas := make([]frenyard.Area2i, len(rows))
rowSlots := make([]fyFlexboxSlotlike, len(rows))
for rk, row := range rows {
rowSlots[rk] = row
}
fyFlexboxSolveLine(FlexboxContainer{
DirVertical: !details.DirVertical,
WrapMode: FlexboxWrapModeNone,
}, rowSlots, rowAreas, frenyard.Vec2i{mainCrossLimits.Y, mainCrossLimits.X}, false)
for rk, row := range rows {
row.Fill(rowAreas[rk], !details.DirVertical)
}
} else {
// Stage 3. Row setup
if mainCrossLimits.Y != frenyard.SizeUnlimited {
rows[0].Fill(frenyard.Area2iOfSize(mainCrossLimits.ConditionalTranspose(details.DirVertical)), !details.DirVertical)
}
}
// Stage 4. Element order post-processing (DirReverse)
realOutput := make([]frenyard.Area2i, len(out))
for k, v := range originalToDisplayIndices {
realOutput[k] = out[v]
}
return realOutput
}
// Returns true if should wrap. Will not return true ever for only one slot as this cannot wrap.
func fyFlexboxSolveLine(details FlexboxContainer, slots []fyFlexboxSlotlike, out []frenyard.Area2i, mainCrossLimits frenyard.Vec2i, debug bool) bool {
if len(slots) == 0 {
// Nowhere to output. Also, some calculations rely on at least one slot existing.
return false
}
if debug {
if details.DirVertical {
fmt.Print("VERTICAL ")
}
fmt.Println("AREA", mainCrossLimits.X, "x", mainCrossLimits.Y)
}
// Substage 1. Input basis values & create total
shares := make([]int32, len(slots))
totalMainAccumulator := int32(0)
totalGrowAccumulator := int32(0)
totalShrinkAccumulator := int32(0)
for idx, slot := range slots {
shares[idx] = slot.fyCalcBasis(mainCrossLimits.Y, details.DirVertical)
totalMainAccumulator += shares[idx]
slotGrow, slotShrink := slot.fyGrowShrink()
totalGrowAccumulator += slotGrow
totalShrinkAccumulator += slotShrink
}
// Notably, totalMainAccumulator must not change after this point.
// It's the 'reference' for if we ought to wrap.
// Substage 2. Determine expansion or contraction
if mainCrossLimits.X != frenyard.SizeUnlimited && totalMainAccumulator != mainCrossLimits.X {
additionalSpaceAvailable := mainCrossLimits.X - totalMainAccumulator
if debug {
fmt.Println("COMPRESSOR II: ", additionalSpaceAvailable)
}
// Determine which accumulator to use.
totalFactorAccumulator := totalGrowAccumulator
if additionalSpaceAvailable < 0 {
totalFactorAccumulator = totalShrinkAccumulator
}
// Actually redistribute space. This may require multiple passes as a factor may not always be fully appliable.
// This is because the Flexbox system respects minimum size.
// When set to true, the relevant factor must be subtracted from the Accumulator.
slotsHitMinimumSize := make([]bool, len(slots))
needAnotherPass := true
for needAnotherPass && totalFactorAccumulator != 0 {
needAnotherPass = false
totalAlloc := int32(0)
for idx, slot := range slots {
if slotsHitMinimumSize[idx] {
continue
}
grow, shrink := slot.fyGrowShrink()
factor := grow
smallestAlloc := -shares[idx] // Cannot shrink below 0
largestAlloc := frenyard.SizeUnlimited
// There is no 'largest alloc'; if the element is told to grow, that is what it will do
if additionalSpaceAvailable < 0 {
factor = shrink
}
if factor == 0 {
// has no effect, and means totalFactorAccumulator could be 0
continue
}
if additionalSpaceAvailable < 0 && shrink > 0 && slot.fyRespectMinimumSize() {
// Smallest possible alloc: maximum amount that can be shrunk
smallestAlloc = slot.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2i{X: 0, Y: mainCrossLimits.Y}, details.DirVertical, false).X - shares[idx]
}
alloc := (additionalSpaceAvailable * factor) / totalFactorAccumulator
// Limit allocation.
clamped := false
if alloc <= smallestAlloc {
alloc = smallestAlloc
clamped = true
}
if alloc >= largestAlloc {
alloc = largestAlloc
clamped = true
}
// If the limit is hit, remove from processing for the next loop.
if clamped {
slotsHitMinimumSize[idx] = true
needAnotherPass = true
totalFactorAccumulator -= factor
}
// Confirm allocation
shares[idx] += alloc
totalAlloc += alloc
}
additionalSpaceAvailable -= totalAlloc
}
// additionalSpaceAvailable non-zero: justify-content implementation goes here
}
// Substage 3. With horizontal sizes established, calculate crossLimit
crossLimit := int32(0)
for idx := 0; idx < len(slots); idx++ {
crossLimit = frenyard.Max(crossLimit, slots[idx].fyMainCrossSizeForMainCrossLimits(frenyard.Vec2i{X: shares[idx], Y: mainCrossLimits.Y}, details.DirVertical, false).Y)
}
// -- Actual layout! For real this time! --
mainPosition := int32(0)
if debug {
fmt.Println(" CROSS ", crossLimit)
}
for idx := 0; idx < len(slots); idx++ {
out[idx] = frenyard.Area2iOfSize(frenyard.Vec2i{shares[idx], crossLimit}.ConditionalTranspose(details.DirVertical)).Translate(frenyard.Vec2i{mainPosition, 0}.ConditionalTranspose(details.DirVertical))
if debug {
fmt.Println(" SHARE ", shares[idx])
}
mainPosition += shares[idx]
}
if debug {
fmt.Println("END AREA")
}
// If len(slots) <= 1 then wrapping would inf. loop, so only wrap for >1.
return (len(slots) > 1) && (totalMainAccumulator > mainCrossLimits.X)
}
// -- UI element --
// UIFlexboxContainer lays out UILayoutElements using a partial implementation of Flexbox.
type UIFlexboxContainer struct {
UIPanel
UILayoutElementComponent
_state FlexboxContainer
_preferredSize frenyard.Vec2i
}
// NewUIFlexboxContainerPtr creates a UIFlexboxContainer from the FlexboxContainer details
func NewUIFlexboxContainerPtr(setup FlexboxContainer) *UIFlexboxContainer {
container := &UIFlexboxContainer{
UIPanel: NewPanel(frenyard.Vec2i{}),
}
InitUILayoutElementComponent(container)
container.SetContent(setup)
container.FyEResize(container._preferredSize)
return container
}
// FyLSubelementChanged implements UILayoutElement.FyLSubelementChanged
func (ufc *UIFlexboxContainer) FyLSubelementChanged() {
ufc._preferredSize = fyFlexboxGetPreferredSize(ufc._state)
ufc.ThisUILayoutElementComponentDetails.ContentChanged()
}
// FyLSizeForLimits implements UILayoutElement.FyLSizeForLimits
func (ufc *UIFlexboxContainer) FyLSizeForLimits(limits frenyard.Vec2i) frenyard.Vec2i {
if limits.Ge(ufc._preferredSize) {
return ufc._preferredSize
}
solved := fyFlexboxSolveLayout(ufc._state, limits)
max := frenyard.Vec2i{}
for _, v := range solved {
max = max.Max(v.Pos().Add(v.Size()))
}
return max
}
// SetContent changes the contents of the UIFlexboxContainer.
func (ufc *UIFlexboxContainer) SetContent(setup FlexboxContainer) {
if ufc._state.Slots != nil {
for _, v := range ufc._state.Slots {
if v.Element != nil {
ufc.ThisUILayoutElementComponentDetails.Detach(v.Element)
}
}
}
ufc._state = setup
for _, v := range setup.Slots {
if v.Element != nil {
ufc.ThisUILayoutElementComponentDetails.Attach(v.Element)
}
}
ufc.FyLSubelementChanged()
}
// FyEResize overrides UIPanel.FyEResize
func (ufc *UIFlexboxContainer) FyEResize(size frenyard.Vec2i) {
ufc.UIPanel.FyEResize(size)
areas := fyFlexboxSolveLayout(ufc._state, size)
fixes := make([]PanelFixedElement, len(areas))
fixesCount := 0
for idx, slot := range ufc._state.Slots {
if slot.Element != nil {
fixes[fixesCount].Pos = areas[idx].Pos()
fixes[fixesCount].Visible = true
fixes[fixesCount].Element = slot.Element
slot.Element.FyEResize(areas[idx].Size())
fixesCount++
}
}
ufc.ThisUIPanelDetails.SetContent(fixes[:fixesCount])
}
| {
fmt.Print(" }")
} | conditional_block |
uiLibFlexbox.go | package framework
import (
"fmt"
"sort"
"github.com/CCDirectLink/CCUpdaterUI/frenyard"
)
// Implements a highly limited subset of flexbox to be extended to full support as-needed.
// FlexboxWrapMode describes a type of wrapping mode for Flexbox containers.
type FlexboxWrapMode uint8
// FlexboxWrapModeNone disallows wrapping for items, they are all on one line.
const FlexboxWrapModeNone FlexboxWrapMode = 0
// FlexboxWrapModeWrap allows items to wrap between lines.
const FlexboxWrapModeWrap FlexboxWrapMode = 1
// FlexboxContainer describes a UIFlexboxContainer's contents.
type FlexboxContainer struct {
DirVertical bool
WrapMode FlexboxWrapMode
// Ignored when used by the line solver; it uses fyFlexboxSlotlike instead
Slots []FlexboxSlot
// This is NOT part of the public API. It is provided for debugging use only.
// Do not even mention this field in releases.
Debug bool
}
// FlexboxSlot describes an element within a Flexbox container.
type FlexboxSlot struct {
// Can be nil.
Element UILayoutElement
// If there is a surplus, these are used to distribute it.
Grow int32
// If there is a deficit, these are used to distribute it (along with minimum sizes)
// DEFAULTS TO 1 IN CSS
Shrink int32
// If *non-zero*, then this specifies the "initial share size" of this element.
// Useful when Element is nil.
Basis int32
// Slightly non-standard extension (or is it?) for cases where Basis would be used to pad a problematic element
MinBasis int32
// Used to order the flexboxes visually. The Z-Order remains the index order.
Order int
// If the container should respect the minimum size of this slot.
// DEFAULTS TO TRUE IN CSS (settable to false by overriding min w/h), BUT THIS IS REALLY STUPID B/C IT IGNORES SIZE CONSTRAINTS, SO LET'S NOT DO THAT
RespectMinimumSize bool
}
type fyFlexboxSlotlike interface {
fyMainCrossSizeForMainCrossLimits(limits frenyard.Vec2i, vertical bool, debug bool) frenyard.Vec2i
fyGrowShrink() (int32, int32)
fyCalcBasis(cross int32, vertical bool) int32
fyGetOrder() int
fyRespectMinimumSize() bool
}
func (slot FlexboxSlot) fyMainCrossSizeForMainCrossLimits(limits frenyard.Vec2i, vertical bool, debug bool) frenyard.Vec2i {
if slot.Element == nil {
return frenyard.Vec2i{}
}
if debug {
fmt.Print("?")
}
mainCrossSize := slot.Element.FyLSizeForLimits(limits.ConditionalTranspose(vertical)).ConditionalTranspose(vertical)
mainCrossSize.X = frenyard.Max(mainCrossSize.X, slot.MinBasis)
return mainCrossSize
}
func (slot FlexboxSlot) fyGrowShrink() (int32, int32) {
return slot.Grow, slot.Shrink
}
func (slot FlexboxSlot) fyCalcBasis(cross int32, vertical bool) int32 {
if slot.Basis != 0 {
return slot.Basis
}
return slot.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2i{frenyard.SizeUnlimited, cross}, vertical, false).X
}
func (slot FlexboxSlot) fyGetOrder() int {
return slot.Order
}
func (slot FlexboxSlot) fyRespectMinimumSize() bool {
return slot.RespectMinimumSize
}
// -- Solver --
func fyFlexboxGetPreferredSize(details FlexboxContainer) frenyard.Vec2i {
// Do note, this is in main/cross format.
mainCrossSize := frenyard.Vec2i{}
for _, v := range details.Slots {
sz := v.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2iUnlimited(), details.DirVertical, false)
mainCrossSize.X += sz.X
mainCrossSize.Y = frenyard.Max(mainCrossSize.Y, sz.Y)
}
return mainCrossSize.ConditionalTranspose(details.DirVertical)
}
type fyFlexboxRow struct {
elem []fyFlexboxSlotlike
area []frenyard.Area2i
fullArea frenyard.Area2i
}
func (slot fyFlexboxRow) fyGrowShrink() (int32, int32) {
return 1, 1
}
// Critical to the whole thing and it's full of guesswork due to the vertical flags and axis juggling.
func (slot fyFlexboxRow) fyMainCrossSizeForMainCrossLimits(limits frenyard.Vec2i, vertical bool, debug bool) frenyard.Vec2i {
if debug {
fmt.Print("R{")
}
// Main & Cross in here refer to in the row flexbox, not the outer flexbox.
maximumMain := int32(0)
presentAreaCross := slot.fullArea.Size().ConditionalTranspose(vertical).Y
for _, v := range slot.elem {
lim := frenyard.Vec2i{X: limits.X, Y: presentAreaCross}
if debug {
fmt.Print(" ", limits.X, "x", presentAreaCross)
}
rcs := v.fyMainCrossSizeForMainCrossLimits(lim, vertical, false)
maximumMain = frenyard.Max(maximumMain, rcs.X)
if debug {
fmt.Print(":", rcs.X, "x", rcs.Y)
}
}
if debug {
fmt.Print(" }")
}
return frenyard.Vec2i{X: maximumMain, Y: presentAreaCross}
}
func (slot fyFlexboxRow) fyCalcBasis(cross int32, vertical bool) int32 {
return slot.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2i{X: frenyard.SizeUnlimited, Y: cross}, vertical, false).X
}
func (slot fyFlexboxRow) fyGetOrder() int {
return 0
}
func (slot fyFlexboxRow) fyRespectMinimumSize() bool {
return false
}
// Do be aware, this only handles the one relevant axis.
func (slot *fyFlexboxRow) Fill(area frenyard.Area2i, vertical bool) {
for k := range slot.area {
if !vertical {
// Rows perpendicular to X
slot.area[k].X = area.X
} else {
// Rows perpendicular to Y
slot.area[k].Y = area.Y
}
}
slot.fullArea = area
}
type fyFlexboxSortingCollection struct {
// The collection being sorted.
slots []fyFlexboxSlotlike
// Given a SOURCE slot index, what is the RESULTING slot index?
originalToDisplayIndices []int
// Given a RESULTING slot index, what is the SOURCE slot index?
displayToOriginalIndices []int
}
func (sc fyFlexboxSortingCollection) | () int {
return len(sc.slots)
}
func (sc fyFlexboxSortingCollection) Less(i int, j int) bool {
order1 := sc.slots[i].fyGetOrder()
order2 := sc.slots[j].fyGetOrder()
// Order1 != order2?
if order1 < order2 {
return true
}
if order1 > order2 {
return false
}
// No, they're equal. Sort by original index.
if sc.displayToOriginalIndices[i] < sc.displayToOriginalIndices[j] {
return true
}
return false
}
func (sc fyFlexboxSortingCollection) Swap(i int, j int) {
backup := sc.slots[i]
backup2 := sc.originalToDisplayIndices[i]
backup3 := sc.displayToOriginalIndices[i]
sc.slots[i] = sc.slots[j]
sc.originalToDisplayIndices[i] = sc.originalToDisplayIndices[j]
sc.displayToOriginalIndices[i] = sc.displayToOriginalIndices[j]
sc.slots[j] = backup
sc.originalToDisplayIndices[j] = backup2
sc.displayToOriginalIndices[j] = backup3
}
func fyFlexboxSolveLayout(details FlexboxContainer, limits frenyard.Vec2i) []frenyard.Area2i {
// Stage 1. Element order pre-processing (DirReverse)
slots := make([]fyFlexboxSlotlike, len(details.Slots))
originalToDisplayIndices := make([]int, len(details.Slots))
displayToOriginalIndices := make([]int, len(details.Slots))
for k, v := range details.Slots {
originalToDisplayIndices[k] = k
displayToOriginalIndices[k] = k
slots[k] = v
}
sort.Sort(fyFlexboxSortingCollection{
slots: slots,
originalToDisplayIndices: originalToDisplayIndices,
displayToOriginalIndices: displayToOriginalIndices,
})
// Stage 2. Wrapping (if relevant)
out := make([]frenyard.Area2i, len(slots))
mainCrossLimits := limits.ConditionalTranspose(details.DirVertical)
shouldWrap := fyFlexboxSolveLine(details, slots, out, mainCrossLimits, details.Debug)
// One row, so this is simple
rows := []fyFlexboxRow{{slots, out, frenyard.UnionArea2i(out)}}
if shouldWrap && details.WrapMode != FlexboxWrapModeNone {
// Wrapping has to start. Oh no...
// Do note, lines is implicitly limited because of the "one slot cannot wrap" rule.
lines := int32(2)
for {
rows = make([]fyFlexboxRow, lines)
lineStartSlot := 0
consumedSlots := 0
currentLine := int32(0)
for consumedSlots < len(slots) {
// If it wraps...
if fyFlexboxSolveLine(details, slots[lineStartSlot:consumedSlots+1], out[lineStartSlot:consumedSlots+1], mainCrossLimits, false) {
// Revert it & finish the line.
rows[currentLine] = fyFlexboxRow{
slots[lineStartSlot:consumedSlots],
out[lineStartSlot:consumedSlots],
frenyard.UnionArea2i(out),
}
fyFlexboxSolveLine(details, rows[currentLine].elem, rows[currentLine].area, mainCrossLimits, false)
// Now setup the new line.
currentLine++
lineStartSlot = consumedSlots
if currentLine == lines {
// Out of range, cancel before rows[currentLine] brings it to a halt
break
}
// Retry the same slot (slot not consumed)
} else {
// Success! Advance.
consumedSlots++
}
}
if currentLine < lines {
// Finish last line
rows[currentLine] = fyFlexboxRow{
slots[lineStartSlot:consumedSlots],
out[lineStartSlot:consumedSlots],
frenyard.UnionArea2i(out),
}
break
}
lines++
}
}
if details.WrapMode != FlexboxWrapModeNone {
// Stage 3. Row compression
rowAreas := make([]frenyard.Area2i, len(rows))
rowSlots := make([]fyFlexboxSlotlike, len(rows))
for rk, row := range rows {
rowSlots[rk] = row
}
fyFlexboxSolveLine(FlexboxContainer{
DirVertical: !details.DirVertical,
WrapMode: FlexboxWrapModeNone,
}, rowSlots, rowAreas, frenyard.Vec2i{mainCrossLimits.Y, mainCrossLimits.X}, false)
for rk, row := range rows {
row.Fill(rowAreas[rk], !details.DirVertical)
}
} else {
// Stage 3. Row setup
if mainCrossLimits.Y != frenyard.SizeUnlimited {
rows[0].Fill(frenyard.Area2iOfSize(mainCrossLimits.ConditionalTranspose(details.DirVertical)), !details.DirVertical)
}
}
// Stage 4. Element order post-processing (DirReverse)
realOutput := make([]frenyard.Area2i, len(out))
for k, v := range originalToDisplayIndices {
realOutput[k] = out[v]
}
return realOutput
}
// Returns true if should wrap. Will not return true ever for only one slot as this cannot wrap.
func fyFlexboxSolveLine(details FlexboxContainer, slots []fyFlexboxSlotlike, out []frenyard.Area2i, mainCrossLimits frenyard.Vec2i, debug bool) bool {
if len(slots) == 0 {
// Nowhere to output. Also, some calculations rely on at least one slot existing.
return false
}
if debug {
if details.DirVertical {
fmt.Print("VERTICAL ")
}
fmt.Println("AREA", mainCrossLimits.X, "x", mainCrossLimits.Y)
}
// Substage 1. Input basis values & create total
shares := make([]int32, len(slots))
totalMainAccumulator := int32(0)
totalGrowAccumulator := int32(0)
totalShrinkAccumulator := int32(0)
for idx, slot := range slots {
shares[idx] = slot.fyCalcBasis(mainCrossLimits.Y, details.DirVertical)
totalMainAccumulator += shares[idx]
slotGrow, slotShrink := slot.fyGrowShrink()
totalGrowAccumulator += slotGrow
totalShrinkAccumulator += slotShrink
}
// Notably, totalMainAccumulator must not change after this point.
// It's the 'reference' for if we ought to wrap.
// Substage 2. Determine expansion or contraction
if mainCrossLimits.X != frenyard.SizeUnlimited && totalMainAccumulator != mainCrossLimits.X {
additionalSpaceAvailable := mainCrossLimits.X - totalMainAccumulator
if debug {
fmt.Println("COMPRESSOR II: ", additionalSpaceAvailable)
}
// Determine which accumulator to use.
totalFactorAccumulator := totalGrowAccumulator
if additionalSpaceAvailable < 0 {
totalFactorAccumulator = totalShrinkAccumulator
}
// Actually redistribute space. This may require multiple passes as a factor may not always be fully appliable.
// This is because the Flexbox system respects minimum size.
// When set to true, the relevant factor must be subtracted from the Accumulator.
slotsHitMinimumSize := make([]bool, len(slots))
needAnotherPass := true
for needAnotherPass && totalFactorAccumulator != 0 {
needAnotherPass = false
totalAlloc := int32(0)
for idx, slot := range slots {
if slotsHitMinimumSize[idx] {
continue
}
grow, shrink := slot.fyGrowShrink()
factor := grow
smallestAlloc := -shares[idx] // Cannot shrink below 0
largestAlloc := frenyard.SizeUnlimited
// There is no 'largest alloc'; if the element is told to grow, that is what it will do
if additionalSpaceAvailable < 0 {
factor = shrink
}
if factor == 0 {
// has no effect, and means totalFactorAccumulator could be 0
continue
}
if additionalSpaceAvailable < 0 && shrink > 0 && slot.fyRespectMinimumSize() {
// Smallest possible alloc: maximum amount that can be shrunk
smallestAlloc = slot.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2i{X: 0, Y: mainCrossLimits.Y}, details.DirVertical, false).X - shares[idx]
}
alloc := (additionalSpaceAvailable * factor) / totalFactorAccumulator
// Limit allocation.
clamped := false
if alloc <= smallestAlloc {
alloc = smallestAlloc
clamped = true
}
if alloc >= largestAlloc {
alloc = largestAlloc
clamped = true
}
// If the limit is hit, remove from processing for the next loop.
if clamped {
slotsHitMinimumSize[idx] = true
needAnotherPass = true
totalFactorAccumulator -= factor
}
// Confirm allocation
shares[idx] += alloc
totalAlloc += alloc
}
additionalSpaceAvailable -= totalAlloc
}
// additionalSpaceAvailable non-zero: justify-content implementation goes here
}
// Substage 3. With horizontal sizes established, calculate crossLimit
crossLimit := int32(0)
for idx := 0; idx < len(slots); idx++ {
crossLimit = frenyard.Max(crossLimit, slots[idx].fyMainCrossSizeForMainCrossLimits(frenyard.Vec2i{X: shares[idx], Y: mainCrossLimits.Y}, details.DirVertical, false).Y)
}
// -- Actual layout! For real this time! --
mainPosition := int32(0)
if debug {
fmt.Println(" CROSS ", crossLimit)
}
for idx := 0; idx < len(slots); idx++ {
out[idx] = frenyard.Area2iOfSize(frenyard.Vec2i{shares[idx], crossLimit}.ConditionalTranspose(details.DirVertical)).Translate(frenyard.Vec2i{mainPosition, 0}.ConditionalTranspose(details.DirVertical))
if debug {
fmt.Println(" SHARE ", shares[idx])
}
mainPosition += shares[idx]
}
if debug {
fmt.Println("END AREA")
}
// If len(slots) <= 1 then wrapping would inf. loop, so only wrap for >1.
return (len(slots) > 1) && (totalMainAccumulator > mainCrossLimits.X)
}
// -- UI element --
// UIFlexboxContainer lays out UILayoutElements using a partial implementation of Flexbox.
type UIFlexboxContainer struct {
UIPanel
UILayoutElementComponent
_state FlexboxContainer
_preferredSize frenyard.Vec2i
}
// NewUIFlexboxContainerPtr creates a UIFlexboxContainer from the FlexboxContainer details
func NewUIFlexboxContainerPtr(setup FlexboxContainer) *UIFlexboxContainer {
container := &UIFlexboxContainer{
UIPanel: NewPanel(frenyard.Vec2i{}),
}
InitUILayoutElementComponent(container)
container.SetContent(setup)
container.FyEResize(container._preferredSize)
return container
}
// FyLSubelementChanged implements UILayoutElement.FyLSubelementChanged
func (ufc *UIFlexboxContainer) FyLSubelementChanged() {
ufc._preferredSize = fyFlexboxGetPreferredSize(ufc._state)
ufc.ThisUILayoutElementComponentDetails.ContentChanged()
}
// FyLSizeForLimits implements UILayoutElement.FyLSizeForLimits
func (ufc *UIFlexboxContainer) FyLSizeForLimits(limits frenyard.Vec2i) frenyard.Vec2i {
if limits.Ge(ufc._preferredSize) {
return ufc._preferredSize
}
solved := fyFlexboxSolveLayout(ufc._state, limits)
max := frenyard.Vec2i{}
for _, v := range solved {
max = max.Max(v.Pos().Add(v.Size()))
}
return max
}
// SetContent changes the contents of the UIFlexboxContainer.
func (ufc *UIFlexboxContainer) SetContent(setup FlexboxContainer) {
if ufc._state.Slots != nil {
for _, v := range ufc._state.Slots {
if v.Element != nil {
ufc.ThisUILayoutElementComponentDetails.Detach(v.Element)
}
}
}
ufc._state = setup
for _, v := range setup.Slots {
if v.Element != nil {
ufc.ThisUILayoutElementComponentDetails.Attach(v.Element)
}
}
ufc.FyLSubelementChanged()
}
// FyEResize overrides UIPanel.FyEResize
func (ufc *UIFlexboxContainer) FyEResize(size frenyard.Vec2i) {
ufc.UIPanel.FyEResize(size)
areas := fyFlexboxSolveLayout(ufc._state, size)
fixes := make([]PanelFixedElement, len(areas))
fixesCount := 0
for idx, slot := range ufc._state.Slots {
if slot.Element != nil {
fixes[fixesCount].Pos = areas[idx].Pos()
fixes[fixesCount].Visible = true
fixes[fixesCount].Element = slot.Element
slot.Element.FyEResize(areas[idx].Size())
fixesCount++
}
}
ufc.ThisUIPanelDetails.SetContent(fixes[:fixesCount])
}
| Len | identifier_name |
uiLibFlexbox.go | package framework
import (
"fmt"
"sort"
"github.com/CCDirectLink/CCUpdaterUI/frenyard"
)
// Implements a highly limited subset of flexbox to be extended to full support as-needed.
// FlexboxWrapMode describes a type of wrapping mode for Flexbox containers.
type FlexboxWrapMode uint8
// FlexboxWrapModeNone disallows wrapping for items, they are all on one line.
const FlexboxWrapModeNone FlexboxWrapMode = 0
// FlexboxWrapModeWrap allows items to wrap between lines.
const FlexboxWrapModeWrap FlexboxWrapMode = 1
// FlexboxContainer describes a UIFlexboxContainer's contents.
type FlexboxContainer struct {
DirVertical bool
WrapMode FlexboxWrapMode
// Ignored when used by the line solver; it uses fyFlexboxSlotlike instead
Slots []FlexboxSlot
// This is NOT part of the public API. It is provided for debugging use only.
// Do not even mention this field in releases.
Debug bool
}
// FlexboxSlot describes an element within a Flexbox container.
type FlexboxSlot struct {
// Can be nil.
Element UILayoutElement
// If there is a surplus, these are used to distribute it.
Grow int32
// If there is a deficit, these are used to distribute it (along with minimum sizes)
// DEFAULTS TO 1 IN CSS
Shrink int32
// If *non-zero*, then this specifies the "initial share size" of this element.
// Useful when Element is nil.
Basis int32
// Slightly non-standard extension (or is it?) for cases where Basis would be used to pad a problematic element
MinBasis int32
// Used to order the flexboxes visually. The Z-Order remains the index order.
Order int
// If the container should respect the minimum size of this slot.
// DEFAULTS TO TRUE IN CSS (settable to false by overriding min w/h), BUT THIS IS REALLY STUPID B/C IT IGNORES SIZE CONSTRAINTS, SO LET'S NOT DO THAT
RespectMinimumSize bool
}
type fyFlexboxSlotlike interface {
fyMainCrossSizeForMainCrossLimits(limits frenyard.Vec2i, vertical bool, debug bool) frenyard.Vec2i
fyGrowShrink() (int32, int32)
fyCalcBasis(cross int32, vertical bool) int32
fyGetOrder() int
fyRespectMinimumSize() bool
}
func (slot FlexboxSlot) fyMainCrossSizeForMainCrossLimits(limits frenyard.Vec2i, vertical bool, debug bool) frenyard.Vec2i {
if slot.Element == nil {
return frenyard.Vec2i{}
}
if debug {
fmt.Print("?")
}
mainCrossSize := slot.Element.FyLSizeForLimits(limits.ConditionalTranspose(vertical)).ConditionalTranspose(vertical)
mainCrossSize.X = frenyard.Max(mainCrossSize.X, slot.MinBasis)
return mainCrossSize
}
func (slot FlexboxSlot) fyGrowShrink() (int32, int32) {
return slot.Grow, slot.Shrink
}
func (slot FlexboxSlot) fyCalcBasis(cross int32, vertical bool) int32 {
if slot.Basis != 0 {
return slot.Basis
}
return slot.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2i{frenyard.SizeUnlimited, cross}, vertical, false).X
}
func (slot FlexboxSlot) fyGetOrder() int {
return slot.Order
}
func (slot FlexboxSlot) fyRespectMinimumSize() bool {
return slot.RespectMinimumSize
}
// -- Solver --
func fyFlexboxGetPreferredSize(details FlexboxContainer) frenyard.Vec2i {
// Do note, this is in main/cross format.
mainCrossSize := frenyard.Vec2i{}
for _, v := range details.Slots {
sz := v.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2iUnlimited(), details.DirVertical, false)
mainCrossSize.X += sz.X
mainCrossSize.Y = frenyard.Max(mainCrossSize.Y, sz.Y)
}
return mainCrossSize.ConditionalTranspose(details.DirVertical)
}
type fyFlexboxRow struct {
elem []fyFlexboxSlotlike
area []frenyard.Area2i
fullArea frenyard.Area2i
}
func (slot fyFlexboxRow) fyGrowShrink() (int32, int32) {
return 1, 1
}
// Critical to the whole thing and it's full of guesswork due to the vertical flags and axis juggling.
func (slot fyFlexboxRow) fyMainCrossSizeForMainCrossLimits(limits frenyard.Vec2i, vertical bool, debug bool) frenyard.Vec2i {
if debug {
fmt.Print("R{")
}
// Main & Cross in here refer to in the row flexbox, not the outer flexbox.
maximumMain := int32(0)
presentAreaCross := slot.fullArea.Size().ConditionalTranspose(vertical).Y
for _, v := range slot.elem {
lim := frenyard.Vec2i{X: limits.X, Y: presentAreaCross}
if debug {
fmt.Print(" ", limits.X, "x", presentAreaCross)
}
rcs := v.fyMainCrossSizeForMainCrossLimits(lim, vertical, false)
maximumMain = frenyard.Max(maximumMain, rcs.X)
if debug {
fmt.Print(":", rcs.X, "x", rcs.Y)
}
}
if debug {
fmt.Print(" }")
}
return frenyard.Vec2i{X: maximumMain, Y: presentAreaCross}
}
func (slot fyFlexboxRow) fyCalcBasis(cross int32, vertical bool) int32 {
return slot.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2i{X: frenyard.SizeUnlimited, Y: cross}, vertical, false).X
}
func (slot fyFlexboxRow) fyGetOrder() int {
return 0
}
func (slot fyFlexboxRow) fyRespectMinimumSize() bool {
return false
}
// Do be aware, this only handles the one relevant axis.
func (slot *fyFlexboxRow) Fill(area frenyard.Area2i, vertical bool) {
for k := range slot.area {
if !vertical {
// Rows perpendicular to X
slot.area[k].X = area.X
} else {
// Rows perpendicular to Y
slot.area[k].Y = area.Y
}
}
slot.fullArea = area
}
type fyFlexboxSortingCollection struct {
// The collection being sorted.
slots []fyFlexboxSlotlike
// Given a SOURCE slot index, what is the RESULTING slot index?
originalToDisplayIndices []int
// Given a RESULTING slot index, what is the SOURCE slot index?
displayToOriginalIndices []int
}
func (sc fyFlexboxSortingCollection) Len() int {
return len(sc.slots)
}
func (sc fyFlexboxSortingCollection) Less(i int, j int) bool {
order1 := sc.slots[i].fyGetOrder()
order2 := sc.slots[j].fyGetOrder()
// Order1 != order2?
if order1 < order2 {
return true
}
if order1 > order2 {
return false
}
// No, they're equal. Sort by original index.
if sc.displayToOriginalIndices[i] < sc.displayToOriginalIndices[j] {
return true
}
return false
}
func (sc fyFlexboxSortingCollection) Swap(i int, j int) |
func fyFlexboxSolveLayout(details FlexboxContainer, limits frenyard.Vec2i) []frenyard.Area2i {
// Stage 1. Element order pre-processing (DirReverse)
slots := make([]fyFlexboxSlotlike, len(details.Slots))
originalToDisplayIndices := make([]int, len(details.Slots))
displayToOriginalIndices := make([]int, len(details.Slots))
for k, v := range details.Slots {
originalToDisplayIndices[k] = k
displayToOriginalIndices[k] = k
slots[k] = v
}
sort.Sort(fyFlexboxSortingCollection{
slots: slots,
originalToDisplayIndices: originalToDisplayIndices,
displayToOriginalIndices: displayToOriginalIndices,
})
// Stage 2. Wrapping (if relevant)
out := make([]frenyard.Area2i, len(slots))
mainCrossLimits := limits.ConditionalTranspose(details.DirVertical)
shouldWrap := fyFlexboxSolveLine(details, slots, out, mainCrossLimits, details.Debug)
// One row, so this is simple
rows := []fyFlexboxRow{{slots, out, frenyard.UnionArea2i(out)}}
if shouldWrap && details.WrapMode != FlexboxWrapModeNone {
// Wrapping has to start. Oh no...
// Do note, lines is implicitly limited because of the "one slot cannot wrap" rule.
lines := int32(2)
for {
rows = make([]fyFlexboxRow, lines)
lineStartSlot := 0
consumedSlots := 0
currentLine := int32(0)
for consumedSlots < len(slots) {
// If it wraps...
if fyFlexboxSolveLine(details, slots[lineStartSlot:consumedSlots+1], out[lineStartSlot:consumedSlots+1], mainCrossLimits, false) {
// Revert it & finish the line.
rows[currentLine] = fyFlexboxRow{
slots[lineStartSlot:consumedSlots],
out[lineStartSlot:consumedSlots],
frenyard.UnionArea2i(out),
}
fyFlexboxSolveLine(details, rows[currentLine].elem, rows[currentLine].area, mainCrossLimits, false)
// Now setup the new line.
currentLine++
lineStartSlot = consumedSlots
if currentLine == lines {
// Out of range, cancel before rows[currentLine] brings it to a halt
break
}
// Retry the same slot (slot not consumed)
} else {
// Success! Advance.
consumedSlots++
}
}
if currentLine < lines {
// Finish last line
rows[currentLine] = fyFlexboxRow{
slots[lineStartSlot:consumedSlots],
out[lineStartSlot:consumedSlots],
frenyard.UnionArea2i(out),
}
break
}
lines++
}
}
if details.WrapMode != FlexboxWrapModeNone {
// Stage 3. Row compression
rowAreas := make([]frenyard.Area2i, len(rows))
rowSlots := make([]fyFlexboxSlotlike, len(rows))
for rk, row := range rows {
rowSlots[rk] = row
}
fyFlexboxSolveLine(FlexboxContainer{
DirVertical: !details.DirVertical,
WrapMode: FlexboxWrapModeNone,
}, rowSlots, rowAreas, frenyard.Vec2i{mainCrossLimits.Y, mainCrossLimits.X}, false)
for rk, row := range rows {
row.Fill(rowAreas[rk], !details.DirVertical)
}
} else {
// Stage 3. Row setup
if mainCrossLimits.Y != frenyard.SizeUnlimited {
rows[0].Fill(frenyard.Area2iOfSize(mainCrossLimits.ConditionalTranspose(details.DirVertical)), !details.DirVertical)
}
}
// Stage 4. Element order post-processing (DirReverse)
realOutput := make([]frenyard.Area2i, len(out))
for k, v := range originalToDisplayIndices {
realOutput[k] = out[v]
}
return realOutput
}
// Returns true if should wrap. Will not return true ever for only one slot as this cannot wrap.
func fyFlexboxSolveLine(details FlexboxContainer, slots []fyFlexboxSlotlike, out []frenyard.Area2i, mainCrossLimits frenyard.Vec2i, debug bool) bool {
if len(slots) == 0 {
// Nowhere to output. Also, some calculations rely on at least one slot existing.
return false
}
if debug {
if details.DirVertical {
fmt.Print("VERTICAL ")
}
fmt.Println("AREA", mainCrossLimits.X, "x", mainCrossLimits.Y)
}
// Substage 1. Input basis values & create total
shares := make([]int32, len(slots))
totalMainAccumulator := int32(0)
totalGrowAccumulator := int32(0)
totalShrinkAccumulator := int32(0)
for idx, slot := range slots {
shares[idx] = slot.fyCalcBasis(mainCrossLimits.Y, details.DirVertical)
totalMainAccumulator += shares[idx]
slotGrow, slotShrink := slot.fyGrowShrink()
totalGrowAccumulator += slotGrow
totalShrinkAccumulator += slotShrink
}
// Notably, totalMainAccumulator must not change after this point.
// It's the 'reference' for if we ought to wrap.
// Substage 2. Determine expansion or contraction
if mainCrossLimits.X != frenyard.SizeUnlimited && totalMainAccumulator != mainCrossLimits.X {
additionalSpaceAvailable := mainCrossLimits.X - totalMainAccumulator
if debug {
fmt.Println("COMPRESSOR II: ", additionalSpaceAvailable)
}
// Determine which accumulator to use.
totalFactorAccumulator := totalGrowAccumulator
if additionalSpaceAvailable < 0 {
totalFactorAccumulator = totalShrinkAccumulator
}
// Actually redistribute space. This may require multiple passes as a factor may not always be fully appliable.
// This is because the Flexbox system respects minimum size.
// When set to true, the relevant factor must be subtracted from the Accumulator.
slotsHitMinimumSize := make([]bool, len(slots))
needAnotherPass := true
for needAnotherPass && totalFactorAccumulator != 0 {
needAnotherPass = false
totalAlloc := int32(0)
for idx, slot := range slots {
if slotsHitMinimumSize[idx] {
continue
}
grow, shrink := slot.fyGrowShrink()
factor := grow
smallestAlloc := -shares[idx] // Cannot shrink below 0
largestAlloc := frenyard.SizeUnlimited
// There is no 'largest alloc'; if the element is told to grow, that is what it will do
if additionalSpaceAvailable < 0 {
factor = shrink
}
if factor == 0 {
// has no effect, and means totalFactorAccumulator could be 0
continue
}
if additionalSpaceAvailable < 0 && shrink > 0 && slot.fyRespectMinimumSize() {
// Smallest possible alloc: maximum amount that can be shrunk
smallestAlloc = slot.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2i{X: 0, Y: mainCrossLimits.Y}, details.DirVertical, false).X - shares[idx]
}
alloc := (additionalSpaceAvailable * factor) / totalFactorAccumulator
// Limit allocation.
clamped := false
if alloc <= smallestAlloc {
alloc = smallestAlloc
clamped = true
}
if alloc >= largestAlloc {
alloc = largestAlloc
clamped = true
}
// If the limit is hit, remove from processing for the next loop.
if clamped {
slotsHitMinimumSize[idx] = true
needAnotherPass = true
totalFactorAccumulator -= factor
}
// Confirm allocation
shares[idx] += alloc
totalAlloc += alloc
}
additionalSpaceAvailable -= totalAlloc
}
// additionalSpaceAvailable non-zero: justify-content implementation goes here
}
// Substage 3. With horizontal sizes established, calculate crossLimit
crossLimit := int32(0)
for idx := 0; idx < len(slots); idx++ {
crossLimit = frenyard.Max(crossLimit, slots[idx].fyMainCrossSizeForMainCrossLimits(frenyard.Vec2i{X: shares[idx], Y: mainCrossLimits.Y}, details.DirVertical, false).Y)
}
// -- Actual layout! For real this time! --
mainPosition := int32(0)
if debug {
fmt.Println(" CROSS ", crossLimit)
}
for idx := 0; idx < len(slots); idx++ {
out[idx] = frenyard.Area2iOfSize(frenyard.Vec2i{shares[idx], crossLimit}.ConditionalTranspose(details.DirVertical)).Translate(frenyard.Vec2i{mainPosition, 0}.ConditionalTranspose(details.DirVertical))
if debug {
fmt.Println(" SHARE ", shares[idx])
}
mainPosition += shares[idx]
}
if debug {
fmt.Println("END AREA")
}
// If len(slots) <= 1 then wrapping would inf. loop, so only wrap for >1.
return (len(slots) > 1) && (totalMainAccumulator > mainCrossLimits.X)
}
// -- UI element --
// UIFlexboxContainer lays out UILayoutElements using a partial implementation of Flexbox.
type UIFlexboxContainer struct {
UIPanel
UILayoutElementComponent
_state FlexboxContainer
_preferredSize frenyard.Vec2i
}
// NewUIFlexboxContainerPtr creates a UIFlexboxContainer from the FlexboxContainer details
func NewUIFlexboxContainerPtr(setup FlexboxContainer) *UIFlexboxContainer {
container := &UIFlexboxContainer{
UIPanel: NewPanel(frenyard.Vec2i{}),
}
InitUILayoutElementComponent(container)
container.SetContent(setup)
container.FyEResize(container._preferredSize)
return container
}
// FyLSubelementChanged implements UILayoutElement.FyLSubelementChanged
func (ufc *UIFlexboxContainer) FyLSubelementChanged() {
ufc._preferredSize = fyFlexboxGetPreferredSize(ufc._state)
ufc.ThisUILayoutElementComponentDetails.ContentChanged()
}
// FyLSizeForLimits implements UILayoutElement.FyLSizeForLimits
func (ufc *UIFlexboxContainer) FyLSizeForLimits(limits frenyard.Vec2i) frenyard.Vec2i {
if limits.Ge(ufc._preferredSize) {
return ufc._preferredSize
}
solved := fyFlexboxSolveLayout(ufc._state, limits)
max := frenyard.Vec2i{}
for _, v := range solved {
max = max.Max(v.Pos().Add(v.Size()))
}
return max
}
// SetContent changes the contents of the UIFlexboxContainer.
func (ufc *UIFlexboxContainer) SetContent(setup FlexboxContainer) {
if ufc._state.Slots != nil {
for _, v := range ufc._state.Slots {
if v.Element != nil {
ufc.ThisUILayoutElementComponentDetails.Detach(v.Element)
}
}
}
ufc._state = setup
for _, v := range setup.Slots {
if v.Element != nil {
ufc.ThisUILayoutElementComponentDetails.Attach(v.Element)
}
}
ufc.FyLSubelementChanged()
}
// FyEResize overrides UIPanel.FyEResize
func (ufc *UIFlexboxContainer) FyEResize(size frenyard.Vec2i) {
ufc.UIPanel.FyEResize(size)
areas := fyFlexboxSolveLayout(ufc._state, size)
fixes := make([]PanelFixedElement, len(areas))
fixesCount := 0
for idx, slot := range ufc._state.Slots {
if slot.Element != nil {
fixes[fixesCount].Pos = areas[idx].Pos()
fixes[fixesCount].Visible = true
fixes[fixesCount].Element = slot.Element
slot.Element.FyEResize(areas[idx].Size())
fixesCount++
}
}
ufc.ThisUIPanelDetails.SetContent(fixes[:fixesCount])
}
| {
backup := sc.slots[i]
backup2 := sc.originalToDisplayIndices[i]
backup3 := sc.displayToOriginalIndices[i]
sc.slots[i] = sc.slots[j]
sc.originalToDisplayIndices[i] = sc.originalToDisplayIndices[j]
sc.displayToOriginalIndices[i] = sc.displayToOriginalIndices[j]
sc.slots[j] = backup
sc.originalToDisplayIndices[j] = backup2
sc.displayToOriginalIndices[j] = backup3
} | identifier_body |
typegenAutoConfig.ts | import { GraphQLNamedType, GraphQLSchema, isOutputType } from 'graphql'
import type { TypegenInfo } from './builder'
import type { TypingImport } from './definitions/_types'
import { TYPEGEN_HEADER } from './lang'
import { nodeImports } from './node'
import { getOwnPackage, log, objValues, relativePathTo, typeScriptFileExtension } from './utils'
/** Any common types / constants that would otherwise be circular-imported */
export const SCALAR_TYPES = {
Int: 'number',
String: 'string',
ID: 'string',
Float: 'number',
Boolean: 'boolean',
}
export interface SourceTypeModule {
/**
* The module for where to look for the types. This uses the node resolution algorithm via require.resolve,
* so if this lives in node_modules, you can just provide the module name otherwise you should provide the
* absolute path to the file.
*/
module: string
/**
* When we import the module, we use `import * as ____` to prevent conflicts. This alias should be a name
* that doesn't conflict with any other types, usually a short lowercase name.
*/
alias: string
/**
* Provides a custom approach to matching for the type
*
* If not provided, the default implementation is:
*
* (type) => [ new RegExp(`(?:interface|type|class|enum)\\s+(${type.name})\\W`, "g"), ]
*/
typeMatch?: (type: GraphQLNamedType, defaultRegex: RegExp) => RegExp | RegExp[]
/**
* A list of typesNames or regular expressions matching type names that should be resolved by this import.
* Provide an empty array if you wish to use the file for context and ensure no other types are matched.
*/
onlyTypes?: (string | RegExp)[]
/**
* By default the import is configured `import * as alias from`, setting glob to false will change this to
* `import alias from`
*/
glob?: false
}
export interface SourceTypesConfigOptions {
/** Any headers to prefix on the generated type file */
headers?: string[]
/**
* Array of SourceTypeModule's to look in and match the type names against.
*
* @example
* modules: [
* { module: 'typescript', alias: 'ts' },
* { module: path.join(__dirname, '../sourceTypes'), alias: 'b' },
* ]
*/
modules: SourceTypeModule[]
/**
* Types that should not be matched for a source type,
*
* By default this is set to ['Query', 'Mutation', 'Subscription']
*
* @example
* skipTypes: ['Query', 'Mutation', /(.*?)Edge/, /(.*?)Connection/]
*/
skipTypes?: (string | RegExp)[]
/**
* If debug is set to true, this will log out info about all types found, skipped, etc. for the type
* generation files. @default false
*/
debug?: boolean
/**
* If provided this will be used for the source types rather than the auto-resolve mechanism above. Useful
* as an override for one-off cases, or for scalar source types.
*/
mapping?: Record<string, string>
}
/**
* This is an approach for handling type definition auto-resolution. It is designed to handle the most common
* cases, as can be seen in the examples / the simplicity of the implementation.
*
* If you wish to do something more complex, involving full AST parsing, etc, you can provide a different
* function to the `typegenInfo` property of the `makeSchema` config.
*
* @param options
*/
export function typegenAutoConfig(options: SourceTypesConfigOptions, contextType: TypingImport | undefined) {
return async (schema: GraphQLSchema, outputPath: string): Promise<TypegenInfo> => {
const {
headers,
skipTypes = ['Query', 'Mutation', 'Subscription'],
mapping: _sourceTypeMap,
debug,
} = options
const typeMap = schema.getTypeMap()
const typesToIgnore = new Set<string>()
const typesToIgnoreRegex: RegExp[] = []
const allImportsMap: Record<string, string> = {} | }
const forceImports = new Set(
objValues(sourceTypeMap)
.concat(typeof contextType === 'string' ? contextType || '' : '')
.map((t) => {
const match = t.match(/^(\w+)\./)
return match ? match[1] : null
})
.filter((f) => f)
)
skipTypes.forEach((skip) => {
if (typeof skip === 'string') {
typesToIgnore.add(skip)
} else if (skip instanceof RegExp) {
typesToIgnoreRegex.push(skip)
} else {
throw new Error('Invalid type for options.skipTypes, expected string or RegExp')
}
})
const path = nodeImports().path
const typeSources = await Promise.all(
options.modules.map(async (source) => {
// Keeping all of this in here so if we don't have any sources
// e.g. in the Playground, it doesn't break things.
const { module: pathOrModule, glob = true, onlyTypes, alias, typeMatch } = source
if (path.isAbsolute(pathOrModule) && path.extname(pathOrModule) !== '.ts') {
return console.warn(
`Nexus Schema Typegen: Expected module ${pathOrModule} to be an absolute path to a TypeScript module, skipping.`
)
}
let resolvedPath: string
let fileContents: string
try {
resolvedPath = require.resolve(pathOrModule, {
paths: [process.cwd()],
})
if (path.extname(resolvedPath) !== '.ts') {
resolvedPath = findTypingForFile(resolvedPath, pathOrModule)
}
fileContents = String(await nodeImports().fs.promises.readFile(resolvedPath, 'utf-8'))
} catch (e) {
if (e instanceof Error && e.message.indexOf('Cannot find module') !== -1) {
console.error(`GraphQL Nexus: Unable to find file or module ${pathOrModule}, skipping`)
} else {
console.error(e.message)
}
return null
}
const importPath = (
path.isAbsolute(pathOrModule) ? relativePathTo(resolvedPath, outputPath) : pathOrModule
).replace(typeScriptFileExtension, '')
if (allImportsMap[alias] && allImportsMap[alias] !== importPath) {
return console.warn(
`Nexus Schema Typegen: Cannot have multiple type sources ${importsMap[alias]} and ${pathOrModule} with the same alias ${alias}, skipping`
)
}
allImportsMap[alias] = importPath
if (forceImports.has(alias)) {
importsMap[alias] = [importPath, glob]
forceImports.delete(alias)
}
return {
alias,
glob,
importPath,
fileContents,
onlyTypes,
typeMatch: typeMatch || defaultTypeMatcher,
}
})
)
const builtinScalars = new Set(Object.keys(SCALAR_TYPES))
Object.keys(typeMap).forEach((typeName) => {
if (typeName.startsWith('__')) {
return
}
if (typesToIgnore.has(typeName)) {
return
}
if (typesToIgnoreRegex.some((r) => r.test(typeName))) {
return
}
if (sourceTypeMap[typeName]) {
return
}
if (builtinScalars.has(typeName)) {
return
}
const type = schema.getType(typeName)
// For now we'll say that if it's output type it can be backed
if (isOutputType(type)) {
for (let i = 0; i < typeSources.length; i++) {
const typeSource = typeSources[i]
if (!typeSource) {
continue
}
// If we've specified an array of "onlyTypes" to match ensure the
// `typeName` falls within that list.
if (typeSource.onlyTypes) {
if (
!typeSource.onlyTypes.some((t) => {
return t instanceof RegExp ? t.test(typeName) : t === typeName
})
) {
continue
}
}
const { fileContents, importPath, glob, alias, typeMatch } = typeSource
const typeRegex = typeMatch(type, defaultTypeMatcher(type)[0])
const matched = firstMatch(fileContents, Array.isArray(typeRegex) ? typeRegex : [typeRegex])
if (matched) {
if (debug) {
log(`Matched type - ${typeName} in "${importPath}" - ${alias}.${matched[1]}`)
}
importsMap[alias] = [importPath, glob]
sourceTypeMap[typeName] = `${alias}.${matched[1]}`
} else {
if (debug) {
log(`No match for ${typeName} in "${importPath}" using ${typeRegex}`)
}
}
}
}
})
if (forceImports.size > 0) {
console.error(`Missing required typegen import: ${Array.from(forceImports)}`)
}
const imports: string[] = []
Object.keys(importsMap)
.sort()
.forEach((alias) => {
const [importPath, glob] = importsMap[alias]
const safeImportPath = importPath.replace(/\\+/g, '/')
imports.push(`import type ${glob ? '* as ' : ''}${alias} from "${safeImportPath}"`)
})
const typegenInfo = {
headers: headers || [TYPEGEN_HEADER],
sourceTypeMap,
imports,
contextTypeImport: contextType,
nexusSchemaImportId: getOwnPackage().name,
}
return typegenInfo
}
}
function findTypingForFile(absolutePath: string, pathOrModule: string) {
// First try to find the "d.ts" adjacent to the file
try {
const typeDefPath = absolutePath.replace(nodeImports().path.extname(absolutePath), '.d.ts')
require.resolve(typeDefPath)
return typeDefPath
} catch (e) {
console.error(e)
}
// TODO: need to figure out cases where it's a node module
// and "typings" is set in the package.json
throw new Error(`Unable to find typings associated with ${pathOrModule}, skipping`)
}
const firstMatch = (fileContents: string, typeRegex: RegExp[]): RegExpExecArray | null => {
for (let i = 0; i < typeRegex.length; i++) {
const regex = typeRegex[i]
const match = regex.exec(fileContents)
if (match) {
return match
}
}
return null
}
const defaultTypeMatcher = (type: GraphQLNamedType) => {
return [new RegExp(`(?:interface|type|class|enum)\\s+(${type.name})\\W`, 'g')]
} | const importsMap: Record<string, [string, boolean]> = {}
const sourceTypeMap: Record<string, string> = {
...SCALAR_TYPES,
..._sourceTypeMap, | random_line_split |
typegenAutoConfig.ts | import { GraphQLNamedType, GraphQLSchema, isOutputType } from 'graphql'
import type { TypegenInfo } from './builder'
import type { TypingImport } from './definitions/_types'
import { TYPEGEN_HEADER } from './lang'
import { nodeImports } from './node'
import { getOwnPackage, log, objValues, relativePathTo, typeScriptFileExtension } from './utils'
/** Any common types / constants that would otherwise be circular-imported */
export const SCALAR_TYPES = {
Int: 'number',
String: 'string',
ID: 'string',
Float: 'number',
Boolean: 'boolean',
}
export interface SourceTypeModule {
/**
* The module for where to look for the types. This uses the node resolution algorithm via require.resolve,
* so if this lives in node_modules, you can just provide the module name otherwise you should provide the
* absolute path to the file.
*/
module: string
/**
* When we import the module, we use `import * as ____` to prevent conflicts. This alias should be a name
* that doesn't conflict with any other types, usually a short lowercase name.
*/
alias: string
/**
* Provides a custom approach to matching for the type
*
* If not provided, the default implementation is:
*
* (type) => [ new RegExp(`(?:interface|type|class|enum)\\s+(${type.name})\\W`, "g"), ]
*/
typeMatch?: (type: GraphQLNamedType, defaultRegex: RegExp) => RegExp | RegExp[]
/**
* A list of typesNames or regular expressions matching type names that should be resolved by this import.
* Provide an empty array if you wish to use the file for context and ensure no other types are matched.
*/
onlyTypes?: (string | RegExp)[]
/**
* By default the import is configured `import * as alias from`, setting glob to false will change this to
* `import alias from`
*/
glob?: false
}
export interface SourceTypesConfigOptions {
/** Any headers to prefix on the generated type file */
headers?: string[]
/**
* Array of SourceTypeModule's to look in and match the type names against.
*
* @example
* modules: [
* { module: 'typescript', alias: 'ts' },
* { module: path.join(__dirname, '../sourceTypes'), alias: 'b' },
* ]
*/
modules: SourceTypeModule[]
/**
* Types that should not be matched for a source type,
*
* By default this is set to ['Query', 'Mutation', 'Subscription']
*
* @example
* skipTypes: ['Query', 'Mutation', /(.*?)Edge/, /(.*?)Connection/]
*/
skipTypes?: (string | RegExp)[]
/**
* If debug is set to true, this will log out info about all types found, skipped, etc. for the type
* generation files. @default false
*/
debug?: boolean
/**
* If provided this will be used for the source types rather than the auto-resolve mechanism above. Useful
* as an override for one-off cases, or for scalar source types.
*/
mapping?: Record<string, string>
}
/**
* This is an approach for handling type definition auto-resolution. It is designed to handle the most common
* cases, as can be seen in the examples / the simplicity of the implementation.
*
* If you wish to do something more complex, involving full AST parsing, etc, you can provide a different
* function to the `typegenInfo` property of the `makeSchema` config.
*
* @param options
*/
export function typegenAutoConfig(options: SourceTypesConfigOptions, contextType: TypingImport | undefined) {
return async (schema: GraphQLSchema, outputPath: string): Promise<TypegenInfo> => {
const {
headers,
skipTypes = ['Query', 'Mutation', 'Subscription'],
mapping: _sourceTypeMap,
debug,
} = options
const typeMap = schema.getTypeMap()
const typesToIgnore = new Set<string>()
const typesToIgnoreRegex: RegExp[] = []
const allImportsMap: Record<string, string> = {}
const importsMap: Record<string, [string, boolean]> = {}
const sourceTypeMap: Record<string, string> = {
...SCALAR_TYPES,
..._sourceTypeMap,
}
const forceImports = new Set(
objValues(sourceTypeMap)
.concat(typeof contextType === 'string' ? contextType || '' : '')
.map((t) => {
const match = t.match(/^(\w+)\./)
return match ? match[1] : null
})
.filter((f) => f)
)
skipTypes.forEach((skip) => {
if (typeof skip === 'string') {
typesToIgnore.add(skip)
} else if (skip instanceof RegExp) {
typesToIgnoreRegex.push(skip)
} else {
throw new Error('Invalid type for options.skipTypes, expected string or RegExp')
}
})
const path = nodeImports().path
const typeSources = await Promise.all(
options.modules.map(async (source) => {
// Keeping all of this in here so if we don't have any sources
// e.g. in the Playground, it doesn't break things.
const { module: pathOrModule, glob = true, onlyTypes, alias, typeMatch } = source
if (path.isAbsolute(pathOrModule) && path.extname(pathOrModule) !== '.ts') {
return console.warn(
`Nexus Schema Typegen: Expected module ${pathOrModule} to be an absolute path to a TypeScript module, skipping.`
)
}
let resolvedPath: string
let fileContents: string
try {
resolvedPath = require.resolve(pathOrModule, {
paths: [process.cwd()],
})
if (path.extname(resolvedPath) !== '.ts') {
resolvedPath = findTypingForFile(resolvedPath, pathOrModule)
}
fileContents = String(await nodeImports().fs.promises.readFile(resolvedPath, 'utf-8'))
} catch (e) {
if (e instanceof Error && e.message.indexOf('Cannot find module') !== -1) {
console.error(`GraphQL Nexus: Unable to find file or module ${pathOrModule}, skipping`)
} else {
console.error(e.message)
}
return null
}
const importPath = (
path.isAbsolute(pathOrModule) ? relativePathTo(resolvedPath, outputPath) : pathOrModule
).replace(typeScriptFileExtension, '')
if (allImportsMap[alias] && allImportsMap[alias] !== importPath) {
return console.warn(
`Nexus Schema Typegen: Cannot have multiple type sources ${importsMap[alias]} and ${pathOrModule} with the same alias ${alias}, skipping`
)
}
allImportsMap[alias] = importPath
if (forceImports.has(alias)) {
importsMap[alias] = [importPath, glob]
forceImports.delete(alias)
}
return {
alias,
glob,
importPath,
fileContents,
onlyTypes,
typeMatch: typeMatch || defaultTypeMatcher,
}
})
)
const builtinScalars = new Set(Object.keys(SCALAR_TYPES))
Object.keys(typeMap).forEach((typeName) => {
if (typeName.startsWith('__')) {
return
}
if (typesToIgnore.has(typeName)) {
return
}
if (typesToIgnoreRegex.some((r) => r.test(typeName))) {
return
}
if (sourceTypeMap[typeName]) {
return
}
if (builtinScalars.has(typeName)) {
return
}
const type = schema.getType(typeName)
// For now we'll say that if it's output type it can be backed
if (isOutputType(type)) {
for (let i = 0; i < typeSources.length; i++) |
}
})
if (forceImports.size > 0) {
console.error(`Missing required typegen import: ${Array.from(forceImports)}`)
}
const imports: string[] = []
Object.keys(importsMap)
.sort()
.forEach((alias) => {
const [importPath, glob] = importsMap[alias]
const safeImportPath = importPath.replace(/\\+/g, '/')
imports.push(`import type ${glob ? '* as ' : ''}${alias} from "${safeImportPath}"`)
})
const typegenInfo = {
headers: headers || [TYPEGEN_HEADER],
sourceTypeMap,
imports,
contextTypeImport: contextType,
nexusSchemaImportId: getOwnPackage().name,
}
return typegenInfo
}
}
function findTypingForFile(absolutePath: string, pathOrModule: string) {
// First try to find the "d.ts" adjacent to the file
try {
const typeDefPath = absolutePath.replace(nodeImports().path.extname(absolutePath), '.d.ts')
require.resolve(typeDefPath)
return typeDefPath
} catch (e) {
console.error(e)
}
// TODO: need to figure out cases where it's a node module
// and "typings" is set in the package.json
throw new Error(`Unable to find typings associated with ${pathOrModule}, skipping`)
}
const firstMatch = (fileContents: string, typeRegex: RegExp[]): RegExpExecArray | null => {
for (let i = 0; i < typeRegex.length; i++) {
const regex = typeRegex[i]
const match = regex.exec(fileContents)
if (match) {
return match
}
}
return null
}
const defaultTypeMatcher = (type: GraphQLNamedType) => {
return [new RegExp(`(?:interface|type|class|enum)\\s+(${type.name})\\W`, 'g')]
}
| {
const typeSource = typeSources[i]
if (!typeSource) {
continue
}
// If we've specified an array of "onlyTypes" to match ensure the
// `typeName` falls within that list.
if (typeSource.onlyTypes) {
if (
!typeSource.onlyTypes.some((t) => {
return t instanceof RegExp ? t.test(typeName) : t === typeName
})
) {
continue
}
}
const { fileContents, importPath, glob, alias, typeMatch } = typeSource
const typeRegex = typeMatch(type, defaultTypeMatcher(type)[0])
const matched = firstMatch(fileContents, Array.isArray(typeRegex) ? typeRegex : [typeRegex])
if (matched) {
if (debug) {
log(`Matched type - ${typeName} in "${importPath}" - ${alias}.${matched[1]}`)
}
importsMap[alias] = [importPath, glob]
sourceTypeMap[typeName] = `${alias}.${matched[1]}`
} else {
if (debug) {
log(`No match for ${typeName} in "${importPath}" using ${typeRegex}`)
}
}
} | conditional_block |
typegenAutoConfig.ts | import { GraphQLNamedType, GraphQLSchema, isOutputType } from 'graphql'
import type { TypegenInfo } from './builder'
import type { TypingImport } from './definitions/_types'
import { TYPEGEN_HEADER } from './lang'
import { nodeImports } from './node'
import { getOwnPackage, log, objValues, relativePathTo, typeScriptFileExtension } from './utils'
/** Any common types / constants that would otherwise be circular-imported */
export const SCALAR_TYPES = {
Int: 'number',
String: 'string',
ID: 'string',
Float: 'number',
Boolean: 'boolean',
}
export interface SourceTypeModule {
/**
* The module for where to look for the types. This uses the node resolution algorithm via require.resolve,
* so if this lives in node_modules, you can just provide the module name otherwise you should provide the
* absolute path to the file.
*/
module: string
/**
* When we import the module, we use `import * as ____` to prevent conflicts. This alias should be a name
* that doesn't conflict with any other types, usually a short lowercase name.
*/
alias: string
/**
* Provides a custom approach to matching for the type
*
* If not provided, the default implementation is:
*
* (type) => [ new RegExp(`(?:interface|type|class|enum)\\s+(${type.name})\\W`, "g"), ]
*/
typeMatch?: (type: GraphQLNamedType, defaultRegex: RegExp) => RegExp | RegExp[]
/**
* A list of typesNames or regular expressions matching type names that should be resolved by this import.
* Provide an empty array if you wish to use the file for context and ensure no other types are matched.
*/
onlyTypes?: (string | RegExp)[]
/**
* By default the import is configured `import * as alias from`, setting glob to false will change this to
* `import alias from`
*/
glob?: false
}
export interface SourceTypesConfigOptions {
/** Any headers to prefix on the generated type file */
headers?: string[]
/**
* Array of SourceTypeModule's to look in and match the type names against.
*
* @example
* modules: [
* { module: 'typescript', alias: 'ts' },
* { module: path.join(__dirname, '../sourceTypes'), alias: 'b' },
* ]
*/
modules: SourceTypeModule[]
/**
* Types that should not be matched for a source type,
*
* By default this is set to ['Query', 'Mutation', 'Subscription']
*
* @example
* skipTypes: ['Query', 'Mutation', /(.*?)Edge/, /(.*?)Connection/]
*/
skipTypes?: (string | RegExp)[]
/**
* If debug is set to true, this will log out info about all types found, skipped, etc. for the type
* generation files. @default false
*/
debug?: boolean
/**
* If provided this will be used for the source types rather than the auto-resolve mechanism above. Useful
* as an override for one-off cases, or for scalar source types.
*/
mapping?: Record<string, string>
}
/**
* This is an approach for handling type definition auto-resolution. It is designed to handle the most common
* cases, as can be seen in the examples / the simplicity of the implementation.
*
* If you wish to do something more complex, involving full AST parsing, etc, you can provide a different
* function to the `typegenInfo` property of the `makeSchema` config.
*
* @param options
*/
export function typegenAutoConfig(options: SourceTypesConfigOptions, contextType: TypingImport | undefined) |
function findTypingForFile(absolutePath: string, pathOrModule: string) {
// First try to find the "d.ts" adjacent to the file
try {
const typeDefPath = absolutePath.replace(nodeImports().path.extname(absolutePath), '.d.ts')
require.resolve(typeDefPath)
return typeDefPath
} catch (e) {
console.error(e)
}
// TODO: need to figure out cases where it's a node module
// and "typings" is set in the package.json
throw new Error(`Unable to find typings associated with ${pathOrModule}, skipping`)
}
const firstMatch = (fileContents: string, typeRegex: RegExp[]): RegExpExecArray | null => {
for (let i = 0; i < typeRegex.length; i++) {
const regex = typeRegex[i]
const match = regex.exec(fileContents)
if (match) {
return match
}
}
return null
}
const defaultTypeMatcher = (type: GraphQLNamedType) => {
return [new RegExp(`(?:interface|type|class|enum)\\s+(${type.name})\\W`, 'g')]
}
| {
return async (schema: GraphQLSchema, outputPath: string): Promise<TypegenInfo> => {
const {
headers,
skipTypes = ['Query', 'Mutation', 'Subscription'],
mapping: _sourceTypeMap,
debug,
} = options
const typeMap = schema.getTypeMap()
const typesToIgnore = new Set<string>()
const typesToIgnoreRegex: RegExp[] = []
const allImportsMap: Record<string, string> = {}
const importsMap: Record<string, [string, boolean]> = {}
const sourceTypeMap: Record<string, string> = {
...SCALAR_TYPES,
..._sourceTypeMap,
}
const forceImports = new Set(
objValues(sourceTypeMap)
.concat(typeof contextType === 'string' ? contextType || '' : '')
.map((t) => {
const match = t.match(/^(\w+)\./)
return match ? match[1] : null
})
.filter((f) => f)
)
skipTypes.forEach((skip) => {
if (typeof skip === 'string') {
typesToIgnore.add(skip)
} else if (skip instanceof RegExp) {
typesToIgnoreRegex.push(skip)
} else {
throw new Error('Invalid type for options.skipTypes, expected string or RegExp')
}
})
const path = nodeImports().path
const typeSources = await Promise.all(
options.modules.map(async (source) => {
// Keeping all of this in here so if we don't have any sources
// e.g. in the Playground, it doesn't break things.
const { module: pathOrModule, glob = true, onlyTypes, alias, typeMatch } = source
if (path.isAbsolute(pathOrModule) && path.extname(pathOrModule) !== '.ts') {
return console.warn(
`Nexus Schema Typegen: Expected module ${pathOrModule} to be an absolute path to a TypeScript module, skipping.`
)
}
let resolvedPath: string
let fileContents: string
try {
resolvedPath = require.resolve(pathOrModule, {
paths: [process.cwd()],
})
if (path.extname(resolvedPath) !== '.ts') {
resolvedPath = findTypingForFile(resolvedPath, pathOrModule)
}
fileContents = String(await nodeImports().fs.promises.readFile(resolvedPath, 'utf-8'))
} catch (e) {
if (e instanceof Error && e.message.indexOf('Cannot find module') !== -1) {
console.error(`GraphQL Nexus: Unable to find file or module ${pathOrModule}, skipping`)
} else {
console.error(e.message)
}
return null
}
const importPath = (
path.isAbsolute(pathOrModule) ? relativePathTo(resolvedPath, outputPath) : pathOrModule
).replace(typeScriptFileExtension, '')
if (allImportsMap[alias] && allImportsMap[alias] !== importPath) {
return console.warn(
`Nexus Schema Typegen: Cannot have multiple type sources ${importsMap[alias]} and ${pathOrModule} with the same alias ${alias}, skipping`
)
}
allImportsMap[alias] = importPath
if (forceImports.has(alias)) {
importsMap[alias] = [importPath, glob]
forceImports.delete(alias)
}
return {
alias,
glob,
importPath,
fileContents,
onlyTypes,
typeMatch: typeMatch || defaultTypeMatcher,
}
})
)
const builtinScalars = new Set(Object.keys(SCALAR_TYPES))
Object.keys(typeMap).forEach((typeName) => {
if (typeName.startsWith('__')) {
return
}
if (typesToIgnore.has(typeName)) {
return
}
if (typesToIgnoreRegex.some((r) => r.test(typeName))) {
return
}
if (sourceTypeMap[typeName]) {
return
}
if (builtinScalars.has(typeName)) {
return
}
const type = schema.getType(typeName)
// For now we'll say that if it's output type it can be backed
if (isOutputType(type)) {
for (let i = 0; i < typeSources.length; i++) {
const typeSource = typeSources[i]
if (!typeSource) {
continue
}
// If we've specified an array of "onlyTypes" to match ensure the
// `typeName` falls within that list.
if (typeSource.onlyTypes) {
if (
!typeSource.onlyTypes.some((t) => {
return t instanceof RegExp ? t.test(typeName) : t === typeName
})
) {
continue
}
}
const { fileContents, importPath, glob, alias, typeMatch } = typeSource
const typeRegex = typeMatch(type, defaultTypeMatcher(type)[0])
const matched = firstMatch(fileContents, Array.isArray(typeRegex) ? typeRegex : [typeRegex])
if (matched) {
if (debug) {
log(`Matched type - ${typeName} in "${importPath}" - ${alias}.${matched[1]}`)
}
importsMap[alias] = [importPath, glob]
sourceTypeMap[typeName] = `${alias}.${matched[1]}`
} else {
if (debug) {
log(`No match for ${typeName} in "${importPath}" using ${typeRegex}`)
}
}
}
}
})
if (forceImports.size > 0) {
console.error(`Missing required typegen import: ${Array.from(forceImports)}`)
}
const imports: string[] = []
Object.keys(importsMap)
.sort()
.forEach((alias) => {
const [importPath, glob] = importsMap[alias]
const safeImportPath = importPath.replace(/\\+/g, '/')
imports.push(`import type ${glob ? '* as ' : ''}${alias} from "${safeImportPath}"`)
})
const typegenInfo = {
headers: headers || [TYPEGEN_HEADER],
sourceTypeMap,
imports,
contextTypeImport: contextType,
nexusSchemaImportId: getOwnPackage().name,
}
return typegenInfo
}
} | identifier_body |
typegenAutoConfig.ts | import { GraphQLNamedType, GraphQLSchema, isOutputType } from 'graphql'
import type { TypegenInfo } from './builder'
import type { TypingImport } from './definitions/_types'
import { TYPEGEN_HEADER } from './lang'
import { nodeImports } from './node'
import { getOwnPackage, log, objValues, relativePathTo, typeScriptFileExtension } from './utils'
/** Any common types / constants that would otherwise be circular-imported */
export const SCALAR_TYPES = {
Int: 'number',
String: 'string',
ID: 'string',
Float: 'number',
Boolean: 'boolean',
}
export interface SourceTypeModule {
/**
* The module for where to look for the types. This uses the node resolution algorithm via require.resolve,
* so if this lives in node_modules, you can just provide the module name otherwise you should provide the
* absolute path to the file.
*/
module: string
/**
* When we import the module, we use `import * as ____` to prevent conflicts. This alias should be a name
* that doesn't conflict with any other types, usually a short lowercase name.
*/
alias: string
/**
* Provides a custom approach to matching for the type
*
* If not provided, the default implementation is:
*
* (type) => [ new RegExp(`(?:interface|type|class|enum)\\s+(${type.name})\\W`, "g"), ]
*/
typeMatch?: (type: GraphQLNamedType, defaultRegex: RegExp) => RegExp | RegExp[]
/**
* A list of typesNames or regular expressions matching type names that should be resolved by this import.
* Provide an empty array if you wish to use the file for context and ensure no other types are matched.
*/
onlyTypes?: (string | RegExp)[]
/**
* By default the import is configured `import * as alias from`, setting glob to false will change this to
* `import alias from`
*/
glob?: false
}
export interface SourceTypesConfigOptions {
/** Any headers to prefix on the generated type file */
headers?: string[]
/**
* Array of SourceTypeModule's to look in and match the type names against.
*
* @example
* modules: [
* { module: 'typescript', alias: 'ts' },
* { module: path.join(__dirname, '../sourceTypes'), alias: 'b' },
* ]
*/
modules: SourceTypeModule[]
/**
* Types that should not be matched for a source type,
*
* By default this is set to ['Query', 'Mutation', 'Subscription']
*
* @example
* skipTypes: ['Query', 'Mutation', /(.*?)Edge/, /(.*?)Connection/]
*/
skipTypes?: (string | RegExp)[]
/**
* If debug is set to true, this will log out info about all types found, skipped, etc. for the type
* generation files. @default false
*/
debug?: boolean
/**
* If provided this will be used for the source types rather than the auto-resolve mechanism above. Useful
* as an override for one-off cases, or for scalar source types.
*/
mapping?: Record<string, string>
}
/**
* This is an approach for handling type definition auto-resolution. It is designed to handle the most common
* cases, as can be seen in the examples / the simplicity of the implementation.
*
* If you wish to do something more complex, involving full AST parsing, etc, you can provide a different
* function to the `typegenInfo` property of the `makeSchema` config.
*
* @param options
*/
export function | (options: SourceTypesConfigOptions, contextType: TypingImport | undefined) {
return async (schema: GraphQLSchema, outputPath: string): Promise<TypegenInfo> => {
const {
headers,
skipTypes = ['Query', 'Mutation', 'Subscription'],
mapping: _sourceTypeMap,
debug,
} = options
const typeMap = schema.getTypeMap()
const typesToIgnore = new Set<string>()
const typesToIgnoreRegex: RegExp[] = []
const allImportsMap: Record<string, string> = {}
const importsMap: Record<string, [string, boolean]> = {}
const sourceTypeMap: Record<string, string> = {
...SCALAR_TYPES,
..._sourceTypeMap,
}
const forceImports = new Set(
objValues(sourceTypeMap)
.concat(typeof contextType === 'string' ? contextType || '' : '')
.map((t) => {
const match = t.match(/^(\w+)\./)
return match ? match[1] : null
})
.filter((f) => f)
)
skipTypes.forEach((skip) => {
if (typeof skip === 'string') {
typesToIgnore.add(skip)
} else if (skip instanceof RegExp) {
typesToIgnoreRegex.push(skip)
} else {
throw new Error('Invalid type for options.skipTypes, expected string or RegExp')
}
})
const path = nodeImports().path
const typeSources = await Promise.all(
options.modules.map(async (source) => {
// Keeping all of this in here so if we don't have any sources
// e.g. in the Playground, it doesn't break things.
const { module: pathOrModule, glob = true, onlyTypes, alias, typeMatch } = source
if (path.isAbsolute(pathOrModule) && path.extname(pathOrModule) !== '.ts') {
return console.warn(
`Nexus Schema Typegen: Expected module ${pathOrModule} to be an absolute path to a TypeScript module, skipping.`
)
}
let resolvedPath: string
let fileContents: string
try {
resolvedPath = require.resolve(pathOrModule, {
paths: [process.cwd()],
})
if (path.extname(resolvedPath) !== '.ts') {
resolvedPath = findTypingForFile(resolvedPath, pathOrModule)
}
fileContents = String(await nodeImports().fs.promises.readFile(resolvedPath, 'utf-8'))
} catch (e) {
if (e instanceof Error && e.message.indexOf('Cannot find module') !== -1) {
console.error(`GraphQL Nexus: Unable to find file or module ${pathOrModule}, skipping`)
} else {
console.error(e.message)
}
return null
}
const importPath = (
path.isAbsolute(pathOrModule) ? relativePathTo(resolvedPath, outputPath) : pathOrModule
).replace(typeScriptFileExtension, '')
if (allImportsMap[alias] && allImportsMap[alias] !== importPath) {
return console.warn(
`Nexus Schema Typegen: Cannot have multiple type sources ${importsMap[alias]} and ${pathOrModule} with the same alias ${alias}, skipping`
)
}
allImportsMap[alias] = importPath
if (forceImports.has(alias)) {
importsMap[alias] = [importPath, glob]
forceImports.delete(alias)
}
return {
alias,
glob,
importPath,
fileContents,
onlyTypes,
typeMatch: typeMatch || defaultTypeMatcher,
}
})
)
const builtinScalars = new Set(Object.keys(SCALAR_TYPES))
Object.keys(typeMap).forEach((typeName) => {
if (typeName.startsWith('__')) {
return
}
if (typesToIgnore.has(typeName)) {
return
}
if (typesToIgnoreRegex.some((r) => r.test(typeName))) {
return
}
if (sourceTypeMap[typeName]) {
return
}
if (builtinScalars.has(typeName)) {
return
}
const type = schema.getType(typeName)
// For now we'll say that if it's output type it can be backed
if (isOutputType(type)) {
for (let i = 0; i < typeSources.length; i++) {
const typeSource = typeSources[i]
if (!typeSource) {
continue
}
// If we've specified an array of "onlyTypes" to match ensure the
// `typeName` falls within that list.
if (typeSource.onlyTypes) {
if (
!typeSource.onlyTypes.some((t) => {
return t instanceof RegExp ? t.test(typeName) : t === typeName
})
) {
continue
}
}
const { fileContents, importPath, glob, alias, typeMatch } = typeSource
const typeRegex = typeMatch(type, defaultTypeMatcher(type)[0])
const matched = firstMatch(fileContents, Array.isArray(typeRegex) ? typeRegex : [typeRegex])
if (matched) {
if (debug) {
log(`Matched type - ${typeName} in "${importPath}" - ${alias}.${matched[1]}`)
}
importsMap[alias] = [importPath, glob]
sourceTypeMap[typeName] = `${alias}.${matched[1]}`
} else {
if (debug) {
log(`No match for ${typeName} in "${importPath}" using ${typeRegex}`)
}
}
}
}
})
if (forceImports.size > 0) {
console.error(`Missing required typegen import: ${Array.from(forceImports)}`)
}
const imports: string[] = []
Object.keys(importsMap)
.sort()
.forEach((alias) => {
const [importPath, glob] = importsMap[alias]
const safeImportPath = importPath.replace(/\\+/g, '/')
imports.push(`import type ${glob ? '* as ' : ''}${alias} from "${safeImportPath}"`)
})
const typegenInfo = {
headers: headers || [TYPEGEN_HEADER],
sourceTypeMap,
imports,
contextTypeImport: contextType,
nexusSchemaImportId: getOwnPackage().name,
}
return typegenInfo
}
}
function findTypingForFile(absolutePath: string, pathOrModule: string) {
// First try to find the "d.ts" adjacent to the file
try {
const typeDefPath = absolutePath.replace(nodeImports().path.extname(absolutePath), '.d.ts')
require.resolve(typeDefPath)
return typeDefPath
} catch (e) {
console.error(e)
}
// TODO: need to figure out cases where it's a node module
// and "typings" is set in the package.json
throw new Error(`Unable to find typings associated with ${pathOrModule}, skipping`)
}
const firstMatch = (fileContents: string, typeRegex: RegExp[]): RegExpExecArray | null => {
for (let i = 0; i < typeRegex.length; i++) {
const regex = typeRegex[i]
const match = regex.exec(fileContents)
if (match) {
return match
}
}
return null
}
const defaultTypeMatcher = (type: GraphQLNamedType) => {
return [new RegExp(`(?:interface|type|class|enum)\\s+(${type.name})\\W`, 'g')]
}
| typegenAutoConfig | identifier_name |
csbref.go | package main
import (
"bufio"
"fmt"
"math"
"math/rand"
"os"
"strconv"
"strings"
"time"
)
//GAME CONSTANTS
const podRSQ = 800 * 800
const cpRSQ = 600 * 600
const podCount = 4
const minImpulse = 120
const frictionVal = 0.85
const checkpointGenerationGap = 30
//MATH CONSTANTS
const fullCircle = (2 * math.Pi)
const radToDeg = 180.0 / math.Pi
const degToRad = math.Pi / 180.0
const maxRotate = (18.0 * degToRad)
//types
type distanceSqType float64
type gameMap []point
type point struct {
x float64
y float64
}
type object struct {
p point
s point
angle float64
next int
shieldtimer int
boosted int
won bool
}
type playerMove struct {
target point
thrust int
shield bool
boost bool
}
type game [podCount]object
var globalCp [50]point
var globalNumCp int
var playerTimeout [2]int
//taken from AGADE CSB RUNNER ARENA
//https://github.com/Agade09/CSB-Runner-Arena/blob/master/Arena.cpp
var possibleMaps = []gameMap{
{{12460, 1350}, {10540, 5980}, {3580, 5180}, {13580, 7600}},
{{3600, 5280}, {13840, 5080}, {10680, 2280}, {8700, 7460}, {7200, 2160}},
{{4560, 2180}, {7350, 4940}, {3320, 7230}, {14580, 7700}, {10560, 5060}, {13100, 2320}},
{{5010, 5260}, {11480, 6080}, {9100, 1840}}, {{14660, 1410}, {3450, 7220}, {9420, 7240}, {5970, 4240}},
{{3640, 4420}, {8000, 7900}, {13300, 5540}, {9560, 1400}},
{{4100, 7420}, {13500, 2340}, {12940, 7220}, {5640, 2580}},
{{14520, 7780}, {6320, 4290}, {7800, 860}, {7660, 5970}, {3140, 7540}, {9520, 4380}},
{{10040, 5970}, {13920, 1940}, {8020, 3260}, {2670, 7020}}, {{7500, 6940}, {6000, 5360}, {11300, 2820}},
{{4060, 4660}, {13040, 1900}, {6560, 7840}, {7480, 1360}, {12700, 7100}},
{{3020, 5190}, {6280, 7760}, {14100, 7760}, {13880, 1220}, {10240, 4920}, {6100, 2200}},
{{10323, 3366}, {11203, 5425}, {7259, 6656}, {5425, 2838}}}
var possibleMapCount = len(possibleMaps)
func (p *point) dot(n point) float64 {
return p.x*n.x + p.y*n.y
}
func (p *point) norm() float64 {
return (math.Sqrt(((p.x * p.x) + (p.y * p.y))))
}
func (g *game) nextTurn() {
t := 1.0
curps := [4]point{g[0].p, g[1].p, g[2].p, g[3].p}
for t > 0.0 {
first := t
cli := 0
clj := 0
for i := podCount - 1; i > 0; i-- {
for j := i - 1; j >= 0; j-- {
tx := g[i].newCollide(&g[j], podRSQ)
if tx <= first {
first = tx
cli = i
clj = j
}
}
}
g.forwardTime(first)
t -= first
if cli != clj {
g.bounce(cli, clj)
}
if t > 0 {
for i := 0; i < podCount; i++ {
if (cpCollide(curps[i], g[i].p, globalCp[g[i].next], cpRSQ)) > 0 {
g[i].passCheckpoint(i)
}
}
curps = [4]point{g[0].p, g[1].p, g[2].p, g[3].p}
}
}
for i := 0; i < podCount; i++ {
g[i].endTurn(i)
if (cpCollide(curps[i], g[i].p, globalCp[g[i].next], cpRSQ)) > 0 {
g[i].passCheckpoint(i)
}
}
playerTimeout[0]--
playerTimeout[1]--
}
const EPSILON = .00001
func (g *game) bounce(p1 int, p2 int) {
oa := &g[p1]
ob := &g[p2]
normal := ob.p
normal.x -= oa.p.x
normal.y -= oa.p.y
dd := normal.norm()
normal.x /= dd
normal.y /= dd
relv := oa.s
relv.x -= ob.s.x
relv.y -= ob.s.y
var m1 float64 = 1
var m2 float64 = 1
if oa.shieldtimer == 4 {
m1 = 0.1
}
if ob.shieldtimer == 4 {
m2 = 0.1
}
force := normal.dot(relv) / (m1 + m2)
if force < 120 {
force += 120
} else {
force += force
} | ob.s.x += -impulse.x * m2
ob.s.y += -impulse.y * m2
if dd <= 800 {
dd -= 800
oa.p.x += (normal.x * -(-dd/2 + EPSILON))
oa.p.y += (normal.y * -(-dd/2 + EPSILON))
ob.p.x += (normal.x * (-dd/2 + EPSILON))
ob.p.y += (normal.y * (-dd/2 + EPSILON))
}
}
func getAngle(start point, end point) float64 {
dx := (end.x - start.x)
dy := (end.y - start.y)
a := (math.Atan2(dy, dx))
return a
}
func distance2(p1 point, p2 point) distanceSqType {
x := distanceSqType(p2.x - p1.x)
x = x * x
y := distanceSqType(p2.y - p1.y)
y = y * y
return x + y
}
func distance(p1 point, p2 point) float64 {
return (math.Sqrt(float64(distance2(p1, p2))))
}
func (obj *object) passCheckpoint(podn int) {
obj.next = (obj.next + 1)
if obj.next >= globalNumCp {
obj.next = globalNumCp - 1
obj.won = true
}
if podn < 2 {
playerTimeout[0] = 100
} else {
playerTimeout[1] = 100
}
}
func (g *game) forwardTime(t float64) {
for i := 0; i < podCount; i++ {
obj := &g[i]
obj.p.x += (obj.s.x * (t))
obj.p.y += (obj.s.y * (t))
}
}
func round(x float64) float64 {
x = (math.Floor((x) + 0.50000))
return x
}
func (obj *object) newCollide(b *object, rsq float64) float64 {
p := point{b.p.x - obj.p.x, b.p.y - obj.p.y}
pLength2 := p.x*p.x + p.y*p.y
if pLength2 <= rsq {
return 0
}
v := point{(b.s.x - obj.s.x), (b.s.y - obj.s.y)}
dot := p.dot(v)
if dot > 0 {
return 10
}
vLength2 := v.x*v.x + v.y*v.y
disc := dot*dot - vLength2*(pLength2-rsq)
if disc < 0 {
return 10
}
discdist := (math.Sqrt(disc))
t1 := (-dot - discdist) / vLength2
return float64(t1)
}
func cpCollide(p1 point, p2 point, cp point, cpRSQ float64) byte {
dx := (p2.x - p1.x)
dy := (p2.y - p1.y)
pp := p1
pd2 := dx*dx + dy*dy
if pd2 != 0 {
u := ((cp.x-p1.x)*dx + (cp.y-p1.y)*dy) / pd2
if u > 1 {
pp = p2
} else if u > 0 {
pp.x = p1.x + u*dx
pp.y = p1.y + u*dy
}
}
pp.x -= cp.x
pp.y -= cp.y
if ((pp.x * pp.x) + (pp.y * pp.y)) < cpRSQ {
return 1
}
return 0
}
func (obj *object) applyRotate(p point) {
a := getAngle(obj.p, p)
rotateAngle := obj.diffAngle(p)
if rotateAngle < -maxRotate {
a = obj.angle - maxRotate
}
if rotateAngle > maxRotate {
a = obj.angle + maxRotate
}
obj.angle = a
/*for obj.angle < 0 {
obj.angle += fullCircle
}
for obj.angle > fullCircle {
obj.angle -= fullCircle
}*/
}
func (obj *object) applyRotateFirst(rotateAngle float64) {
obj.angle = rotateAngle
for obj.angle < 0 {
obj.angle += fullCircle
}
for obj.angle > fullCircle {
obj.angle -= fullCircle
}
}
func (obj *object) applyThrust(t int) {
cs, cc := math.Sincos(obj.angle)
obj.s.x += (cc * float64(t))
obj.s.y += (cs * float64(t))
}
func (obj *object) endTurn(podn int) {
if obj.s.x > 0 {
obj.s.x = (math.Trunc((obj.s.x * frictionVal)))
} else {
obj.s.x = (math.Trunc((obj.s.x * frictionVal)))
}
if obj.s.y > 0 {
obj.s.y = (math.Trunc((obj.s.y * frictionVal)))
} else {
obj.s.y = (math.Trunc((obj.s.y * frictionVal)))
}
obj.p.x = round(obj.p.x)
obj.p.y = round(obj.p.y)
if obj.shieldtimer > 0 {
obj.shieldtimer--
}
}
func (obj *object) diffAngle(p point) float64 {
a := getAngle(obj.p, p)
da := math.Mod(a-obj.angle, math.Pi*2)
return math.Mod(2*da, math.Pi*2) - da
}
func testMode() {
scanner := bufio.NewScanner(os.Stdin)
scanner.Scan()
fmt.Sscan(scanner.Text(), &globalNumCp)
for i := 0; i < globalNumCp; i++ {
var x, y float64
scanner.Scan()
fmt.Sscan(scanner.Text(), &x, &y)
globalCp[i] = point{x, y}
}
var nTest int
scanner.Scan()
fmt.Sscan(scanner.Text(), &nTest)
var g game
initialiseGame(&g, globalCp[:])
for tn := 0; tn < nTest; tn++ {
for i := 0; i < podCount; i++ {
scanner.Scan()
}
for i := 0; i < podCount; i++ {
var px, py float64
var thrust string
var t int
scanner.Scan()
fmt.Sscan(scanner.Text(), &px, &py, &thrust)
t, err := strconv.Atoi(thrust)
if err != nil {
t = 0
if thrust == "SHIELD" {
g[i].shieldtimer = 4
} else if thrust == "BOOST" {
t = 650
if g[i].boosted == 0 {
g[i].boosted = 1
} else {
t = 200
}
}
}
if g[i].shieldtimer > 0 {
t = 0
}
dest := point{px, py}
if dest == g[i].p {
continue
}
if tn == 0 {
g[i].angle = 0
angle := g[i].diffAngle(dest)
g[i].applyRotateFirst(angle)
} else {
g[i].applyRotate(dest)
}
g[i].applyThrust(t)
}
g.nextTurn()
for i := 0; i < podCount; i++ {
p := &g[i]
fmt.Printf("%d %d %d %d %f %d %d %d\n", int(p.p.x), int(p.p.y), int(p.s.x), int(p.s.y), p.angle*radToDeg, p.next, p.shieldtimer, p.boosted)
}
}
}
var startPointMult = [4]point{{500, -500}, {-500, 500}, {1500, -1500}, {-1500, 1500}}
func initialiseGame(g *game, m gameMap) {
cp1minus0 := point{}
cp1minus0.x = m[1].x - m[0].x
cp1minus0.y = m[1].y - m[0].y
dd := distance(m[1], m[0])
cp1minus0.x /= dd
cp1minus0.y /= dd
for podN := range g {
p := &g[podN]
p.angle = -1 * degToRad
p.next = 1
p.p.x = round(m[0].x + cp1minus0.y*startPointMult[podN].x)
p.p.y = round(m[0].y + cp1minus0.x*startPointMult[podN].y)
}
}
func main() {
validateMode := false
if len(os.Args) > 1 {
if os.Args[1] == "-test" {
testMode()
return
}
}
playerTimeout[0] = 100
playerTimeout[1] = 100
rand.Seed(time.Now().UTC().UnixNano())
scanner := bufio.NewScanner(os.Stdin)
started := false
var players int
for started == false {
scanner.Scan()
startText := strings.Split(scanner.Text(), " ")
if startText[0] == "###Start" {
var err error
players, err = strconv.Atoi(startText[1])
if err != nil || players != 2 {
fmt.Fprintln(os.Stderr, "Error with player count input")
os.Exit(-1)
}
started = true
} else if startText[0] == "###Seed" {
v, err := strconv.ParseInt(startText[1], 10, 64)
fmt.Fprintln(os.Stderr, v)
if err == nil {
rand.Seed(v)
}
} else if startText[0] == "###Validate" {
validateMode = true
players = 2
started = true
} else {
fmt.Fprintln(os.Stderr, "Unsupported startup command: ", startText[0])
os.Exit(0)
}
}
currentMap := possibleMaps[rand.Intn(possibleMapCount)]
for i, v := range currentMap {
currentMap[i].x = v.x + float64(rand.Intn(checkpointGenerationGap*2+1)-checkpointGenerationGap)
currentMap[i].y = v.y + float64(rand.Intn(checkpointGenerationGap*2+1)-checkpointGenerationGap)
}
for i := len(currentMap) - 1; i > 0; i-- {
v := rand.Intn(i)
currentMap[v], currentMap[i] = currentMap[i], currentMap[v]
}
if validateMode {
var ncp int
scanner.Scan()
fmt.Sscan(scanner.Text(), &ncp)
currentMap = make(gameMap, ncp)
for i := range currentMap {
var x float64
var y float64
scanner.Scan()
fmt.Sscan(scanner.Text(), &x, &y)
currentMap[i].x = x
currentMap[i].y = y
}
}
//setup global checkpoints
laps := 3
for i := 0; i < 3; i++ {
for _, v := range currentMap {
globalCp[globalNumCp] = v
globalNumCp++
}
}
//add last checkpoint at the end
globalCp[globalNumCp] = currentMap[0]
globalNumCp++
var g game
initialiseGame(&g, currentMap)
outputSetup(currentMap, 2, laps)
for turnCount := 0; turnCount < 500; turnCount++ {
var moves [4]playerMove
for player := 0; player < players; player++ {
givePlayerOutput(&g, player, currentMap)
theseMoves, valid := getPlayerInput(player, scanner)
if valid == false {
fmt.Fprintln(os.Stderr, "INVALID INPUT", theseMoves)
lostGame(player)
}
for i, v := range theseMoves {
moves[player*2+i] = v
}
}
for podN := range g {
pod := &g[podN]
move := &moves[podN]
if move.boost {
if pod.boosted == 0 {
pod.boosted = 1
move.thrust = 650
} else {
move.thrust = 200
}
}
if move.shield {
pod.shieldtimer = 4
}
if pod.shieldtimer > 0 {
move.thrust = 0
}
if move.target == pod.p {
continue
}
if turnCount == 0 {
pod.angle = 0
pod.angle = pod.diffAngle(move.target)
} else {
pod.applyRotate(move.target)
}
pod.applyThrust(moves[podN].thrust)
}
g.nextTurn()
if playerTimeout[0] <= 0 {
lostGame(0)
}
if playerTimeout[1] <= 0 {
lostGame(1)
}
for podN := range g {
pod := &g[podN]
if pod.won {
if podN < 2 {
wonGame(0)
} else {
wonGame(1)
}
}
}
}
winner := 0
best := 0.0
for podN := range g {
score := float64(g[podN].next * 1000000)
score -= distance(g[podN].p, globalCp[g[podN].next])
if score > best {
best = score
winner = podN
}
}
if winner < 2 {
wonGame(0)
} else {
wonGame(1)
}
}
func lostGame(player int) {
winner := 0
loser := 1
if player == winner {
winner, loser = loser, winner
}
fmt.Printf("###End %d %d\n", winner, loser)
os.Exit(0)
}
func wonGame(player int) {
winner := 0
loser := 1
if player == loser {
winner, loser = loser, winner
}
fmt.Printf("###End %d %d\n", winner, loser)
os.Exit(0)
}
func getPlayerInput(player int, scanner *bufio.Scanner) ([2]playerMove, bool) {
pm := [2]playerMove{}
valid := true
fmt.Printf("###Output %d 2\n", player)
for i := range pm {
if scanner.Scan() == false {
os.Exit(0)
}
var thrust string
fmt.Sscanf(scanner.Text(), "%f %f %s\n", &pm[i].target.x, &pm[i].target.y, &thrust)
pm[i].thrust = 0
switch thrust {
case "SHIELD":
pm[i].shield = true
case "BOOST":
pm[i].boost = true
default:
v, err := strconv.Atoi(thrust)
if err != nil {
valid = false
} else {
if v > 200 {
valid = false
}
pm[i].thrust = v
}
}
}
return pm, valid
}
func outputSetup(m gameMap, players int, laps int) {
for player := 0; player < players; player++ {
fmt.Printf("###Input %d\n", player)
fmt.Println(laps)
fmt.Println(len(m))
for _, v := range m {
fmt.Println(v.x, v.y)
}
}
}
func givePlayerOutput(g *game, player int, m gameMap) {
pods := [4]int{0, 1, 2, 3}
if player == 1 {
pods = [4]int{2, 3, 0, 1}
}
fmt.Printf("###Input %d\n", player)
for _, podN := range pods {
p := &g[podN]
fmt.Printf("%d %d %d %d %d %d\n", int(p.p.x), int(p.p.y), int(p.s.x), int(p.s.y), int(round(p.angle*radToDeg)), p.next%len(m))
// fmt.Fprintf(os.Stderr, "%d %d %d %d %d %d\n", int(p.p.x), int(p.p.y), int(p.s.x), int(p.s.y), int(round(p.angle*radToDeg)), p.next)
}
} | impulse := normal
impulse.x *= -force
impulse.y *= -force
oa.s.x += impulse.x * m1
oa.s.y += impulse.y * m1 | random_line_split |
csbref.go | package main
import (
"bufio"
"fmt"
"math"
"math/rand"
"os"
"strconv"
"strings"
"time"
)
//GAME CONSTANTS
const podRSQ = 800 * 800
const cpRSQ = 600 * 600
const podCount = 4
const minImpulse = 120
const frictionVal = 0.85
const checkpointGenerationGap = 30
//MATH CONSTANTS
const fullCircle = (2 * math.Pi)
const radToDeg = 180.0 / math.Pi
const degToRad = math.Pi / 180.0
const maxRotate = (18.0 * degToRad)
//types
type distanceSqType float64
type gameMap []point
type point struct {
x float64
y float64
}
type object struct {
p point
s point
angle float64
next int
shieldtimer int
boosted int
won bool
}
type playerMove struct {
target point
thrust int
shield bool
boost bool
}
type game [podCount]object
var globalCp [50]point
var globalNumCp int
var playerTimeout [2]int
//taken from AGADE CSB RUNNER ARENA
//https://github.com/Agade09/CSB-Runner-Arena/blob/master/Arena.cpp
var possibleMaps = []gameMap{
{{12460, 1350}, {10540, 5980}, {3580, 5180}, {13580, 7600}},
{{3600, 5280}, {13840, 5080}, {10680, 2280}, {8700, 7460}, {7200, 2160}},
{{4560, 2180}, {7350, 4940}, {3320, 7230}, {14580, 7700}, {10560, 5060}, {13100, 2320}},
{{5010, 5260}, {11480, 6080}, {9100, 1840}}, {{14660, 1410}, {3450, 7220}, {9420, 7240}, {5970, 4240}},
{{3640, 4420}, {8000, 7900}, {13300, 5540}, {9560, 1400}},
{{4100, 7420}, {13500, 2340}, {12940, 7220}, {5640, 2580}},
{{14520, 7780}, {6320, 4290}, {7800, 860}, {7660, 5970}, {3140, 7540}, {9520, 4380}},
{{10040, 5970}, {13920, 1940}, {8020, 3260}, {2670, 7020}}, {{7500, 6940}, {6000, 5360}, {11300, 2820}},
{{4060, 4660}, {13040, 1900}, {6560, 7840}, {7480, 1360}, {12700, 7100}},
{{3020, 5190}, {6280, 7760}, {14100, 7760}, {13880, 1220}, {10240, 4920}, {6100, 2200}},
{{10323, 3366}, {11203, 5425}, {7259, 6656}, {5425, 2838}}}
var possibleMapCount = len(possibleMaps)
func (p *point) dot(n point) float64 {
return p.x*n.x + p.y*n.y
}
func (p *point) norm() float64 {
return (math.Sqrt(((p.x * p.x) + (p.y * p.y))))
}
func (g *game) nextTurn() {
t := 1.0
curps := [4]point{g[0].p, g[1].p, g[2].p, g[3].p}
for t > 0.0 {
first := t
cli := 0
clj := 0
for i := podCount - 1; i > 0; i-- {
for j := i - 1; j >= 0; j-- {
tx := g[i].newCollide(&g[j], podRSQ)
if tx <= first {
first = tx
cli = i
clj = j
}
}
}
g.forwardTime(first)
t -= first
if cli != clj {
g.bounce(cli, clj)
}
if t > 0 {
for i := 0; i < podCount; i++ {
if (cpCollide(curps[i], g[i].p, globalCp[g[i].next], cpRSQ)) > 0 {
g[i].passCheckpoint(i)
}
}
curps = [4]point{g[0].p, g[1].p, g[2].p, g[3].p}
}
}
for i := 0; i < podCount; i++ {
g[i].endTurn(i)
if (cpCollide(curps[i], g[i].p, globalCp[g[i].next], cpRSQ)) > 0 {
g[i].passCheckpoint(i)
}
}
playerTimeout[0]--
playerTimeout[1]--
}
const EPSILON = .00001
func (g *game) bounce(p1 int, p2 int) {
oa := &g[p1]
ob := &g[p2]
normal := ob.p
normal.x -= oa.p.x
normal.y -= oa.p.y
dd := normal.norm()
normal.x /= dd
normal.y /= dd
relv := oa.s
relv.x -= ob.s.x
relv.y -= ob.s.y
var m1 float64 = 1
var m2 float64 = 1
if oa.shieldtimer == 4 {
m1 = 0.1
}
if ob.shieldtimer == 4 {
m2 = 0.1
}
force := normal.dot(relv) / (m1 + m2)
if force < 120 {
force += 120
} else {
force += force
}
impulse := normal
impulse.x *= -force
impulse.y *= -force
oa.s.x += impulse.x * m1
oa.s.y += impulse.y * m1
ob.s.x += -impulse.x * m2
ob.s.y += -impulse.y * m2
if dd <= 800 |
}
func getAngle(start point, end point) float64 {
dx := (end.x - start.x)
dy := (end.y - start.y)
a := (math.Atan2(dy, dx))
return a
}
func distance2(p1 point, p2 point) distanceSqType {
x := distanceSqType(p2.x - p1.x)
x = x * x
y := distanceSqType(p2.y - p1.y)
y = y * y
return x + y
}
func distance(p1 point, p2 point) float64 {
return (math.Sqrt(float64(distance2(p1, p2))))
}
func (obj *object) passCheckpoint(podn int) {
obj.next = (obj.next + 1)
if obj.next >= globalNumCp {
obj.next = globalNumCp - 1
obj.won = true
}
if podn < 2 {
playerTimeout[0] = 100
} else {
playerTimeout[1] = 100
}
}
func (g *game) forwardTime(t float64) {
for i := 0; i < podCount; i++ {
obj := &g[i]
obj.p.x += (obj.s.x * (t))
obj.p.y += (obj.s.y * (t))
}
}
func round(x float64) float64 {
x = (math.Floor((x) + 0.50000))
return x
}
func (obj *object) newCollide(b *object, rsq float64) float64 {
p := point{b.p.x - obj.p.x, b.p.y - obj.p.y}
pLength2 := p.x*p.x + p.y*p.y
if pLength2 <= rsq {
return 0
}
v := point{(b.s.x - obj.s.x), (b.s.y - obj.s.y)}
dot := p.dot(v)
if dot > 0 {
return 10
}
vLength2 := v.x*v.x + v.y*v.y
disc := dot*dot - vLength2*(pLength2-rsq)
if disc < 0 {
return 10
}
discdist := (math.Sqrt(disc))
t1 := (-dot - discdist) / vLength2
return float64(t1)
}
func cpCollide(p1 point, p2 point, cp point, cpRSQ float64) byte {
dx := (p2.x - p1.x)
dy := (p2.y - p1.y)
pp := p1
pd2 := dx*dx + dy*dy
if pd2 != 0 {
u := ((cp.x-p1.x)*dx + (cp.y-p1.y)*dy) / pd2
if u > 1 {
pp = p2
} else if u > 0 {
pp.x = p1.x + u*dx
pp.y = p1.y + u*dy
}
}
pp.x -= cp.x
pp.y -= cp.y
if ((pp.x * pp.x) + (pp.y * pp.y)) < cpRSQ {
return 1
}
return 0
}
func (obj *object) applyRotate(p point) {
a := getAngle(obj.p, p)
rotateAngle := obj.diffAngle(p)
if rotateAngle < -maxRotate {
a = obj.angle - maxRotate
}
if rotateAngle > maxRotate {
a = obj.angle + maxRotate
}
obj.angle = a
/*for obj.angle < 0 {
obj.angle += fullCircle
}
for obj.angle > fullCircle {
obj.angle -= fullCircle
}*/
}
func (obj *object) applyRotateFirst(rotateAngle float64) {
obj.angle = rotateAngle
for obj.angle < 0 {
obj.angle += fullCircle
}
for obj.angle > fullCircle {
obj.angle -= fullCircle
}
}
func (obj *object) applyThrust(t int) {
cs, cc := math.Sincos(obj.angle)
obj.s.x += (cc * float64(t))
obj.s.y += (cs * float64(t))
}
func (obj *object) endTurn(podn int) {
if obj.s.x > 0 {
obj.s.x = (math.Trunc((obj.s.x * frictionVal)))
} else {
obj.s.x = (math.Trunc((obj.s.x * frictionVal)))
}
if obj.s.y > 0 {
obj.s.y = (math.Trunc((obj.s.y * frictionVal)))
} else {
obj.s.y = (math.Trunc((obj.s.y * frictionVal)))
}
obj.p.x = round(obj.p.x)
obj.p.y = round(obj.p.y)
if obj.shieldtimer > 0 {
obj.shieldtimer--
}
}
func (obj *object) diffAngle(p point) float64 {
a := getAngle(obj.p, p)
da := math.Mod(a-obj.angle, math.Pi*2)
return math.Mod(2*da, math.Pi*2) - da
}
func testMode() {
scanner := bufio.NewScanner(os.Stdin)
scanner.Scan()
fmt.Sscan(scanner.Text(), &globalNumCp)
for i := 0; i < globalNumCp; i++ {
var x, y float64
scanner.Scan()
fmt.Sscan(scanner.Text(), &x, &y)
globalCp[i] = point{x, y}
}
var nTest int
scanner.Scan()
fmt.Sscan(scanner.Text(), &nTest)
var g game
initialiseGame(&g, globalCp[:])
for tn := 0; tn < nTest; tn++ {
for i := 0; i < podCount; i++ {
scanner.Scan()
}
for i := 0; i < podCount; i++ {
var px, py float64
var thrust string
var t int
scanner.Scan()
fmt.Sscan(scanner.Text(), &px, &py, &thrust)
t, err := strconv.Atoi(thrust)
if err != nil {
t = 0
if thrust == "SHIELD" {
g[i].shieldtimer = 4
} else if thrust == "BOOST" {
t = 650
if g[i].boosted == 0 {
g[i].boosted = 1
} else {
t = 200
}
}
}
if g[i].shieldtimer > 0 {
t = 0
}
dest := point{px, py}
if dest == g[i].p {
continue
}
if tn == 0 {
g[i].angle = 0
angle := g[i].diffAngle(dest)
g[i].applyRotateFirst(angle)
} else {
g[i].applyRotate(dest)
}
g[i].applyThrust(t)
}
g.nextTurn()
for i := 0; i < podCount; i++ {
p := &g[i]
fmt.Printf("%d %d %d %d %f %d %d %d\n", int(p.p.x), int(p.p.y), int(p.s.x), int(p.s.y), p.angle*radToDeg, p.next, p.shieldtimer, p.boosted)
}
}
}
var startPointMult = [4]point{{500, -500}, {-500, 500}, {1500, -1500}, {-1500, 1500}}
func initialiseGame(g *game, m gameMap) {
cp1minus0 := point{}
cp1minus0.x = m[1].x - m[0].x
cp1minus0.y = m[1].y - m[0].y
dd := distance(m[1], m[0])
cp1minus0.x /= dd
cp1minus0.y /= dd
for podN := range g {
p := &g[podN]
p.angle = -1 * degToRad
p.next = 1
p.p.x = round(m[0].x + cp1minus0.y*startPointMult[podN].x)
p.p.y = round(m[0].y + cp1minus0.x*startPointMult[podN].y)
}
}
func main() {
validateMode := false
if len(os.Args) > 1 {
if os.Args[1] == "-test" {
testMode()
return
}
}
playerTimeout[0] = 100
playerTimeout[1] = 100
rand.Seed(time.Now().UTC().UnixNano())
scanner := bufio.NewScanner(os.Stdin)
started := false
var players int
for started == false {
scanner.Scan()
startText := strings.Split(scanner.Text(), " ")
if startText[0] == "###Start" {
var err error
players, err = strconv.Atoi(startText[1])
if err != nil || players != 2 {
fmt.Fprintln(os.Stderr, "Error with player count input")
os.Exit(-1)
}
started = true
} else if startText[0] == "###Seed" {
v, err := strconv.ParseInt(startText[1], 10, 64)
fmt.Fprintln(os.Stderr, v)
if err == nil {
rand.Seed(v)
}
} else if startText[0] == "###Validate" {
validateMode = true
players = 2
started = true
} else {
fmt.Fprintln(os.Stderr, "Unsupported startup command: ", startText[0])
os.Exit(0)
}
}
currentMap := possibleMaps[rand.Intn(possibleMapCount)]
for i, v := range currentMap {
currentMap[i].x = v.x + float64(rand.Intn(checkpointGenerationGap*2+1)-checkpointGenerationGap)
currentMap[i].y = v.y + float64(rand.Intn(checkpointGenerationGap*2+1)-checkpointGenerationGap)
}
for i := len(currentMap) - 1; i > 0; i-- {
v := rand.Intn(i)
currentMap[v], currentMap[i] = currentMap[i], currentMap[v]
}
if validateMode {
var ncp int
scanner.Scan()
fmt.Sscan(scanner.Text(), &ncp)
currentMap = make(gameMap, ncp)
for i := range currentMap {
var x float64
var y float64
scanner.Scan()
fmt.Sscan(scanner.Text(), &x, &y)
currentMap[i].x = x
currentMap[i].y = y
}
}
//setup global checkpoints
laps := 3
for i := 0; i < 3; i++ {
for _, v := range currentMap {
globalCp[globalNumCp] = v
globalNumCp++
}
}
//add last checkpoint at the end
globalCp[globalNumCp] = currentMap[0]
globalNumCp++
var g game
initialiseGame(&g, currentMap)
outputSetup(currentMap, 2, laps)
for turnCount := 0; turnCount < 500; turnCount++ {
var moves [4]playerMove
for player := 0; player < players; player++ {
givePlayerOutput(&g, player, currentMap)
theseMoves, valid := getPlayerInput(player, scanner)
if valid == false {
fmt.Fprintln(os.Stderr, "INVALID INPUT", theseMoves)
lostGame(player)
}
for i, v := range theseMoves {
moves[player*2+i] = v
}
}
for podN := range g {
pod := &g[podN]
move := &moves[podN]
if move.boost {
if pod.boosted == 0 {
pod.boosted = 1
move.thrust = 650
} else {
move.thrust = 200
}
}
if move.shield {
pod.shieldtimer = 4
}
if pod.shieldtimer > 0 {
move.thrust = 0
}
if move.target == pod.p {
continue
}
if turnCount == 0 {
pod.angle = 0
pod.angle = pod.diffAngle(move.target)
} else {
pod.applyRotate(move.target)
}
pod.applyThrust(moves[podN].thrust)
}
g.nextTurn()
if playerTimeout[0] <= 0 {
lostGame(0)
}
if playerTimeout[1] <= 0 {
lostGame(1)
}
for podN := range g {
pod := &g[podN]
if pod.won {
if podN < 2 {
wonGame(0)
} else {
wonGame(1)
}
}
}
}
winner := 0
best := 0.0
for podN := range g {
score := float64(g[podN].next * 1000000)
score -= distance(g[podN].p, globalCp[g[podN].next])
if score > best {
best = score
winner = podN
}
}
if winner < 2 {
wonGame(0)
} else {
wonGame(1)
}
}
func lostGame(player int) {
winner := 0
loser := 1
if player == winner {
winner, loser = loser, winner
}
fmt.Printf("###End %d %d\n", winner, loser)
os.Exit(0)
}
func wonGame(player int) {
winner := 0
loser := 1
if player == loser {
winner, loser = loser, winner
}
fmt.Printf("###End %d %d\n", winner, loser)
os.Exit(0)
}
func getPlayerInput(player int, scanner *bufio.Scanner) ([2]playerMove, bool) {
pm := [2]playerMove{}
valid := true
fmt.Printf("###Output %d 2\n", player)
for i := range pm {
if scanner.Scan() == false {
os.Exit(0)
}
var thrust string
fmt.Sscanf(scanner.Text(), "%f %f %s\n", &pm[i].target.x, &pm[i].target.y, &thrust)
pm[i].thrust = 0
switch thrust {
case "SHIELD":
pm[i].shield = true
case "BOOST":
pm[i].boost = true
default:
v, err := strconv.Atoi(thrust)
if err != nil {
valid = false
} else {
if v > 200 {
valid = false
}
pm[i].thrust = v
}
}
}
return pm, valid
}
func outputSetup(m gameMap, players int, laps int) {
for player := 0; player < players; player++ {
fmt.Printf("###Input %d\n", player)
fmt.Println(laps)
fmt.Println(len(m))
for _, v := range m {
fmt.Println(v.x, v.y)
}
}
}
func givePlayerOutput(g *game, player int, m gameMap) {
pods := [4]int{0, 1, 2, 3}
if player == 1 {
pods = [4]int{2, 3, 0, 1}
}
fmt.Printf("###Input %d\n", player)
for _, podN := range pods {
p := &g[podN]
fmt.Printf("%d %d %d %d %d %d\n", int(p.p.x), int(p.p.y), int(p.s.x), int(p.s.y), int(round(p.angle*radToDeg)), p.next%len(m))
// fmt.Fprintf(os.Stderr, "%d %d %d %d %d %d\n", int(p.p.x), int(p.p.y), int(p.s.x), int(p.s.y), int(round(p.angle*radToDeg)), p.next)
}
}
| {
dd -= 800
oa.p.x += (normal.x * -(-dd/2 + EPSILON))
oa.p.y += (normal.y * -(-dd/2 + EPSILON))
ob.p.x += (normal.x * (-dd/2 + EPSILON))
ob.p.y += (normal.y * (-dd/2 + EPSILON))
} | conditional_block |
csbref.go | package main
import (
"bufio"
"fmt"
"math"
"math/rand"
"os"
"strconv"
"strings"
"time"
)
//GAME CONSTANTS
const podRSQ = 800 * 800
const cpRSQ = 600 * 600
const podCount = 4
const minImpulse = 120
const frictionVal = 0.85
const checkpointGenerationGap = 30
//MATH CONSTANTS
const fullCircle = (2 * math.Pi)
const radToDeg = 180.0 / math.Pi
const degToRad = math.Pi / 180.0
const maxRotate = (18.0 * degToRad)
//types
type distanceSqType float64
type gameMap []point
type point struct {
x float64
y float64
}
type object struct {
p point
s point
angle float64
next int
shieldtimer int
boosted int
won bool
}
type playerMove struct {
target point
thrust int
shield bool
boost bool
}
type game [podCount]object
var globalCp [50]point
var globalNumCp int
var playerTimeout [2]int
//taken from AGADE CSB RUNNER ARENA
//https://github.com/Agade09/CSB-Runner-Arena/blob/master/Arena.cpp
var possibleMaps = []gameMap{
{{12460, 1350}, {10540, 5980}, {3580, 5180}, {13580, 7600}},
{{3600, 5280}, {13840, 5080}, {10680, 2280}, {8700, 7460}, {7200, 2160}},
{{4560, 2180}, {7350, 4940}, {3320, 7230}, {14580, 7700}, {10560, 5060}, {13100, 2320}},
{{5010, 5260}, {11480, 6080}, {9100, 1840}}, {{14660, 1410}, {3450, 7220}, {9420, 7240}, {5970, 4240}},
{{3640, 4420}, {8000, 7900}, {13300, 5540}, {9560, 1400}},
{{4100, 7420}, {13500, 2340}, {12940, 7220}, {5640, 2580}},
{{14520, 7780}, {6320, 4290}, {7800, 860}, {7660, 5970}, {3140, 7540}, {9520, 4380}},
{{10040, 5970}, {13920, 1940}, {8020, 3260}, {2670, 7020}}, {{7500, 6940}, {6000, 5360}, {11300, 2820}},
{{4060, 4660}, {13040, 1900}, {6560, 7840}, {7480, 1360}, {12700, 7100}},
{{3020, 5190}, {6280, 7760}, {14100, 7760}, {13880, 1220}, {10240, 4920}, {6100, 2200}},
{{10323, 3366}, {11203, 5425}, {7259, 6656}, {5425, 2838}}}
var possibleMapCount = len(possibleMaps)
func (p *point) dot(n point) float64 {
return p.x*n.x + p.y*n.y
}
func (p *point) norm() float64 {
return (math.Sqrt(((p.x * p.x) + (p.y * p.y))))
}
func (g *game) nextTurn() {
t := 1.0
curps := [4]point{g[0].p, g[1].p, g[2].p, g[3].p}
for t > 0.0 {
first := t
cli := 0
clj := 0
for i := podCount - 1; i > 0; i-- {
for j := i - 1; j >= 0; j-- {
tx := g[i].newCollide(&g[j], podRSQ)
if tx <= first {
first = tx
cli = i
clj = j
}
}
}
g.forwardTime(first)
t -= first
if cli != clj {
g.bounce(cli, clj)
}
if t > 0 {
for i := 0; i < podCount; i++ {
if (cpCollide(curps[i], g[i].p, globalCp[g[i].next], cpRSQ)) > 0 {
g[i].passCheckpoint(i)
}
}
curps = [4]point{g[0].p, g[1].p, g[2].p, g[3].p}
}
}
for i := 0; i < podCount; i++ {
g[i].endTurn(i)
if (cpCollide(curps[i], g[i].p, globalCp[g[i].next], cpRSQ)) > 0 {
g[i].passCheckpoint(i)
}
}
playerTimeout[0]--
playerTimeout[1]--
}
const EPSILON = .00001
func (g *game) bounce(p1 int, p2 int) {
oa := &g[p1]
ob := &g[p2]
normal := ob.p
normal.x -= oa.p.x
normal.y -= oa.p.y
dd := normal.norm()
normal.x /= dd
normal.y /= dd
relv := oa.s
relv.x -= ob.s.x
relv.y -= ob.s.y
var m1 float64 = 1
var m2 float64 = 1
if oa.shieldtimer == 4 {
m1 = 0.1
}
if ob.shieldtimer == 4 {
m2 = 0.1
}
force := normal.dot(relv) / (m1 + m2)
if force < 120 {
force += 120
} else {
force += force
}
impulse := normal
impulse.x *= -force
impulse.y *= -force
oa.s.x += impulse.x * m1
oa.s.y += impulse.y * m1
ob.s.x += -impulse.x * m2
ob.s.y += -impulse.y * m2
if dd <= 800 {
dd -= 800
oa.p.x += (normal.x * -(-dd/2 + EPSILON))
oa.p.y += (normal.y * -(-dd/2 + EPSILON))
ob.p.x += (normal.x * (-dd/2 + EPSILON))
ob.p.y += (normal.y * (-dd/2 + EPSILON))
}
}
func getAngle(start point, end point) float64 {
dx := (end.x - start.x)
dy := (end.y - start.y)
a := (math.Atan2(dy, dx))
return a
}
func distance2(p1 point, p2 point) distanceSqType {
x := distanceSqType(p2.x - p1.x)
x = x * x
y := distanceSqType(p2.y - p1.y)
y = y * y
return x + y
}
func distance(p1 point, p2 point) float64 {
return (math.Sqrt(float64(distance2(p1, p2))))
}
func (obj *object) passCheckpoint(podn int) {
obj.next = (obj.next + 1)
if obj.next >= globalNumCp {
obj.next = globalNumCp - 1
obj.won = true
}
if podn < 2 {
playerTimeout[0] = 100
} else {
playerTimeout[1] = 100
}
}
func (g *game) forwardTime(t float64) {
for i := 0; i < podCount; i++ {
obj := &g[i]
obj.p.x += (obj.s.x * (t))
obj.p.y += (obj.s.y * (t))
}
}
func round(x float64) float64 {
x = (math.Floor((x) + 0.50000))
return x
}
func (obj *object) newCollide(b *object, rsq float64) float64 {
p := point{b.p.x - obj.p.x, b.p.y - obj.p.y}
pLength2 := p.x*p.x + p.y*p.y
if pLength2 <= rsq {
return 0
}
v := point{(b.s.x - obj.s.x), (b.s.y - obj.s.y)}
dot := p.dot(v)
if dot > 0 {
return 10
}
vLength2 := v.x*v.x + v.y*v.y
disc := dot*dot - vLength2*(pLength2-rsq)
if disc < 0 {
return 10
}
discdist := (math.Sqrt(disc))
t1 := (-dot - discdist) / vLength2
return float64(t1)
}
func cpCollide(p1 point, p2 point, cp point, cpRSQ float64) byte {
dx := (p2.x - p1.x)
dy := (p2.y - p1.y)
pp := p1
pd2 := dx*dx + dy*dy
if pd2 != 0 {
u := ((cp.x-p1.x)*dx + (cp.y-p1.y)*dy) / pd2
if u > 1 {
pp = p2
} else if u > 0 {
pp.x = p1.x + u*dx
pp.y = p1.y + u*dy
}
}
pp.x -= cp.x
pp.y -= cp.y
if ((pp.x * pp.x) + (pp.y * pp.y)) < cpRSQ {
return 1
}
return 0
}
func (obj *object) applyRotate(p point) {
a := getAngle(obj.p, p)
rotateAngle := obj.diffAngle(p)
if rotateAngle < -maxRotate {
a = obj.angle - maxRotate
}
if rotateAngle > maxRotate {
a = obj.angle + maxRotate
}
obj.angle = a
/*for obj.angle < 0 {
obj.angle += fullCircle
}
for obj.angle > fullCircle {
obj.angle -= fullCircle
}*/
}
func (obj *object) applyRotateFirst(rotateAngle float64) {
obj.angle = rotateAngle
for obj.angle < 0 {
obj.angle += fullCircle
}
for obj.angle > fullCircle {
obj.angle -= fullCircle
}
}
func (obj *object) applyThrust(t int) {
cs, cc := math.Sincos(obj.angle)
obj.s.x += (cc * float64(t))
obj.s.y += (cs * float64(t))
}
func (obj *object) endTurn(podn int) {
if obj.s.x > 0 {
obj.s.x = (math.Trunc((obj.s.x * frictionVal)))
} else {
obj.s.x = (math.Trunc((obj.s.x * frictionVal)))
}
if obj.s.y > 0 {
obj.s.y = (math.Trunc((obj.s.y * frictionVal)))
} else {
obj.s.y = (math.Trunc((obj.s.y * frictionVal)))
}
obj.p.x = round(obj.p.x)
obj.p.y = round(obj.p.y)
if obj.shieldtimer > 0 {
obj.shieldtimer--
}
}
func (obj *object) diffAngle(p point) float64 {
a := getAngle(obj.p, p)
da := math.Mod(a-obj.angle, math.Pi*2)
return math.Mod(2*da, math.Pi*2) - da
}
func testMode() {
scanner := bufio.NewScanner(os.Stdin)
scanner.Scan()
fmt.Sscan(scanner.Text(), &globalNumCp)
for i := 0; i < globalNumCp; i++ {
var x, y float64
scanner.Scan()
fmt.Sscan(scanner.Text(), &x, &y)
globalCp[i] = point{x, y}
}
var nTest int
scanner.Scan()
fmt.Sscan(scanner.Text(), &nTest)
var g game
initialiseGame(&g, globalCp[:])
for tn := 0; tn < nTest; tn++ {
for i := 0; i < podCount; i++ {
scanner.Scan()
}
for i := 0; i < podCount; i++ {
var px, py float64
var thrust string
var t int
scanner.Scan()
fmt.Sscan(scanner.Text(), &px, &py, &thrust)
t, err := strconv.Atoi(thrust)
if err != nil {
t = 0
if thrust == "SHIELD" {
g[i].shieldtimer = 4
} else if thrust == "BOOST" {
t = 650
if g[i].boosted == 0 {
g[i].boosted = 1
} else {
t = 200
}
}
}
if g[i].shieldtimer > 0 {
t = 0
}
dest := point{px, py}
if dest == g[i].p {
continue
}
if tn == 0 {
g[i].angle = 0
angle := g[i].diffAngle(dest)
g[i].applyRotateFirst(angle)
} else {
g[i].applyRotate(dest)
}
g[i].applyThrust(t)
}
g.nextTurn()
for i := 0; i < podCount; i++ {
p := &g[i]
fmt.Printf("%d %d %d %d %f %d %d %d\n", int(p.p.x), int(p.p.y), int(p.s.x), int(p.s.y), p.angle*radToDeg, p.next, p.shieldtimer, p.boosted)
}
}
}
var startPointMult = [4]point{{500, -500}, {-500, 500}, {1500, -1500}, {-1500, 1500}}
func initialiseGame(g *game, m gameMap) {
cp1minus0 := point{}
cp1minus0.x = m[1].x - m[0].x
cp1minus0.y = m[1].y - m[0].y
dd := distance(m[1], m[0])
cp1minus0.x /= dd
cp1minus0.y /= dd
for podN := range g {
p := &g[podN]
p.angle = -1 * degToRad
p.next = 1
p.p.x = round(m[0].x + cp1minus0.y*startPointMult[podN].x)
p.p.y = round(m[0].y + cp1minus0.x*startPointMult[podN].y)
}
}
func main() {
validateMode := false
if len(os.Args) > 1 {
if os.Args[1] == "-test" {
testMode()
return
}
}
playerTimeout[0] = 100
playerTimeout[1] = 100
rand.Seed(time.Now().UTC().UnixNano())
scanner := bufio.NewScanner(os.Stdin)
started := false
var players int
for started == false {
scanner.Scan()
startText := strings.Split(scanner.Text(), " ")
if startText[0] == "###Start" {
var err error
players, err = strconv.Atoi(startText[1])
if err != nil || players != 2 {
fmt.Fprintln(os.Stderr, "Error with player count input")
os.Exit(-1)
}
started = true
} else if startText[0] == "###Seed" {
v, err := strconv.ParseInt(startText[1], 10, 64)
fmt.Fprintln(os.Stderr, v)
if err == nil {
rand.Seed(v)
}
} else if startText[0] == "###Validate" {
validateMode = true
players = 2
started = true
} else {
fmt.Fprintln(os.Stderr, "Unsupported startup command: ", startText[0])
os.Exit(0)
}
}
currentMap := possibleMaps[rand.Intn(possibleMapCount)]
for i, v := range currentMap {
currentMap[i].x = v.x + float64(rand.Intn(checkpointGenerationGap*2+1)-checkpointGenerationGap)
currentMap[i].y = v.y + float64(rand.Intn(checkpointGenerationGap*2+1)-checkpointGenerationGap)
}
for i := len(currentMap) - 1; i > 0; i-- {
v := rand.Intn(i)
currentMap[v], currentMap[i] = currentMap[i], currentMap[v]
}
if validateMode {
var ncp int
scanner.Scan()
fmt.Sscan(scanner.Text(), &ncp)
currentMap = make(gameMap, ncp)
for i := range currentMap {
var x float64
var y float64
scanner.Scan()
fmt.Sscan(scanner.Text(), &x, &y)
currentMap[i].x = x
currentMap[i].y = y
}
}
//setup global checkpoints
laps := 3
for i := 0; i < 3; i++ {
for _, v := range currentMap {
globalCp[globalNumCp] = v
globalNumCp++
}
}
//add last checkpoint at the end
globalCp[globalNumCp] = currentMap[0]
globalNumCp++
var g game
initialiseGame(&g, currentMap)
outputSetup(currentMap, 2, laps)
for turnCount := 0; turnCount < 500; turnCount++ {
var moves [4]playerMove
for player := 0; player < players; player++ {
givePlayerOutput(&g, player, currentMap)
theseMoves, valid := getPlayerInput(player, scanner)
if valid == false {
fmt.Fprintln(os.Stderr, "INVALID INPUT", theseMoves)
lostGame(player)
}
for i, v := range theseMoves {
moves[player*2+i] = v
}
}
for podN := range g {
pod := &g[podN]
move := &moves[podN]
if move.boost {
if pod.boosted == 0 {
pod.boosted = 1
move.thrust = 650
} else {
move.thrust = 200
}
}
if move.shield {
pod.shieldtimer = 4
}
if pod.shieldtimer > 0 {
move.thrust = 0
}
if move.target == pod.p {
continue
}
if turnCount == 0 {
pod.angle = 0
pod.angle = pod.diffAngle(move.target)
} else {
pod.applyRotate(move.target)
}
pod.applyThrust(moves[podN].thrust)
}
g.nextTurn()
if playerTimeout[0] <= 0 {
lostGame(0)
}
if playerTimeout[1] <= 0 {
lostGame(1)
}
for podN := range g {
pod := &g[podN]
if pod.won {
if podN < 2 {
wonGame(0)
} else {
wonGame(1)
}
}
}
}
winner := 0
best := 0.0
for podN := range g {
score := float64(g[podN].next * 1000000)
score -= distance(g[podN].p, globalCp[g[podN].next])
if score > best {
best = score
winner = podN
}
}
if winner < 2 {
wonGame(0)
} else {
wonGame(1)
}
}
func lostGame(player int) {
winner := 0
loser := 1
if player == winner {
winner, loser = loser, winner
}
fmt.Printf("###End %d %d\n", winner, loser)
os.Exit(0)
}
func wonGame(player int) {
winner := 0
loser := 1
if player == loser {
winner, loser = loser, winner
}
fmt.Printf("###End %d %d\n", winner, loser)
os.Exit(0)
}
func getPlayerInput(player int, scanner *bufio.Scanner) ([2]playerMove, bool) |
func outputSetup(m gameMap, players int, laps int) {
for player := 0; player < players; player++ {
fmt.Printf("###Input %d\n", player)
fmt.Println(laps)
fmt.Println(len(m))
for _, v := range m {
fmt.Println(v.x, v.y)
}
}
}
func givePlayerOutput(g *game, player int, m gameMap) {
pods := [4]int{0, 1, 2, 3}
if player == 1 {
pods = [4]int{2, 3, 0, 1}
}
fmt.Printf("###Input %d\n", player)
for _, podN := range pods {
p := &g[podN]
fmt.Printf("%d %d %d %d %d %d\n", int(p.p.x), int(p.p.y), int(p.s.x), int(p.s.y), int(round(p.angle*radToDeg)), p.next%len(m))
// fmt.Fprintf(os.Stderr, "%d %d %d %d %d %d\n", int(p.p.x), int(p.p.y), int(p.s.x), int(p.s.y), int(round(p.angle*radToDeg)), p.next)
}
}
| {
pm := [2]playerMove{}
valid := true
fmt.Printf("###Output %d 2\n", player)
for i := range pm {
if scanner.Scan() == false {
os.Exit(0)
}
var thrust string
fmt.Sscanf(scanner.Text(), "%f %f %s\n", &pm[i].target.x, &pm[i].target.y, &thrust)
pm[i].thrust = 0
switch thrust {
case "SHIELD":
pm[i].shield = true
case "BOOST":
pm[i].boost = true
default:
v, err := strconv.Atoi(thrust)
if err != nil {
valid = false
} else {
if v > 200 {
valid = false
}
pm[i].thrust = v
}
}
}
return pm, valid
} | identifier_body |
csbref.go | package main
import (
"bufio"
"fmt"
"math"
"math/rand"
"os"
"strconv"
"strings"
"time"
)
//GAME CONSTANTS
const podRSQ = 800 * 800
const cpRSQ = 600 * 600
const podCount = 4
const minImpulse = 120
const frictionVal = 0.85
const checkpointGenerationGap = 30
//MATH CONSTANTS
const fullCircle = (2 * math.Pi)
const radToDeg = 180.0 / math.Pi
const degToRad = math.Pi / 180.0
const maxRotate = (18.0 * degToRad)
//types
type distanceSqType float64
type gameMap []point
type point struct {
x float64
y float64
}
type object struct {
p point
s point
angle float64
next int
shieldtimer int
boosted int
won bool
}
type playerMove struct {
target point
thrust int
shield bool
boost bool
}
type game [podCount]object
var globalCp [50]point
var globalNumCp int
var playerTimeout [2]int
//taken from AGADE CSB RUNNER ARENA
//https://github.com/Agade09/CSB-Runner-Arena/blob/master/Arena.cpp
var possibleMaps = []gameMap{
{{12460, 1350}, {10540, 5980}, {3580, 5180}, {13580, 7600}},
{{3600, 5280}, {13840, 5080}, {10680, 2280}, {8700, 7460}, {7200, 2160}},
{{4560, 2180}, {7350, 4940}, {3320, 7230}, {14580, 7700}, {10560, 5060}, {13100, 2320}},
{{5010, 5260}, {11480, 6080}, {9100, 1840}}, {{14660, 1410}, {3450, 7220}, {9420, 7240}, {5970, 4240}},
{{3640, 4420}, {8000, 7900}, {13300, 5540}, {9560, 1400}},
{{4100, 7420}, {13500, 2340}, {12940, 7220}, {5640, 2580}},
{{14520, 7780}, {6320, 4290}, {7800, 860}, {7660, 5970}, {3140, 7540}, {9520, 4380}},
{{10040, 5970}, {13920, 1940}, {8020, 3260}, {2670, 7020}}, {{7500, 6940}, {6000, 5360}, {11300, 2820}},
{{4060, 4660}, {13040, 1900}, {6560, 7840}, {7480, 1360}, {12700, 7100}},
{{3020, 5190}, {6280, 7760}, {14100, 7760}, {13880, 1220}, {10240, 4920}, {6100, 2200}},
{{10323, 3366}, {11203, 5425}, {7259, 6656}, {5425, 2838}}}
var possibleMapCount = len(possibleMaps)
func (p *point) dot(n point) float64 {
return p.x*n.x + p.y*n.y
}
func (p *point) norm() float64 {
return (math.Sqrt(((p.x * p.x) + (p.y * p.y))))
}
func (g *game) nextTurn() {
t := 1.0
curps := [4]point{g[0].p, g[1].p, g[2].p, g[3].p}
for t > 0.0 {
first := t
cli := 0
clj := 0
for i := podCount - 1; i > 0; i-- {
for j := i - 1; j >= 0; j-- {
tx := g[i].newCollide(&g[j], podRSQ)
if tx <= first {
first = tx
cli = i
clj = j
}
}
}
g.forwardTime(first)
t -= first
if cli != clj {
g.bounce(cli, clj)
}
if t > 0 {
for i := 0; i < podCount; i++ {
if (cpCollide(curps[i], g[i].p, globalCp[g[i].next], cpRSQ)) > 0 {
g[i].passCheckpoint(i)
}
}
curps = [4]point{g[0].p, g[1].p, g[2].p, g[3].p}
}
}
for i := 0; i < podCount; i++ {
g[i].endTurn(i)
if (cpCollide(curps[i], g[i].p, globalCp[g[i].next], cpRSQ)) > 0 {
g[i].passCheckpoint(i)
}
}
playerTimeout[0]--
playerTimeout[1]--
}
const EPSILON = .00001
func (g *game) bounce(p1 int, p2 int) {
oa := &g[p1]
ob := &g[p2]
normal := ob.p
normal.x -= oa.p.x
normal.y -= oa.p.y
dd := normal.norm()
normal.x /= dd
normal.y /= dd
relv := oa.s
relv.x -= ob.s.x
relv.y -= ob.s.y
var m1 float64 = 1
var m2 float64 = 1
if oa.shieldtimer == 4 {
m1 = 0.1
}
if ob.shieldtimer == 4 {
m2 = 0.1
}
force := normal.dot(relv) / (m1 + m2)
if force < 120 {
force += 120
} else {
force += force
}
impulse := normal
impulse.x *= -force
impulse.y *= -force
oa.s.x += impulse.x * m1
oa.s.y += impulse.y * m1
ob.s.x += -impulse.x * m2
ob.s.y += -impulse.y * m2
if dd <= 800 {
dd -= 800
oa.p.x += (normal.x * -(-dd/2 + EPSILON))
oa.p.y += (normal.y * -(-dd/2 + EPSILON))
ob.p.x += (normal.x * (-dd/2 + EPSILON))
ob.p.y += (normal.y * (-dd/2 + EPSILON))
}
}
func getAngle(start point, end point) float64 {
dx := (end.x - start.x)
dy := (end.y - start.y)
a := (math.Atan2(dy, dx))
return a
}
func distance2(p1 point, p2 point) distanceSqType {
x := distanceSqType(p2.x - p1.x)
x = x * x
y := distanceSqType(p2.y - p1.y)
y = y * y
return x + y
}
func distance(p1 point, p2 point) float64 {
return (math.Sqrt(float64(distance2(p1, p2))))
}
func (obj *object) passCheckpoint(podn int) {
obj.next = (obj.next + 1)
if obj.next >= globalNumCp {
obj.next = globalNumCp - 1
obj.won = true
}
if podn < 2 {
playerTimeout[0] = 100
} else {
playerTimeout[1] = 100
}
}
func (g *game) forwardTime(t float64) {
for i := 0; i < podCount; i++ {
obj := &g[i]
obj.p.x += (obj.s.x * (t))
obj.p.y += (obj.s.y * (t))
}
}
func round(x float64) float64 {
x = (math.Floor((x) + 0.50000))
return x
}
func (obj *object) newCollide(b *object, rsq float64) float64 {
p := point{b.p.x - obj.p.x, b.p.y - obj.p.y}
pLength2 := p.x*p.x + p.y*p.y
if pLength2 <= rsq {
return 0
}
v := point{(b.s.x - obj.s.x), (b.s.y - obj.s.y)}
dot := p.dot(v)
if dot > 0 {
return 10
}
vLength2 := v.x*v.x + v.y*v.y
disc := dot*dot - vLength2*(pLength2-rsq)
if disc < 0 {
return 10
}
discdist := (math.Sqrt(disc))
t1 := (-dot - discdist) / vLength2
return float64(t1)
}
func cpCollide(p1 point, p2 point, cp point, cpRSQ float64) byte {
dx := (p2.x - p1.x)
dy := (p2.y - p1.y)
pp := p1
pd2 := dx*dx + dy*dy
if pd2 != 0 {
u := ((cp.x-p1.x)*dx + (cp.y-p1.y)*dy) / pd2
if u > 1 {
pp = p2
} else if u > 0 {
pp.x = p1.x + u*dx
pp.y = p1.y + u*dy
}
}
pp.x -= cp.x
pp.y -= cp.y
if ((pp.x * pp.x) + (pp.y * pp.y)) < cpRSQ {
return 1
}
return 0
}
func (obj *object) applyRotate(p point) {
a := getAngle(obj.p, p)
rotateAngle := obj.diffAngle(p)
if rotateAngle < -maxRotate {
a = obj.angle - maxRotate
}
if rotateAngle > maxRotate {
a = obj.angle + maxRotate
}
obj.angle = a
/*for obj.angle < 0 {
obj.angle += fullCircle
}
for obj.angle > fullCircle {
obj.angle -= fullCircle
}*/
}
func (obj *object) applyRotateFirst(rotateAngle float64) {
obj.angle = rotateAngle
for obj.angle < 0 {
obj.angle += fullCircle
}
for obj.angle > fullCircle {
obj.angle -= fullCircle
}
}
func (obj *object) applyThrust(t int) {
cs, cc := math.Sincos(obj.angle)
obj.s.x += (cc * float64(t))
obj.s.y += (cs * float64(t))
}
func (obj *object) endTurn(podn int) {
if obj.s.x > 0 {
obj.s.x = (math.Trunc((obj.s.x * frictionVal)))
} else {
obj.s.x = (math.Trunc((obj.s.x * frictionVal)))
}
if obj.s.y > 0 {
obj.s.y = (math.Trunc((obj.s.y * frictionVal)))
} else {
obj.s.y = (math.Trunc((obj.s.y * frictionVal)))
}
obj.p.x = round(obj.p.x)
obj.p.y = round(obj.p.y)
if obj.shieldtimer > 0 {
obj.shieldtimer--
}
}
func (obj *object) diffAngle(p point) float64 {
a := getAngle(obj.p, p)
da := math.Mod(a-obj.angle, math.Pi*2)
return math.Mod(2*da, math.Pi*2) - da
}
func testMode() {
scanner := bufio.NewScanner(os.Stdin)
scanner.Scan()
fmt.Sscan(scanner.Text(), &globalNumCp)
for i := 0; i < globalNumCp; i++ {
var x, y float64
scanner.Scan()
fmt.Sscan(scanner.Text(), &x, &y)
globalCp[i] = point{x, y}
}
var nTest int
scanner.Scan()
fmt.Sscan(scanner.Text(), &nTest)
var g game
initialiseGame(&g, globalCp[:])
for tn := 0; tn < nTest; tn++ {
for i := 0; i < podCount; i++ {
scanner.Scan()
}
for i := 0; i < podCount; i++ {
var px, py float64
var thrust string
var t int
scanner.Scan()
fmt.Sscan(scanner.Text(), &px, &py, &thrust)
t, err := strconv.Atoi(thrust)
if err != nil {
t = 0
if thrust == "SHIELD" {
g[i].shieldtimer = 4
} else if thrust == "BOOST" {
t = 650
if g[i].boosted == 0 {
g[i].boosted = 1
} else {
t = 200
}
}
}
if g[i].shieldtimer > 0 {
t = 0
}
dest := point{px, py}
if dest == g[i].p {
continue
}
if tn == 0 {
g[i].angle = 0
angle := g[i].diffAngle(dest)
g[i].applyRotateFirst(angle)
} else {
g[i].applyRotate(dest)
}
g[i].applyThrust(t)
}
g.nextTurn()
for i := 0; i < podCount; i++ {
p := &g[i]
fmt.Printf("%d %d %d %d %f %d %d %d\n", int(p.p.x), int(p.p.y), int(p.s.x), int(p.s.y), p.angle*radToDeg, p.next, p.shieldtimer, p.boosted)
}
}
}
var startPointMult = [4]point{{500, -500}, {-500, 500}, {1500, -1500}, {-1500, 1500}}
func initialiseGame(g *game, m gameMap) {
cp1minus0 := point{}
cp1minus0.x = m[1].x - m[0].x
cp1minus0.y = m[1].y - m[0].y
dd := distance(m[1], m[0])
cp1minus0.x /= dd
cp1minus0.y /= dd
for podN := range g {
p := &g[podN]
p.angle = -1 * degToRad
p.next = 1
p.p.x = round(m[0].x + cp1minus0.y*startPointMult[podN].x)
p.p.y = round(m[0].y + cp1minus0.x*startPointMult[podN].y)
}
}
func main() {
validateMode := false
if len(os.Args) > 1 {
if os.Args[1] == "-test" {
testMode()
return
}
}
playerTimeout[0] = 100
playerTimeout[1] = 100
rand.Seed(time.Now().UTC().UnixNano())
scanner := bufio.NewScanner(os.Stdin)
started := false
var players int
for started == false {
scanner.Scan()
startText := strings.Split(scanner.Text(), " ")
if startText[0] == "###Start" {
var err error
players, err = strconv.Atoi(startText[1])
if err != nil || players != 2 {
fmt.Fprintln(os.Stderr, "Error with player count input")
os.Exit(-1)
}
started = true
} else if startText[0] == "###Seed" {
v, err := strconv.ParseInt(startText[1], 10, 64)
fmt.Fprintln(os.Stderr, v)
if err == nil {
rand.Seed(v)
}
} else if startText[0] == "###Validate" {
validateMode = true
players = 2
started = true
} else {
fmt.Fprintln(os.Stderr, "Unsupported startup command: ", startText[0])
os.Exit(0)
}
}
currentMap := possibleMaps[rand.Intn(possibleMapCount)]
for i, v := range currentMap {
currentMap[i].x = v.x + float64(rand.Intn(checkpointGenerationGap*2+1)-checkpointGenerationGap)
currentMap[i].y = v.y + float64(rand.Intn(checkpointGenerationGap*2+1)-checkpointGenerationGap)
}
for i := len(currentMap) - 1; i > 0; i-- {
v := rand.Intn(i)
currentMap[v], currentMap[i] = currentMap[i], currentMap[v]
}
if validateMode {
var ncp int
scanner.Scan()
fmt.Sscan(scanner.Text(), &ncp)
currentMap = make(gameMap, ncp)
for i := range currentMap {
var x float64
var y float64
scanner.Scan()
fmt.Sscan(scanner.Text(), &x, &y)
currentMap[i].x = x
currentMap[i].y = y
}
}
//setup global checkpoints
laps := 3
for i := 0; i < 3; i++ {
for _, v := range currentMap {
globalCp[globalNumCp] = v
globalNumCp++
}
}
//add last checkpoint at the end
globalCp[globalNumCp] = currentMap[0]
globalNumCp++
var g game
initialiseGame(&g, currentMap)
outputSetup(currentMap, 2, laps)
for turnCount := 0; turnCount < 500; turnCount++ {
var moves [4]playerMove
for player := 0; player < players; player++ {
givePlayerOutput(&g, player, currentMap)
theseMoves, valid := getPlayerInput(player, scanner)
if valid == false {
fmt.Fprintln(os.Stderr, "INVALID INPUT", theseMoves)
lostGame(player)
}
for i, v := range theseMoves {
moves[player*2+i] = v
}
}
for podN := range g {
pod := &g[podN]
move := &moves[podN]
if move.boost {
if pod.boosted == 0 {
pod.boosted = 1
move.thrust = 650
} else {
move.thrust = 200
}
}
if move.shield {
pod.shieldtimer = 4
}
if pod.shieldtimer > 0 {
move.thrust = 0
}
if move.target == pod.p {
continue
}
if turnCount == 0 {
pod.angle = 0
pod.angle = pod.diffAngle(move.target)
} else {
pod.applyRotate(move.target)
}
pod.applyThrust(moves[podN].thrust)
}
g.nextTurn()
if playerTimeout[0] <= 0 {
lostGame(0)
}
if playerTimeout[1] <= 0 {
lostGame(1)
}
for podN := range g {
pod := &g[podN]
if pod.won {
if podN < 2 {
wonGame(0)
} else {
wonGame(1)
}
}
}
}
winner := 0
best := 0.0
for podN := range g {
score := float64(g[podN].next * 1000000)
score -= distance(g[podN].p, globalCp[g[podN].next])
if score > best {
best = score
winner = podN
}
}
if winner < 2 {
wonGame(0)
} else {
wonGame(1)
}
}
func lostGame(player int) {
winner := 0
loser := 1
if player == winner {
winner, loser = loser, winner
}
fmt.Printf("###End %d %d\n", winner, loser)
os.Exit(0)
}
func wonGame(player int) {
winner := 0
loser := 1
if player == loser {
winner, loser = loser, winner
}
fmt.Printf("###End %d %d\n", winner, loser)
os.Exit(0)
}
func getPlayerInput(player int, scanner *bufio.Scanner) ([2]playerMove, bool) {
pm := [2]playerMove{}
valid := true
fmt.Printf("###Output %d 2\n", player)
for i := range pm {
if scanner.Scan() == false {
os.Exit(0)
}
var thrust string
fmt.Sscanf(scanner.Text(), "%f %f %s\n", &pm[i].target.x, &pm[i].target.y, &thrust)
pm[i].thrust = 0
switch thrust {
case "SHIELD":
pm[i].shield = true
case "BOOST":
pm[i].boost = true
default:
v, err := strconv.Atoi(thrust)
if err != nil {
valid = false
} else {
if v > 200 {
valid = false
}
pm[i].thrust = v
}
}
}
return pm, valid
}
func | (m gameMap, players int, laps int) {
for player := 0; player < players; player++ {
fmt.Printf("###Input %d\n", player)
fmt.Println(laps)
fmt.Println(len(m))
for _, v := range m {
fmt.Println(v.x, v.y)
}
}
}
func givePlayerOutput(g *game, player int, m gameMap) {
pods := [4]int{0, 1, 2, 3}
if player == 1 {
pods = [4]int{2, 3, 0, 1}
}
fmt.Printf("###Input %d\n", player)
for _, podN := range pods {
p := &g[podN]
fmt.Printf("%d %d %d %d %d %d\n", int(p.p.x), int(p.p.y), int(p.s.x), int(p.s.y), int(round(p.angle*radToDeg)), p.next%len(m))
// fmt.Fprintf(os.Stderr, "%d %d %d %d %d %d\n", int(p.p.x), int(p.p.y), int(p.s.x), int(p.s.y), int(round(p.angle*radToDeg)), p.next)
}
}
| outputSetup | identifier_name |
mod.rs | // Buttplug Rust Source Code File - See https://buttplug.io for more info.
//
// Copyright 2016-2019 Nonpolynomial Labs LLC. All rights reserved.
//
// Licensed under the BSD 3-Clause license. See LICENSE file in the project root
// for full license information.
//! Handling of communication with Buttplug Server.
pub mod messagesorter;
#[cfg(any(feature = "client-ws", feature = "client-ws-ssl"))]
pub mod websocket;
#[cfg(feature = "server")]
use crate::server::ButtplugServer;
use crate::{
client::internal::{
ButtplugClientFuture, ButtplugClientFutureState, ButtplugClientFutureStateShared,
ButtplugClientMessageStateShared,
},
core::messages::ButtplugMessageUnion,
};
use async_std::sync::{channel, Receiver};
#[cfg(feature = "serialize_json")]
use async_std::{
prelude::{FutureExt, StreamExt},
sync::Sender,
};
use async_trait::async_trait;
#[cfg(feature = "serialize_json")]
use futures::future::Future;
#[cfg(feature = "serialize_json")]
use messagesorter::ClientConnectorMessageSorter;
use std::{error::Error, fmt};
pub type ButtplugClientConnectionState =
ButtplugClientFutureState<Result<(), ButtplugClientConnectorError>>;
pub type ButtplugClientConnectionStateShared =
ButtplugClientFutureStateShared<Result<(), ButtplugClientConnectorError>>;
pub type ButtplugClientConnectionFuture =
ButtplugClientFuture<Result<(), ButtplugClientConnectorError>>;
#[derive(Debug, Clone)]
pub struct ButtplugClientConnectorError {
pub message: String,
}
impl ButtplugClientConnectorError {
pub fn new(msg: &str) -> Self {
Self {
message: msg.to_owned(),
}
}
}
impl fmt::Display for ButtplugClientConnectorError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Init Error: {}", self.message)
}
}
impl Error for ButtplugClientConnectorError {
fn description(&self) -> &str {
self.message.as_str()
}
fn source(&self) -> Option<&(dyn Error + 'static)> {
None
}
}
// Not real sure if this is sync, since there may be state that could get weird
// in connectors implementing this trait, but Send should be ok.
#[async_trait]
pub trait ButtplugClientConnector: Send {
async fn connect(&mut self) -> Result<(), ButtplugClientConnectorError>;
async fn disconnect(&mut self) -> Result<(), ButtplugClientConnectorError>;
async fn send(&mut self, msg: &ButtplugMessageUnion, state: &ButtplugClientMessageStateShared);
fn get_event_receiver(&mut self) -> Receiver<ButtplugMessageUnion>;
}
#[cfg(feature = "server")]
pub struct ButtplugEmbeddedClientConnector {
server: ButtplugServer,
recv: Option<Receiver<ButtplugMessageUnion>>,
}
#[cfg(feature = "server")]
impl ButtplugEmbeddedClientConnector {
pub fn new(name: &str, max_ping_time: u32) -> Self {
let (send, recv) = channel(256);
Self {
recv: Some(recv),
server: ButtplugServer::new(&name, max_ping_time, send),
}
}
}
#[cfg(feature = "server")]
#[async_trait]
impl ButtplugClientConnector for ButtplugEmbeddedClientConnector {
async fn connect(&mut self) -> Result<(), ButtplugClientConnectorError> {
Ok(())
}
async fn disconnect(&mut self) -> Result<(), ButtplugClientConnectorError> {
Ok(())
}
async fn send(&mut self, msg: &ButtplugMessageUnion, state: &ButtplugClientMessageStateShared) {
let ret_msg = self.server.send_message(msg).await;
let mut waker_state = state.lock().unwrap();
waker_state.set_reply(ret_msg.unwrap());
}
fn get_event_receiver(&mut self) -> Receiver<ButtplugMessageUnion> {
// This will panic if we've already taken the receiver.
self.recv.take().unwrap()
}
}
// The embedded connector is used heavily in the client unit tests, so we can
// assume code coverage there and omit specific tests here.
pub trait ButtplugRemoteClientConnectorSender: Sync + Send {
fn send(&self, msg: ButtplugMessageUnion);
fn close(&self);
}
pub enum ButtplugRemoteClientConnectorMessage {
Sender(Box<dyn ButtplugRemoteClientConnectorSender>),
Connected(),
Text(String),
Error(String),
ClientClose(String),
Close(String),
}
#[cfg(feature = "serialize_json")]
pub struct ButtplugRemoteClientConnectorHelper {
// Channel send/recv pair for applications wanting to send out through the
// remote connection. Receiver will be send to task on creation.
internal_send: Sender<(ButtplugMessageUnion, ButtplugClientMessageStateShared)>,
internal_recv: Option<Receiver<(ButtplugMessageUnion, ButtplugClientMessageStateShared)>>,
// Channel send/recv pair for remote connection sending information to the
// application. Receiver will be sent to task on creation.
remote_send: Sender<ButtplugRemoteClientConnectorMessage>,
remote_recv: Option<Receiver<ButtplugRemoteClientConnectorMessage>>,
event_send: Option<Sender<ButtplugMessageUnion>>,
}
#[cfg(feature = "serialize_json")]
unsafe impl Send for ButtplugRemoteClientConnectorHelper {}
#[cfg(feature = "serialize_json")]
unsafe impl Sync for ButtplugRemoteClientConnectorHelper {}
#[cfg(feature = "serialize_json")]
impl ButtplugRemoteClientConnectorHelper {
pub fn new(event_sender: Sender<ButtplugMessageUnion>) -> Self {
let (internal_send, internal_recv) = channel(256);
let (remote_send, remote_recv) = channel(256);
Self {
event_send: Some(event_sender),
remote_send,
remote_recv: Some(remote_recv),
internal_send,
internal_recv: Some(internal_recv),
}
}
pub fn | (&self) -> Sender<ButtplugRemoteClientConnectorMessage> {
self.remote_send.clone()
}
pub async fn send(
&mut self,
msg: &ButtplugMessageUnion,
state: &ButtplugClientMessageStateShared,
) {
self.internal_send.send((msg.clone(), state.clone())).await;
}
pub async fn close(&self) {
// Emulate a close from the connector side, which will cause us to
// close.
self.remote_send
.send(ButtplugRemoteClientConnectorMessage::Close(
"Client requested close.".to_owned(),
))
.await;
}
pub fn get_recv_future(&mut self) -> impl Future {
// Set up a way to get futures in and out of the sorter, which will live
// in our connector task.
let event_send = self.event_send.take().unwrap();
// Remove the receivers we need to move into the task.
let mut remote_recv = self.remote_recv.take().unwrap();
let mut internal_recv = self.internal_recv.take().unwrap();
async move {
let mut sorter = ClientConnectorMessageSorter::default();
// Our in-task remote sender, which is a wrapped version of whatever
// bus specific sender (websocket, tcp, etc) we'll be using.
let mut remote_send: Option<Box<dyn ButtplugRemoteClientConnectorSender>> = None;
enum StreamValue {
NoValue,
Incoming(ButtplugRemoteClientConnectorMessage),
Outgoing((ButtplugMessageUnion, ButtplugClientMessageStateShared)),
}
loop {
// We use two Options instead of an enum because we may never
// get anything.
let mut stream_return: StreamValue = async {
match remote_recv.next().await {
Some(msg) => StreamValue::Incoming(msg),
None => StreamValue::NoValue,
}
}
.race(async {
match internal_recv.next().await {
Some(msg) => StreamValue::Outgoing(msg),
None => StreamValue::NoValue,
}
})
.await;
match stream_return {
StreamValue::NoValue => break,
StreamValue::Incoming(remote_msg) => {
match remote_msg {
ButtplugRemoteClientConnectorMessage::Sender(s) => {
remote_send = Some(s);
}
ButtplugRemoteClientConnectorMessage::Text(t) => {
let array: Vec<ButtplugMessageUnion> =
serde_json::from_str(&t.clone()).unwrap();
for smsg in array {
if !sorter.maybe_resolve_message(&smsg) {
info!("Sending event!");
// Send notification through event channel
event_send.send(smsg).await;
}
}
}
ButtplugRemoteClientConnectorMessage::ClientClose(s) => {
info!("Client closing connection {}", s);
if let Some(ref mut remote_sender) = remote_send {
remote_sender.close();
} else {
panic!("Can't send message yet!");
}
}
ButtplugRemoteClientConnectorMessage::Close(s) => {
info!("Connector closing connection {}", s);
break;
}
_ => {
panic!("UNHANDLED BRANCH");
}
}
}
StreamValue::Outgoing(ref mut buttplug_fut_msg) => {
// Create future sets our message ID, so make sure this
// happens before we send out the message.
sorter.register_future(&mut buttplug_fut_msg.0, &buttplug_fut_msg.1);
if let Some(ref mut remote_sender) = remote_send {
remote_sender.send(buttplug_fut_msg.0.clone());
} else {
panic!("Can't send message yet!");
}
}
}
}
}
}
}
| get_remote_send | identifier_name |
mod.rs | // Buttplug Rust Source Code File - See https://buttplug.io for more info.
//
// Copyright 2016-2019 Nonpolynomial Labs LLC. All rights reserved.
//
// Licensed under the BSD 3-Clause license. See LICENSE file in the project root
// for full license information.
//! Handling of communication with Buttplug Server.
pub mod messagesorter;
#[cfg(any(feature = "client-ws", feature = "client-ws-ssl"))]
pub mod websocket;
#[cfg(feature = "server")]
use crate::server::ButtplugServer;
use crate::{
client::internal::{
ButtplugClientFuture, ButtplugClientFutureState, ButtplugClientFutureStateShared,
ButtplugClientMessageStateShared,
},
core::messages::ButtplugMessageUnion,
};
use async_std::sync::{channel, Receiver};
#[cfg(feature = "serialize_json")]
use async_std::{
prelude::{FutureExt, StreamExt},
sync::Sender,
};
use async_trait::async_trait;
#[cfg(feature = "serialize_json")]
use futures::future::Future;
#[cfg(feature = "serialize_json")]
use messagesorter::ClientConnectorMessageSorter;
use std::{error::Error, fmt};
pub type ButtplugClientConnectionState =
ButtplugClientFutureState<Result<(), ButtplugClientConnectorError>>;
pub type ButtplugClientConnectionStateShared =
ButtplugClientFutureStateShared<Result<(), ButtplugClientConnectorError>>;
pub type ButtplugClientConnectionFuture =
ButtplugClientFuture<Result<(), ButtplugClientConnectorError>>;
#[derive(Debug, Clone)]
pub struct ButtplugClientConnectorError {
pub message: String,
}
impl ButtplugClientConnectorError {
pub fn new(msg: &str) -> Self {
Self {
message: msg.to_owned(),
}
}
}
impl fmt::Display for ButtplugClientConnectorError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Init Error: {}", self.message)
}
}
impl Error for ButtplugClientConnectorError {
fn description(&self) -> &str {
self.message.as_str()
}
fn source(&self) -> Option<&(dyn Error + 'static)> {
None
}
}
// Not real sure if this is sync, since there may be state that could get weird
// in connectors implementing this trait, but Send should be ok.
#[async_trait]
pub trait ButtplugClientConnector: Send {
async fn connect(&mut self) -> Result<(), ButtplugClientConnectorError>;
async fn disconnect(&mut self) -> Result<(), ButtplugClientConnectorError>;
async fn send(&mut self, msg: &ButtplugMessageUnion, state: &ButtplugClientMessageStateShared);
fn get_event_receiver(&mut self) -> Receiver<ButtplugMessageUnion>;
}
#[cfg(feature = "server")]
pub struct ButtplugEmbeddedClientConnector {
server: ButtplugServer,
recv: Option<Receiver<ButtplugMessageUnion>>,
}
#[cfg(feature = "server")]
impl ButtplugEmbeddedClientConnector {
pub fn new(name: &str, max_ping_time: u32) -> Self {
let (send, recv) = channel(256);
Self {
recv: Some(recv),
server: ButtplugServer::new(&name, max_ping_time, send),
}
}
}
#[cfg(feature = "server")]
#[async_trait]
impl ButtplugClientConnector for ButtplugEmbeddedClientConnector {
async fn connect(&mut self) -> Result<(), ButtplugClientConnectorError> {
Ok(())
}
async fn disconnect(&mut self) -> Result<(), ButtplugClientConnectorError> {
Ok(())
}
async fn send(&mut self, msg: &ButtplugMessageUnion, state: &ButtplugClientMessageStateShared) {
let ret_msg = self.server.send_message(msg).await;
let mut waker_state = state.lock().unwrap();
waker_state.set_reply(ret_msg.unwrap());
}
fn get_event_receiver(&mut self) -> Receiver<ButtplugMessageUnion> {
// This will panic if we've already taken the receiver.
self.recv.take().unwrap()
}
}
// The embedded connector is used heavily in the client unit tests, so we can
// assume code coverage there and omit specific tests here.
pub trait ButtplugRemoteClientConnectorSender: Sync + Send {
fn send(&self, msg: ButtplugMessageUnion);
fn close(&self);
}
pub enum ButtplugRemoteClientConnectorMessage {
Sender(Box<dyn ButtplugRemoteClientConnectorSender>),
Connected(),
Text(String),
Error(String),
ClientClose(String),
Close(String),
}
#[cfg(feature = "serialize_json")]
pub struct ButtplugRemoteClientConnectorHelper {
// Channel send/recv pair for applications wanting to send out through the
// remote connection. Receiver will be send to task on creation.
internal_send: Sender<(ButtplugMessageUnion, ButtplugClientMessageStateShared)>,
internal_recv: Option<Receiver<(ButtplugMessageUnion, ButtplugClientMessageStateShared)>>,
// Channel send/recv pair for remote connection sending information to the
// application. Receiver will be sent to task on creation.
remote_send: Sender<ButtplugRemoteClientConnectorMessage>,
remote_recv: Option<Receiver<ButtplugRemoteClientConnectorMessage>>,
event_send: Option<Sender<ButtplugMessageUnion>>,
}
#[cfg(feature = "serialize_json")]
unsafe impl Send for ButtplugRemoteClientConnectorHelper {}
#[cfg(feature = "serialize_json")]
unsafe impl Sync for ButtplugRemoteClientConnectorHelper {}
#[cfg(feature = "serialize_json")]
impl ButtplugRemoteClientConnectorHelper {
pub fn new(event_sender: Sender<ButtplugMessageUnion>) -> Self {
let (internal_send, internal_recv) = channel(256);
let (remote_send, remote_recv) = channel(256);
Self {
event_send: Some(event_sender),
remote_send,
remote_recv: Some(remote_recv),
internal_send,
internal_recv: Some(internal_recv),
}
}
pub fn get_remote_send(&self) -> Sender<ButtplugRemoteClientConnectorMessage> {
self.remote_send.clone()
}
pub async fn send(
&mut self,
msg: &ButtplugMessageUnion,
state: &ButtplugClientMessageStateShared,
) {
self.internal_send.send((msg.clone(), state.clone())).await;
}
pub async fn close(&self) {
// Emulate a close from the connector side, which will cause us to
// close.
self.remote_send
.send(ButtplugRemoteClientConnectorMessage::Close(
"Client requested close.".to_owned(),
))
.await;
}
pub fn get_recv_future(&mut self) -> impl Future {
// Set up a way to get futures in and out of the sorter, which will live
// in our connector task.
let event_send = self.event_send.take().unwrap();
// Remove the receivers we need to move into the task.
let mut remote_recv = self.remote_recv.take().unwrap();
let mut internal_recv = self.internal_recv.take().unwrap();
async move {
let mut sorter = ClientConnectorMessageSorter::default();
// Our in-task remote sender, which is a wrapped version of whatever
// bus specific sender (websocket, tcp, etc) we'll be using.
let mut remote_send: Option<Box<dyn ButtplugRemoteClientConnectorSender>> = None;
enum StreamValue {
NoValue,
Incoming(ButtplugRemoteClientConnectorMessage),
Outgoing((ButtplugMessageUnion, ButtplugClientMessageStateShared)),
}
loop {
// We use two Options instead of an enum because we may never
// get anything.
let mut stream_return: StreamValue = async {
match remote_recv.next().await {
Some(msg) => StreamValue::Incoming(msg),
None => StreamValue::NoValue,
}
}
.race(async {
match internal_recv.next().await {
Some(msg) => StreamValue::Outgoing(msg),
None => StreamValue::NoValue,
}
})
.await;
match stream_return {
StreamValue::NoValue => break,
StreamValue::Incoming(remote_msg) => {
match remote_msg {
ButtplugRemoteClientConnectorMessage::Sender(s) => {
remote_send = Some(s);
}
ButtplugRemoteClientConnectorMessage::Text(t) => {
let array: Vec<ButtplugMessageUnion> =
serde_json::from_str(&t.clone()).unwrap();
for smsg in array {
if !sorter.maybe_resolve_message(&smsg) {
info!("Sending event!");
// Send notification through event channel
event_send.send(smsg).await;
}
}
}
ButtplugRemoteClientConnectorMessage::ClientClose(s) => {
info!("Client closing connection {}", s);
if let Some(ref mut remote_sender) = remote_send {
remote_sender.close();
} else {
panic!("Can't send message yet!");
}
}
ButtplugRemoteClientConnectorMessage::Close(s) => {
info!("Connector closing connection {}", s);
break;
}
_ => {
panic!("UNHANDLED BRANCH");
}
}
}
StreamValue::Outgoing(ref mut buttplug_fut_msg) => {
// Create future sets our message ID, so make sure this
// happens before we send out the message.
sorter.register_future(&mut buttplug_fut_msg.0, &buttplug_fut_msg.1);
if let Some(ref mut remote_sender) = remote_send | else {
panic!("Can't send message yet!");
}
}
}
}
}
}
}
| {
remote_sender.send(buttplug_fut_msg.0.clone());
} | conditional_block |
mod.rs | // Buttplug Rust Source Code File - See https://buttplug.io for more info.
//
// Copyright 2016-2019 Nonpolynomial Labs LLC. All rights reserved.
//
// Licensed under the BSD 3-Clause license. See LICENSE file in the project root
// for full license information.
//! Handling of communication with Buttplug Server.
pub mod messagesorter;
#[cfg(any(feature = "client-ws", feature = "client-ws-ssl"))]
pub mod websocket;
#[cfg(feature = "server")]
use crate::server::ButtplugServer;
use crate::{
client::internal::{
ButtplugClientFuture, ButtplugClientFutureState, ButtplugClientFutureStateShared,
ButtplugClientMessageStateShared,
},
core::messages::ButtplugMessageUnion,
};
use async_std::sync::{channel, Receiver};
#[cfg(feature = "serialize_json")]
use async_std::{
prelude::{FutureExt, StreamExt},
sync::Sender,
};
use async_trait::async_trait;
#[cfg(feature = "serialize_json")]
use futures::future::Future;
#[cfg(feature = "serialize_json")]
use messagesorter::ClientConnectorMessageSorter;
use std::{error::Error, fmt};
pub type ButtplugClientConnectionState =
ButtplugClientFutureState<Result<(), ButtplugClientConnectorError>>;
pub type ButtplugClientConnectionStateShared =
ButtplugClientFutureStateShared<Result<(), ButtplugClientConnectorError>>;
pub type ButtplugClientConnectionFuture =
ButtplugClientFuture<Result<(), ButtplugClientConnectorError>>;
#[derive(Debug, Clone)]
pub struct ButtplugClientConnectorError {
pub message: String,
}
impl ButtplugClientConnectorError {
pub fn new(msg: &str) -> Self {
Self {
message: msg.to_owned(),
}
}
}
impl fmt::Display for ButtplugClientConnectorError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Init Error: {}", self.message)
}
}
impl Error for ButtplugClientConnectorError {
fn description(&self) -> &str {
self.message.as_str()
}
fn source(&self) -> Option<&(dyn Error + 'static)> {
None
}
}
// Not real sure if this is sync, since there may be state that could get weird
// in connectors implementing this trait, but Send should be ok.
#[async_trait]
pub trait ButtplugClientConnector: Send {
async fn connect(&mut self) -> Result<(), ButtplugClientConnectorError>;
async fn disconnect(&mut self) -> Result<(), ButtplugClientConnectorError>;
async fn send(&mut self, msg: &ButtplugMessageUnion, state: &ButtplugClientMessageStateShared);
fn get_event_receiver(&mut self) -> Receiver<ButtplugMessageUnion>;
}
#[cfg(feature = "server")]
pub struct ButtplugEmbeddedClientConnector {
server: ButtplugServer,
recv: Option<Receiver<ButtplugMessageUnion>>,
}
#[cfg(feature = "server")]
impl ButtplugEmbeddedClientConnector {
pub fn new(name: &str, max_ping_time: u32) -> Self {
let (send, recv) = channel(256);
Self {
recv: Some(recv),
server: ButtplugServer::new(&name, max_ping_time, send),
}
}
}
#[cfg(feature = "server")]
#[async_trait]
impl ButtplugClientConnector for ButtplugEmbeddedClientConnector {
async fn connect(&mut self) -> Result<(), ButtplugClientConnectorError> {
Ok(())
}
async fn disconnect(&mut self) -> Result<(), ButtplugClientConnectorError> {
Ok(())
}
async fn send(&mut self, msg: &ButtplugMessageUnion, state: &ButtplugClientMessageStateShared) {
let ret_msg = self.server.send_message(msg).await;
let mut waker_state = state.lock().unwrap();
waker_state.set_reply(ret_msg.unwrap());
}
fn get_event_receiver(&mut self) -> Receiver<ButtplugMessageUnion> {
// This will panic if we've already taken the receiver.
self.recv.take().unwrap()
}
}
// The embedded connector is used heavily in the client unit tests, so we can
// assume code coverage there and omit specific tests here.
pub trait ButtplugRemoteClientConnectorSender: Sync + Send {
fn send(&self, msg: ButtplugMessageUnion);
fn close(&self);
}
pub enum ButtplugRemoteClientConnectorMessage {
Sender(Box<dyn ButtplugRemoteClientConnectorSender>),
Connected(),
Text(String),
Error(String),
ClientClose(String),
Close(String),
}
#[cfg(feature = "serialize_json")]
pub struct ButtplugRemoteClientConnectorHelper {
// Channel send/recv pair for applications wanting to send out through the
// remote connection. Receiver will be send to task on creation.
internal_send: Sender<(ButtplugMessageUnion, ButtplugClientMessageStateShared)>,
internal_recv: Option<Receiver<(ButtplugMessageUnion, ButtplugClientMessageStateShared)>>,
// Channel send/recv pair for remote connection sending information to the
// application. Receiver will be sent to task on creation.
remote_send: Sender<ButtplugRemoteClientConnectorMessage>,
remote_recv: Option<Receiver<ButtplugRemoteClientConnectorMessage>>,
event_send: Option<Sender<ButtplugMessageUnion>>,
}
#[cfg(feature = "serialize_json")]
unsafe impl Send for ButtplugRemoteClientConnectorHelper {}
#[cfg(feature = "serialize_json")]
unsafe impl Sync for ButtplugRemoteClientConnectorHelper {}
#[cfg(feature = "serialize_json")]
impl ButtplugRemoteClientConnectorHelper {
pub fn new(event_sender: Sender<ButtplugMessageUnion>) -> Self {
let (internal_send, internal_recv) = channel(256);
let (remote_send, remote_recv) = channel(256);
Self {
event_send: Some(event_sender),
remote_send,
remote_recv: Some(remote_recv),
internal_send,
internal_recv: Some(internal_recv),
}
}
pub fn get_remote_send(&self) -> Sender<ButtplugRemoteClientConnectorMessage> {
self.remote_send.clone()
}
pub async fn send(
&mut self,
msg: &ButtplugMessageUnion,
state: &ButtplugClientMessageStateShared,
) {
self.internal_send.send((msg.clone(), state.clone())).await;
}
pub async fn close(&self) {
// Emulate a close from the connector side, which will cause us to
// close.
self.remote_send
.send(ButtplugRemoteClientConnectorMessage::Close(
"Client requested close.".to_owned(),
))
.await;
}
pub fn get_recv_future(&mut self) -> impl Future {
// Set up a way to get futures in and out of the sorter, which will live
// in our connector task.
let event_send = self.event_send.take().unwrap();
// Remove the receivers we need to move into the task.
let mut remote_recv = self.remote_recv.take().unwrap();
let mut internal_recv = self.internal_recv.take().unwrap();
async move {
let mut sorter = ClientConnectorMessageSorter::default();
// Our in-task remote sender, which is a wrapped version of whatever
// bus specific sender (websocket, tcp, etc) we'll be using.
let mut remote_send: Option<Box<dyn ButtplugRemoteClientConnectorSender>> = None;
enum StreamValue {
NoValue,
Incoming(ButtplugRemoteClientConnectorMessage),
Outgoing((ButtplugMessageUnion, ButtplugClientMessageStateShared)),
}
| // We use two Options instead of an enum because we may never
// get anything.
let mut stream_return: StreamValue = async {
match remote_recv.next().await {
Some(msg) => StreamValue::Incoming(msg),
None => StreamValue::NoValue,
}
}
.race(async {
match internal_recv.next().await {
Some(msg) => StreamValue::Outgoing(msg),
None => StreamValue::NoValue,
}
})
.await;
match stream_return {
StreamValue::NoValue => break,
StreamValue::Incoming(remote_msg) => {
match remote_msg {
ButtplugRemoteClientConnectorMessage::Sender(s) => {
remote_send = Some(s);
}
ButtplugRemoteClientConnectorMessage::Text(t) => {
let array: Vec<ButtplugMessageUnion> =
serde_json::from_str(&t.clone()).unwrap();
for smsg in array {
if !sorter.maybe_resolve_message(&smsg) {
info!("Sending event!");
// Send notification through event channel
event_send.send(smsg).await;
}
}
}
ButtplugRemoteClientConnectorMessage::ClientClose(s) => {
info!("Client closing connection {}", s);
if let Some(ref mut remote_sender) = remote_send {
remote_sender.close();
} else {
panic!("Can't send message yet!");
}
}
ButtplugRemoteClientConnectorMessage::Close(s) => {
info!("Connector closing connection {}", s);
break;
}
_ => {
panic!("UNHANDLED BRANCH");
}
}
}
StreamValue::Outgoing(ref mut buttplug_fut_msg) => {
// Create future sets our message ID, so make sure this
// happens before we send out the message.
sorter.register_future(&mut buttplug_fut_msg.0, &buttplug_fut_msg.1);
if let Some(ref mut remote_sender) = remote_send {
remote_sender.send(buttplug_fut_msg.0.clone());
} else {
panic!("Can't send message yet!");
}
}
}
}
}
}
} | loop { | random_line_split |
mod.rs | // Buttplug Rust Source Code File - See https://buttplug.io for more info.
//
// Copyright 2016-2019 Nonpolynomial Labs LLC. All rights reserved.
//
// Licensed under the BSD 3-Clause license. See LICENSE file in the project root
// for full license information.
//! Handling of communication with Buttplug Server.
pub mod messagesorter;
#[cfg(any(feature = "client-ws", feature = "client-ws-ssl"))]
pub mod websocket;
#[cfg(feature = "server")]
use crate::server::ButtplugServer;
use crate::{
client::internal::{
ButtplugClientFuture, ButtplugClientFutureState, ButtplugClientFutureStateShared,
ButtplugClientMessageStateShared,
},
core::messages::ButtplugMessageUnion,
};
use async_std::sync::{channel, Receiver};
#[cfg(feature = "serialize_json")]
use async_std::{
prelude::{FutureExt, StreamExt},
sync::Sender,
};
use async_trait::async_trait;
#[cfg(feature = "serialize_json")]
use futures::future::Future;
#[cfg(feature = "serialize_json")]
use messagesorter::ClientConnectorMessageSorter;
use std::{error::Error, fmt};
pub type ButtplugClientConnectionState =
ButtplugClientFutureState<Result<(), ButtplugClientConnectorError>>;
pub type ButtplugClientConnectionStateShared =
ButtplugClientFutureStateShared<Result<(), ButtplugClientConnectorError>>;
pub type ButtplugClientConnectionFuture =
ButtplugClientFuture<Result<(), ButtplugClientConnectorError>>;
#[derive(Debug, Clone)]
pub struct ButtplugClientConnectorError {
pub message: String,
}
impl ButtplugClientConnectorError {
pub fn new(msg: &str) -> Self {
Self {
message: msg.to_owned(),
}
}
}
impl fmt::Display for ButtplugClientConnectorError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Init Error: {}", self.message)
}
}
impl Error for ButtplugClientConnectorError {
fn description(&self) -> &str {
self.message.as_str()
}
fn source(&self) -> Option<&(dyn Error + 'static)> {
None
}
}
// Not real sure if this is sync, since there may be state that could get weird
// in connectors implementing this trait, but Send should be ok.
#[async_trait]
pub trait ButtplugClientConnector: Send {
async fn connect(&mut self) -> Result<(), ButtplugClientConnectorError>;
async fn disconnect(&mut self) -> Result<(), ButtplugClientConnectorError>;
async fn send(&mut self, msg: &ButtplugMessageUnion, state: &ButtplugClientMessageStateShared);
fn get_event_receiver(&mut self) -> Receiver<ButtplugMessageUnion>;
}
#[cfg(feature = "server")]
pub struct ButtplugEmbeddedClientConnector {
server: ButtplugServer,
recv: Option<Receiver<ButtplugMessageUnion>>,
}
#[cfg(feature = "server")]
impl ButtplugEmbeddedClientConnector {
pub fn new(name: &str, max_ping_time: u32) -> Self {
let (send, recv) = channel(256);
Self {
recv: Some(recv),
server: ButtplugServer::new(&name, max_ping_time, send),
}
}
}
#[cfg(feature = "server")]
#[async_trait]
impl ButtplugClientConnector for ButtplugEmbeddedClientConnector {
async fn connect(&mut self) -> Result<(), ButtplugClientConnectorError> {
Ok(())
}
async fn disconnect(&mut self) -> Result<(), ButtplugClientConnectorError> {
Ok(())
}
async fn send(&mut self, msg: &ButtplugMessageUnion, state: &ButtplugClientMessageStateShared) {
let ret_msg = self.server.send_message(msg).await;
let mut waker_state = state.lock().unwrap();
waker_state.set_reply(ret_msg.unwrap());
}
fn get_event_receiver(&mut self) -> Receiver<ButtplugMessageUnion> {
// This will panic if we've already taken the receiver.
self.recv.take().unwrap()
}
}
// The embedded connector is used heavily in the client unit tests, so we can
// assume code coverage there and omit specific tests here.
pub trait ButtplugRemoteClientConnectorSender: Sync + Send {
fn send(&self, msg: ButtplugMessageUnion);
fn close(&self);
}
pub enum ButtplugRemoteClientConnectorMessage {
Sender(Box<dyn ButtplugRemoteClientConnectorSender>),
Connected(),
Text(String),
Error(String),
ClientClose(String),
Close(String),
}
#[cfg(feature = "serialize_json")]
pub struct ButtplugRemoteClientConnectorHelper {
// Channel send/recv pair for applications wanting to send out through the
// remote connection. Receiver will be send to task on creation.
internal_send: Sender<(ButtplugMessageUnion, ButtplugClientMessageStateShared)>,
internal_recv: Option<Receiver<(ButtplugMessageUnion, ButtplugClientMessageStateShared)>>,
// Channel send/recv pair for remote connection sending information to the
// application. Receiver will be sent to task on creation.
remote_send: Sender<ButtplugRemoteClientConnectorMessage>,
remote_recv: Option<Receiver<ButtplugRemoteClientConnectorMessage>>,
event_send: Option<Sender<ButtplugMessageUnion>>,
}
#[cfg(feature = "serialize_json")]
unsafe impl Send for ButtplugRemoteClientConnectorHelper {}
#[cfg(feature = "serialize_json")]
unsafe impl Sync for ButtplugRemoteClientConnectorHelper {}
#[cfg(feature = "serialize_json")]
impl ButtplugRemoteClientConnectorHelper {
pub fn new(event_sender: Sender<ButtplugMessageUnion>) -> Self {
let (internal_send, internal_recv) = channel(256);
let (remote_send, remote_recv) = channel(256);
Self {
event_send: Some(event_sender),
remote_send,
remote_recv: Some(remote_recv),
internal_send,
internal_recv: Some(internal_recv),
}
}
pub fn get_remote_send(&self) -> Sender<ButtplugRemoteClientConnectorMessage> {
self.remote_send.clone()
}
pub async fn send(
&mut self,
msg: &ButtplugMessageUnion,
state: &ButtplugClientMessageStateShared,
) {
self.internal_send.send((msg.clone(), state.clone())).await;
}
pub async fn close(&self) {
// Emulate a close from the connector side, which will cause us to
// close.
self.remote_send
.send(ButtplugRemoteClientConnectorMessage::Close(
"Client requested close.".to_owned(),
))
.await;
}
pub fn get_recv_future(&mut self) -> impl Future |
}
| {
// Set up a way to get futures in and out of the sorter, which will live
// in our connector task.
let event_send = self.event_send.take().unwrap();
// Remove the receivers we need to move into the task.
let mut remote_recv = self.remote_recv.take().unwrap();
let mut internal_recv = self.internal_recv.take().unwrap();
async move {
let mut sorter = ClientConnectorMessageSorter::default();
// Our in-task remote sender, which is a wrapped version of whatever
// bus specific sender (websocket, tcp, etc) we'll be using.
let mut remote_send: Option<Box<dyn ButtplugRemoteClientConnectorSender>> = None;
enum StreamValue {
NoValue,
Incoming(ButtplugRemoteClientConnectorMessage),
Outgoing((ButtplugMessageUnion, ButtplugClientMessageStateShared)),
}
loop {
// We use two Options instead of an enum because we may never
// get anything.
let mut stream_return: StreamValue = async {
match remote_recv.next().await {
Some(msg) => StreamValue::Incoming(msg),
None => StreamValue::NoValue,
}
}
.race(async {
match internal_recv.next().await {
Some(msg) => StreamValue::Outgoing(msg),
None => StreamValue::NoValue,
}
})
.await;
match stream_return {
StreamValue::NoValue => break,
StreamValue::Incoming(remote_msg) => {
match remote_msg {
ButtplugRemoteClientConnectorMessage::Sender(s) => {
remote_send = Some(s);
}
ButtplugRemoteClientConnectorMessage::Text(t) => {
let array: Vec<ButtplugMessageUnion> =
serde_json::from_str(&t.clone()).unwrap();
for smsg in array {
if !sorter.maybe_resolve_message(&smsg) {
info!("Sending event!");
// Send notification through event channel
event_send.send(smsg).await;
}
}
}
ButtplugRemoteClientConnectorMessage::ClientClose(s) => {
info!("Client closing connection {}", s);
if let Some(ref mut remote_sender) = remote_send {
remote_sender.close();
} else {
panic!("Can't send message yet!");
}
}
ButtplugRemoteClientConnectorMessage::Close(s) => {
info!("Connector closing connection {}", s);
break;
}
_ => {
panic!("UNHANDLED BRANCH");
}
}
}
StreamValue::Outgoing(ref mut buttplug_fut_msg) => {
// Create future sets our message ID, so make sure this
// happens before we send out the message.
sorter.register_future(&mut buttplug_fut_msg.0, &buttplug_fut_msg.1);
if let Some(ref mut remote_sender) = remote_send {
remote_sender.send(buttplug_fut_msg.0.clone());
} else {
panic!("Can't send message yet!");
}
}
}
}
}
} | identifier_body |
main.rs | // Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate clap;
extern crate env_logger;
extern crate habitat_core as hcore;
extern crate habitat_common as common;
extern crate habitat_pkg_export_docker as export_docker;
extern crate handlebars;
extern crate rand;
#[macro_use]
extern crate serde_json;
#[macro_use]
extern crate log;
extern crate failure;
#[macro_use]
extern crate failure_derive;
use clap::{App, Arg};
use handlebars::Handlebars;
use std::env;
use std::result;
use std::str::FromStr;
use std::io::prelude::*;
use std::io;
use std::fs::File;
use std::path::Path;
use hcore::channel;
use hcore::PROGRAM_NAME;
use hcore::url as hurl;
use hcore::env as henv;
use hcore::package::{PackageArchive, PackageIdent};
use common::ui::{Coloring, UI, NOCOLORING_ENVVAR, NONINTERACTIVE_ENVVAR};
use rand::Rng;
use export_docker::{Cli, Credentials, BuildSpec, Naming, Result};
// Synced with the version of the Habitat operator.
pub const VERSION: &'static str = "0.1.0";
// Kubernetes manifest template
const MANIFESTFILE: &'static str = include_str!("../defaults/KubernetesManifest.hbs");
const BINDFILE: &'static str = include_str!("../defaults/KubernetesBind.hbs");
#[derive(Debug, Fail)]
enum Error {
#[fail(display = "Invalid bind specification '{}'", _0)]
InvalidBindSpec(String),
}
fn main() {
env_logger::init().unwrap();
let mut ui = get_ui();
if let Err(e) = start(&mut ui) {
let _ = ui.fatal(e);
std::process::exit(1)
}
}
fn get_ui() -> UI {
let isatty = if henv::var(NONINTERACTIVE_ENVVAR)
.map(|val| val == "true")
.unwrap_or(false)
{
Some(false)
} else {
None
};
let coloring = if henv::var(NOCOLORING_ENVVAR)
.map(|val| val == "true")
.unwrap_or(false)
{
Coloring::Never
} else {
Coloring::Auto
};
UI::default_with(coloring, isatty)
}
fn start(ui: &mut UI) -> Result<()> {
let m = cli().get_matches();
debug!("clap cli args: {:?}", m);
if !m.is_present("NO_DOCKER_IMAGE") {
gen_docker_img(ui, &m)?;
}
gen_k8s_manifest(ui, &m)
}
fn gen_docker_img(ui: &mut UI, matches: &clap::ArgMatches) -> Result<()> {
let default_channel = channel::default();
let default_url = hurl::default_bldr_url();
let spec = BuildSpec::new_from_cli_matches(&matches, &default_channel, &default_url);
let naming = Naming::new_from_cli_matches(&matches);
let docker_image = export_docker::export(ui, spec, &naming)?;
docker_image.create_report(
ui,
env::current_dir()?.join("results"),
)?;
if matches.is_present("PUSH_IMAGE") {
let credentials = Credentials::new(
naming.registry_type,
matches.value_of("REGISTRY_USERNAME").unwrap(),
matches.value_of("REGISTRY_PASSWORD").unwrap(),
)?;
docker_image.push(ui, &credentials, naming.registry_url)?;
}
if matches.is_present("RM_IMAGE") {
docker_image.rm(ui)?;
}
Ok(())
}
fn gen_k8s_manifest(_ui: &mut UI, matches: &clap::ArgMatches) -> Result<()> {
let count = matches.value_of("COUNT").unwrap_or("1");
let topology = matches.value_of("TOPOLOGY").unwrap_or("standalone");
let group = matches.value_of("GROUP");
let config_secret_name = matches.value_of("CONFIG_SECRET_NAME");
let ring_secret_name = matches.value_of("RING_SECRET_NAME");
// clap ensures that we do have the mandatory args so unwrap() is fine here
let pkg_ident_str = matches.value_of("PKG_IDENT_OR_ARTIFACT").unwrap();
let pkg_ident = if Path::new(pkg_ident_str).is_file() {
// We're going to use the `$pkg_origin/$pkg_name`, fuzzy form of a package
// identifier to ensure that update strategies will work if desired
PackageArchive::new(pkg_ident_str).ident()?
} else | ;
// To allow multiple instances of Habitat application in Kubernetes,
// random suffix in metadata_name is needed.
let metadata_name = format!(
"{}-{}{}",
pkg_ident.name,
rand::thread_rng()
.gen_ascii_chars()
.filter(|c| c.is_lowercase() || c.is_numeric())
.take(4)
.collect::<String>(),
rand::thread_rng()
.gen_ascii_chars()
.filter(|c| c.is_lowercase() && !c.is_numeric())
.take(1)
.collect::<String>()
);
let image = match matches.value_of("IMAGE_NAME") {
Some(i) => i.to_string(),
None => pkg_ident.origin + "/" + &pkg_ident.name,
};
let bind = matches.value_of("BIND");
let json = json!({
"metadata_name": metadata_name,
"habitat_name": pkg_ident.name,
"image": image,
"count": count,
"service_topology": topology,
"service_group": group,
"config_secret_name": config_secret_name,
"ring_secret_name": ring_secret_name,
"bind": bind,
});
let mut write: Box<Write> = match matches.value_of("OUTPUT") {
Some(o) if o != "-" => Box::new(File::create(o)?),
_ => Box::new(io::stdout()),
};
let r = Handlebars::new().template_render(MANIFESTFILE, &json)?;
let mut out = r.lines().filter(|l| *l != "").collect::<Vec<_>>().join(
"\n",
) + "\n";
if let Some(binds) = matches.values_of("BIND") {
for bind in binds {
let split: Vec<&str> = bind.split(":").collect();
if split.len() < 3 {
return Err(Error::InvalidBindSpec(bind.to_string()).into());
}
let json = json!({
"name": split[0],
"service": split[1],
"group": split[2],
});
out += &Handlebars::new().template_render(BINDFILE, &json)?;
}
}
write.write(out.as_bytes())?;
Ok(())
}
fn cli<'a, 'b>() -> App<'a, 'b> {
let name: &str = &*PROGRAM_NAME;
let about = "Creates a Docker image and Kubernetes manifest for a Habitat package. Habitat \
operator must be deployed within the Kubernetes cluster before the generated \
manifest can be applied to this cluster.";
let app = Cli::new(name, about)
.add_base_packages_args()
.add_builder_args()
.add_tagging_args()
.add_publishing_args()
.app;
app.arg(
Arg::with_name("OUTPUT")
.value_name("OUTPUT")
.long("output")
.short("o")
.help(
"Name of manifest file to create. Pass '-' for stdout (default: -)",
),
).arg(
Arg::with_name("COUNT")
.value_name("COUNT")
.long("count")
.validator(valid_natural_number)
.help("Count is the number of desired instances"),
)
.arg(
Arg::with_name("TOPOLOGY")
.value_name("TOPOLOGY")
.long("topology")
.short("t")
.possible_values(&["standalone", "leader"])
.help(
"A topology describes the intended relationship between peers \
within a Habitat service group. Specify either standalone or leader \
topology (default: standalone)",
),
)
.arg(
Arg::with_name("GROUP")
.value_name("GROUP")
.long("service-group")
.short("g")
.help(
"group is a logical grouping of services with the same package and \
topology type connected together in a ring (default: default)",
),
)
.arg(
Arg::with_name("CONFIG_SECRET_NAME")
.value_name("CONFIG_SECRET_NAME")
.long("config-secret-name")
.short("n")
.help(
"name of the Kubernetes Secret containing the config file - \
user.toml - that the user has previously created. Habitat will \
use it for initial configuration of the service",
),
)
.arg(
Arg::with_name("RING_SECRET_NAME")
.value_name("RING_SECRET_NAME")
.long("ring-secret-name")
.short("r")
.help(
"name of the Kubernetes Secret that contains the ring key, which \
encrypts the communication between Habitat supervisors",
),
)
.arg(
Arg::with_name("BIND")
.value_name("BIND")
.long("bind")
.short("b")
.multiple(true)
.number_of_values(1)
.help(
"Bind to another service to form a producer/consumer relationship, \
specified as name:service:group",
),
)
.arg(
Arg::with_name("NO_DOCKER_IMAGE")
.long("no-docker-image")
.short("d")
.help(
"Disable creation of the Docker image and only create a Kubernetes manifest",
),
)
.arg(
Arg::with_name("PKG_IDENT_OR_ARTIFACT")
.value_name("PKG_IDENT_OR_ARTIFACT")
.required(true)
.help("Habitat package identifier (ex: acme/redis)"),
)
}
fn valid_natural_number(val: String) -> result::Result<(), String> {
match val.parse::<u32>() {
Ok(_) => Ok(()),
Err(_) => Err(format!("{} is not a natural number", val)),
}
}
| {
PackageIdent::from_str(pkg_ident_str)?
} | conditional_block |
main.rs | // Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate clap;
extern crate env_logger;
extern crate habitat_core as hcore;
extern crate habitat_common as common;
extern crate habitat_pkg_export_docker as export_docker;
extern crate handlebars;
extern crate rand;
#[macro_use]
extern crate serde_json;
#[macro_use]
extern crate log;
extern crate failure;
#[macro_use]
extern crate failure_derive;
use clap::{App, Arg};
use handlebars::Handlebars;
use std::env;
use std::result;
use std::str::FromStr;
use std::io::prelude::*;
use std::io;
use std::fs::File;
use std::path::Path;
use hcore::channel;
use hcore::PROGRAM_NAME;
use hcore::url as hurl;
use hcore::env as henv;
use hcore::package::{PackageArchive, PackageIdent};
use common::ui::{Coloring, UI, NOCOLORING_ENVVAR, NONINTERACTIVE_ENVVAR};
use rand::Rng;
use export_docker::{Cli, Credentials, BuildSpec, Naming, Result};
// Synced with the version of the Habitat operator.
pub const VERSION: &'static str = "0.1.0";
// Kubernetes manifest template
const MANIFESTFILE: &'static str = include_str!("../defaults/KubernetesManifest.hbs");
const BINDFILE: &'static str = include_str!("../defaults/KubernetesBind.hbs");
#[derive(Debug, Fail)]
enum Error {
#[fail(display = "Invalid bind specification '{}'", _0)]
InvalidBindSpec(String),
}
fn main() {
env_logger::init().unwrap();
let mut ui = get_ui();
if let Err(e) = start(&mut ui) {
let _ = ui.fatal(e);
std::process::exit(1)
}
}
fn get_ui() -> UI {
let isatty = if henv::var(NONINTERACTIVE_ENVVAR)
.map(|val| val == "true")
.unwrap_or(false)
{
Some(false)
} else {
None
};
let coloring = if henv::var(NOCOLORING_ENVVAR)
.map(|val| val == "true")
.unwrap_or(false)
{
Coloring::Never
} else {
Coloring::Auto
};
UI::default_with(coloring, isatty)
}
fn | (ui: &mut UI) -> Result<()> {
let m = cli().get_matches();
debug!("clap cli args: {:?}", m);
if !m.is_present("NO_DOCKER_IMAGE") {
gen_docker_img(ui, &m)?;
}
gen_k8s_manifest(ui, &m)
}
fn gen_docker_img(ui: &mut UI, matches: &clap::ArgMatches) -> Result<()> {
let default_channel = channel::default();
let default_url = hurl::default_bldr_url();
let spec = BuildSpec::new_from_cli_matches(&matches, &default_channel, &default_url);
let naming = Naming::new_from_cli_matches(&matches);
let docker_image = export_docker::export(ui, spec, &naming)?;
docker_image.create_report(
ui,
env::current_dir()?.join("results"),
)?;
if matches.is_present("PUSH_IMAGE") {
let credentials = Credentials::new(
naming.registry_type,
matches.value_of("REGISTRY_USERNAME").unwrap(),
matches.value_of("REGISTRY_PASSWORD").unwrap(),
)?;
docker_image.push(ui, &credentials, naming.registry_url)?;
}
if matches.is_present("RM_IMAGE") {
docker_image.rm(ui)?;
}
Ok(())
}
fn gen_k8s_manifest(_ui: &mut UI, matches: &clap::ArgMatches) -> Result<()> {
let count = matches.value_of("COUNT").unwrap_or("1");
let topology = matches.value_of("TOPOLOGY").unwrap_or("standalone");
let group = matches.value_of("GROUP");
let config_secret_name = matches.value_of("CONFIG_SECRET_NAME");
let ring_secret_name = matches.value_of("RING_SECRET_NAME");
// clap ensures that we do have the mandatory args so unwrap() is fine here
let pkg_ident_str = matches.value_of("PKG_IDENT_OR_ARTIFACT").unwrap();
let pkg_ident = if Path::new(pkg_ident_str).is_file() {
// We're going to use the `$pkg_origin/$pkg_name`, fuzzy form of a package
// identifier to ensure that update strategies will work if desired
PackageArchive::new(pkg_ident_str).ident()?
} else {
PackageIdent::from_str(pkg_ident_str)?
};
// To allow multiple instances of Habitat application in Kubernetes,
// random suffix in metadata_name is needed.
let metadata_name = format!(
"{}-{}{}",
pkg_ident.name,
rand::thread_rng()
.gen_ascii_chars()
.filter(|c| c.is_lowercase() || c.is_numeric())
.take(4)
.collect::<String>(),
rand::thread_rng()
.gen_ascii_chars()
.filter(|c| c.is_lowercase() && !c.is_numeric())
.take(1)
.collect::<String>()
);
let image = match matches.value_of("IMAGE_NAME") {
Some(i) => i.to_string(),
None => pkg_ident.origin + "/" + &pkg_ident.name,
};
let bind = matches.value_of("BIND");
let json = json!({
"metadata_name": metadata_name,
"habitat_name": pkg_ident.name,
"image": image,
"count": count,
"service_topology": topology,
"service_group": group,
"config_secret_name": config_secret_name,
"ring_secret_name": ring_secret_name,
"bind": bind,
});
let mut write: Box<Write> = match matches.value_of("OUTPUT") {
Some(o) if o != "-" => Box::new(File::create(o)?),
_ => Box::new(io::stdout()),
};
let r = Handlebars::new().template_render(MANIFESTFILE, &json)?;
let mut out = r.lines().filter(|l| *l != "").collect::<Vec<_>>().join(
"\n",
) + "\n";
if let Some(binds) = matches.values_of("BIND") {
for bind in binds {
let split: Vec<&str> = bind.split(":").collect();
if split.len() < 3 {
return Err(Error::InvalidBindSpec(bind.to_string()).into());
}
let json = json!({
"name": split[0],
"service": split[1],
"group": split[2],
});
out += &Handlebars::new().template_render(BINDFILE, &json)?;
}
}
write.write(out.as_bytes())?;
Ok(())
}
fn cli<'a, 'b>() -> App<'a, 'b> {
let name: &str = &*PROGRAM_NAME;
let about = "Creates a Docker image and Kubernetes manifest for a Habitat package. Habitat \
operator must be deployed within the Kubernetes cluster before the generated \
manifest can be applied to this cluster.";
let app = Cli::new(name, about)
.add_base_packages_args()
.add_builder_args()
.add_tagging_args()
.add_publishing_args()
.app;
app.arg(
Arg::with_name("OUTPUT")
.value_name("OUTPUT")
.long("output")
.short("o")
.help(
"Name of manifest file to create. Pass '-' for stdout (default: -)",
),
).arg(
Arg::with_name("COUNT")
.value_name("COUNT")
.long("count")
.validator(valid_natural_number)
.help("Count is the number of desired instances"),
)
.arg(
Arg::with_name("TOPOLOGY")
.value_name("TOPOLOGY")
.long("topology")
.short("t")
.possible_values(&["standalone", "leader"])
.help(
"A topology describes the intended relationship between peers \
within a Habitat service group. Specify either standalone or leader \
topology (default: standalone)",
),
)
.arg(
Arg::with_name("GROUP")
.value_name("GROUP")
.long("service-group")
.short("g")
.help(
"group is a logical grouping of services with the same package and \
topology type connected together in a ring (default: default)",
),
)
.arg(
Arg::with_name("CONFIG_SECRET_NAME")
.value_name("CONFIG_SECRET_NAME")
.long("config-secret-name")
.short("n")
.help(
"name of the Kubernetes Secret containing the config file - \
user.toml - that the user has previously created. Habitat will \
use it for initial configuration of the service",
),
)
.arg(
Arg::with_name("RING_SECRET_NAME")
.value_name("RING_SECRET_NAME")
.long("ring-secret-name")
.short("r")
.help(
"name of the Kubernetes Secret that contains the ring key, which \
encrypts the communication between Habitat supervisors",
),
)
.arg(
Arg::with_name("BIND")
.value_name("BIND")
.long("bind")
.short("b")
.multiple(true)
.number_of_values(1)
.help(
"Bind to another service to form a producer/consumer relationship, \
specified as name:service:group",
),
)
.arg(
Arg::with_name("NO_DOCKER_IMAGE")
.long("no-docker-image")
.short("d")
.help(
"Disable creation of the Docker image and only create a Kubernetes manifest",
),
)
.arg(
Arg::with_name("PKG_IDENT_OR_ARTIFACT")
.value_name("PKG_IDENT_OR_ARTIFACT")
.required(true)
.help("Habitat package identifier (ex: acme/redis)"),
)
}
fn valid_natural_number(val: String) -> result::Result<(), String> {
match val.parse::<u32>() {
Ok(_) => Ok(()),
Err(_) => Err(format!("{} is not a natural number", val)),
}
}
| start | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.