CombinedText
stringlengths
4
3.42M
# encoding: utf-8 module Humboldt module TypeConverter class Binary HADOOP = ::Hadoop::Io::BytesWritable RUBY = ::String attr_reader :hadoop def hadoop=(value) unless value.is_a?(HADOOP) raise ArgumentError, "Hadoop type mismatch, was #{value.class}, expected #{HADOOP}" end @hadoop = value end def initialize @hadoop = HADOOP.new end def ruby String.from_java_bytes(@hadoop.bytes).byteslice(0, @hadoop.length) end def ruby=(value) unless value.is_a?(RUBY) raise ArgumentError, "Hadoop type mismatch, was #{value.class}, expected #{RUBY}" end @hadoop.set(value.to_java_bytes, 0, value.bytesize) end end begin require 'msgpack' class Encoded < Binary def ruby=(value) unless value.is_a?(Hash) || value.is_a?(Array) raise ArgumentError, "Hadoop type mismatch, was #{value.class}, expected Hash or Array" end packed = MessagePack.pack(value) @hadoop.set(packed.to_java_bytes, 0, packed.bytesize) end def ruby packed = String.from_java_bytes(@hadoop.bytes).byteslice(0, @hadoop.length) MessagePack.unpack(packed, encoding: Encoding::UTF_8) end end rescue LoadError end class Text HADOOP = ::Hadoop::Io::Text RUBY = ::String attr_reader :hadoop def hadoop=(value) unless value.is_a?(HADOOP) raise ArgumentError, "Hadoop type mismatch, was #{value.class}, expected #{HADOOP}" end @hadoop = value end def initialize @hadoop = HADOOP.new end def ruby String.from_java_bytes(@hadoop.bytes).byteslice(0, @hadoop.length).force_encoding(Encoding::UTF_8) end def ruby=(value) unless value.is_a?(RUBY) raise ArgumentError, "Hadoop type mismatch, was #{value.class}, expected #{RUBY}" end if value.encoding == Encoding::UTF_8 @hadoop.set(value.to_java_bytes, 0, value.bytesize) else @hadoop.set(value) end end end begin require 'json' class Json < Text def ruby=(value) unless value.is_a?(Hash) || value.is_a?(Array) raise ArgumentError, "Hadoop type mismatch, was #{value.class}, expected Hash or Array" end @hadoop.set(JSON.generate(value)) end def ruby JSON.parse(hadoop.to_s) end end end class Long HADOOP = 
::Hadoop::Io::LongWritable RUBY = ::Integer attr_reader :hadoop def hadoop=(value) unless value.is_a?(HADOOP) raise ArgumentError, "Hadoop type mismatch, was #{value.class}, expected #{HADOOP}" end @hadoop = value end def initialize @hadoop = HADOOP.new end def ruby @hadoop.get end def ruby=(value) unless value.is_a?(Integer) raise ArgumentError, "Hadoop type mismatch, was #{value.class}, expected #{RUBY}" end @hadoop.set value end end class None HADOOP = ::Hadoop::Io::NullWritable RUBY = ::NilClass def hadoop HADOOP.get end def hadoop=(value) unless value.is_a?(HADOOP) raise ArgumentError, "Hadoop type mismatch, was #{value.class}, expected #{HADOOP}" end end def ruby nil end def ruby=(value) unless value.nil? raise ArgumentError, "Hadoop type mismatch, was #{value.class}, expected #{RUBY}" end end end TYPE_CONVERTER_CLASS_CACHE = Hash.new { |h,k| h[k] = const_get(k.to_s.capitalize) } def self.[](name) TYPE_CONVERTER_CLASS_CACHE[name] end FROM_HADOOP_MAPPINGS = { ::Hadoop::Io::Text => Text, ::Hadoop::Io::BytesWritable => Binary, ::Hadoop::Io::LongWritable => Long, ::Hadoop::Io::NullWritable => None }.freeze def self.from_hadoop(hadoop_class) accessor = FROM_HADOOP_MAPPINGS[hadoop_class] raise ArgumentError, "Unsupported Hadoop type: #{hadoop_class}" unless accessor accessor end end end Reimplement Json/Message pack converters to call super This will allow optimisations to base classes to be used for these classes as well. 
# encoding: utf-8 module Humboldt module TypeConverter class Binary HADOOP = ::Hadoop::Io::BytesWritable RUBY = ::String attr_reader :hadoop def hadoop=(value) unless value.is_a?(HADOOP) raise ArgumentError, "Hadoop type mismatch, was #{value.class}, expected #{HADOOP}" end @hadoop = value end def initialize @hadoop = HADOOP.new end def ruby String.from_java_bytes(@hadoop.bytes).byteslice(0, @hadoop.length) end def ruby=(value) unless value.is_a?(RUBY) raise ArgumentError, "Hadoop type mismatch, was #{value.class}, expected #{RUBY}" end @hadoop.set(value.to_java_bytes, 0, value.bytesize) end end begin require 'msgpack' class Encoded < Binary def ruby=(value) unless value.is_a?(Hash) || value.is_a?(Array) raise ArgumentError, "Hadoop type mismatch, was #{value.class}, expected Hash or Array" end super(MessagePack.pack(value)) end def ruby MessagePack.unpack(super(), encoding: Encoding::UTF_8) end end rescue LoadError end class Text HADOOP = ::Hadoop::Io::Text RUBY = ::String attr_reader :hadoop def hadoop=(value) unless value.is_a?(HADOOP) raise ArgumentError, "Hadoop type mismatch, was #{value.class}, expected #{HADOOP}" end @hadoop = value end def initialize @hadoop = HADOOP.new end def ruby String.from_java_bytes(@hadoop.bytes).byteslice(0, @hadoop.length).force_encoding(Encoding::UTF_8) end def ruby=(value) unless value.is_a?(RUBY) raise ArgumentError, "Hadoop type mismatch, was #{value.class}, expected #{RUBY}" end if value.encoding == Encoding::UTF_8 @hadoop.set(value.to_java_bytes, 0, value.bytesize) else @hadoop.set(value) end end end begin require 'json' class Json < Text def ruby=(value) unless value.is_a?(Hash) || value.is_a?(Array) raise ArgumentError, "Hadoop type mismatch, was #{value.class}, expected Hash or Array" end super(JSON.generate(value)) end def ruby JSON.parse(super()) end end end class Long HADOOP = ::Hadoop::Io::LongWritable RUBY = ::Integer attr_reader :hadoop def hadoop=(value) unless value.is_a?(HADOOP) raise ArgumentError, "Hadoop type 
mismatch, was #{value.class}, expected #{HADOOP}" end @hadoop = value end def initialize @hadoop = HADOOP.new end def ruby @hadoop.get end def ruby=(value) unless value.is_a?(Integer) raise ArgumentError, "Hadoop type mismatch, was #{value.class}, expected #{RUBY}" end @hadoop.set value end end class None HADOOP = ::Hadoop::Io::NullWritable RUBY = ::NilClass def hadoop HADOOP.get end def hadoop=(value) unless value.is_a?(HADOOP) raise ArgumentError, "Hadoop type mismatch, was #{value.class}, expected #{HADOOP}" end end def ruby nil end def ruby=(value) unless value.nil? raise ArgumentError, "Hadoop type mismatch, was #{value.class}, expected #{RUBY}" end end end TYPE_CONVERTER_CLASS_CACHE = Hash.new { |h,k| h[k] = const_get(k.to_s.capitalize) } def self.[](name) TYPE_CONVERTER_CLASS_CACHE[name] end FROM_HADOOP_MAPPINGS = { ::Hadoop::Io::Text => Text, ::Hadoop::Io::BytesWritable => Binary, ::Hadoop::Io::LongWritable => Long, ::Hadoop::Io::NullWritable => None }.freeze def self.from_hadoop(hadoop_class) accessor = FROM_HADOOP_MAPPINGS[hadoop_class] raise ArgumentError, "Unsupported Hadoop type: #{hadoop_class}" unless accessor accessor end end end
module I18nScrewdriver VERSION = "9.5" end bump version module I18nScrewdriver VERSION = "9.6" end
module IdentityCache module QueryAPI extend ActiveSupport::Concern included do |base| base.after_commit :expire_cache end module ClassMethods # Similar to ActiveRecord::Base#exists? will return true if the id can be # found in the cache or in the DB. def exists_with_identity_cache?(id) raise NotImplementedError, "exists_with_identity_cache? needs the primary index enabled" unless primary_cache_index_enabled !!fetch_by_id(id) end # Default fetcher added to the model on inclusion, it behaves like # ActiveRecord::Base.where(id: id).first def fetch_by_id(id, options={}) ensure_base_model raise_if_scoped raise NotImplementedError, "fetching needs the primary index enabled" unless primary_cache_index_enabled return unless id record = if IdentityCache.should_use_cache? require_if_necessary do object = nil coder = IdentityCache.fetch(rails_cache_key(id)){ coder_from_record(object = resolve_cache_miss(id)) } object ||= record_from_coder(coder) IdentityCache.logger.error "[IDC id mismatch] fetch_by_id_requested=#{id} fetch_by_id_got=#{object.id} for #{object.inspect[(0..100)]} " if object && object.id != id.to_i object end else resolve_cache_miss(id) end prefetch_associations(options[:includes], [record]) if record && options[:includes] record end # Default fetcher added to the model on inclusion, it behaves like # ActiveRecord::Base.find, will raise ActiveRecord::RecordNotFound exception # if id is not in the cache or the db. def fetch(id, options={}) fetch_by_id(id, options) or raise(ActiveRecord::RecordNotFound, "Couldn't find #{self.name} with ID=#{id}") end # Default fetcher added to the model on inclusion, if behaves like # ActiveRecord::Base.find_all_by_id def fetch_multi(*ids) ensure_base_model raise_if_scoped raise NotImplementedError, "fetching needs the primary index enabled" unless primary_cache_index_enabled options = ids.extract_options! ids.flatten!(1) records = if IdentityCache.should_use_cache? 
require_if_necessary do cache_keys = ids.map {|id| rails_cache_key(id) } key_to_id_map = Hash[ cache_keys.zip(ids) ] key_to_record_map = {} coders_by_key = IdentityCache.fetch_multi(cache_keys) do |unresolved_keys| ids = unresolved_keys.map {|key| key_to_id_map[key] } records = find_batch(ids) key_to_record_map = records.compact.index_by{ |record| rails_cache_key(record.id) } records.map {|record| coder_from_record(record) } end cache_keys.map{ |key| key_to_record_map[key] || record_from_coder(coders_by_key[key]) } end else find_batch(ids) end records.compact! prefetch_associations(options[:includes], records) if options[:includes] records end private def raise_if_scoped if current_scope raise UnsupportedScopeError, "IdentityCache doesn't support rails scopes" end end def record_from_coder(coder) #:nodoc: if coder klass = coder[:class] record = klass.instantiate(coder[:attributes].dup) coder[:associations].each {|name, value| set_embedded_association(record, name, value) } if coder.has_key?(:associations) coder[:association_ids].each {|name, ids| record.instance_variable_set(:"@#{record.class.cached_has_manys[name][:ids_variable_name]}", ids) } if coder.has_key?(:association_ids) record end end def set_embedded_association(record, association_name, coder_or_array) #:nodoc: value = if IdentityCache.unmap_cached_nil_for(coder_or_array).nil? nil elsif (reflection = record.class.reflect_on_association(association_name)).collection? 
association = reflection.association_class.new(record, reflection) association.target = coder_or_array.map {|e| record_from_coder(e) } association.target.each {|e| association.set_inverse_instance(e) } association else record_from_coder(coder_or_array) end variable_name = record.class.send(:recursively_embedded_associations)[association_name][:records_variable_name] record.instance_variable_set(:"@#{variable_name}", IdentityCache.map_cached_nil_for(value)) end def get_embedded_association(record, association, options) #:nodoc: embedded_variable = record.public_send(options.fetch(:cached_accessor_name)) if record.class.reflect_on_association(association).collection? embedded_variable.map {|e| coder_from_record(e) } else coder_from_record(embedded_variable) end end def coder_from_record(record) #:nodoc: unless record.nil? coder = { attributes: record.attributes_before_type_cast, class: record.class, } add_cached_associations_to_coder(record, coder) coder end end def add_cached_associations_to_coder(record, coder) klass = record.class if klass.include?(IdentityCache) if (recursively_embedded_associations = klass.send(:recursively_embedded_associations)).present? coder[:associations] = recursively_embedded_associations.each_with_object({}) do |(name, options), hash| hash[name] = IdentityCache.map_cached_nil_for(get_embedded_association(record, name, options)) end end if (cached_has_manys = klass.cached_has_manys).present? coder[:association_ids] = cached_has_manys.each_with_object({}) do |(name, options), hash| hash[name] = record.instance_variable_get(:"@#{options[:ids_variable_name]}") unless options[:embed] == true end end end end def require_if_necessary #:nodoc: # mem_cache_store returns raw value if unmarshal fails rval = yield case rval when String rval = Marshal.load(rval) when Array rval.map!{ |v| v.kind_of?(String) ? 
Marshal.load(v) : v } end rval rescue ArgumentError => e if e.message =~ /undefined [\w\/]+ (\w+)/ ok = Kernel.const_get($1) rescue nil retry if ok end raise end def resolve_cache_miss(id) record = self.includes(cache_fetch_includes).reorder(nil).where(primary_key => id).first preload_id_embedded_associations([record]) if record record end def preload_id_embedded_associations(records) return if records.empty? each_id_embedded_association do |options| reflection = options.fetch(:association_reflection) child_model = reflection.klass scope = child_model.all scope = scope.instance_exec(nil, &reflection.scope) if reflection.scope pairs = scope.where(reflection.foreign_key => records.map(&:id)).pluck(reflection.foreign_key, reflection.active_record_primary_key) ids_by_parent = Hash.new{ |hash, key| hash[key] = [] } pairs.each do |parent_id, child_id| ids_by_parent[parent_id] << child_id end records.each do |parent| child_ids = ids_by_parent[parent.id] parent.instance_variable_set(:"@#{options.fetch(:ids_variable_name)}", child_ids) end end recursively_embedded_associations.each_value do |options| child_model = options.fetch(:association_reflection).klass if child_model.include?(IdentityCache) child_records = records.flat_map(&options.fetch(:cached_accessor_name).to_sym).compact child_model.send(:preload_id_embedded_associations, child_records) end end end def each_id_embedded_association cached_has_manys.each_value do |options| yield options if options.fetch(:embed) == :ids end end def recursively_embedded_associations all_cached_associations.select do |cached_association, options| options[:embed] == true end end def all_cached_associations (cached_has_manys || {}).merge(cached_has_ones || {}).merge(cached_belongs_tos || {}) end def embedded_associations all_cached_associations.select do |cached_association, options| options[:embed] end end def cache_fetch_includes associations_for_identity_cache = recursively_embedded_associations.map do |child_association, options| 
child_class = reflect_on_association(child_association).try(:klass) child_includes = nil if child_class.respond_to?(:cache_fetch_includes, true) child_includes = child_class.send(:cache_fetch_includes) end if child_includes.blank? child_association else { child_association => child_includes } end end associations_for_identity_cache.compact end def find_batch(ids) @id_column ||= columns.detect {|c| c.name == primary_key} ids = ids.map{ |id| connection.type_cast(id, @id_column) } records = where(primary_key => ids).includes(cache_fetch_includes).to_a preload_id_embedded_associations(records) records_by_id = records.index_by(&:id) ids.map{ |id| records_by_id[id] } end def prefetch_associations(associations, records) associations = hashify_includes_structure(associations) associations.each do |association, sub_associations| case when details = cached_has_manys[association] if details[:embed] == true child_records = records.map(&details[:cached_accessor_name].to_sym).flatten else ids_to_parent_record = records.each_with_object({}) do |record, hash| child_ids = record.send(details[:cached_ids_name]) child_ids.each do |child_id| hash[child_id] = record end end parent_record_to_child_records = Hash.new { |h, k| h[k] = [] } child_records = details[:association_reflection].klass.fetch_multi(*ids_to_parent_record.keys) child_records.each do |child_record| parent_record = ids_to_parent_record[child_record.id] parent_record_to_child_records[parent_record] << child_record end parent_record_to_child_records.each do |parent_record, child_records| parent_record.send(details[:prepopulate_method_name], child_records) end end next_level_records = child_records when details = cached_belongs_tos[association] if details[:embed] == true raise ArgumentError.new("Embedded belongs_to associations do not support prefetching yet.") else reflection = details[:association_reflection] if reflection.polymorphic? 
raise ArgumentError.new("Polymorphic belongs_to associations do not support prefetching yet.") end ids_to_child_record = records.each_with_object({}) do |child_record, hash| parent_id = child_record.send(reflection.foreign_key) hash[parent_id] = child_record if parent_id.present? end parent_records = reflection.klass.fetch_multi(ids_to_child_record.keys) parent_records.each do |parent_record| child_record = ids_to_child_record[parent_record.id] child_record.send(details[:prepopulate_method_name], parent_record) end end next_level_records = parent_records when details = cached_has_ones[association] if details[:embed] == true parent_records = records.map(&details[:cached_accessor_name].to_sym) else raise ArgumentError.new("Non-embedded has_one associations do not support prefetching yet.") end next_level_records = parent_records else raise ArgumentError.new("Unknown cached association #{association} listed for prefetching") end if details && details[:association_reflection].klass.respond_to?(:prefetch_associations, true) details[:association_reflection].klass.send(:prefetch_associations, sub_associations, next_level_records) end end end def hashify_includes_structure(structure) case structure when nil {} when Symbol {structure => []} when Hash structure.clone when Array structure.each_with_object({}) do |member, hash| case member when Hash hash.merge!(member) when Symbol hash[member] = [] end end end end end private def fetch_recursively_cached_association(ivar_name, association_name) # :nodoc: if IdentityCache.should_use_cache? ivar_full_name = :"@#{ivar_name}" unless ivar_value = instance_variable_get(ivar_full_name) ivar_value = IdentityCache.map_cached_nil_for(send(association_name)) instance_variable_set(ivar_full_name, ivar_value) end assoc = IdentityCache.unmap_cached_nil_for(ivar_value) assoc.is_a?(ActiveRecord::Associations::CollectionAssociation) ? 
assoc.reader : assoc else send(association_name.to_sym) end end def expire_primary_index # :nodoc: return unless self.class.primary_cache_index_enabled IdentityCache.logger.debug do extra_keys = if respond_to?(:updated_at) old_updated_at = old_values_for_fields([:updated_at]).first "expiring_last_updated_at=#{old_updated_at}" else "" end "[IdentityCache] expiring=#{self.class.name} expiring_id=#{id} #{extra_keys}" end IdentityCache.cache.delete(primary_cache_index_key) end def expire_attribute_indexes # :nodoc: cache_attributes.try(:each) do |(attribute, fields, unique)| unless was_new_record? old_cache_attribute_key = attribute_cache_key_for_attribute_and_previous_values(attribute, fields, unique) IdentityCache.cache.delete(old_cache_attribute_key) end unless destroyed? new_cache_attribute_key = attribute_cache_key_for_attribute_and_current_values(attribute, fields, unique) if new_cache_attribute_key != old_cache_attribute_key IdentityCache.cache.delete(new_cache_attribute_key) end end end end def expire_cache # :nodoc: expire_primary_index expire_attribute_indexes true end def was_new_record? # :nodoc: pk = self.class.primary_key !destroyed? && transaction_changed_attributes.has_key?(pk) && transaction_changed_attributes[pk].nil? end end end Remove try on cache_attributes since we already know the type. module IdentityCache module QueryAPI extend ActiveSupport::Concern included do |base| base.after_commit :expire_cache end module ClassMethods # Similar to ActiveRecord::Base#exists? will return true if the id can be # found in the cache or in the DB. def exists_with_identity_cache?(id) raise NotImplementedError, "exists_with_identity_cache? 
needs the primary index enabled" unless primary_cache_index_enabled !!fetch_by_id(id) end # Default fetcher added to the model on inclusion, it behaves like # ActiveRecord::Base.where(id: id).first def fetch_by_id(id, options={}) ensure_base_model raise_if_scoped raise NotImplementedError, "fetching needs the primary index enabled" unless primary_cache_index_enabled return unless id record = if IdentityCache.should_use_cache? require_if_necessary do object = nil coder = IdentityCache.fetch(rails_cache_key(id)){ coder_from_record(object = resolve_cache_miss(id)) } object ||= record_from_coder(coder) IdentityCache.logger.error "[IDC id mismatch] fetch_by_id_requested=#{id} fetch_by_id_got=#{object.id} for #{object.inspect[(0..100)]} " if object && object.id != id.to_i object end else resolve_cache_miss(id) end prefetch_associations(options[:includes], [record]) if record && options[:includes] record end # Default fetcher added to the model on inclusion, it behaves like # ActiveRecord::Base.find, will raise ActiveRecord::RecordNotFound exception # if id is not in the cache or the db. def fetch(id, options={}) fetch_by_id(id, options) or raise(ActiveRecord::RecordNotFound, "Couldn't find #{self.name} with ID=#{id}") end # Default fetcher added to the model on inclusion, if behaves like # ActiveRecord::Base.find_all_by_id def fetch_multi(*ids) ensure_base_model raise_if_scoped raise NotImplementedError, "fetching needs the primary index enabled" unless primary_cache_index_enabled options = ids.extract_options! ids.flatten!(1) records = if IdentityCache.should_use_cache? 
require_if_necessary do cache_keys = ids.map {|id| rails_cache_key(id) } key_to_id_map = Hash[ cache_keys.zip(ids) ] key_to_record_map = {} coders_by_key = IdentityCache.fetch_multi(cache_keys) do |unresolved_keys| ids = unresolved_keys.map {|key| key_to_id_map[key] } records = find_batch(ids) key_to_record_map = records.compact.index_by{ |record| rails_cache_key(record.id) } records.map {|record| coder_from_record(record) } end cache_keys.map{ |key| key_to_record_map[key] || record_from_coder(coders_by_key[key]) } end else find_batch(ids) end records.compact! prefetch_associations(options[:includes], records) if options[:includes] records end private def raise_if_scoped if current_scope raise UnsupportedScopeError, "IdentityCache doesn't support rails scopes" end end def record_from_coder(coder) #:nodoc: if coder klass = coder[:class] record = klass.instantiate(coder[:attributes].dup) coder[:associations].each {|name, value| set_embedded_association(record, name, value) } if coder.has_key?(:associations) coder[:association_ids].each {|name, ids| record.instance_variable_set(:"@#{record.class.cached_has_manys[name][:ids_variable_name]}", ids) } if coder.has_key?(:association_ids) record end end def set_embedded_association(record, association_name, coder_or_array) #:nodoc: value = if IdentityCache.unmap_cached_nil_for(coder_or_array).nil? nil elsif (reflection = record.class.reflect_on_association(association_name)).collection? 
association = reflection.association_class.new(record, reflection) association.target = coder_or_array.map {|e| record_from_coder(e) } association.target.each {|e| association.set_inverse_instance(e) } association else record_from_coder(coder_or_array) end variable_name = record.class.send(:recursively_embedded_associations)[association_name][:records_variable_name] record.instance_variable_set(:"@#{variable_name}", IdentityCache.map_cached_nil_for(value)) end def get_embedded_association(record, association, options) #:nodoc: embedded_variable = record.public_send(options.fetch(:cached_accessor_name)) if record.class.reflect_on_association(association).collection? embedded_variable.map {|e| coder_from_record(e) } else coder_from_record(embedded_variable) end end def coder_from_record(record) #:nodoc: unless record.nil? coder = { attributes: record.attributes_before_type_cast, class: record.class, } add_cached_associations_to_coder(record, coder) coder end end def add_cached_associations_to_coder(record, coder) klass = record.class if klass.include?(IdentityCache) if (recursively_embedded_associations = klass.send(:recursively_embedded_associations)).present? coder[:associations] = recursively_embedded_associations.each_with_object({}) do |(name, options), hash| hash[name] = IdentityCache.map_cached_nil_for(get_embedded_association(record, name, options)) end end if (cached_has_manys = klass.cached_has_manys).present? coder[:association_ids] = cached_has_manys.each_with_object({}) do |(name, options), hash| hash[name] = record.instance_variable_get(:"@#{options[:ids_variable_name]}") unless options[:embed] == true end end end end def require_if_necessary #:nodoc: # mem_cache_store returns raw value if unmarshal fails rval = yield case rval when String rval = Marshal.load(rval) when Array rval.map!{ |v| v.kind_of?(String) ? 
Marshal.load(v) : v } end rval rescue ArgumentError => e if e.message =~ /undefined [\w\/]+ (\w+)/ ok = Kernel.const_get($1) rescue nil retry if ok end raise end def resolve_cache_miss(id) record = self.includes(cache_fetch_includes).reorder(nil).where(primary_key => id).first preload_id_embedded_associations([record]) if record record end def preload_id_embedded_associations(records) return if records.empty? each_id_embedded_association do |options| reflection = options.fetch(:association_reflection) child_model = reflection.klass scope = child_model.all scope = scope.instance_exec(nil, &reflection.scope) if reflection.scope pairs = scope.where(reflection.foreign_key => records.map(&:id)).pluck(reflection.foreign_key, reflection.active_record_primary_key) ids_by_parent = Hash.new{ |hash, key| hash[key] = [] } pairs.each do |parent_id, child_id| ids_by_parent[parent_id] << child_id end records.each do |parent| child_ids = ids_by_parent[parent.id] parent.instance_variable_set(:"@#{options.fetch(:ids_variable_name)}", child_ids) end end recursively_embedded_associations.each_value do |options| child_model = options.fetch(:association_reflection).klass if child_model.include?(IdentityCache) child_records = records.flat_map(&options.fetch(:cached_accessor_name).to_sym).compact child_model.send(:preload_id_embedded_associations, child_records) end end end def each_id_embedded_association cached_has_manys.each_value do |options| yield options if options.fetch(:embed) == :ids end end def recursively_embedded_associations all_cached_associations.select do |cached_association, options| options[:embed] == true end end def all_cached_associations (cached_has_manys || {}).merge(cached_has_ones || {}).merge(cached_belongs_tos || {}) end def embedded_associations all_cached_associations.select do |cached_association, options| options[:embed] end end def cache_fetch_includes associations_for_identity_cache = recursively_embedded_associations.map do |child_association, options| 
child_class = reflect_on_association(child_association).try(:klass) child_includes = nil if child_class.respond_to?(:cache_fetch_includes, true) child_includes = child_class.send(:cache_fetch_includes) end if child_includes.blank? child_association else { child_association => child_includes } end end associations_for_identity_cache.compact end def find_batch(ids) @id_column ||= columns.detect {|c| c.name == primary_key} ids = ids.map{ |id| connection.type_cast(id, @id_column) } records = where(primary_key => ids).includes(cache_fetch_includes).to_a preload_id_embedded_associations(records) records_by_id = records.index_by(&:id) ids.map{ |id| records_by_id[id] } end def prefetch_associations(associations, records) associations = hashify_includes_structure(associations) associations.each do |association, sub_associations| case when details = cached_has_manys[association] if details[:embed] == true child_records = records.map(&details[:cached_accessor_name].to_sym).flatten else ids_to_parent_record = records.each_with_object({}) do |record, hash| child_ids = record.send(details[:cached_ids_name]) child_ids.each do |child_id| hash[child_id] = record end end parent_record_to_child_records = Hash.new { |h, k| h[k] = [] } child_records = details[:association_reflection].klass.fetch_multi(*ids_to_parent_record.keys) child_records.each do |child_record| parent_record = ids_to_parent_record[child_record.id] parent_record_to_child_records[parent_record] << child_record end parent_record_to_child_records.each do |parent_record, child_records| parent_record.send(details[:prepopulate_method_name], child_records) end end next_level_records = child_records when details = cached_belongs_tos[association] if details[:embed] == true raise ArgumentError.new("Embedded belongs_to associations do not support prefetching yet.") else reflection = details[:association_reflection] if reflection.polymorphic? 
raise ArgumentError.new("Polymorphic belongs_to associations do not support prefetching yet.") end ids_to_child_record = records.each_with_object({}) do |child_record, hash| parent_id = child_record.send(reflection.foreign_key) hash[parent_id] = child_record if parent_id.present? end parent_records = reflection.klass.fetch_multi(ids_to_child_record.keys) parent_records.each do |parent_record| child_record = ids_to_child_record[parent_record.id] child_record.send(details[:prepopulate_method_name], parent_record) end end next_level_records = parent_records when details = cached_has_ones[association] if details[:embed] == true parent_records = records.map(&details[:cached_accessor_name].to_sym) else raise ArgumentError.new("Non-embedded has_one associations do not support prefetching yet.") end next_level_records = parent_records else raise ArgumentError.new("Unknown cached association #{association} listed for prefetching") end if details && details[:association_reflection].klass.respond_to?(:prefetch_associations, true) details[:association_reflection].klass.send(:prefetch_associations, sub_associations, next_level_records) end end end def hashify_includes_structure(structure) case structure when nil {} when Symbol {structure => []} when Hash structure.clone when Array structure.each_with_object({}) do |member, hash| case member when Hash hash.merge!(member) when Symbol hash[member] = [] end end end end end private def fetch_recursively_cached_association(ivar_name, association_name) # :nodoc: if IdentityCache.should_use_cache? ivar_full_name = :"@#{ivar_name}" unless ivar_value = instance_variable_get(ivar_full_name) ivar_value = IdentityCache.map_cached_nil_for(send(association_name)) instance_variable_set(ivar_full_name, ivar_value) end assoc = IdentityCache.unmap_cached_nil_for(ivar_value) assoc.is_a?(ActiveRecord::Associations::CollectionAssociation) ? 
assoc.reader : assoc else send(association_name.to_sym) end end def expire_primary_index # :nodoc: return unless self.class.primary_cache_index_enabled IdentityCache.logger.debug do extra_keys = if respond_to?(:updated_at) old_updated_at = old_values_for_fields([:updated_at]).first "expiring_last_updated_at=#{old_updated_at}" else "" end "[IdentityCache] expiring=#{self.class.name} expiring_id=#{id} #{extra_keys}" end IdentityCache.cache.delete(primary_cache_index_key) end def expire_attribute_indexes # :nodoc: cache_attributes.each do |(attribute, fields, unique)| unless was_new_record? old_cache_attribute_key = attribute_cache_key_for_attribute_and_previous_values(attribute, fields, unique) IdentityCache.cache.delete(old_cache_attribute_key) end unless destroyed? new_cache_attribute_key = attribute_cache_key_for_attribute_and_current_values(attribute, fields, unique) if new_cache_attribute_key != old_cache_attribute_key IdentityCache.cache.delete(new_cache_attribute_key) end end end end def expire_cache # :nodoc: expire_primary_index expire_attribute_indexes true end def was_new_record? # :nodoc: pk = self.class.primary_key !destroyed? && transaction_changed_attributes.has_key?(pk) && transaction_changed_attributes[pk].nil? end end end
module Json module Stream module Path VERSION = "0.0.2" end end end version added module Json module Stream module Path VERSION = "0.0.3" end end end
# NOTE(review): before/after pair from a concatenated change history. The
# second copy differs only in #list_tenants (chooses Yao::Tenant vs
# Yao::Project) and the new private #keystone_v2? helper.

# Read-only Keystone adapter: lists users/tenants/roles via the Yao OpenStack
# client, and only *prints* what mutating operations would do (dry-run).
module Kaname
  module Adapter
    class ReadOnly
      def initialize
        Kaname::Config.setup
      end

      def list_users
        @_users ||= Yao::User.list
      end

      def find_user(name)
        user = Yao::User.find_by_name(name).first
        {"id" => user.id, "name" => user.name}
      end

      def list_tenants
        @_tenants ||= Yao::Tenant.list
      end

      def list_roles
        @_roles ||= Yao::Role.list
      end

      def list_role_assignments
        unless @_role_assignments
          @_role_assignments ||= Yao::RoleAssignment.list
          # NOTE(review): presumably re-establishes the session after the
          # listing call — TODO confirm against Yao::Auth semantics.
          Yao::Auth.try_new
        end
        @_role_assignments
      end

      # Builds {user_name => {"email" => ..., "tenants" => {tenant => role}}},
      # skipping well-known service accounts.
      def users_hash
        @_user_hash ||= list_users.each_with_object(Hash.new { |h,k| h[k] = {} }) do |u,uh|
          next if ignored_users.include?(u.name)
          uh[u.name]["email"] = u.email
          uh[u.name]["tenants"] = tenant_role_hash(u.id)
        end
      end

      # Dry-run: prints the would-be action instead of calling the API.
      def create_user(name, email)
        puts "Create User: #{name} #{email}"
        {"name" => name}
      end

      def create_user_role(tenant, user_hash, role)
        puts "Create User Role: #{tenant} #{user_hash["name"]} #{role}"
      end

      def delete_user(name)
        puts "Delete User: #{name}"
      end

      def delete_user_role(tenant, user_hash, role)
        puts "Delete User Role: #{tenant} #{user_hash["name"]} #{role}"
      end

      def change_user_role(tenant, user_hash, before_role, after_role)
        delete_user_role(tenant, user_hash, before_role)
        create_user_role(tenant, user_hash, after_role)
      end

      private

      # Maps a user's role assignments to {tenant_name => role_name}.
      def tenant_role_hash(user_id)
        list_role_assignments.each_with_object(Hash.new) do |t,th|
          if t.user.id == user_id
            th[list_tenants.find {|ts| ts.id == t.scope["project"]["id"]}["name"]] = list_roles.find {|r| r.id == t.role.id }['name']
          end
        end
      end

      # default service users
      def ignored_users
        %w[
          neutron
          glance
          cinder
          admin
          nova_ec2
          nova
          heat
          ceilometer
          octavia
        ]
      end
    end
  end
end
# Commit message below (Japanese), roughly: "switch behavior depending on the
# Keystone API version".
keystone APIのバージョンにより使いわける
module Kaname
  module Adapter
    class ReadOnly
      def initialize
        Kaname::Config.setup
      end

      def list_users
        @_users ||= Yao::User.list
      end

      def find_user(name)
        user = Yao::User.find_by_name(name).first
        {"id" => user.id, "name" => user.name}
      end

      # Keystone v2 calls them tenants; v3 calls them projects.
      def list_tenants
        @_tenants ||= if keystone_v2?
          Yao::Tenant.list
        else
          Yao::Project.list
        end
      end

      def list_roles
        @_roles ||= Yao::Role.list
      end

      def list_role_assignments
        unless @_role_assignments
          @_role_assignments ||= Yao::RoleAssignment.list
          Yao::Auth.try_new
        end
        @_role_assignments
      end

      def users_hash
        @_user_hash ||= list_users.each_with_object(Hash.new { |h,k| h[k] = {} }) do |u,uh|
          next if ignored_users.include?(u.name)
          uh[u.name]["email"] = u.email
          uh[u.name]["tenants"] = tenant_role_hash(u.id)
        end
      end

      def create_user(name, email)
        puts "Create User: #{name} #{email}"
        {"name" => name}
      end

      def create_user_role(tenant, user_hash, role)
        puts "Create User Role: #{tenant} #{user_hash["name"]} #{role}"
      end

      def delete_user(name)
        puts "Delete User: #{name}"
      end

      def delete_user_role(tenant, user_hash, role)
        puts "Delete User Role: #{tenant} #{user_hash["name"]} #{role}"
      end

      def change_user_role(tenant, user_hash, before_role, after_role)
        delete_user_role(tenant, user_hash, before_role)
        create_user_role(tenant, user_hash, after_role)
      end

      private

      # Detects Keystone v2 by the "/v2.0" path in the identity endpoint URL.
      def keystone_v2?
        Yao.default_client.pool["identity"].url_prefix.to_s.match(/v2\.0/)
      end

      def tenant_role_hash(user_id)
        list_role_assignments.each_with_object(Hash.new) do |t,th|
          if t.user.id == user_id
            th[list_tenants.find {|ts| ts.id == t.scope["project"]["id"]}["name"]] = list_roles.find {|r| r.id == t.role.id }['name']
          end
        end
      end

      # default service users
      def ignored_users
        %w[
          neutron
          glance
          cinder
          admin
          nova_ec2
          nova
          heat
          ceilometer
          octavia
        ]
      end
    end
  end
end
module Khipu VERSION = "2.8.9" end Version 2.8.13 module Khipu VERSION = "2.8.13" end
module KramdownPrismic VERSION = "0.0.1" end Bump version. module KramdownPrismic VERSION = "0.0.2" end
# NOTE(review): before/after pair from a concatenated change history. The
# second copy differs only inside #list: the gsub replacement patterns use
# [\S]+ instead of [\s\S]+? (args no longer match across spaces) and a
# debugging `puts f` is added.
require 'json'

# Swap two elements of an Array in place.
class Array
  def swap!(a,b)
    self[a], self[b] = self[b], self[a]
    self
  end
end

module Lita
  module Handlers
    # Lita handler bridging chat commands to a StackStorm (st2) API:
    # authenticates, mirrors st2 action-aliases into Redis as regexes, and
    # executes matching "!..." commands via the aliasexecution endpoint.
    class Stackstorm < Handler
      # insert handler code here
      config :url, required: true
      config :username, required: true
      config :password, required: true
      config :auth_port, required: false, default: 9100
      config :execution_port, required: false, default: 9101

      # Token and its expiry are shared class-wide across handler instances.
      class << self
        attr_accessor :token, :expires
      end

      def self.config(config)
        self.token = nil
        self.expires = nil
      end

      route /^st2 login$/, :login, command: false, help: { "st2 login" => "login with st2-api" }
      route /^st2 (ls|aliases|list)$/, :list, command: false, help: { "st2 list" => "list available st2 chatops commands" }
      route /^!(.*)$/, :call_alias, command: false, help: {}

      # Auth endpoint; port 443 + https implies the /auth reverse-proxy path.
      def auth_builder
        if Integer(config.auth_port) == 443 and config.url.start_with?('https')
          "#{config.url}/auth"
        else
          "#{config.url}:#{config.auth_port}/v1"
        end
      end

      # API endpoint; same proxy convention as auth_builder.
      def url_builder
        if Integer(config.execution_port) == 443 and config.url.start_with?('https')
          "#{config.url}/api"
        else
          "#{config.url}:#{config.execution_port}/v1"
        end
      end

      # Exchanges basic-auth credentials for an st2 token, caching it and its
      # expiry on the class.
      def authenticate
        resp = http.post("#{auth_builder()}/tokens") do |req|
          req.body = {}
          req.headers['Authorization'] = http.set_authorization_header(:basic_auth, config.username, config.password)
        end
        self.class.token = JSON.parse(resp.body)['token']
        self.class.expires = JSON.parse(resp.body)['expiry']
        resp
      end

      # Matches a "!..." command against the alias regexes stored in Redis and
      # fires the corresponding aliasexecution request.
      def call_alias(msg)
        if expired
          authenticate
        end
        command = msg.matches.flatten.first
        found = ""
        redis.scan_each do |a|
          possible = /#{a}/.match(command)
          if not possible.nil?
            found = a
            break
          end
        end
        jobject = JSON.parse(redis.get(found))
        payload = {
          name: jobject['object']['name'],
          format: jobject['format'],
          command: command,
          user: msg.user.name,
          source_channel: 'chatops',
          notification_channel: 'lita'
        }
        s = make_post_request("/aliasexecution", payload)
        j = JSON.parse(s.body)
        if s.success?
          msg.reply "Got it!
Details available at #{config.url}/#/history/#{j['execution']['id']}/general"
        else
          msg.reply "Execution failed with message: #{j['faultstring']}"
        end
      end

      # Refreshes the Redis alias cache from /actionalias, converting each st2
      # format string into an anchored regex, and replies with the alias list.
      def list(msg)
        if expired
          authenticate
        end
        redis.keys.each {|k| redis.del k }
        s = make_request("/actionalias", "")
        if JSON.parse(s.body).empty?
          msg.reply "No Action Aliases Registered"
        else
          j = JSON.parse(s.body)
          a = ""
          extra_params = '(\\s+(\\S+)\\s*=("([\\s\\S]*?)"|\'([\\s\\S]*?)\'|({[\\s\\S]*?})|(\\S+))\\s*)*'
          j.take_while{|i| i['enabled'] }.each do |command|
            command['formats'].each do |format|
              # {{ name = default }} placeholders become optional capture groups;
              # bare {{ name }} placeholders become required ones.
              f = format.gsub(/(\s*){{\s*\S+\s*=\s*(?:({.+?}|.+?))\s*}}(\s*)/, '\\s*([\\s\\S]+?)?\\s*')
              f = f.gsub(/\s*{{.+?}}\s*/, '\\s*([\\s\\S]+?)\\s*')
              f = "^\\s*#{f}#{extra_params}\\s*$"
              redis.set(f, {format: format, object: command}.to_json)
              a+= "#{format} -> #{command['description']}\n"
            end
          end
          msg.reply a
        end
      end

      def login(msg)
        http_resp = authenticate
        if ![200, 201, 280].index(http_resp.status).nil?
          msg.reply "login successful\ntoken: #{self.class.token}"
        elsif http_resp.status == 500
          msg.reply "#{http_resp.status}: login failed!!"
        else
          msg.reply "#{http_resp.status}: login failed!!"
        end
      end

      # True when there is no token yet or the cached expiry has passed.
      def expired
        self.class.token.nil? || Time.now >= Time.parse(self.class.expires)
      end

      def make_request(path, body)
        resp = http.get("#{url_builder()}#{path}") do |req|
          req.headers = headers
          req.body = body.to_json if not body.empty?
        end
        resp
      end

      def make_post_request(path, body)
        resp = http.post("#{url_builder()}#{path}") do |req|
          req.body = {}
          req.headers = headers
          req.body = body.to_json
        end
        resp
      end

      def headers
        headers = {}
        headers['Content-Type'] = 'application/json'
        headers['X-Auth-Token'] = "#{self.class.token}"
        headers
      end

      Lita.register_handler(self)
    end
  end
end
make arg matching greedier and do not allow args to match on spaces. This should allow use to handle args that contain dashes and periods.
require 'json'

class Array
  def swap!(a,b)
    self[a], self[b] = self[b], self[a]
    self
  end
end

module Lita
  module Handlers
    class Stackstorm < Handler
      # insert handler code here
      config :url, required: true
      config :username, required: true
      config :password, required: true
      config :auth_port, required: false, default: 9100
      config :execution_port, required: false, default: 9101

      class << self
        attr_accessor :token, :expires
      end

      def self.config(config)
        self.token = nil
        self.expires = nil
      end

      route /^st2 login$/, :login, command: false, help: { "st2 login" => "login with st2-api" }
      route /^st2 (ls|aliases|list)$/, :list, command: false, help: { "st2 list" => "list available st2 chatops commands" }
      route /^!(.*)$/, :call_alias, command: false, help: {}

      def auth_builder
        if Integer(config.auth_port) == 443 and config.url.start_with?('https')
          "#{config.url}/auth"
        else
          "#{config.url}:#{config.auth_port}/v1"
        end
      end

      def url_builder
        if Integer(config.execution_port) == 443 and config.url.start_with?('https')
          "#{config.url}/api"
        else
          "#{config.url}:#{config.execution_port}/v1"
        end
      end

      def authenticate
        resp = http.post("#{auth_builder()}/tokens") do |req|
          req.body = {}
          req.headers['Authorization'] = http.set_authorization_header(:basic_auth, config.username, config.password)
        end
        self.class.token = JSON.parse(resp.body)['token']
        self.class.expires = JSON.parse(resp.body)['expiry']
        resp
      end

      def call_alias(msg)
        if expired
          authenticate
        end
        command = msg.matches.flatten.first
        found = ""
        redis.scan_each do |a|
          possible = /#{a}/.match(command)
          if not possible.nil?
            found = a
            break
          end
        end
        jobject = JSON.parse(redis.get(found))
        payload = {
          name: jobject['object']['name'],
          format: jobject['format'],
          command: command,
          user: msg.user.name,
          source_channel: 'chatops',
          notification_channel: 'lita'
        }
        s = make_post_request("/aliasexecution", payload)
        j = JSON.parse(s.body)
        if s.success?
          msg.reply "Got it!
Details available at #{config.url}/#/history/#{j['execution']['id']}/general"
        else
          msg.reply "Execution failed with message: #{j['faultstring']}"
        end
      end

      def list(msg)
        if expired
          authenticate
        end
        redis.keys.each {|k| redis.del k }
        s = make_request("/actionalias", "")
        if JSON.parse(s.body).empty?
          msg.reply "No Action Aliases Registered"
        else
          j = JSON.parse(s.body)
          a = ""
          extra_params = '(\\s+(\\S+)\\s*=("([\\s\\S]*?)"|\'([\\s\\S]*?)\'|({[\\s\\S]*?})|(\\S+))\\s*)*'
          j.take_while{|i| i['enabled'] }.each do |command|
            command['formats'].each do |format|
              # Changed: arguments match non-whitespace runs only ([\S]+),
              # so values with dashes/periods work but cannot span spaces.
              f = format.gsub(/(\s*){{\s*\S+\s*=\s*(?:({.+?}|.+?))\s*}}(\s*)/, '\\s*([\\S]+)?\\s*')
              f = f.gsub(/\s*{{.+?}}\s*/, '\\s*([\\S]+?)\\s*')
              puts f
              f = "^\\s*#{f}#{extra_params}\\s*$"
              redis.set(f, {format: format, object: command}.to_json)
              a+= "#{format} -> #{command['description']}\n"
            end
          end
          msg.reply a
        end
      end

      def login(msg)
        http_resp = authenticate
        if ![200, 201, 280].index(http_resp.status).nil?
          msg.reply "login successful\ntoken: #{self.class.token}"
        elsif http_resp.status == 500
          msg.reply "#{http_resp.status}: login failed!!"
        else
          msg.reply "#{http_resp.status}: login failed!!"
        end
      end

      def expired
        self.class.token.nil? || Time.now >= Time.parse(self.class.expires)
      end

      def make_request(path, body)
        resp = http.get("#{url_builder()}#{path}") do |req|
          req.headers = headers
          req.body = body.to_json if not body.empty?
        end
        resp
      end

      def make_post_request(path, body)
        resp = http.post("#{url_builder()}#{path}") do |req|
          req.body = {}
          req.headers = headers
          req.body = body.to_json
        end
        resp
      end

      def headers
        headers = {}
        headers['Content-Type'] = 'application/json'
        headers['X-Auth-Token'] = "#{self.class.token}"
        headers
      end

      Lita.register_handler(self)
    end
  end
end
require "eventmachine-tail" require "logstash/inputs/base" require "logstash/namespace" require "socket" # for Socket.gethostname class LogStash::Inputs::Internal < LogStash::Inputs::Base attr_reader :channel config_name "internal" public def initialize(params) super raise "issue/17: needs refactor to support configfile" # Default host to the machine's hostname if it's not set @url.host ||= Socket.gethostname @channel = EventMachine::Channel.new end public def register @logger.info("Registering input #{@url}") @channel.subscribe do |event| receive(event) end end # def register public def receive(event) if !event.is_a?(LogStash::Event) event = LogStash::Event.new({ "@message" => event, "@type" => @type, "@tags" => @tags.clone, "@source" => @url, }) end @logger.debug(["Got event", event]) @callback.call(event) end # def receive end # class LogStash::Inputs::Internal going to move this functionality to LogStash::Agent
# NOTE(review): before/after pair from a concatenated change history; the
# second copy only drops the unused `require "cgi"` line.
require "logstash/inputs/threadable"
require "logstash/namespace"
require "cgi" # for CGI.escape

# Pull events from a RabbitMQ exchange.
#
# The default settings will create an entirely transient queue and listen for all messages by default.
# If you need durability or any other advanced settings, please set the appropriate options
#
# This has been tested with Bunny 0.9.x, which supports RabbitMQ 2.x and 3.x. You can
# find links to both here:
#
# * RabbitMQ - <http://www.rabbitmq.com/>
# * Bunny - <https://github.com/ruby-amqp/bunny>
class LogStash::Inputs::RabbitMQ < LogStash::Inputs::Threadable
  config_name "rabbitmq"
  milestone 0

  # Your amqp broker's custom arguments. For mirrored queues in RabbitMQ: [ "x-ha-policy", "all" ]
  config :arguments, :validate => :array, :default => []

  # Your amqp server address
  config :host, :validate => :string, :required => true

  # The AMQP port to connect on
  config :port, :validate => :number, :default => 5672

  # Your amqp username
  config :user, :validate => :string, :default => "guest"

  # Your amqp password
  config :password, :validate => :password, :default => "guest"

  # The name of the queue.
  config :queue, :validate => :string, :default => ""

  # The name of the exchange to bind the queue. This is analogous to the 'amqp
  # output' [config 'name'](../outputs/amqp)
  config :exchange, :validate => :string, :required => true

  # The routing key to use. This is only valid for direct or fanout exchanges
  #
  # * Routing keys are ignored on topic exchanges.
  # * Wildcards are not valid on direct exchanges.
  config :key, :validate => :string, :default => "logstash"

  # The vhost to use. If you don't know what this is, leave the default.
  config :vhost, :validate => :string, :default => "/"

  # Passive queue creation? Useful for checking queue existance without modifying server state
  config :passive, :validate => :boolean, :default => false

  # Is this queue durable? (aka; Should it survive a broker restart?)
  config :durable, :validate => :boolean, :default => false

  # Should the queue be deleted on the broker when the last consumer
  # disconnects? Set this option to 'false' if you want the queue to remain
  # on the broker, queueing up messages until a consumer comes along to
  # consume them.
  config :auto_delete, :validate => :boolean, :default => true

  # Is the queue exclusive? (aka: Will other clients connect to this named queue?)
  config :exclusive, :validate => :boolean, :default => true

  # Prefetch count. Number of messages to prefetch
  config :prefetch_count, :validate => :number, :default => 64

  # Enable message acknowledgement
  config :ack, :validate => :boolean, :default => true

  # Enable or disable logging
  config :debug, :validate => :boolean, :default => false

  # Enable or disable SSL
  config :ssl, :validate => :boolean, :default => false

  # Validate SSL certificate
  config :verify_ssl, :validate => :boolean, :default => false

  def initialize(params)
    params["codec"] = "json" if !params["codec"]
    super
  end

  # Use HotBunnies on JRuby to avoid IO#select CPU spikes
  # (see github.com/ruby-amqp/bunny/issues/95).
  #
  # On MRI, use Bunny 0.9.
  #
  # See http://rubybunny.info and http://hotbunnies.info
  # for the docs.
  if RUBY_ENGINE == "jruby"
    require "logstash/inputs/rabbitmq/hot_bunnies"
    include HotBunniesImpl
  else
    require "logstash/inputs/rabbitmq/bunny"
    include BunnyImpl
  end
end # class LogStash::Inputs::RabbitMQ
No need for CGI any more
require "logstash/inputs/threadable"
require "logstash/namespace"

# Pull events from a RabbitMQ exchange.
#
# The default settings will create an entirely transient queue and listen for all messages by default.
# If you need durability or any other advanced settings, please set the appropriate options
#
# This has been tested with Bunny 0.9.x, which supports RabbitMQ 2.x and 3.x. You can
# find links to both here:
#
# * RabbitMQ - <http://www.rabbitmq.com/>
# * Bunny - <https://github.com/ruby-amqp/bunny>
class LogStash::Inputs::RabbitMQ < LogStash::Inputs::Threadable
  config_name "rabbitmq"
  milestone 0

  # Your amqp broker's custom arguments. For mirrored queues in RabbitMQ: [ "x-ha-policy", "all" ]
  config :arguments, :validate => :array, :default => []

  # Your amqp server address
  config :host, :validate => :string, :required => true

  # The AMQP port to connect on
  config :port, :validate => :number, :default => 5672

  # Your amqp username
  config :user, :validate => :string, :default => "guest"

  # Your amqp password
  config :password, :validate => :password, :default => "guest"

  # The name of the queue.
  config :queue, :validate => :string, :default => ""

  # The name of the exchange to bind the queue. This is analogous to the 'amqp
  # output' [config 'name'](../outputs/amqp)
  config :exchange, :validate => :string, :required => true

  # The routing key to use. This is only valid for direct or fanout exchanges
  #
  # * Routing keys are ignored on topic exchanges.
  # * Wildcards are not valid on direct exchanges.
  config :key, :validate => :string, :default => "logstash"

  # The vhost to use. If you don't know what this is, leave the default.
  config :vhost, :validate => :string, :default => "/"

  # Passive queue creation? Useful for checking queue existance without modifying server state
  config :passive, :validate => :boolean, :default => false

  # Is this queue durable? (aka; Should it survive a broker restart?)
  config :durable, :validate => :boolean, :default => false

  # Should the queue be deleted on the broker when the last consumer
  # disconnects? Set this option to 'false' if you want the queue to remain
  # on the broker, queueing up messages until a consumer comes along to
  # consume them.
  config :auto_delete, :validate => :boolean, :default => true

  # Is the queue exclusive? (aka: Will other clients connect to this named queue?)
  config :exclusive, :validate => :boolean, :default => true

  # Prefetch count. Number of messages to prefetch
  config :prefetch_count, :validate => :number, :default => 64

  # Enable message acknowledgement
  config :ack, :validate => :boolean, :default => true

  # Enable or disable logging
  config :debug, :validate => :boolean, :default => false

  # Enable or disable SSL
  config :ssl, :validate => :boolean, :default => false

  # Validate SSL certificate
  config :verify_ssl, :validate => :boolean, :default => false

  def initialize(params)
    params["codec"] = "json" if !params["codec"]
    super
  end

  # Use HotBunnies on JRuby to avoid IO#select CPU spikes
  # (see github.com/ruby-amqp/bunny/issues/95).
  #
  # On MRI, use Bunny 0.9.
  #
  # See http://rubybunny.info and http://hotbunnies.info
  # for the docs.
  if RUBY_ENGINE == "jruby"
    require "logstash/inputs/rabbitmq/hot_bunnies"
    include HotBunniesImpl
  else
    require "logstash/inputs/rabbitmq/bunny"
    include BunnyImpl
  end
end # class LogStash::Inputs::RabbitMQ
# NOTE(review): before/after pair; the second copy only changes the :type
# config to validate against an explicit enum of gmetric types.
#
# NOTE(review): in both versions #receive sends @tmax/@dmax, but the declared
# configs are :max_interval/:lifetime and no @tmax/@dmax assignment is visible
# here — looks like a bug; verify against the plugin's config machinery.
require "logstash/outputs/base"
require "logstash/namespace"

# This output allows you to pull metrics from your logs and ship them to
# ganglia's gmond. This is heavily based on the graphite output.
class LogStash::Outputs::Ganglia < LogStash::Outputs::Base
  config_name "ganglia"

  # The address of the graphite server.
  config :host, :validate => :string, :default => "localhost"

  # The port to connect on your graphite server.
  config :port, :validate => :number, :default => 8649

  # The metric to use. This supports dynamic strings like %{@source_host}
  config :metric, :validate => :string, :required => true

  # The value to use. This supports dynamic strings like %{bytes}
  # It will be coerced to a floating point value. Values which cannot be
  # coerced will zero (0)
  config :value, :validate => :string, :required => true

  # Gmetric type
  config :type, :validate => :string, :default => "uint8"

  # Gmetric units for metric, such as "kb/sec" or "ms" or whatever unit
  # this metric uses.
  config :units, :validate => :string, :default => ""

  # Maximum time in seconds between gmetric calls for this metric.
  config :max_interval, :validate => :number, :default => 60

  # Lifetime in seconds of this metric
  config :lifetime, :validate => :number, :default => 300

  def register
    require "gmetric"
  end # def register

  public
  def receive(event)
    # gmetric only takes integer values, so convert it to int.
    case @type
    when "string"
      localvalue = event.sprintf(@value)
    when "float"
      localvalue = event.sprintf(@value).to_f
    when "double"
      localvalue = event.sprintf(@value).to_f
    else # int8|uint8|int16|uint16|int32|uint32
      localvalue = event.sprintf(@value).to_i
    end
    Ganglia::GMetric.send(@host, @port, {
      :name => event.sprintf(@metric),
      :units => @units,
      :type => @type,
      :value => localvalue,
      :tmax => @tmax,
      :dmax => @dmax
    })
  end # def receive
end # class LogStash::Outputs::Ganglia
- update ganglia config docs
require "logstash/outputs/base"
require "logstash/namespace"

# This output allows you to pull metrics from your logs and ship them to
# ganglia's gmond. This is heavily based on the graphite output.
class LogStash::Outputs::Ganglia < LogStash::Outputs::Base
  config_name "ganglia"

  # The address of the graphite server.
  config :host, :validate => :string, :default => "localhost"

  # The port to connect on your graphite server.
  config :port, :validate => :number, :default => 8649

  # The metric to use. This supports dynamic strings like %{@source_host}
  config :metric, :validate => :string, :required => true

  # The value to use. This supports dynamic strings like %{bytes}
  # It will be coerced to a floating point value. Values which cannot be
  # coerced will zero (0)
  config :value, :validate => :string, :required => true

  # The type of value for this metric.
  config :type, :validate => %w{string int8 uint8 int16 uint16 int32 uint32 float double}, :default => "uint8"

  # Gmetric units for metric, such as "kb/sec" or "ms" or whatever unit
  # this metric uses.
  config :units, :validate => :string, :default => ""

  # Maximum time in seconds between gmetric calls for this metric.
  config :max_interval, :validate => :number, :default => 60

  # Lifetime in seconds of this metric
  config :lifetime, :validate => :number, :default => 300

  def register
    require "gmetric"
  end # def register

  public
  def receive(event)
    # gmetric only takes integer values, so convert it to int.
    case @type
    when "string"
      localvalue = event.sprintf(@value)
    when "float"
      localvalue = event.sprintf(@value).to_f
    when "double"
      localvalue = event.sprintf(@value).to_f
    else # int8|uint8|int16|uint16|int32|uint32
      localvalue = event.sprintf(@value).to_i
    end
    Ganglia::GMetric.send(@host, @port, {
      :name => event.sprintf(@metric),
      :units => @units,
      :type => @type,
      :value => localvalue,
      :tmax => @tmax,
      :dmax => @dmax
    })
  end # def receive
end # class LogStash::Outputs::Ganglia
# encoding: utf-8 module LybDeviseAdmin VERSION="0.3.5" end Bump to 0.3.6 # encoding: utf-8 module LybDeviseAdmin VERSION="0.3.6" end
require 'masamune' require 'thor' module Masamune::Tasks class HiveThor < Thor include Masamune::Thor include Masamune::Actions::Hive # FIXME need to add an unnecessary namespace until this issue is fixed: # https://github.com/wycats/thor/pull/247 namespace :hive desc 'hive', 'Launch a Hive session' method_option :file, :aliases => '-f', :desc => 'SQL from files' method_option :exec, :aliases => '-e', :desc => 'SQL from command line' def hive_exec hive(options.merge(print: true)) end default_task :hive_exec no_tasks do def log_enabled? false end end end end Add option for specifying hive output file require 'masamune' require 'thor' module Masamune::Tasks class HiveThor < Thor include Masamune::Thor include Masamune::Actions::Hive # FIXME need to add an unnecessary namespace until this issue is fixed: # https://github.com/wycats/thor/pull/247 namespace :hive desc 'hive', 'Launch a Hive session' method_option :file, :aliases => '-f', :desc => 'SQL from files' method_option :exec, :aliases => '-e', :desc => 'SQL from command line' method_option :output, :aliases => '-o', :desc => 'Save SQL output to file' def hive_exec hive(options.merge(print: true)) end default_task :hive_exec no_tasks do def log_enabled? false end end end end
# NOTE(review): before/after pair from a concatenated change history. The
# second copy differs only in #nested_form, where the 'well fields-wrapper'
# content_tag block is commented out so only the link adder is rendered.

# Bootstrap-flavoured form builder: wraps standard field helpers in
# control-group/controls markup with labels, optional help text, and
# publish/draft submit buttons.
module MesenForms
  class FormBuilder < ::ActionView::Helpers::FormBuilder
    delegate :content_tag, :button_tag, :submit_tag, :link_to, :current_user, :to => :@template

    # Wrap the stock field helpers with label + controls markup; text_area
    # additionally supports a CKEditor variant via opts[:cktext].
    %w[text_area text_field email_field url_field password_field collection_select].each do |method_name|
      define_method(method_name) do |attribute, *options|
        opts = options.extract_options!
        if opts[:skip_label]
          super(attribute, *options)
        else
          control_group do
            label(attribute, class: 'control-label')+
            controls do
              if method_name == 'text_area' && opts[:cktext]
                cktext_area(attribute.to_sym, :toolbar => opts[:cktext], :rows => (opts[:rows] ? opts[:rows] : 5), :width => 322, :height => (opts[:height] ? opts[:height] : 200), :js_content_for => :ckeditor_js)
              else
                super(attribute, *options)
              end+
              if opts[:help]
                help_block opts[:help]
              end
            end
          end
        end
      end
    end

    # Renders the object's validation errors as a dismissible alert box.
    def errors options={}
      if object.errors.any?
        content_tag :div, :class => 'alert span7 alert-error' do
          content_tag(:a, "&times;".html_safe, href: "#", class: "close", data: {dismiss: "alert"})+
          content_tag(:h3, I18n.t('activerecord.errors.template.header', :count => object.errors.size, :model => I18n.t(object.class.to_s.underscore, :scope => [:activerecord, :models])))+
          content_tag(:ul) do
            object.errors.full_messages.reduce('') { |ccc, message| ccc << content_tag(:li, message) }.html_safe
          end
        end
      end
    end

    # File field plus thumbnail preview of the currently attached image.
    def image_upload(attribute, options={})
      control_group do
        label(attribute, class: 'control-label')+
        controls do
          if(defined?object[attribute] && !object[attribute].blank?)
            @template.image_tag.(object.instance_eval(attribute).url(:thumb))
          end+
          tag('br')+
          file_field(attribute)+
          if options[:help]
            help_block options[:help]
          end
        end
      end
    end

    def nested_form(attribute, options={})
      control_group do
        label(attribute, class: 'control-label')+
        controls do
          content_tag :div, :class => 'well fields-wrapper' do
            #self.fields_for attribute do |field|
            #  @template.render attribute.singularize << '_fields', :f => field
            #end
            #content_tag :div, :class => ('hidden' if object.instance_eval(attribute).any?)
            #  I18n.t(object.class.to_s.underscore, :scope => [:activerecord, :models])
            #end
          end+
          link_to_add_fields(I18n.t('Add ' + attribute.singularize, :scope => [:layouts, :admin]), self, attribute)
        end
      end
    end

    def datetime_select(attribute, options={})
      control_group do
        label(attribute, class: 'control-label')+
        controls do
          super+
          if options[:help]
            help_block options[:help]
          end
        end
      end
    end

    # Hidden lat/lng fields; presumably wired to a JS map picker — TODO confirm.
    def map_input(attribute, options={})
      control_group do
        label(attribute, class: 'control-label')+
        controls do
          hidden_field(:lat).html_safe+
          hidden_field(:lng).html_safe
        end
      end
    end

    def control_group
      content_tag(:div, class: 'control-group') do
        yield
      end
    end

    def controls options={}
      content_tag :div, class: 'controls' do
        yield
      end
    end

    def help_block string
      content_tag :p, class: 'help-block' do
        I18n.t string, :scope => [:activerecord, :help_strings, @template.controller_name.singularize]
      end
    end

    # Publish/draft submit buttons chosen from the object's persisted state
    # and (if defined) its is_published flag.
    def form_actions options={}
      content_tag :div, :class => 'form-actions' do
        if current_user
          if (defined? object.is_published) && (object.id) && (object.is_published == true)
            pub_btn_txt = I18n.t :save_changes, :scope => [:layouts, :admin]
          elsif (!defined? object.is_published) && object.id
            pub_btn_txt = I18n.t :save_changes, :scope => [:layouts, :admin]
          else
            pub_btn_txt = I18n.t :publish, :scope => [:layouts, :admin]
          end
          c = submit_tag pub_btn_txt, :name => 'submit', :class => 'btn btn-primary'
          c << ' '
          # you can not save a published object as a draft
          if (defined? object.is_published) && ((object.id.nil? == true))
            c << submit_tag(I18n.t(:save_as_draft, :scope => [:layouts, :admin]), :name => 'draft', :class => 'btn')
          elsif (defined? object.is_published) && (object.is_published == false) && (object.id.nil? == false)
            c << submit_tag(I18n.t(:save_changes_in_draft, :scope => [:layouts, :admin]), :name => 'draft', :class => 'btn')
          end
          c
        else
          if (object.id.nil? == true)
            pub_btn_txt = I18n.t :save, :scope => [:layouts, :admin]
          else
            pub_btn_txt = I18n.t :save_changes, :scope => [:layouts, :admin]
          end
          c = submit_tag pub_btn_txt, :name => 'draft', :class => 'btn btn-primary'
        end
        # c += submit_tag I18n.t(:preview, :scope => [:layouts, :admin]), :name => 'preview', :class => 'btn pull-right'
      end
    end
  end
end
try to ONLY use the link adder
module MesenForms
  class FormBuilder < ::ActionView::Helpers::FormBuilder
    delegate :content_tag, :button_tag, :submit_tag, :link_to, :current_user, :to => :@template

    %w[text_area text_field email_field url_field password_field collection_select].each do |method_name|
      define_method(method_name) do |attribute, *options|
        opts = options.extract_options!
        if opts[:skip_label]
          super(attribute, *options)
        else
          control_group do
            label(attribute, class: 'control-label')+
            controls do
              if method_name == 'text_area' && opts[:cktext]
                cktext_area(attribute.to_sym, :toolbar => opts[:cktext], :rows => (opts[:rows] ? opts[:rows] : 5), :width => 322, :height => (opts[:height] ? opts[:height] : 200), :js_content_for => :ckeditor_js)
              else
                super(attribute, *options)
              end+
              if opts[:help]
                help_block opts[:help]
              end
            end
          end
        end
      end
    end

    def errors options={}
      if object.errors.any?
        content_tag :div, :class => 'alert span7 alert-error' do
          content_tag(:a, "&times;".html_safe, href: "#", class: "close", data: {dismiss: "alert"})+
          content_tag(:h3, I18n.t('activerecord.errors.template.header', :count => object.errors.size, :model => I18n.t(object.class.to_s.underscore, :scope => [:activerecord, :models])))+
          content_tag(:ul) do
            object.errors.full_messages.reduce('') { |ccc, message| ccc << content_tag(:li, message) }.html_safe
          end
        end
      end
    end

    def image_upload(attribute, options={})
      control_group do
        label(attribute, class: 'control-label')+
        controls do
          if(defined?object[attribute] && !object[attribute].blank?)
            @template.image_tag.(object.instance_eval(attribute).url(:thumb))
          end+
          tag('br')+
          file_field(attribute)+
          if options[:help]
            help_block options[:help]
          end
        end
      end
    end

    # Changed: only the "add fields" link is rendered; the well wrapper is
    # commented out.
    def nested_form(attribute, options={})
      control_group do
        label(attribute, class: 'control-label')+
        controls do
          #content_tag :div, :class => 'well fields-wrapper' do
          #self.fields_for attribute do |field|
          #  @template.render attribute.singularize << '_fields', :f => field
          #end
          #content_tag :div, :class => ('hidden' if object.instance_eval(attribute).any?)
          #  I18n.t(object.class.to_s.underscore, :scope => [:activerecord, :models])
          #end
          #end+
          link_to_add_fields(I18n.t('Add ' + attribute.singularize, :scope => [:layouts, :admin]), self, attribute)
        end
      end
    end

    def datetime_select(attribute, options={})
      control_group do
        label(attribute, class: 'control-label')+
        controls do
          super+
          if options[:help]
            help_block options[:help]
          end
        end
      end
    end

    def map_input(attribute, options={})
      control_group do
        label(attribute, class: 'control-label')+
        controls do
          hidden_field(:lat).html_safe+
          hidden_field(:lng).html_safe
        end
      end
    end

    def control_group
      content_tag(:div, class: 'control-group') do
        yield
      end
    end

    def controls options={}
      content_tag :div, class: 'controls' do
        yield
      end
    end

    def help_block string
      content_tag :p, class: 'help-block' do
        I18n.t string, :scope => [:activerecord, :help_strings, @template.controller_name.singularize]
      end
    end

    def form_actions options={}
      content_tag :div, :class => 'form-actions' do
        if current_user
          if (defined? object.is_published) && (object.id) && (object.is_published == true)
            pub_btn_txt = I18n.t :save_changes, :scope => [:layouts, :admin]
          elsif (!defined? object.is_published) && object.id
            pub_btn_txt = I18n.t :save_changes, :scope => [:layouts, :admin]
          else
            pub_btn_txt = I18n.t :publish, :scope => [:layouts, :admin]
          end
          c = submit_tag pub_btn_txt, :name => 'submit', :class => 'btn btn-primary'
          c << ' '
          # you can not save a published object as a draft
          if (defined? object.is_published) && ((object.id.nil? == true))
            c << submit_tag(I18n.t(:save_as_draft, :scope => [:layouts, :admin]), :name => 'draft', :class => 'btn')
          elsif (defined? object.is_published) && (object.is_published == false) && (object.id.nil? == false)
            c << submit_tag(I18n.t(:save_changes_in_draft, :scope => [:layouts, :admin]), :name => 'draft', :class => 'btn')
          end
          c
        else
          if (object.id.nil? == true)
            pub_btn_txt = I18n.t :save, :scope => [:layouts, :admin]
          else
            pub_btn_txt = I18n.t :save_changes, :scope => [:layouts, :admin]
          end
          c = submit_tag pub_btn_txt, :name => 'draft', :class => 'btn btn-primary'
        end
        # c += submit_tag I18n.t(:preview, :scope => [:layouts, :admin]), :name => 'preview', :class => 'btn pull-right'
      end
    end
  end
end
module Metasploit
  module Cache
    # Holds components of {VERSION} as defined by
    # {http://semver.org/spec/v2.0.0.html semantic versioning v2.0.0}.
    #
    # NOTE(review): the original span contained two consecutive snapshots of
    # this module (PATCH = 30 and PATCH = 31) separated by the stray commit
    # message "Upstream merge; version bump", which is not valid Ruby. Only
    # the newer snapshot (PATCH = 31) is kept.
    module Version
      #
      # CONSTANTS
      #

      # The major version number.
      MAJOR = 0

      # The minor version number, scoped to the {MAJOR} version number.
      MINOR = 64

      # The patch version number, scoped to the {MAJOR} and {MINOR} version numbers.
      PATCH = 31

      #
      # Module Methods
      #

      # The full version string in semantic-versioning v2.0.0 format.
      #
      # @return [String] 'MAJOR.MINOR.PATCH' on master;
      #   'MAJOR.MINOR.PATCH-PRERELEASE' on any branch other than master.
      def self.full
        version = "#{MAJOR}.#{MINOR}.#{PATCH}"

        # :nocov:
        # PRERELEASE is only defined on non-master branches.
        if defined? PRERELEASE
          version = "#{version}-#{PRERELEASE}"
        end
        # :nocov:

        version
      end

      # The full gem version string in RubyGems format, where prerelease
      # segments are separated with '.pre.' instead of '-'.
      #
      # @return [String] 'MAJOR.MINOR.PATCH' on master;
      #   'MAJOR.MINOR.PATCH.pre.PRERELEASE' on any branch other than master.
      def self.gem
        full.gsub('-', '.pre.')
      end
    end

    # (see Version.gem)
    GEM_VERSION = Version.gem

    # (see Version.full)
    VERSION = Version.full
  end
end
module Metasploit
  module Cache
    # Holds components of {VERSION} as defined by
    # {http://semver.org/spec/v2.0.0.html semantic versioning v2.0.0}.
    #
    # NOTE(review): the original span contained two consecutive snapshots of
    # this module (PATCH = 18 with PRERELEASE = 'payload-stage-class', then
    # PATCH = 18 without it) separated by the stray commit message
    # "Remove PRERELEASE", which is not valid Ruby. Only the newer snapshot
    # (no PRERELEASE, as on master) is kept.
    module Version
      #
      # CONSTANTS
      #

      # The major version number.
      MAJOR = 0

      # The minor version number, scoped to the {MAJOR} version number.
      MINOR = 64

      # The patch version number, scoped to the {MAJOR} and {MINOR} version numbers.
      PATCH = 18

      #
      # Module Methods
      #

      # The full version string in semantic-versioning v2.0.0 format.
      #
      # @return [String] 'MAJOR.MINOR.PATCH' on master;
      #   'MAJOR.MINOR.PATCH-PRERELEASE' on any branch other than master.
      def self.full
        version = "#{MAJOR}.#{MINOR}.#{PATCH}"

        # :nocov:
        # PRERELEASE is only defined on non-master branches.
        if defined? PRERELEASE
          version = "#{version}-#{PRERELEASE}"
        end
        # :nocov:

        version
      end

      # The full gem version string in RubyGems format, where prerelease
      # segments are separated with '.pre.' instead of '-'.
      #
      # @return [String] 'MAJOR.MINOR.PATCH' on master;
      #   'MAJOR.MINOR.PATCH.pre.PRERELEASE' on any branch other than master.
      def self.gem
        full.gsub('-', '.pre.')
      end
    end

    # (see Version.gem)
    GEM_VERSION = Version.gem

    # (see Version.full)
    VERSION = Version.full
  end
end
module Metasploit
  module Cache
    # Holds components of {VERSION} as defined by
    # {http://semver.org/spec/v2.0.0.html semantic versioning v2.0.0}.
    #
    # NOTE(review): the original span contained two consecutive snapshots of
    # this module (PATCH = 3 / PRERELEASE = 'load-auxiliary-class-from-framework',
    # then PATCH = 4 / PRERELEASE = 'cells') separated by the stray commit
    # message "Update version for branch MSP-12300", which is not valid Ruby.
    # Only the newer snapshot is kept.
    module Version
      #
      # CONSTANTS
      #

      # The major version number.
      MAJOR = 0

      # The minor version number, scoped to the {MAJOR} version number.
      MINOR = 64

      # The patch version number, scoped to the {MAJOR} and {MINOR} version numbers.
      PATCH = 4

      # The prerelease version, scoped to the {MAJOR}, {MINOR}, and {PATCH}
      # version numbers; only present on branches other than master.
      PRERELEASE = 'cells'

      #
      # Module Methods
      #

      # The full version string in semantic-versioning v2.0.0 format.
      #
      # @return [String] 'MAJOR.MINOR.PATCH' on master;
      #   'MAJOR.MINOR.PATCH-PRERELEASE' on any branch other than master.
      def self.full
        version = "#{MAJOR}.#{MINOR}.#{PATCH}"

        # :nocov:
        # PRERELEASE is only defined on non-master branches.
        if defined? PRERELEASE
          version = "#{version}-#{PRERELEASE}"
        end
        # :nocov:

        version
      end

      # The full gem version string in RubyGems format, where prerelease
      # segments are separated with '.pre.' instead of '-'.
      #
      # @return [String] 'MAJOR.MINOR.PATCH' on master;
      #   'MAJOR.MINOR.PATCH.pre.PRERELEASE' on any branch other than master.
      def self.gem
        full.gsub('-', '.pre.')
      end
    end

    # (see Version.gem)
    GEM_VERSION = Version.gem

    # (see Version.full)
    VERSION = Version.full
  end
end
require 'middleman-core/templates'

module Middleman
  module Swift
    # Middleman project template that scaffolds the "swift" starter layout:
    # Gemfile/config, Slim views, and a css/js/img tree whose directory names
    # are configurable via class options.
    #
    # NOTE(review): the original span contained two snapshots of this file
    # separated by the stray commit message "fixed typo" (the typo being
    # 'framwork' -> 'framework' in a copy_file source path). Only the fixed
    # snapshot is kept, and a duplicated copy_file call for
    # 'source/css/overrides/.gitkeep' has been removed.
    class Template < Middleman::Templates::Base
      class_option 'css_dir', default: 'css',
                   desc: 'The path to the css files'
      class_option 'js_dir', default: 'js',
                   desc: 'The path to the javascript files'
      class_option 'images_dir', default: 'img',
                   desc: 'The path to the image files'

      # Thor looks up template sources relative to this directory.
      def self.source_root
        File.join(File.dirname(__FILE__), 'template')
      end

      # Copies the whole scaffold into +location+, honoring the css/js/img
      # directory options, then rewrites the IMG_DIR placeholder in the
      # generated stylesheet.
      def build_scaffold
        template 'shared/Gemfile.tt', File.join(location, 'Gemfile')
        template 'shared/config.tt', File.join(location, 'config.rb')

        copy_file 'source/index.html.slim', File.join(location, 'source/index.html.slim')
        copy_file 'source/layouts/layout.slim', File.join(location, 'source/layouts/layout.slim')
        copy_file 'source/404.html.slim', File.join(location, 'source/404.html.slim')
        copy_file 'source/crossdomain.xml', File.join(location, 'source/crossdomain.xml')
        copy_file 'source/humans.txt', File.join(location, 'source/humans.txt')
        copy_file 'source/robots.txt', File.join(location, 'source/robots.txt')

        # Stylesheets
        empty_directory File.join(location, 'source', options[:css_dir])
        copy_file 'source/css/screen.css.scss', File.join(location, 'source', options[:css_dir], 'screen.css.scss')
        copy_file 'source/css/framework/_swift-framework.scss', File.join(location, 'source', options[:css_dir], 'framework/_swift-framework.scss')
        copy_file 'source/css/framework/_reset.scss', File.join(location, 'source', options[:css_dir], 'framework/_reset.scss')
        copy_file 'source/css/framework/typography.scss', File.join(location, 'source', options[:css_dir], 'framework/typography.scss')
        copy_file 'source/css/modules/.gitkeep', File.join(location, 'source', options[:css_dir], 'modules/.gitkeep')
        copy_file 'source/css/global/_global.scss', File.join(location, 'source', options[:css_dir], 'global/_global.scss')
        copy_file 'source/css/pages/_page-home.scss', File.join(location, 'source', options[:css_dir], 'pages/_page-home.scss')
        copy_file 'source/css/partials/_sidebar.scss', File.join(location, 'source', options[:css_dir], 'partials/_sidebar.scss')
        copy_file 'source/css/overrides/.gitkeep', File.join(location, 'source', options[:css_dir], 'overrides/.gitkeep')
        copy_file 'source/css/vendors/.gitkeep', File.join(location, 'source', options[:css_dir], 'vendors/.gitkeep')
        copy_file 'source/css/fonts/_fonts.scss', File.join(location, 'source', options[:css_dir], 'fonts/_fonts.scss')
        copy_file 'source/css/ie.css.scss', File.join(location, 'source', options[:css_dir], 'ie.css.scss')
        copy_file 'source/css/print.css.scss', File.join(location, 'source', options[:css_dir], 'print.css.scss')

        # Javascript
        empty_directory File.join(location, 'source', options[:js_dir])
        copy_file 'source/js/framework/boxsizing.htc', File.join(location, 'source', options[:js_dir], 'framework/boxsizing.htc')
        copy_file 'source/js/framework/PIE.htc', File.join(location, 'source', options[:js_dir], 'framework/PIE.htc')
        copy_file 'source/js/framework/PIE.php', File.join(location, 'source', options[:js_dir], 'framework/PIE.php')
        copy_file 'source/js/framework/respond.min.js', File.join(location, 'source', options[:js_dir], 'framework/respond.min.js')
        copy_file 'source/js/modules/.gitkeep', File.join(location, 'source', options[:js_dir], 'modules/.gitkeep')
        copy_file 'source/js/vendors/.gitkeep', File.join(location, 'source', options[:js_dir], 'vendors/.gitkeep')
        copy_file 'source/js/overrides/.gitkeep', File.join(location, 'source', options[:js_dir], 'overrides/.gitkeep')
        copy_file 'source/js/pages/.gitkeep', File.join(location, 'source', options[:js_dir], 'pages/.gitkeep')
        copy_file 'source/js/script.js', File.join(location, 'source', options[:js_dir], 'script.js')
        copy_file 'source/js/ie.js', File.join(location, 'source', options[:js_dir], 'ie.js')

        # Images
        empty_directory File.join(location, 'source', options[:images_dir])
        copy_file 'source/img/icon/.gitkeep', File.join(location, 'source', options[:images_dir], 'icon/.gitkeep')

        replace_css_img_dir
      end

      private

      # Substitutes the IMG_DIR placeholder in the generated screen.css.scss
      # with the configured images directory.
      def replace_css_img_dir
        path = File.join(location, 'source', options[:css_dir], 'screen.css.scss')
        buf = File.read(path)
        File.write(path, buf.gsub(/IMG_DIR/, options[:images_dir]))
      end
    end
  end
end

Middleman::Templates.register :swift, Middleman::Swift::Template
# @package MiGA
# @license Artistic-2.0

require 'miga/cli/action'
require 'miga/remote_dataset'
require 'csv'

# CLI action that lists genome assemblies from NCBI for a given taxon and
# downloads them (or just their metadata) into a MiGA project.
#
# NOTE(review): the original span contained two snapshots of this file
# separated by the stray commit message "New option: miga ncbi_get --max";
# only the newer snapshot (adding --max / #impose_limit) is kept.
class MiGA::Cli::Action::NcbiGet < MiGA::Cli::Action
  # Declares defaults and registers all command-line options.
  def parse_cli
    cli.defaults = {
      query: false, unlink: false,
      reference: false, legacy_name: false,
      complete: false, chromosome: false,
      scaffold: false, contig: false, add_version: true, dry: false,
      get_md: false, only_md: false, save_every: 1
    }
    cli.parse do |opt|
      cli.opt_object(opt, [:project])
      opt.on(
        '-T', '--taxon STRING',
        '(Mandatory) Taxon name (e.g., a species binomial)'
      ) { |v| cli[:taxon] = v }
      opt.on(
        '--max INT', Integer,
        'Maximum number of datasets to download (by default: unlimited)'
      ) { |v| cli[:max_datasets] = v }
      opt.on(
        '-m', '--metadata STRING',
        'Metadata as key-value pairs separated by = and delimited by comma',
        'Values are saved as strings except for booleans (true / false) or nil'
      ) { |v| cli[:metadata] = v }
      cli_task_flags(opt)
      cli_name_modifiers(opt)
      cli_filters(opt)
      cli_save_actions(opt)
      opt.on(
        '--api-key STRING', 'NCBI API key'
      ) { |v| ENV['NCBI_API_KEY'] = v }
    end
  end

  # Entry point: list remote assemblies, filter them, download them, report
  # totals, and optionally unlink project datasets missing from the listing.
  def perform
    sanitize_cli
    p = cli.load_project
    ds = remote_list
    ds = discard_blacklisted(ds)
    ds = impose_limit(ds)
    d, downloaded = download_entries(ds, p)

    # Finalize
    cli.say "Datasets listed: #{d.size}"
    act = cli[:dry] ? 'to download' : 'downloaded'
    cli.say "Datasets #{act}: #{downloaded}"
    unless cli[:remote_list].nil?
      File.open(cli[:remote_list], 'w') do |fh|
        d.each { |i| fh.puts i }
      end
    end
    return unless cli[:unlink]

    unlink = p.dataset_names - d
    unlink.each { |i| p.unlink_dataset(i).remove! }
    cli.say "Datasets unlinked: #{unlink.size}"
  end

  private

  # Registers the assembly-status flags (--reference, --complete, ..., --all).
  def cli_task_flags(opt)
    cli.opt_flag(
      opt, 'reference',
      'Download all reference genomes (ignore any other status)'
    )
    cli.opt_flag(opt, 'complete', 'Download complete genomes')
    cli.opt_flag(opt, 'chromosome', 'Download complete chromosomes')
    cli.opt_flag(opt, 'scaffold', 'Download genomes in scaffolds')
    cli.opt_flag(opt, 'contig', 'Download genomes in contigs')
    opt.on(
      '--all', 'Download all genomes (in any status)'
    ) do
      cli[:complete] = true
      cli[:chromosome] = true
      cli[:scaffold] = true
      cli[:contig] = true
    end
  end

  # Registers options that alter how dataset names are built.
  def cli_name_modifiers(opt)
    opt.on(
      '--no-version-name',
      'Do not add sequence version to the dataset name',
      'Only affects --complete and --chromosome'
    ) { |v| cli[:add_version] = v }
    cli.opt_flag(
      opt, 'legacy-name',
      'Use dataset names based on chromosome entries instead of assembly',
      :legacy_name
    )
  end

  # Registers dataset-filtering options (blacklist, dry run, resume point,
  # metadata-only updates).
  def cli_filters(opt)
    opt.on(
      '--blacklist PATH',
      'A file with dataset names to blacklist'
    ) { |v| cli[:blacklist] = v }
    cli.opt_flag(opt, 'dry', 'Do not download or save the datasets')
    opt.on(
      '--ignore-until STRING',
      'Ignores all datasets until a name is found (useful for large reruns)'
    ) { |v| cli[:ignore_until] = v }
    cli.opt_flag(
      opt, 'get-metadata',
      'Only download and update metadata for existing datasets', :get_md
    )
  end

  # Registers options controlling how/when datasets are saved to the project.
  def cli_save_actions(opt)
    cli.opt_flag(
      opt, 'only-metadata',
      'Create datasets without input data but retrieve all metadata',
      :only_md
    )
    opt.on(
      '--save-every INT', Integer,
      'Save project every this many downloaded datasets',
      'If zero, it saves the project only once upon completion',
      "By default: #{cli[:save_every]}"
    ) { |v| cli[:save_every] = v }
    opt.on(
      '-q', '--query',
      'Register the datasets as queries, not reference datasets'
    ) { |v| cli[:query] = v }
    opt.on(
      '-u', '--unlink',
      'Unlink all datasets in the project missing from the download list'
    ) { |v| cli[:unlink] = v }
    opt.on(
      '-R', '--remote-list PATH',
      'Path to an output file with the list of all datasets listed remotely'
    ) { |v| cli[:remote_list] = v }
  end

  # Validates the parsed options: a taxon and at least one status flag are
  # required; dry runs force save_every back to 1.
  def sanitize_cli
    cli.ensure_par(taxon: '-T')
    tasks = %w[reference complete chromosome scaffold contig]
    unless tasks.any? { |i| cli[i.to_sym] }
      raise 'No action requested: pick at least one type of genome'
    end

    cli[:save_every] = 1 if cli[:dry]
  end

  # Queries NCBI and builds a Hash of dataset name => download descriptor
  # (ids/db/universe plus metadata) for every usable assembly row.
  def remote_list
    cli.say 'Downloading genome list'
    ds = {}
    url = remote_list_url
    doc = RemoteDataset.download_url(url)
    CSV.parse(doc, headers: true).each do |r|
      asm = r['assembly']
      # Rows without a usable assembly accession or GenBank FTP path are skipped.
      next if asm.nil? || asm.empty? || asm == '-'
      next unless r['ftp_path_genbank']

      rep = remote_row_replicons(r)
      n = remote_row_name(r, rep, asm)
      # Register for download
      fna_url = '%s/%s_genomic.fna.gz' %
                [r['ftp_path_genbank'], File.basename(r['ftp_path_genbank'])]
      ds[n] = {
        ids: [fna_url], db: :assembly_gz, universe: :web,
        md: {
          type: :genome, ncbi_asm: asm, strain: r['strain']
        }
      }
      ds[n][:md][:ncbi_nuccore] = rep.join(',') unless rep.nil?
      unless r['release_date'].nil?
        ds[n][:md][:release_date] = Time.parse(r['release_date']).to_s
      end
    end
    ds
  end

  # Extracts replicon accessions from a listing row, stripping the
  # "name:" prefixes and "/..." suffixes; nil when the column is absent.
  def remote_row_replicons(r)
    return if r['replicons'].nil?

    r['replicons']
      .split('; ')
      .map { |i| i.gsub(/.*:/, '') }
      .map { |i| i.gsub(%r{/.*}, '') }
  end

  # Builds the dataset name from organism + accession; with --legacy-name the
  # chromosome accession (or bare organism for --reference) is used instead.
  def remote_row_name(r, rep, asm)
    return r['#organism'].miga_name if cli[:legacy_name] && cli[:reference]

    if cli[:legacy_name] && ['Complete', ' Chromosome'].include?(r['level'])
      acc = rep.nil? ? '' : rep.first
    else
      acc = asm
    end
    # Drop the trailing ".N" sequence version unless --no-version-name is off.
    acc.gsub!(/\.\d+\Z/, '') unless cli[:add_version]
    "#{r['#organism']}_#{acc}".miga_name
  end

  # Assembles the NCBI solr2txt query URL for the requested taxon and
  # assembly-status filters.
  def remote_list_url
    url_base = 'https://www.ncbi.nlm.nih.gov/genomes/solr2txt.cgi?'
    url_param = {
      q: '[display()].' \
        'from(GenomeAssemblies).' \
        'usingschema(/schema/GenomeAssemblies).' \
        'matching(tab==["Prokaryotes"] and q=="' \
        "#{cli[:taxon]&.tr('"', "'")}\"",
      fields: 'organism|organism,assembly|assembly,replicons|replicons,' \
        'level|level,ftp_path_genbank|ftp_path_genbank,' \
        'release_date|release_date,strain|strain',
      nolimit: 'on'
    }
    if cli[:reference]
      url_param[:q] += ' and refseq_category==["representative"]'
    else
      status = {
        complete: 'Complete',
        chromosome: ' Chromosome', # <- The leading space is *VERY* important!
        scaffold: 'Scaffold',
        contig: 'Contig'
      }.map { |k, v| '"' + v + '"' if cli[k] }.compact.join(',')
      url_param[:q] += ' and level==[' + status + ']'
    end
    url_param[:q] += ')'
    url_base + URI.encode_www_form(url_param)
  end

  # Removes from +ds+ every name listed in the --blacklist file
  # (comment lines starting with '#' are ignored).
  def discard_blacklisted(ds)
    unless cli[:blacklist].nil?
      cli.say "Discarding datasets in #{cli[:blacklist]}"
      File.readlines(cli[:blacklist])
          .select { |i| i !~ /^#/ }
          .map(&:chomp)
          .each { |i| ds.delete i }
    end
    ds
  end

  # Randomly subsamples +ds+ down to --max entries (no-op when unset or when
  # the listing is already small enough).
  def impose_limit(ds)
    max = cli[:max_datasets].to_i
    if !max.zero? && max < ds.size
      cli.say "Subsampling list from #{ds.size} to #{max} datasets"
      sample = ds.keys.sample(max)
      ds.select! { |k, _| sample.include? k }
    end
    ds
  end

  # Downloads every entry in +ds+ into project +p+, honoring --ignore-until,
  # --dry, --get-metadata, and --save-every. Returns [names, download_count].
  def download_entries(ds, p)
    cli.say "Downloading #{ds.size} " + (ds.size == 1 ? 'entry' : 'entries')
    p.do_not_save = true if cli[:save_every] != 1
    ignore = !cli[:ignore_until].nil?
    downloaded = 0
    d = []
    ds.each do |name, body|
      d << name
      cli.puts name
      ignore = false if ignore && name == cli[:ignore_until]
      # With --get-metadata only existing datasets are processed; otherwise
      # only datasets not yet in the project are processed.
      next if ignore || p.dataset(name).nil? == cli[:get_md]

      downloaded += 1
      unless cli[:dry]
        save_entry(name, body, p)
        p.save! if cli[:save_every] > 1 && (downloaded % cli[:save_every]).zero?
      end
    end
    p.do_not_save = false
    p.save! if cli[:save_every] != 1
    [d, downloaded]
  end

  # Creates (or, with --get-metadata, updates) a single dataset in +p+ from
  # its remote descriptor +body+.
  def save_entry(name, body, p)
    cli.say ' Locating remote dataset'
    body[:md][:metadata_only] = true if cli[:only_md]
    rd = RemoteDataset.new(body[:ids], body[:db], body[:universe])
    if cli[:get_md]
      cli.say ' Updating dataset'
      rd.update_metadata(p.dataset(name), body[:md])
    else
      cli.say ' Creating dataset'
      rd.save_to(p, name, !cli[:query], body[:md])
      cli.add_metadata(p.add_dataset(name))
    end
  end
end
# Dependencies: terminal handling, logging, command registry, option parsing,
# shared print helpers, credentials, and the API client.
require 'morpheus/logging'
require 'morpheus/benchmarking'
require 'morpheus/terminal'
require 'morpheus/cli/cli_registry'
require 'morpheus/cli/option_parser'
require 'morpheus/cli/option_types'
require 'morpheus/cli/mixins/print_helper'
require 'morpheus/cli/credentials'
#require 'morpheus/cli/commands/shell'
#require 'morpheus/cli/commands/remote'
require 'morpheus/api/api_client'

module Morpheus
  module Cli
    # Module to be included by every CLI command so that commands get registered
    # This mixin defines a print and puts method, and delegates
    # todo: use delegate
    module CliCommand

      # Hook run when a command class mixes this module in: pull in the print
      # helpers and benchmarking support, expose the class-level DSL via
      # ClassMethods, and register the command under its command_name.
      def self.included(base)
        base.send :include, Morpheus::Cli::PrintHelper
        base.send :include, Morpheus::Benchmarking::HasBenchmarking
        base.extend ClassMethods
        Morpheus::Cli::CliRegistry.add(base, base.command_name)
      end

      # the beginning of instance variables from optparse!
      # this setting makes it easy for the called to disable prompting
      attr_reader :no_prompt

      # @return [Morpheus::Terminal] the terminal this command is being executed inside of
      def my_terminal
        # Lazily default to the singleton terminal when none was assigned
        @my_terminal ||= Morpheus::Terminal.instance
      end

      # set the terminal running this command.
      # @param term [MorpheusTerminal] the terminal this command is assigned to
      # @return the Terminal this command is being executed inside of
      def my_terminal=(term)
        if !term.is_a?(Morpheus::Terminal)
          raise "CliCommand (#{self.class}) my_terminal= expects object of type Terminal and instead got a #{term.class}"
        end
        @my_terminal = term
      end

      # delegate :print, to: :my_terminal
      # delegate :puts, to: :my_terminal
      # or... bum bum bummm
      # a paradigm shift away from include and use module functions instead.
# module_function :print, puts # delegate :puts, to: :my_terminal def print(*msgs) my_terminal.stdout.print(*msgs) end def println(*msgs) print(*msgs) print "\n" end def puts(*msgs) my_terminal.stdout.puts(*msgs) end def print_error(*msgs) my_terminal.stderr.print(*msgs) end def puts_error(*msgs) my_terminal.stderr.puts(*msgs) end # todo: customizable output color, other than cyan. # def terminal_fg # end # def cyan # Term::ANSIColor.black # end # todo: use terminal.stdin # def readline(*msgs) # @my_terminal.stdin.readline(*msgs) # end # todo: maybe... # disabled prompting for this command # def noninteractive() # @no_prompt = true # self # end # whether to prompt or not, this is true by default. def interactive? @no_prompt!= true end def raise_command_error(msg, args=[], optparse=nil, exit_code=nil) raise Morpheus::Cli::CommandError.new(msg, args, optparse, exit_code) end def raise_args_error(msg, args=[], optparse=nil, exit_code=nil) raise Morpheus::Cli::CommandArgumentsError.new(msg, args, optparse, exit_code) end # parse_id_list splits returns the given id_list with its values split on a comma # your id values cannot contain a comma, atm... # @param id_list [String or Array of Strings] # @param delim [String] Default is a comma and any surrounding white space. # @return array of values def parse_id_list(id_list, delim=/\s*\,\s*/) [id_list].flatten.collect {|it| it? it.to_s.split(delim) : nil }.flatten.compact end def parse_bytes_param(bytes_param, option, assumed_unit = nil) if bytes_param && bytes_param.to_f > 0 bytes_param.upcase! multiplier = 1 unit = nil number = (bytes_param.to_f == bytes_param.to_i? bytes_param.to_i : bytes_param.to_f) if (bytes_param.end_with? 'GB') || ((!bytes_param.end_with? 'MB') && assumed_unit == 'GB') unit = 'GB' multiplier = 1024 * 1024 * 1024 elsif (bytes_param.end_with? 
'MB') || assumed_unit == 'MB' unit = 'MB' multiplier = 1024 * 1024 end return {:bytes_param => bytes_param, :bytes => number * multiplier, :number => number, :multiplier => multiplier, :unit => unit} end raise_command_error "Invalid value for #{option} option" end # this returns all the options passed in by -O, parsed all nicely into objects. def parse_passed_options(options) passed_options = options[:options]? options[:options].reject {|k,v| k.is_a?(Symbol) } : {} return passed_options end # Appends Array of OptionType definitions to an OptionParser instance # This adds an option like --fieldContext.fieldName="VALUE" # @param opts [OptionParser] # @param options [Hash] output map that is being constructed # @param option_types [Array] list of OptionType definitions to add # @return void, this modifies the opts in place. def build_option_type_options(opts, options, option_types=[]) #opts.separator "" #opts.separator "Options:" options[:options] ||= {} # this is where these go..for now custom_options = options[:options] # add each one to the OptionParser option_types.each do |option_type| if option_type['fieldName'].empty? puts_error "Missing fieldName for option type: #{option_type}" if Morpheus::Logging.debug? next end full_field_name = option_type['fieldContext'].to_s.empty?? option_type['fieldName'] : "#{option_type['fieldContext']}.#{option_type['fieldName']}" field_namespace = full_field_name.split(".") field_name = field_namespace.pop description = "#{option_type['fieldLabel']}#{option_type['fieldAddOn']? ('(' + option_type['fieldAddOn'] + ') ') : '' }#{!option_type['required']?'(optional)' : ''}" if option_type['description'] # description << "\n #{option_type['description']}" description << " - #{option_type['description']}" end if option_type['defaultValue'] description << ". 
Default: #{option_type['defaultValue']}" end if option_type['helpBlock'] description << "\n #{option_type['helpBlock']}" end # description = option_type['description'].to_s # if option_type['defaultValue'] # description = "#{description} Default: #{option_type['defaultValue']}" # end # if option_type['required'] == true # description = "(Required) #{description}" # end value_label = "VALUE" if option_type['placeHolder'] value_label = option_type['placeHolder'] elsif option_type['type'] == 'checkbox' value_label = '[on|off]' # or.. true|false elsif option_type['type'] == 'number' value_label = 'NUMBER' elsif option_type['type'] =='multiSelect' value_label = 'LIST' # elsif option_type['type'] =='select' # value_label = 'SELECT' # elsif option['type'] =='select' end full_option = "--#{full_field_name} #{value_label}" # switch is an alias for the full option name, fieldName is the default if option_type['switch'] full_option = "--#{option_type['switch']} #{value_label}" end arg1, arg2 = full_option, String if option_type['shorthand'] arg1, arg2 = full_option, option_type['shorthand'] end opts.on(arg1, arg2, description) do |val| if option_type['type'] == 'checkbox' val = (val.to_s!= 'false' && val.to_s!= 'off') elsif option_type['dataType']!='string' # 'dataType':'string' added to cli to avoid auto conversion to JSON # attempt to parse JSON, this allows blank arrays for multiSelect like --tenants [] if (val.to_s[0] == '{' && val.to_s[-1] == '}') || (val.to_s[0] == '[' && val.to_s[-1] == ']') begin val = JSON.parse(val) rescue Morpheus::Logging::DarkPrinter.puts "Failed to parse option value '#{val}' as JSON" if Morpheus::Logging.debug? end end end cur_namespace = custom_options field_namespace.each do |ns| next if ns.empty? 
cur_namespace[ns.to_s] ||= {} cur_namespace = cur_namespace[ns.to_s] end cur_namespace[field_name] = val end # todo: all the various types # number # checkbox [on|off] # select for optionSource and selectOptions end opts end ## the standard options for a command that makes api requests (most of them) def build_standard_get_options(opts, options, includes=[], excludes=[]) build_common_options(opts, options, includes + [:query, :json, :yaml, :csv, :fields, :select, :delim, :quiet, :dry_run, :remote], excludes) end def build_standard_post_options(opts, options, includes=[], excludes=[]) build_common_options(opts, options, includes + [:options, :payload, :json, :quiet, :dry_run, :remote], excludes) end def build_standard_put_options(opts, options, includes=[], excludes=[]) build_standard_post_options(opts, options, includes, excludes) end def build_standard_delete_options(opts, options, includes=[], excludes=[]) build_common_options(opts, options, includes + [:auto_confirm, :query, :json, :quiet, :dry_run, :remote], excludes) end # list is GET that supports phrase,max,offset,sort,direction def build_standard_list_options(opts, options, includes=[], excludes=[]) build_standard_get_options(opts, options, [:list] + includes, excludes=[]) end def build_standard_add_options(opts, options, includes=[], excludes=[]) build_standard_post_options(opts, options, includes, excludes) end def build_standard_update_options(opts, options, includes=[], excludes=[]) build_standard_put_options(opts, options, includes, excludes) end def build_standard_remove_options(opts, options, includes=[], excludes=[]) build_standard_delete_options(opts, options, includes, excludes) end # number of decimal places to show with curreny def default_sigdig 2 end # appends to the passed OptionParser all the generic options # @param opts [OptionParser] the option parser object being constructed # @param options [Hash] the output Hash that is to being modified # @param includes [Array] which options to 
      # include eg. :options, :json, :remote
      # @return opts
      def build_common_options(opts, options, includes=[], excludes=[])
        #opts.separator ""
        # opts.separator "Common options:"
        option_keys = includes.clone
        # todo: support --quiet everywhere
        # turn on some options all the time..
        # unless command_name == "shell"
        #   option_keys << :quiet unless option_keys.include?(:quiet)
        # end
        # ensure commands can always access options[:options], until we can deprecate it...
        options[:options] ||= {}
        # Each recognized key adds its family of switches to the parser
        while (option_key = option_keys.shift) do
          case option_key.to_sym
          when :tenant, :account
            # todo: let's deprecate this in favor of :tenant --tenant to keep -a reserved for --all perhaps?
            opts.on('--tenant TENANT', String, "Tenant (Account) Name or ID") do |val|
              options[:account] = val
            end
            opts.on('--tenant-id ID', String, "Tenant (Account) ID") do |val|
              options[:account_id] = val
            end
            # todo: let's deprecate this in favor of :tenant --tenant to keep -a reserved for --all perhaps?
            opts.on('-a','--account ACCOUNT', "Alias for --tenant") do |val|
              options[:account] = val
            end
            opts.on('-A','--account-id ID', "Tenant (Account) ID") do |val|
              options[:account_id] = val
            end
            # Keep the tenant/account aliases out of the printed help
            opts.add_hidden_option('--tenant-id') if opts.is_a?(Morpheus::Cli::OptionParser)
            opts.add_hidden_option('-a, --account') if opts.is_a?(Morpheus::Cli::OptionParser)
            opts.add_hidden_option('-A, --account-id') if opts.is_a?(Morpheus::Cli::OptionParser)
          when :details
            opts.on('-a', '--all', "Show all details." ) do
              options[:details] = true
            end
            opts.on('--details', '--details', "Show more details" ) do
              options[:details] = true
            end
            opts.add_hidden_option('--details')
          when :sigdig
            opts.on('--sigdig DIGITS', "Significant digits to display for prices (currency). Default is #{default_sigdig}.") do |val|
              options[:sigdig] = val.to_i
            end
          when :options
            options[:options] ||= {}
            opts.on( '-O', '--option OPTION', "Option in the format -O field=\"value\"" ) do |option|
              # todo: look ahead and parse ALL the option=value args after -O switch
              #custom_option_args = option.split('=')
              # Split on the first '=' (optionally surrounded by one space each side)
              custom_option_args = option.sub(/\s?\=\s?/, '__OPTION_DELIM__').split('__OPTION_DELIM__')
              custom_options = options[:options]
              # Dotted field names (eg. config.dns.ip) become nested hashes
              option_name_args = custom_option_args[0].split('.')
              if option_name_args.count > 1
                nested_options = custom_options
                option_name_args.each_with_index do |name_element,index|
                  if index < option_name_args.count -
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

//! A picture represents a dynamically rendered image.
//!
//! # Overview
//!
//! Pictures consist of:
//!
//! - A number of primitives that are drawn onto the picture.
//! - A composite operation describing how to composite this
//!   picture into its parent.
//! - A configuration describing how to draw the primitives on
//!   this picture (e.g. in screen space or local space).
//!
//! The tree of pictures is generated during scene building.
//!
//! Depending on their composite operations pictures can be rendered into
//! intermediate targets or folded into their parent picture.
//!
//! ## Picture caching
//!
//! Pictures can be cached to reduce the amount of rasterization happening per
//! frame.
//!
//! When picture caching is enabled, the scene is cut into a small number of slices,
//! typically:
//!
//! - content slice
//! - UI slice
//! - background UI slice which is hidden by the other two slices most of the time.
//!
//! Each of these slices is made up of fixed-size large tiles of 2048x512 pixels
//! (or 128x128 for the UI slice).
//!
//! Tiles can either contain cached rasterized content in a texture or be "clear tiles"
//! that contain only a solid color rectangle rendered directly during the composite
//! pass.
//!
//! ## Invalidation
//!
//! Each tile keeps track of the elements that affect it, which can be:
//!
//! - primitives
//! - clips
//! - image keys
//! - opacity bindings
//! - transforms
//!
//! These dependency lists are built each frame and compared to the previous frame to
//! see if the tile changed.
//!
//! The tile's primitive dependency information is organized in a quadtree, each node
//! storing an index buffer of tile primitive dependencies.
//!
//! The union of the invalidated leaves of each quadtree produces a per-tile dirty rect
//!
which defines the scissor rect used when replaying the tile's drawing commands and //! can be used for partial present. //! //! ## Display List shape //! //! WR will first look for an iframe item in the root stacking context to apply //! picture caching to. If that's not found, it will apply to the entire root //! stacking context of the display list. Apart from that, the format of the //! display list is not important to picture caching. Each time a new scroll root //! is encountered, a new picture cache slice will be created. If the display //! list contains more than some arbitrary number of slices (currently 8), the //! content will all be squashed into a single slice, in order to save GPU memory //! and compositing performance. //! //! ## Compositor Surfaces //! //! Sometimes, a primitive would prefer to exist as a native compositor surface. //! This allows a large and/or regularly changing primitive (such as a video, or //! webgl canvas) to be updated each frame without invalidating the content of //! tiles, and can provide a significant performance win and battery saving. //! //! Since drawing a primitive as a compositor surface alters the ordering of //! primitives in a tile, we use 'overlay tiles' to ensure correctness. If a //! tile has a compositor surface, _and_ that tile has primitives that overlap //! the compositor surface rect, the tile switches to be drawn in alpha mode. //! //! We rely on only promoting compositor surfaces that are opaque primitives. //! With this assumption, the tile(s) that intersect the compositor surface get //! a 'cutout' in the rectangle where the compositor surface exists (not the //! entire tile), allowing that tile to be drawn as an alpha tile after the //! compositor surface. //! //! Tiles are only drawn in overlay mode if there is content that exists on top //! of the compositor surface. Otherwise, we can draw the tiles in the normal fast //! path before the compositor surface is drawn. Use of the per-tile valid and //! 
dirty rects ensure that we do a minimal amount of per-pixel work here to //! blend the overlay tile (this is not always optimal right now, but will be //! improved as a follow up). use api::{MixBlendMode, PremultipliedColorF, FilterPrimitiveKind}; use api::{PropertyBinding, PropertyBindingId, FilterPrimitive}; use api::{DebugFlags, ImageKey, ColorF, ColorU, PrimitiveFlags}; use api::{ImageRendering, ColorDepth, YuvColorSpace, YuvFormat, AlphaType}; use api::units::*; use crate::batch::BatchFilter; use crate::box_shadow::BLUR_SAMPLE_SCALE; use crate::clip::{ClipStore, ClipChainInstance, ClipChainId, ClipInstance}; use crate::spatial_tree::{ROOT_SPATIAL_NODE_INDEX, SpatialTree, CoordinateSpaceMapping, SpatialNodeIndex, VisibleFace }; use crate::composite::{CompositorKind, CompositeState, NativeSurfaceId, NativeTileId}; use crate::composite::{ExternalSurfaceDescriptor, ExternalSurfaceDependency}; use crate::debug_colors; use euclid::{vec2, vec3, Point2D, Scale, Size2D, Vector2D, Vector3D, Rect, Transform3D, SideOffsets2D}; use euclid::approxeq::ApproxEq; use crate::filterdata::SFilterData; use crate::intern::ItemUid; use crate::internal_types::{FastHashMap, FastHashSet, PlaneSplitter, Filter, PlaneSplitAnchor, TextureSource}; use crate::frame_builder::{FrameBuildingContext, FrameBuildingState, PictureState, PictureContext}; use crate::gpu_cache::{GpuCache, GpuCacheAddress, GpuCacheHandle}; use crate::gpu_types::{UvRectKind, ZBufferId}; use plane_split::{Clipper, Polygon, Splitter}; use crate::prim_store::{PrimitiveTemplateKind, PictureIndex, PrimitiveInstance, PrimitiveInstanceKind}; use crate::prim_store::{ColorBindingStorage, ColorBindingIndex, PrimitiveScratchBuffer}; use crate::print_tree::{PrintTree, PrintTreePrinter}; use crate::render_backend::{DataStores, FrameId}; use crate::render_task_graph::RenderTaskId; use crate::render_target::RenderTargetKind; use crate::render_task::{BlurTask, RenderTask, RenderTaskLocation, BlurTaskCache}; use 
crate::render_task::{StaticRenderTaskSurface, RenderTaskKind}; use crate::renderer::BlendMode; use crate::resource_cache::{ResourceCache, ImageGeneration, ImageRequest}; use crate::space::SpaceMapper; use crate::scene::SceneProperties; use smallvec::SmallVec; use std::{mem, u8, marker, u32}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::collections::hash_map::Entry; use std::ops::Range; use crate::texture_cache::TextureCacheHandle; use crate::util::{MaxRect, VecHelper, MatrixHelpers, Recycler, raster_rect_to_device_pixels, ScaleOffset}; use crate::filterdata::{FilterDataHandle}; use crate::tile_cache::{SliceDebugInfo, TileDebugInfo, DirtyTileDebugInfo}; use crate::visibility::{PrimitiveVisibilityFlags, FrameVisibilityContext}; use crate::visibility::{VisibilityState, FrameVisibilityState}; #[cfg(any(feature = "capture", feature = "replay"))] use ron; #[cfg(feature = "capture")] use crate::scene_builder_thread::InternerUpdates; #[cfg(any(feature = "capture", feature = "replay"))] use crate::intern::{Internable, UpdateList}; #[cfg(any(feature = "capture", feature = "replay"))] use crate::clip::{ClipIntern, PolygonIntern}; #[cfg(any(feature = "capture", feature = "replay"))] use crate::filterdata::FilterDataIntern; #[cfg(any(feature = "capture", feature = "replay"))] use api::PrimitiveKeyKind; #[cfg(any(feature = "capture", feature = "replay"))] use crate::prim_store::backdrop::Backdrop; #[cfg(any(feature = "capture", feature = "replay"))] use crate::prim_store::borders::{ImageBorder, NormalBorderPrim}; #[cfg(any(feature = "capture", feature = "replay"))] use crate::prim_store::gradient::{LinearGradient, RadialGradient, ConicGradient}; #[cfg(any(feature = "capture", feature = "replay"))] use crate::prim_store::image::{Image, YuvImage}; #[cfg(any(feature = "capture", feature = "replay"))] use crate::prim_store::line_dec::LineDecoration; #[cfg(any(feature = "capture", feature = "replay"))] use crate::prim_store::picture::Picture; #[cfg(any(feature = 
"capture", feature = "replay"))] use crate::prim_store::text_run::TextRun; #[cfg(feature = "capture")] use std::fs::File; #[cfg(feature = "capture")] use std::io::prelude::*; #[cfg(feature = "capture")] use std::path::PathBuf; use crate::scene_building::{SliceFlags}; #[cfg(feature = "replay")] // used by tileview so don't use an internal_types FastHashMap use std::collections::HashMap; // Maximum blur radius for blur filter (different than box-shadow blur). // Taken from FilterNodeSoftware.cpp in Gecko. pub const MAX_BLUR_RADIUS: f32 = 100.; /// Specify whether a surface allows subpixel AA text rendering. #[derive(Debug, Copy, Clone)] pub enum SubpixelMode { /// This surface allows subpixel AA text Allow, /// Subpixel AA text cannot be drawn on this surface Deny, /// Subpixel AA can be drawn on this surface, if not intersecting /// with the excluded regions, and inside the allowed rect. Conditional { allowed_rect: PictureRect, }, } /// A comparable transform matrix, that compares with epsilon checks. #[derive(Debug, Clone)] struct MatrixKey { m: [f32; 16], } impl PartialEq for MatrixKey { fn eq(&self, other: &Self) -> bool { const EPSILON: f32 = 0.001; // TODO(gw): It's possible that we may need to adjust the epsilon // to be tighter on most of the matrix, except the // translation parts? for (i, j) in self.m.iter().zip(other.m.iter()) { if !i.approx_eq_eps(j, &EPSILON) { return false; } } true } } /// A comparable / hashable version of a coordinate space mapping. Used to determine /// if a transform dependency for a tile has changed. 
#[derive(Debug, PartialEq, Clone)] enum TransformKey { Local, ScaleOffset { scale_x: f32, scale_y: f32, offset_x: f32, offset_y: f32, }, Transform { m: MatrixKey, } } impl<Src, Dst> From<CoordinateSpaceMapping<Src, Dst>> for TransformKey { fn from(transform: CoordinateSpaceMapping<Src, Dst>) -> TransformKey { match transform { CoordinateSpaceMapping::Local => { TransformKey::Local } CoordinateSpaceMapping::ScaleOffset(ref scale_offset) => { TransformKey::ScaleOffset { scale_x: scale_offset.scale.x, scale_y: scale_offset.scale.y, offset_x: scale_offset.offset.x, offset_y: scale_offset.offset.y, } } CoordinateSpaceMapping::Transform(ref m) => { TransformKey::Transform { m: MatrixKey { m: m.to_array(), }, } } } } } /// Unit for tile coordinates. #[derive(Hash, Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)] pub struct TileCoordinate; // Geometry types for tile coordinates. pub type TileOffset = Point2D<i32, TileCoordinate>; // TileSize type is also used in used in lib.rs and cbindgen picks the wrong one when // generating headers. /// cbindgen:ignore pub type TileSize = Size2D<i32, TileCoordinate>; pub type TileRect = Rect<i32, TileCoordinate>; /// The maximum number of compositor surfaces that are allowed per picture cache. This /// is an arbitrary number that should be enough for common cases, but low enough to /// prevent performance and memory usage drastically degrading in pathological cases. const MAX_COMPOSITOR_SURFACES: usize = 4; /// The size in device pixels of a normal cached tile. 
pub const TILE_SIZE_DEFAULT: DeviceIntSize = DeviceIntSize {
    width: 1024,
    height: 512,
    _unit: marker::PhantomData,
};

/// The size in device pixels of a tile for horizontal scroll bars
pub const TILE_SIZE_SCROLLBAR_HORIZONTAL: DeviceIntSize = DeviceIntSize {
    width: 1024,
    height: 32,
    _unit: marker::PhantomData,
};

/// The size in device pixels of a tile for vertical scroll bars
pub const TILE_SIZE_SCROLLBAR_VERTICAL: DeviceIntSize = DeviceIntSize {
    width: 32,
    height: 1024,
    _unit: marker::PhantomData,
};

/// The maximum size per axis of a surface,
/// in WorldPixel coordinates.
const MAX_SURFACE_SIZE: f32 = 4096.0;
/// Maximum size of a compositor surface.
const MAX_COMPOSITOR_SURFACES_SIZE: f32 = 8192.0;

/// The maximum number of sub-dependencies (e.g. clips, transforms) we can handle
/// per-primitive. If a primitive has more than this, it will invalidate every frame.
const MAX_PRIM_SUB_DEPS: usize = u8::MAX as usize;

/// Used to get unique tile IDs, even when the tile cache is
/// destroyed between display lists / scenes.
static NEXT_TILE_ID: AtomicUsize = AtomicUsize::new(0);

/// Clamp `value` to the inclusive range `[low, high]`.
/// NOTE(review): unlike `Ord::clamp`, this does not panic when `low > high`
/// (it returns `high` in that case), so it is kept as-is.
fn clamp(value: i32, low: i32, high: i32) -> i32 {
    value.max(low).min(high)
}

/// Clamp `value` to the inclusive range `[low, high]` (f32 variant).
fn clampf(value: f32, low: f32, high: f32) -> f32 {
    value.max(low).min(high)
}

/// Clamps the blur radius depending on scale factors.
fn clamp_blur_radius(blur_radius: f32, scale_factors: (f32, f32)) -> f32 {
    // Clamping must occur after scale factors are applied, but scale factors are not applied
    // until later on. To clamp the blur radius, we first apply the scale factors and then clamp
    // and finally revert the scale factors.
    // TODO: the clamping should be done on a per-axis basis, but WR currently only supports
    // having a single value for both x and y blur.
    let largest_scale_factor = f32::max(scale_factors.0, scale_factors.1);
    let scaled_blur_radius = blur_radius * largest_scale_factor;

    if scaled_blur_radius > MAX_BLUR_RADIUS {
        // The scaled radius exceeds the cap; return the unscaled value that
        // will hit exactly MAX_BLUR_RADIUS after scaling.
        MAX_BLUR_RADIUS / largest_scale_factor
    } else {
        // Return the original blur radius to avoid any rounding errors
        blur_radius
    }
}

/// An index into the prims array in a TileDescriptor.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct PrimitiveDependencyIndex(pub u32);

/// Information about the state of a binding.
#[derive(Debug)]
pub struct BindingInfo<T> {
    /// The current value retrieved from dynamic scene properties.
    value: T,
    /// True if it was changed (or is new) since the last frame build.
    changed: bool,
}

/// Information stored in a tile descriptor for a binding.
#[derive(Debug, PartialEq, Clone, Copy)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum Binding<T> {
    /// A static value supplied directly.
    Value(T),
    /// A dynamic property, identified by its binding id.
    Binding(PropertyBindingId),
}

impl<T> From<PropertyBinding<T>> for Binding<T> {
    fn from(binding: PropertyBinding<T>) -> Binding<T> {
        match binding {
            PropertyBinding::Binding(key, _) => Binding::Binding(key.id),
            PropertyBinding::Value(value) => Binding::Value(value),
        }
    }
}

pub type OpacityBinding = Binding<f32>;
pub type OpacityBindingInfo = BindingInfo<f32>;

pub type ColorBinding = Binding<ColorU>;
pub type ColorBindingInfo = BindingInfo<ColorU>;

/// A dependency for a transform is defined by the spatial node index + frame it was used
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct SpatialNodeKey {
    spatial_node_index: SpatialNodeIndex,
    frame_id: FrameId,
}

/// A helper for comparing spatial nodes between frames.
The comparisons /// are done by-value, so that if the shape of the spatial node tree /// changes, invalidations aren't done simply due to the spatial node /// index changing between display lists. struct SpatialNodeComparer { /// The root spatial node index of the tile cache ref_spatial_node_index: SpatialNodeIndex, /// Maintains a map of currently active transform keys spatial_nodes: FastHashMap<SpatialNodeKey, TransformKey>, /// A cache of recent comparisons between prev and current spatial nodes compare_cache: FastHashMap<(SpatialNodeKey, SpatialNodeKey), bool>, /// A set of frames that we need to retain spatial node entries for referenced_frames: FastHashSet<FrameId>, } impl SpatialNodeComparer { /// Construct a new comparer fn new() -> Self { SpatialNodeComparer { ref_spatial_node_index: ROOT_SPATIAL_NODE_INDEX, spatial_nodes: FastHashMap::default(), compare_cache: FastHashMap::default(), referenced_frames: FastHashSet::default(), } } /// Advance to the next frame fn next_frame( &mut self, ref_spatial_node_index: SpatialNodeIndex, ) { // Drop any node information for unreferenced frames, to ensure that the // hashmap doesn't grow indefinitely! let referenced_frames = &self.referenced_frames; self.spatial_nodes.retain(|key, _| { referenced_frames.contains(&key.frame_id) }); // Update the root spatial node for this comparer self.ref_spatial_node_index = ref_spatial_node_index; self.compare_cache.clear(); self.referenced_frames.clear(); } /// Register a transform that is used, and build the transform key for it if new. 
    fn register_used_transform(
        &mut self,
        spatial_node_index: SpatialNodeIndex,
        frame_id: FrameId,
        spatial_tree: &SpatialTree,
    ) {
        let key = SpatialNodeKey {
            spatial_node_index,
            frame_id,
        };

        // Only build the transform key the first time this (node, frame)
        // pair is seen; subsequent registrations are no-ops.
        if let Entry::Vacant(entry) = self.spatial_nodes.entry(key) {
            entry.insert(
                get_transform_key(
                    spatial_node_index,
                    self.ref_spatial_node_index,
                    spatial_tree,
                )
            );
        }
    }

    /// Return true if the transforms for two given spatial nodes are considered equivalent
    fn are_transforms_equivalent(
        &mut self,
        prev_spatial_node_key: &SpatialNodeKey,
        curr_spatial_node_key: &SpatialNodeKey,
    ) -> bool {
        let key = (*prev_spatial_node_key, *curr_spatial_node_key);
        let spatial_nodes = &self.spatial_nodes;

        // Memoize the by-value comparison - the same pair of keys may be
        // compared many times within a frame.
        *self.compare_cache
            .entry(key)
            .or_insert_with(|| {
                let prev = &spatial_nodes[&prev_spatial_node_key];
                let curr = &spatial_nodes[&curr_spatial_node_key];
                curr == prev
            })
    }

    /// Ensure that the comparer won't GC any nodes for a given frame id
    fn retain_for_frame(&mut self, frame_id: FrameId) {
        self.referenced_frames.insert(frame_id);
    }
}

// Immutable context passed to picture cache tiles during pre_update
struct TilePreUpdateContext {
    /// Maps from picture cache coords -> world space coords.
    pic_to_world_mapper: SpaceMapper<PicturePixel, WorldPixel>,

    /// The fractional position of the picture cache, which may
    /// require invalidation of all tiles.
    fract_offset: PictureVector2D,
    device_fract_offset: DeviceVector2D,

    /// The optional background color of the picture cache instance
    background_color: Option<ColorF>,

    /// The visible part of the screen in world coords.
    global_screen_world_rect: WorldRect,

    /// Current size of tiles in picture units.
    tile_size: PictureSize,

    /// The current frame id for this picture cache
    frame_id: FrameId,
}

// Immutable context passed to picture cache tiles during post_update
struct TilePostUpdateContext<'a> {
    /// Maps from picture cache coords -> world space coords.
    pic_to_world_mapper: SpaceMapper<PicturePixel, WorldPixel>,

    /// Global scale factor from world -> device pixels.
    global_device_pixel_scale: DevicePixelScale,

    /// The local clip rect (in picture space) of the entire picture cache
    local_clip_rect: PictureRect,

    /// The calculated backdrop information for this cache instance.
    backdrop: Option<BackdropInfo>,

    /// Information about opacity bindings from the picture cache.
    opacity_bindings: &'a FastHashMap<PropertyBindingId, OpacityBindingInfo>,

    /// Information about color bindings from the picture cache.
    color_bindings: &'a FastHashMap<PropertyBindingId, ColorBindingInfo>,

    /// Current size in device pixels of tiles for this cache
    current_tile_size: DeviceIntSize,

    /// The local rect of the overall picture cache
    local_rect: PictureRect,

    /// Pre-allocated z-id to assign to tiles during post_update.
    z_id: ZBufferId,

    /// If true, the scale factor of the root transform for this picture
    /// cache changed, so we need to invalidate the tile and re-render.
    invalidate_all: bool,
}

// Mutable state passed to picture cache tiles during post_update
struct TilePostUpdateState<'a> {
    /// Allow access to the texture cache for requesting tiles
    resource_cache: &'a mut ResourceCache,

    /// Current configuration and setup for compositing all the picture cache tiles in renderer.
    composite_state: &'a mut CompositeState,

    /// A cache of comparison results to avoid re-computation during invalidation.
    compare_cache: &'a mut FastHashMap<PrimitiveComparisonKey, PrimitiveCompareResult>,

    /// Information about transform node differences from last frame.
    spatial_node_comparer: &'a mut SpatialNodeComparer,
}

/// Information about the dependencies of a single primitive instance.
struct PrimitiveDependencyInfo {
    /// Unique content identifier of the primitive.
    prim_uid: ItemUid,

    /// The (conservative) clipped area in picture space this primitive occupies.
    prim_clip_box: PictureBox2D,

    /// Image keys this primitive depends on.
    images: SmallVec<[ImageDependency; 8]>,

    /// Opacity bindings this primitive depends on.
    opacity_bindings: SmallVec<[OpacityBinding; 4]>,

    /// Color binding this primitive depends on.
    color_binding: Option<ColorBinding>,

    /// Clips that this primitive depends on.
    clips: SmallVec<[ItemUid; 8]>,

    /// Spatial nodes references by the clip dependencies of this primitive.
    spatial_nodes: SmallVec<[SpatialNodeIndex; 4]>,
}

impl PrimitiveDependencyInfo {
    /// Construct dependency info for a new primitive.
    fn new(
        prim_uid: ItemUid,
        prim_clip_box: PictureBox2D,
    ) -> Self {
        PrimitiveDependencyInfo {
            prim_uid,
            images: SmallVec::new(),
            opacity_bindings: SmallVec::new(),
            color_binding: None,
            prim_clip_box,
            clips: SmallVec::new(),
            spatial_nodes: SmallVec::new(),
        }
    }
}

/// A stable ID for a given tile, to help debugging. These are also used
/// as unique identifiers for tile surfaces when using a native compositor.
#[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Ord, Eq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct TileId(pub usize);

/// A descriptor for the kind of texture that a picture cache tile will
/// be drawn into.
#[derive(Debug)]
pub enum SurfaceTextureDescriptor {
    /// When using the WR compositor, the tile is drawn into an entry
    /// in the WR texture cache.
    TextureCache {
        handle: TextureCacheHandle
    },
    /// When using an OS compositor, the tile is drawn into a native
    /// surface identified by arbitrary id.
    Native {
        /// The arbitrary id of this tile.
        id: Option<NativeTileId>,
    },
}

/// This is the same as a `SurfaceTextureDescriptor` but has been resolved
/// into a texture cache handle (if appropriate) that can be used by the
/// batching and compositing code in the renderer.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum ResolvedSurfaceTexture {
    TextureCache {
        /// The texture ID to draw to.
        texture: TextureSource,
    },
    Native {
        /// The arbitrary id of this tile.
        id: NativeTileId,
        /// The size of the tile in device pixels.
        size: DeviceIntSize,
    }
}

impl SurfaceTextureDescriptor {
    /// Create a resolved surface texture for this descriptor
    pub fn resolve(
        &self,
        resource_cache: &ResourceCache,
        size: DeviceIntSize,
    ) -> ResolvedSurfaceTexture {
        match self {
            SurfaceTextureDescriptor::TextureCache { handle } => {
                let cache_item = resource_cache.texture_cache.get(handle);

                ResolvedSurfaceTexture::TextureCache {
                    texture: cache_item.texture_id,
                }
            }
            SurfaceTextureDescriptor::Native { id } => {
                ResolvedSurfaceTexture::Native {
                    // Panics if the native surface was never allocated; callers
                    // are expected to only resolve allocated tiles.
                    id: id.expect("bug: native surface not allocated"),
                    size,
                }
            }
        }
    }
}

/// The backing surface for this tile.
#[derive(Debug)]
pub enum TileSurface {
    Texture {
        /// Descriptor for the surface that this tile draws into.
        descriptor: SurfaceTextureDescriptor,
    },
    Color {
        color: ColorF,
    },
    Clear,
}

impl TileSurface {
    /// Return a short static name describing this surface kind.
    fn kind(&self) -> &'static str {
        match *self {
            TileSurface::Color { .. } => "Color",
            TileSurface::Texture { .. } => "Texture",
            TileSurface::Clear => "Clear",
        }
    }
}

/// Optional extra information returned by is_same when
/// logging is enabled.
#[derive(Debug, Copy, Clone, PartialEq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum CompareHelperResult<T> {
    /// Primitives match
    Equal,
    /// Counts differ
    Count {
        prev_count: u8,
        curr_count: u8,
    },
    /// Sentinel
    Sentinel,
    /// Two items are not equal
    NotEqual {
        prev: T,
        curr: T,
    },
    /// User callback returned true on item
    PredicateTrue {
        curr: T
    },
}

/// The result of a primitive dependency comparison. Size is a u8
/// since this is a hot path in the code, and keeping the data small
/// is a performance win.
#[derive(Debug, Copy, Clone, PartialEq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[repr(u8)]
pub enum PrimitiveCompareResult {
    /// Primitives match
    Equal,
    /// Something in the PrimitiveDescriptor was different
    Descriptor,
    /// The clip node content or spatial node changed
    Clip,
    /// The value of the transform changed
    Transform,
    /// An image dependency was dirty
    Image,
    /// The value of an opacity binding changed
    OpacityBinding,
    /// The value of a color binding changed
    ColorBinding,
}

/// A more detailed version of PrimitiveCompareResult used when
/// debug logging is enabled.
#[derive(Debug, Copy, Clone, PartialEq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum PrimitiveCompareResultDetail {
    /// Primitives match
    Equal,
    /// Something in the PrimitiveDescriptor was different
    Descriptor {
        old: PrimitiveDescriptor,
        new: PrimitiveDescriptor,
    },
    /// The clip node content or spatial node changed
    Clip {
        detail: CompareHelperResult<ItemUid>,
    },
    /// The value of the transform changed
    Transform {
        detail: CompareHelperResult<SpatialNodeKey>,
    },
    /// An image dependency was dirty
    Image {
        detail: CompareHelperResult<ImageDependency>,
    },
    /// The value of an opacity binding changed
    OpacityBinding {
        detail: CompareHelperResult<OpacityBinding>,
    },
    /// The value of a color binding changed
    ColorBinding {
        detail: CompareHelperResult<ColorBinding>,
    },
}

/// Debugging information about why a tile was invalidated
#[derive(Debug, Clone)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum InvalidationReason {
    /// The fractional offset changed
    FractionalOffset {
        old: DeviceVector2D,
        new: DeviceVector2D,
    },
    /// The background color changed
    BackgroundColor {
        old: Option<ColorF>,
        new: Option<ColorF>,
    },
    /// The opaque state of the backing native surface changed
    SurfaceOpacityChanged {
        became_opaque: bool
    },
    /// There was no backing texture (evicted or never rendered)
    NoTexture,
    /// There was no backing native surface (never rendered, or recreated)
    NoSurface,
    /// The primitive count in the dependency list was different
    PrimCount {
        old: Option<Vec<ItemUid>>,
        new: Option<Vec<ItemUid>>,
    },
    /// The content of one of the primitives was different
    Content {
        /// What changed in the primitive that was different
        prim_compare_result: PrimitiveCompareResult,
        prim_compare_result_detail: Option<PrimitiveCompareResultDetail>,
    },
    // The compositor type changed
    CompositorKindChanged,
    // The valid region of the tile changed
    ValidRectChanged,
    // The overall scale of the picture cache changed
    ScaleChanged,
}

/// A minimal subset of Tile for debug capturing
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct TileSerializer {
    pub rect: PictureRect,
    pub current_descriptor: TileDescriptor,
    pub device_fract_offset: DeviceVector2D,
    pub id: TileId,
    pub root: TileNode,
    pub background_color: Option<ColorF>,
    pub invalidation_reason: Option<InvalidationReason>
}

/// A minimal subset of TileCacheInstance for debug capturing
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct TileCacheInstanceSerializer {
    pub slice: usize,
    pub tiles: FastHashMap<TileOffset, TileSerializer>,
    pub background_color: Option<ColorF>,
    pub fract_offset: PictureVector2D,
}

/// Information about a cached tile.
pub struct Tile {
    /// The grid position of this tile within the picture cache
    pub tile_offset: TileOffset,
    /// The current world rect of this tile.
    pub world_tile_rect: WorldRect,
    /// The current local rect of this tile.
    pub local_tile_rect: PictureRect,
    /// Same as local_tile_rect, but in min/max form as an optimization
    pub local_tile_box: PictureBox2D,
    /// The picture space dirty rect for this tile.
    local_dirty_rect: PictureRect,
    /// The device space dirty rect for this tile.
    /// TODO(gw): We have multiple dirty rects available due to the quadtree above. In future,
    ///           expose these as multiple dirty rects, which will help in some cases.
    pub device_dirty_rect: DeviceRect,
    /// Device space rect that contains valid pixels region of this tile.
    pub device_valid_rect: DeviceRect,
    /// Uniquely describes the content of this tile, in a way that can be
    /// (reasonably) efficiently hashed and compared.
    pub current_descriptor: TileDescriptor,
    /// The content descriptor for this tile from the previous frame.
    pub prev_descriptor: TileDescriptor,
    /// Handle to the backing surface for this tile.
    pub surface: Option<TileSurface>,
    /// If true, this tile is marked valid, and the existing texture
    /// cache handle can be used. Tiles are invalidated during the
    /// build_dirty_regions method.
    pub is_valid: bool,
    /// If true, this tile intersects with the currently visible screen
    /// rect, and will be drawn.
    pub is_visible: bool,
    /// The current fractional offset of the cache transform root. If this changes,
    /// all tiles need to be invalidated and redrawn, since snapping differences are
    /// likely to occur.
    device_fract_offset: DeviceVector2D,
    /// The tile id is stable between display lists and / or frames,
    /// if the tile is retained. Useful for debugging tile evictions.
    pub id: TileId,
    /// If true, the tile was determined to be opaque, which means blending
    /// can be disabled when drawing it.
    pub is_opaque: bool,
    /// Root node of the quadtree dirty rect tracker.
    root: TileNode,
    /// The last rendered background color on this tile.
    background_color: Option<ColorF>,
    /// The first reason the tile was invalidated this frame.
    invalidation_reason: Option<InvalidationReason>,
    /// The local space valid rect for all primitives that affect this tile.
    local_valid_rect: PictureBox2D,
    /// z-buffer id for this tile
    pub z_id: ZBufferId,
    /// The last frame this tile had its dependencies updated (dependency updating is
    /// skipped if a tile is off-screen).
    pub last_updated_frame_id: FrameId,
}

impl Tile {
    /// Construct a new, invalid tile.
    fn new(tile_offset: TileOffset) -> Self {
        // Tile ids come from a process-wide atomic counter, so they are unique
        // across all picture caches.
        let id = TileId(NEXT_TILE_ID.fetch_add(1, Ordering::Relaxed));

        Tile {
            tile_offset,
            local_tile_rect: PictureRect::zero(),
            local_tile_box: PictureBox2D::zero(),
            world_tile_rect: WorldRect::zero(),
            device_valid_rect: DeviceRect::zero(),
            local_dirty_rect: PictureRect::zero(),
            device_dirty_rect: DeviceRect::zero(),
            surface: None,
            current_descriptor: TileDescriptor::new(),
            prev_descriptor: TileDescriptor::new(),
            is_valid: false,
            is_visible: false,
            device_fract_offset: DeviceVector2D::zero(),
            id,
            is_opaque: false,
            root: TileNode::new_leaf(Vec::new()),
            background_color: None,
            invalidation_reason: None,
            local_valid_rect: PictureBox2D::zero(),
            z_id: ZBufferId::invalid(),
            last_updated_frame_id: FrameId::INVALID,
        }
    }

    /// Print debug information about this tile to a tree printer.
    fn print(&self, pt: &mut dyn PrintTreePrinter) {
        pt.new_level(format!("Tile {:?}", self.id));
        pt.add_item(format!("local_tile_rect: {:?}", self.local_tile_rect));
        pt.add_item(format!("device_fract_offset: {:?}", self.device_fract_offset));
        pt.add_item(format!("background_color: {:?}", self.background_color));
        pt.add_item(format!("invalidation_reason: {:?}", self.invalidation_reason));
        self.current_descriptor.print(pt);
        pt.end_level();
    }

    /// Check if the content of the previous and current tile descriptors match
    fn update_dirty_rects(
        &mut self,
        ctx: &TilePostUpdateContext,
        state: &mut TilePostUpdateState,
        invalidation_reason: &mut Option<InvalidationReason>,
        frame_context: &FrameVisibilityContext,
    ) -> PictureRect {
        let mut prim_comparer = PrimitiveComparer::new(
            &self.prev_descriptor,
            &self.current_descriptor,
            state.resource_cache,
            state.spatial_node_comparer,
            ctx.opacity_bindings,
            ctx.color_bindings,
        );

        // The quadtree accumulates the dirty region from comparing the
        // previous and current primitive lists.
        let mut dirty_rect = PictureBox2D::zero();
        self.root.update_dirty_rects(
            &self.prev_descriptor.prims,
            &self.current_descriptor.prims,
            &mut prim_comparer,
            &mut dirty_rect,
            state.compare_cache,
            invalidation_reason,
            frame_context,
        );

        dirty_rect.to_rect()
    }

    /// Invalidate a tile based on change in content. This
    /// must be called even if the tile is not currently
    /// visible on screen. We might be able to improve this
    /// later by changing how ComparableVec is used.
    fn update_content_validity(
        &mut self,
        ctx: &TilePostUpdateContext,
        state: &mut TilePostUpdateState,
        frame_context: &FrameVisibilityContext,
    ) {
        // Check if the contents of the primitives, clips, and
        // other dependencies are the same.
        state.compare_cache.clear();
        let mut invalidation_reason = None;
        let dirty_rect = self.update_dirty_rects(
            ctx,
            state,
            &mut invalidation_reason,
            frame_context,
        );
        if !dirty_rect.is_empty() {
            self.invalidate(
                Some(dirty_rect),
                // A non-empty dirty rect implies a reason was recorded.
                invalidation_reason.expect("bug: no invalidation_reason"),
            );
        }

        if ctx.invalidate_all {
            self.invalidate(None, InvalidationReason::ScaleChanged);
        }

        // TODO(gw): We can avoid invalidating the whole tile in some cases here,
        //           but it should be a fairly rare invalidation case.
        if self.current_descriptor.local_valid_rect != self.prev_descriptor.local_valid_rect {
            self.invalidate(None, InvalidationReason::ValidRectChanged);
            state.composite_state.dirty_rects_are_valid = false;
        }
    }

    /// Invalidate this tile. If `invalidation_rect` is None, the entire
    /// tile is invalidated.
    fn invalidate(
        &mut self,
        invalidation_rect: Option<PictureRect>,
        reason: InvalidationReason,
    ) {
        self.is_valid = false;

        match invalidation_rect {
            Some(rect) => {
                // Grow the existing dirty rect to include the new region.
                self.local_dirty_rect = self.local_dirty_rect.union(&rect);
            }
            None => {
                self.local_dirty_rect = self.local_tile_rect;
            }
        }

        // Only the first invalidation reason per frame is recorded.
        if self.invalidation_reason.is_none() {
            self.invalidation_reason = Some(reason);
        }
    }

    /// Called during pre_update of a tile cache instance. Allows the
    /// tile to setup state before primitive dependency calculations.
fn pre_update( &mut self, ctx: &TilePreUpdateContext, ) { // Ensure each tile is offset by the appropriate amount from the // origin, such that the content origin will be a whole number and // the snapping will be consistent. self.local_tile_rect = PictureRect::new( PicturePoint::new( self.tile_offset.x as f32 * ctx.tile_size.width + ctx.fract_offset.x, self.tile_offset.y as f32 * ctx.tile_size.height + ctx.fract_offset.y, ), ctx.tile_size, ); self.local_tile_box = PictureBox2D::new( self.local_tile_rect.origin, self.local_tile_rect.bottom_right(), ); self.local_valid_rect = PictureBox2D::zero(); self.invalidation_reason = None; self.world_tile_rect = ctx.pic_to_world_mapper .map(&self.local_tile_rect) .expect("bug: map local tile rect"); // Check if this tile is currently on screen. self.is_visible = self.world_tile_rect.intersects(&ctx.global_screen_world_rect); // If the tile isn't visible, early exit, skipping the normal set up to // validate dependencies. Instead, we will only compare the current tile // dependencies the next time it comes into view. if !self.is_visible { return; } // We may need to rerender if glyph subpixel positions have changed. Note // that we update the tile fract offset itself after we have completed // invalidation. This allows for other whole tile invalidation cases to // update the fract offset appropriately. let fract_delta = self.device_fract_offset - ctx.device_fract_offset; let fract_changed = fract_delta.x.abs() > 0.01 || fract_delta.y.abs() > 0.01; if fract_changed { self.invalidate(None, InvalidationReason::FractionalOffset { old: self.device_fract_offset, new: ctx.device_fract_offset }); } if ctx.background_color != self.background_color { self.invalidate(None, InvalidationReason::BackgroundColor { old: self.background_color, new: ctx.background_color }); self.background_color = ctx.background_color; } // Clear any dependencies so that when we rebuild them we // can compare if the tile has the same content. 
mem::swap( &mut self.current_descriptor, &mut self.prev_descriptor, ); self.current_descriptor.clear(); self.root.clear(self.local_tile_rect.to_box2d()); // Since this tile is determined to be visible, it will get updated // dependencies, so update the frame id we are storing dependencies for. self.last_updated_frame_id = ctx.frame_id; } /// Add dependencies for a given primitive to this tile. fn add_prim_dependency( &mut self, info: &PrimitiveDependencyInfo, ) { // If this tile isn't currently visible, we don't want to update the dependencies // for this tile, as an optimization, since it won't be drawn anyway. if !self.is_visible { return; } // Incorporate the bounding rect of the primitive in the local valid rect // for this tile. This is used to minimize the size of the scissor rect // during rasterization and the draw rect during composition of partial tiles. self.local_valid_rect = self.local_valid_rect.union(&info.prim_clip_box); // Include any image keys this tile depends on. self.current_descriptor.images.extend_from_slice(&info.images); // Include any opacity bindings this primitive depends on. self.current_descriptor.opacity_bindings.extend_from_slice(&info.opacity_bindings); // Include any clip nodes that this primitive depends on. self.current_descriptor.clips.extend_from_slice(&info.clips); // Include any transforms that this primitive depends on. for spatial_node_index in &info.spatial_nodes { self.current_descriptor.transforms.push( SpatialNodeKey { spatial_node_index: *spatial_node_index, frame_id: self.last_updated_frame_id, } ); } // Include any color bindings this primitive depends on. if info.color_binding.is_some() { self.current_descriptor.color_bindings.insert( self.current_descriptor.color_bindings.len(), info.color_binding.unwrap()); } // TODO(gw): The prim_clip_rect can be impacted by the clip rect of the display port, // which can cause invalidations when a new display list with changed // display port is received. 
To work around this, clamp the prim clip rect // to the tile boundaries - if the clip hasn't affected the tile, then the // changed clip can't affect the content of the primitive on this tile. // In future, we could consider supplying the display port clip from Gecko // in a different way (e.g. as a scroll frame clip) which still provides // the desired clip for checkerboarding, but doesn't require this extra // work below. // TODO(gw): This is a hot part of the code - we could probably optimize further by: // - Using min/max instead of clamps below (if we guarantee the rects are well formed) let tile_p0 = self.local_tile_box.min; let tile_p1 = self.local_tile_box.max; let prim_clip_box = PictureBox2D::new( PicturePoint::new( clampf(info.prim_clip_box.min.x, tile_p0.x, tile_p1.x), clampf(info.prim_clip_box.min.y, tile_p0.y, tile_p1.y), ), PicturePoint::new( clampf(info.prim_clip_box.max.x, tile_p0.x, tile_p1.x), clampf(info.prim_clip_box.max.y, tile_p0.y, tile_p1.y), ), ); // Update the tile descriptor, used for tile comparison during scene swaps. let prim_index = PrimitiveDependencyIndex(self.current_descriptor.prims.len() as u32); // We know that the casts below will never overflow because the array lengths are // truncated to MAX_PRIM_SUB_DEPS during update_prim_dependencies. debug_assert!(info.spatial_nodes.len() <= MAX_PRIM_SUB_DEPS); debug_assert!(info.clips.len() <= MAX_PRIM_SUB_DEPS); debug_assert!(info.images.len() <= MAX_PRIM_SUB_DEPS); debug_assert!(info.opacity_bindings.len() <= MAX_PRIM_SUB_DEPS); self.current_descriptor.prims.push(PrimitiveDescriptor { prim_uid: info.prim_uid, prim_clip_box, transform_dep_count: info.spatial_nodes.len() as u8, clip_dep_count: info.clips.len() as u8, image_dep_count: info.images.len() as u8, opacity_binding_dep_count: info.opacity_bindings.len() as u8, color_binding_dep_count: if info.color_binding.is_some() { 1 } else { 0 } as u8, }); // Add this primitive to the dirty rect quadtree. 
self.root.add_prim(prim_index, &info.prim_clip_box); } /// Called during tile cache instance post_update. Allows invalidation and dirty /// rect calculation after primitive dependencies have been updated. fn post_update( &mut self, ctx: &TilePostUpdateContext, state: &mut TilePostUpdateState, frame_context: &FrameVisibilityContext, ) -> bool { // Register the frame id of this tile with the spatial node comparer, to ensure // that it doesn't GC any spatial nodes from the comparer that are referenced // by this tile. Must be done before we early exit below, so that we retain // spatial node info even for tiles that are currently not visible. state.spatial_node_comparer.retain_for_frame(self.last_updated_frame_id); // If tile is not visible, just early out from here - we don't update dependencies // so don't want to invalidate, merge, split etc. The tile won't need to be drawn // (and thus updated / invalidated) until it is on screen again. if !self.is_visible { return false; } // Calculate the overall valid rect for this tile. self.current_descriptor.local_valid_rect = self.local_valid_rect.to_rect(); // TODO(gw): In theory, the local tile rect should always have an // intersection with the overall picture rect. In practice, // due to some accuracy issues with how fract_offset (and // fp accuracy) are used in the calling method, this isn't // always true. In this case, it's safe to set the local // valid rect to zero, which means it will be clipped out // and not affect the scene. In future, we should fix the // accuracy issue above, so that this assumption holds, but // it shouldn't have any noticeable effect on performance // or memory usage (textures should never get allocated). 
self.current_descriptor.local_valid_rect = self.local_tile_rect .intersection(&ctx.local_rect) .and_then(|r| r.intersection(&self.current_descriptor.local_valid_rect)) .unwrap_or_else(PictureRect::zero); // The device_valid_rect is referenced during `update_content_validity` so it // must be updated here first. let world_valid_rect = ctx.pic_to_world_mapper .map(&self.current_descriptor.local_valid_rect) .expect("bug: map local valid rect"); // The device rect is guaranteed to be aligned on a device pixel - the round // is just to deal with float accuracy. However, the valid rect is not // always aligned to a device pixel. To handle this, round out to get all // required pixels, and intersect with the tile device rect. let device_rect = (self.world_tile_rect * ctx.global_device_pixel_scale).round(); self.device_valid_rect = (world_valid_rect * ctx.global_device_pixel_scale) .round_out() .intersection(&device_rect) .unwrap_or_else(DeviceRect::zero); // Invalidate the tile based on the content changing. self.update_content_validity(ctx, state, frame_context); // If there are no primitives there is no need to draw or cache it. if self.current_descriptor.prims.is_empty() { // If there is a native compositor surface allocated for this (now empty) tile // it must be freed here, otherwise the stale tile with previous contents will // be composited. If the tile subsequently gets new primitives added to it, the // surface will be re-allocated when it's added to the composite draw list. if let Some(TileSurface::Texture { descriptor: SurfaceTextureDescriptor::Native { mut id, .. }, .. }) = self.surface.take() { if let Some(id) = id.take() { state.resource_cache.destroy_compositor_tile(id); } } self.is_visible = false; return false; } // Check if this tile can be considered opaque. Opacity state must be updated only // after all early out checks have been performed. Otherwise, we might miss updating // the native surface next time this tile becomes visible. 
        let clipped_rect = self.current_descriptor.local_valid_rect
            .intersection(&ctx.local_clip_rect)
            .unwrap_or_else(PictureRect::zero);

        // A tile is opaque if it is fully covered by an opaque background color,
        // or fully contained within the backdrop's known-opaque rect.
        let has_opaque_bg_color = self.background_color.map_or(false, |c| c.a >= 1.0);
        let has_opaque_backdrop = ctx.backdrop.map_or(false, |b| b.opaque_rect.contains_rect(&clipped_rect));
        let is_opaque = has_opaque_bg_color || has_opaque_backdrop;

        // Set the correct z_id for this tile
        self.z_id = ctx.z_id;

        if is_opaque != self.is_opaque {
            // If opacity changed, the native compositor surface and all tiles get invalidated.
            // (this does nothing if not using native compositor mode).
            // TODO(gw): This property probably changes very rarely, so it is OK to invalidate
            //           everything in this case. If it turns out that this isn't true, we could
            //           consider other options, such as per-tile opacity (natively supported
            //           on CoreAnimation, and supported if backed by non-virtual surfaces in
            //           DirectComposition).
            if let Some(TileSurface::Texture { descriptor: SurfaceTextureDescriptor::Native { ref mut id, .. }, .. }) = self.surface {
                if let Some(id) = id.take() {
                    state.resource_cache.destroy_compositor_tile(id);
                }
            }

            // Invalidate the entire tile to force a redraw.
            self.invalidate(None, InvalidationReason::SurfaceOpacityChanged { became_opaque: is_opaque });
            self.is_opaque = is_opaque;
        }

        // Check if the selected composite mode supports dirty rect updates. For Draw composite
        // mode, we can always update the content with smaller dirty rects, unless there is a
        // driver bug to workaround. For native composite mode, we can only use dirty rects if
        // the compositor supports partial surface updates.
        let (supports_dirty_rects, supports_simple_prims) = match state.composite_state.compositor_kind {
            CompositorKind::Draw { .. } => {
                (frame_context.config.gpu_supports_render_target_partial_update, true)
            }
            CompositorKind::Native { max_update_rects, .. } => {
                (max_update_rects > 0, false)
            }
        };

        // TODO(gw): Consider using smaller tiles and/or tile splits for
        //           native compositors that don't support dirty rects.
        if supports_dirty_rects {
            // Only allow splitting for normal content sized tiles
            if ctx.current_tile_size == state.resource_cache.texture_cache.default_picture_tile_size() {
                let max_split_level = 3;

                // Consider splitting / merging dirty regions
                self.root.maybe_merge_or_split(
                    0,
                    &self.current_descriptor.prims,
                    max_split_level,
                );
            }
        }

        // The dirty rect will be set correctly by now. If the underlying platform
        // doesn't support partial updates, and this tile isn't valid, force the dirty
        // rect to be the size of the entire tile.
        if !self.is_valid && !supports_dirty_rects {
            self.local_dirty_rect = self.local_tile_rect;
        }

        // See if this tile is a simple color, in which case we can just draw
        // it as a rect, and avoid allocating a texture surface and drawing it.
        // TODO(gw): Initial native compositor interface doesn't support simple
        //           color tiles. We can definitely support this in DC, so this
        //           should be added as a follow up.
        let is_simple_prim =
            ctx.backdrop.map_or(false, |b| b.kind.is_some()) &&
            self.current_descriptor.prims.len() == 1 &&
            self.is_opaque &&
            supports_simple_prims;

        // Set up the backing surface for this tile.
        let surface = if is_simple_prim {
            // If we determine the tile can be represented by a color, set the
            // surface unconditionally (this will drop any previously used
            // texture cache backing surface).
            match ctx.backdrop.unwrap().kind {
                Some(BackdropKind::Color { color }) => {
                    TileSurface::Color {
                        color,
                    }
                }
                Some(BackdropKind::Clear) => {
                    TileSurface::Clear
                }
                None => {
                    // This should be prevented by the is_simple_prim check above.
                    unreachable!();
                }
            }
        } else {
            // If this tile will be backed by a surface, we want to retain
            // the texture handle from the previous frame, if possible. If
            // the tile was previously a color, or not set, then just set
            // up a new texture cache handle.
            match self.surface.take() {
                Some(TileSurface::Texture { descriptor }) => {
                    // Reuse the existing descriptor and vis mask
                    TileSurface::Texture {
                        descriptor,
                    }
                }
                Some(TileSurface::Color { .. }) | Some(TileSurface::Clear) | None => {
                    // This is the case where we are constructing a tile surface that
                    // involves drawing to a texture. Create the correct surface
                    // descriptor depending on the compositing mode that will read
                    // the output.
                    let descriptor = match state.composite_state.compositor_kind {
                        CompositorKind::Draw { .. } => {
                            // For a texture cache entry, create an invalid handle that
                            // will be allocated when update_picture_cache is called.
                            SurfaceTextureDescriptor::TextureCache {
                                handle: TextureCacheHandle::invalid(),
                            }
                        }
                        CompositorKind::Native { .. } => {
                            // Create a native surface surface descriptor, but don't allocate
                            // a surface yet. The surface is allocated *after* occlusion
                            // culling occurs, so that only visible tiles allocate GPU memory.
                            SurfaceTextureDescriptor::Native {
                                id: None,
                            }
                        }
                    };

                    TileSurface::Texture {
                        descriptor,
                    }
                }
            }
        };

        // Store the current surface backing info for use during batching.
        self.surface = Some(surface);

        true
    }
}

/// Defines a key that uniquely identifies a primitive instance.
#[derive(Debug, Copy, Clone)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct PrimitiveDescriptor {
    /// Uniquely identifies the content of the primitive template.
    pub prim_uid: ItemUid,
    /// The clip rect for this primitive. Included here in
    /// dependencies since there is no entry in the clip chain
    /// dependencies for the local clip rect.
    pub prim_clip_box: PictureBox2D,
    /// The number of extra dependencies that this primitive has.
    transform_dep_count: u8,
    image_dep_count: u8,
    opacity_binding_dep_count: u8,
    clip_dep_count: u8,
    color_binding_dep_count: u8,
}

impl PartialEq for PrimitiveDescriptor {
    fn eq(&self, other: &Self) -> bool {
        // Compare the clip box with an epsilon, so that tiny float inaccuracies
        // in the computed clip rect do not count as a content change.
        const EPSILON: f32 = 0.001;

        if self.prim_uid != other.prim_uid {
            return false;
        }

        if !self.prim_clip_box.min.x.approx_eq_eps(&other.prim_clip_box.min.x, &EPSILON) {
            return false;
        }
        if !self.prim_clip_box.min.y.approx_eq_eps(&other.prim_clip_box.min.y, &EPSILON) {
            return false;
        }
        if !self.prim_clip_box.max.x.approx_eq_eps(&other.prim_clip_box.max.x, &EPSILON) {
            return false;
        }
        if !self.prim_clip_box.max.y.approx_eq_eps(&other.prim_clip_box.max.y, &EPSILON) {
            return false;
        }

        true
    }
}

/// A small helper to compare two arrays of primitive dependencies.
struct CompareHelper<'a, T> where T: Copy {
    // Read cursors into the current / previous dependency arrays.
    offset_curr: usize,
    offset_prev: usize,
    curr_items: &'a [T],
    prev_items: &'a [T],
}

impl<'a, T> CompareHelper<'a, T> where T: Copy + PartialEq {
    /// Construct a new compare helper for a current / previous set of dependency information.
    fn new(
        prev_items: &'a [T],
        curr_items: &'a [T],
    ) -> Self {
        CompareHelper {
            offset_curr: 0,
            offset_prev: 0,
            curr_items,
            prev_items,
        }
    }

    /// Reset the current position in the dependency array to the start
    fn reset(&mut self) {
        self.offset_prev = 0;
        self.offset_curr = 0;
    }

    /// Test if two sections of the dependency arrays are the same, by checking both
    /// item equality, and a user closure to see if the content of the item changed.
    /// When `opt_detail` is provided, it is filled with the reason for the result
    /// (used for invalidation debugging).
    fn is_same<F>(
        &self,
        prev_count: u8,
        curr_count: u8,
        mut f: F,
        opt_detail: Option<&mut CompareHelperResult<T>>,
    ) -> bool where F: FnMut(&T, &T) -> bool {
        // If the number of items is different, trivial reject.
        if prev_count != curr_count {
            if let Some(detail) = opt_detail {
                *detail = CompareHelperResult::Count {
                    prev_count,
                    curr_count,
                };
            }
            return false;
        }
        // If both counts are 0, then no need to check these dependencies.
        if curr_count == 0 {
            if let Some(detail) = opt_detail {
                *detail = CompareHelperResult::Equal;
            }
            return true;
        }
        // If both counts are u8::MAX, this is a sentinel that we can't compare these
        // deps, so just trivial reject.
        if curr_count as usize == MAX_PRIM_SUB_DEPS {
            if let Some(detail) = opt_detail {
                *detail = CompareHelperResult::Sentinel;
            }
            return false;
        }

        let end_prev = self.offset_prev + prev_count as usize;
        let end_curr = self.offset_curr + curr_count as usize;

        let curr_items = &self.curr_items[self.offset_curr .. end_curr];
        let prev_items = &self.prev_items[self.offset_prev .. end_prev];

        // Reject as soon as the predicate reports any pair as different.
        for (curr, prev) in curr_items.iter().zip(prev_items.iter()) {
            if !f(prev, curr) {
                if let Some(detail) = opt_detail {
                    *detail = CompareHelperResult::PredicateTrue {
                        curr: *curr,
                    };
                }
                return false;
            }
        }

        if let Some(detail) = opt_detail {
            *detail = CompareHelperResult::Equal;
        }

        true
    }

    // Advance the prev dependency array by a given amount
    fn advance_prev(&mut self, count: u8) {
        self.offset_prev += count as usize;
    }

    // Advance the current dependency array by a given amount
    fn advance_curr(&mut self, count: u8) {
        self.offset_curr += count as usize;
    }
}

/// Uniquely describes the content of this tile, in a way that can be
/// (reasonably) efficiently hashed and compared.
#[cfg_attr(any(feature="capture",feature="replay"), derive(Clone))]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct TileDescriptor {
    /// List of primitive instance unique identifiers. The uid is guaranteed
    /// to uniquely describe the content of the primitive template, while
    /// the other parameters describe the clip chain and instance params.
    pub prims: Vec<PrimitiveDescriptor>,

    /// List of clip node descriptors.
    clips: Vec<ItemUid>,

    /// List of image keys that this tile depends on.
    images: Vec<ImageDependency>,

    /// The set of opacity bindings that this tile depends on.
    // TODO(gw): Ugh, get rid of all opacity binding support!
    opacity_bindings: Vec<OpacityBinding>,

    /// List of the effects of transforms that we care about
    /// tracking for this tile.
    transforms: Vec<SpatialNodeKey>,

    /// Picture space rect that contains valid pixels region of this tile.
    local_valid_rect: PictureRect,

    /// List of the effects of color that we care about
    /// tracking for this tile.
    color_bindings: Vec<ColorBinding>,
}

impl TileDescriptor {
    /// Construct an empty tile descriptor with no dependencies.
    fn new() -> Self {
        TileDescriptor {
            prims: Vec::new(),
            clips: Vec::new(),
            opacity_bindings: Vec::new(),
            images: Vec::new(),
            transforms: Vec::new(),
            local_valid_rect: PictureRect::zero(),
            color_bindings: Vec::new(),
        }
    }

    /// Print debug information about this tile descriptor to a tree printer.
    fn print(&self, pt: &mut dyn PrintTreePrinter) {
        pt.new_level("current_descriptor".to_string());

        pt.new_level("prims".to_string());
        for prim in &self.prims {
            pt.new_level(format!("prim uid={}", prim.prim_uid.get_uid()));
            pt.add_item(format!("clip: p0={},{} p1={},{}",
                prim.prim_clip_box.min.x,
                prim.prim_clip_box.min.y,
                prim.prim_clip_box.max.x,
                prim.prim_clip_box.max.y,
            ));
            pt.add_item(format!("deps: t={} i={} o={} c={} color={}",
                prim.transform_dep_count,
                prim.image_dep_count,
                prim.opacity_binding_dep_count,
                prim.clip_dep_count,
                prim.color_binding_dep_count,
            ));
            pt.end_level();
        }
        pt.end_level();

        if !self.clips.is_empty() {
            pt.new_level("clips".to_string());
            for clip in &self.clips {
                pt.new_level(format!("clip uid={}", clip.get_uid()));
                pt.end_level();
            }
            pt.end_level();
        }

        if !self.images.is_empty() {
            pt.new_level("images".to_string());
            for info in &self.images {
                pt.new_level(format!("key={:?}", info.key));
                pt.add_item(format!("generation={:?}", info.generation));
                pt.end_level();
            }
            pt.end_level();
        }

        if !self.opacity_bindings.is_empty() {
            pt.new_level("opacity_bindings".to_string());
            for opacity_binding in &self.opacity_bindings {
                pt.new_level(format!("binding={:?}", opacity_binding));
                pt.end_level();
            }
            pt.end_level();
        }

        if !self.transforms.is_empty() {
            pt.new_level("transforms".to_string());
            for transform in &self.transforms {
                pt.new_level(format!("spatial_node={:?}", transform));
                pt.end_level();
            }
            pt.end_level();
        }

        if !self.color_bindings.is_empty() {
            pt.new_level("color_bindings".to_string());
            for color_binding in &self.color_bindings {
                pt.new_level(format!("binding={:?}", color_binding));
                pt.end_level();
            }
            pt.end_level();
        }

        pt.end_level();
    }

    /// Clear the dependency information for a tile, when the dependencies
    /// are being rebuilt.
    fn clear(&mut self) {
        self.prims.clear();
        self.clips.clear();
        self.opacity_bindings.clear();
        self.images.clear();
        self.transforms.clear();
        self.local_valid_rect = PictureRect::zero();
        self.color_bindings.clear();
    }
}

/// Represents the dirty region of a tile cache picture.
#[derive(Clone)]
pub struct DirtyRegion {
    /// The individual filters that make up this region.
    pub filters: Vec<BatchFilter>,

    /// The overall dirty rect, a combination of dirty_rects
    pub combined: WorldRect,

    /// Spatial node of the picture cache this region represents
    spatial_node_index: SpatialNodeIndex,
}

impl DirtyRegion {
    /// Construct a new dirty region tracker.
    pub fn new(
        spatial_node_index: SpatialNodeIndex,
    ) -> Self {
        DirtyRegion {
            filters: Vec::with_capacity(16),
            combined: WorldRect::zero(),
            spatial_node_index,
        }
    }

    /// Reset the dirty regions back to empty
    pub fn reset(
        &mut self,
        spatial_node_index: SpatialNodeIndex,
    ) {
        self.filters.clear();
        self.combined = WorldRect::zero();
        self.spatial_node_index = spatial_node_index;
    }

    /// Add a dirty region to the tracker, in picture space, for the given sub-slice.
    pub fn add_dirty_region(
        &mut self,
        rect_in_pic_space: PictureRect,
        sub_slice_index: SubSliceIndex,
        spatial_tree: &SpatialTree,
    ) {
        // Map the picture-space dirty rect into world space so it can be
        // accumulated into the combined world dirty rect.
        let map_pic_to_world = SpaceMapper::new_with_target(
            ROOT_SPATIAL_NODE_INDEX,
            self.spatial_node_index,
            WorldRect::max_rect(),
            spatial_tree,
        );

        let world_rect = map_pic_to_world
            .map(&rect_in_pic_space)
            .expect("bug");

        // Include this in the overall dirty rect
        self.combined = self.combined.union(&world_rect);

        self.filters.push(BatchFilter {
            rect_in_pic_space,
            sub_slice_index,
        });
    }

    // TODO(gw): This returns a heap allocated object. Perhaps we can simplify this
    //           logic? Although - it's only used very rarely so it may not be an issue.
    pub fn inflate(
        &self,
        inflate_amount: f32,
        spatial_tree: &SpatialTree,
    ) -> DirtyRegion {
        let map_pic_to_world = SpaceMapper::new_with_target(
            ROOT_SPATIAL_NODE_INDEX,
            self.spatial_node_index,
            WorldRect::max_rect(),
            spatial_tree,
        );

        let mut filters = Vec::with_capacity(self.filters.len());
        let mut combined = WorldRect::zero();

        // Inflate each filter rect by the given amount, and rebuild the
        // combined world dirty rect from the inflated rects.
        for filter in &self.filters {
            let rect_in_pic_space = filter.rect_in_pic_space.inflate(inflate_amount, inflate_amount);

            let world_rect = map_pic_to_world
                .map(&rect_in_pic_space)
                .expect("bug");

            combined = combined.union(&world_rect);

            filters.push(BatchFilter {
                rect_in_pic_space,
                sub_slice_index: filter.sub_slice_index,
            });
        }

        DirtyRegion {
            filters,
            combined,
            spatial_node_index: self.spatial_node_index,
        }
    }
}

/// The kind of backdrop a slice resolved to: a solid color, or a clear.
#[derive(Debug, Copy, Clone)]
pub enum BackdropKind {
    Color {
        color: ColorF,
    },
    Clear,
}

/// Stores information about the calculated opaque backdrop of this slice.
#[derive(Debug, Copy, Clone)]
pub struct BackdropInfo {
    /// The picture space rectangle that is known to be opaque. This is used
    /// to determine where subpixel AA can be used, and where alpha blending
    /// can be disabled.
    pub opaque_rect: PictureRect,
    /// Kind of the backdrop
    pub kind: Option<BackdropKind>,
}

impl BackdropInfo {
    /// A backdrop with no known opaque region and no resolved kind.
    fn empty() -> Self {
        BackdropInfo {
            opaque_rect: PictureRect::zero(),
            kind: None,
        }
    }
}

/// One captured (serialized) tile cache slice, with the transform that maps
/// it into world space, as recorded by the tile cache logger.
#[derive(Clone)]
pub struct TileCacheLoggerSlice {
    pub serialized_slice: String,
    pub local_to_world_transform: Transform3D<f32, PicturePixel, WorldPixel>,
}

// Generates the TileCacheLoggerUpdateLists type, with one field per interner
// (the interner list is supplied by the enumerate_interners! invocation below).
#[cfg(any(feature = "capture", feature = "replay"))]
macro_rules! declare_tile_cache_logger_updatelists {
    ( $( $name:ident : $ty:ty, )+ ) => {
        #[cfg_attr(feature = "capture", derive(Serialize))]
        #[cfg_attr(feature = "replay", derive(Deserialize))]
        struct TileCacheLoggerUpdateListsSerializer {
            pub ron_string: Vec<String>,
        }

        pub struct TileCacheLoggerUpdateLists {
            $(
                /// Generate storage, one per interner.
                /// the tuple is a workaround to avoid the need for multiple
                /// fields that start with $name (macro concatenation).
                /// the string is .ron serialized updatelist at capture time;
                /// the updates is the list of DataStore updates (avoid UpdateList
                /// due to Default() requirements on the Keys) reconstructed at
                /// load time.
                pub $name: (Vec<String>, Vec<UpdateList<<$ty as Internable>::Key>>),
            )+
        }

        impl TileCacheLoggerUpdateLists {
            pub fn new() -> Self {
                TileCacheLoggerUpdateLists {
                    $(
                        $name : ( Vec::new(), Vec::new() ),
                    )+
                }
            }

            /// serialize all interners in updates to .ron
            #[cfg(feature = "capture")]
            fn serialize_updates(
                &mut self,
                updates: &InternerUpdates
            ) {
                $(
                    self.$name.0.push(ron::ser::to_string_pretty(&updates.$name, Default::default()).unwrap());
                )+
            }

            fn is_empty(&self) -> bool {
                $(
                    if !self.$name.0.is_empty() { return false; }
                )+
                true
            }

            #[cfg(feature = "capture")]
            fn to_ron(&self) -> String {
                let mut serializer = TileCacheLoggerUpdateListsSerializer { ron_string: Vec::new() };
                $(
                    serializer.ron_string.push(
                        ron::ser::to_string_pretty(&self.$name.0, Default::default()).unwrap());
                )+
                ron::ser::to_string_pretty(&serializer, Default::default()).unwrap()
            }

            /// Deserialize a previously captured log. On a malformed outer payload this
            /// logs an error and leaves the lists untouched.
            #[cfg(feature = "replay")]
            pub fn from_ron(&mut self, text: &str) {
                let serializer : TileCacheLoggerUpdateListsSerializer =
                    match ron::de::from_str(&text) {
                        Ok(data) => { data }
                        Err(e) => {
                            println!("ERROR: failed to deserialize updatelist: {:?}\n{:?}", &text, e);
                            return;
                        }
                    };
                let mut index = 0;
                $(
                    let ron_lists : Vec<String> = ron::de::from_str(&serializer.ron_string[index]).unwrap();
                    self.$name.1 = ron_lists.iter()
                                            .map( |list| ron::de::from_str(&list).unwrap() )
                                            .collect();
                    index = index + 1;
                )+
                // error: value assigned to `index` is never read
                let _ = index;
            }

            /// helper method to add a stringified version of all interned keys into
            /// a lookup table based on ItemUid. Use strings as a form of type erasure
            /// so all UpdateLists can go into a single map.
            /// Then during analysis, when we see an invalidation reason due to
            /// "ItemUid such and such was added to the tile primitive list", the lookup
            /// allows mapping that back into something readable.
            #[cfg(feature = "replay")]
            pub fn insert_in_lookup(
                        &mut self,
                        itemuid_to_string: &mut HashMap<ItemUid, String>)
            {
                $(
                    {
                        for list in &self.$name.1 {
                            for insertion in &list.insertions {
                                itemuid_to_string.insert(
                                    insertion.uid,
                                    format!("{:?}", insertion.value));
                            }
                        }
                    }
                )+
            }
        }
    }
}

#[cfg(any(feature = "capture", feature = "replay"))]
crate::enumerate_interners!(declare_tile_cache_logger_updatelists);

// Stub versions when neither capture nor replay is enabled, so callers
// don't need feature checks at every use site.
#[cfg(not(any(feature = "capture", feature = "replay")))]
pub struct TileCacheLoggerUpdateLists {
}

#[cfg(not(any(feature = "capture", feature = "replay")))]
impl TileCacheLoggerUpdateLists {
    pub fn new() -> Self { TileCacheLoggerUpdateLists {} }
    fn is_empty(&self) -> bool { true }
}

/// Log tile cache activity for one single frame.
/// Also stores the commands sent to the interning data_stores
/// so we can see which items were created or destroyed this frame,
/// and correlate that with tile invalidation activity.
pub struct TileCacheLoggerFrame {
    /// slices in the frame, one per take_context call
    pub slices: Vec<TileCacheLoggerSlice>,
    /// interning activity
    pub update_lists: TileCacheLoggerUpdateLists
}

impl TileCacheLoggerFrame {
    pub fn new() -> Self {
        TileCacheLoggerFrame {
            slices: Vec::new(),
            update_lists: TileCacheLoggerUpdateLists::new()
        }
    }

    pub fn is_empty(&self) -> bool {
        self.slices.is_empty() && self.update_lists.is_empty()
    }
}

/// Log tile cache activity whenever anything happens in take_context.
pub struct TileCacheLogger {
    /// next write pointer
    pub write_index : usize,
    /// ron serialization of tile caches;
    /// used as a circular buffer of the most recent `num_frames` frames.
    pub frames: Vec<TileCacheLoggerFrame>
}

impl TileCacheLogger {
    /// Create a logger with a fixed ring of `num_frames` frames. Passing 0
    /// disables logging entirely (see `is_enabled`).
    pub fn new(
        num_frames: usize
    ) -> Self {
        let mut frames = Vec::with_capacity(num_frames);
        for _i in 0..num_frames { // no Clone so no resize
            frames.push(TileCacheLoggerFrame::new());
        }
        TileCacheLogger {
            write_index: 0,
            frames
        }
    }

    pub fn is_enabled(&self) -> bool {
        !self.frames.is_empty()
    }

    #[cfg(feature = "capture")]
    pub fn add(
            &mut self,
            serialized_slice: String,
            local_to_world_transform: Transform3D<f32, PicturePixel, WorldPixel>
    ) {
        if !self.is_enabled() {
            return;
        }
        self.frames[self.write_index].slices.push(
            TileCacheLoggerSlice {
                serialized_slice,
                local_to_world_transform });
    }

    #[cfg(feature = "capture")]
    pub fn serialize_updates(&mut self, updates: &InternerUpdates) {
        if !self.is_enabled() {
            return;
        }
        self.frames[self.write_index].update_lists.serialize_updates(updates);
    }

    /// see if anything was written in this frame, and if so,
    /// advance the write index in a circular way and clear the
    /// recorded string.
    pub fn advance(&mut self) {
        if !self.is_enabled() || self.frames[self.write_index].is_empty() {
            return;
        }
        self.write_index = self.write_index + 1;
        if self.write_index >= self.frames.len() {
            self.write_index = 0;
        }
        self.frames[self.write_index] = TileCacheLoggerFrame::new();
    }

    /// Write all non-empty logged frames to `root`/tile_cache as .ron files,
    /// oldest frame first.
    #[cfg(feature = "capture")]
    pub fn save_capture(
        &self, root: &PathBuf
    ) {
        if !self.is_enabled() {
            return;
        }
        use std::fs;

        info!("saving tile cache log");
        let path_tile_cache = root.join("tile_cache");
        if !path_tile_cache.is_dir() {
            fs::create_dir(&path_tile_cache).unwrap();
        }

        let mut files_written = 0;
        for ix in 0..self.frames.len() {
            // ...and start with write_index, since that's the oldest entry
            // that we're about to overwrite. However when we get to
            // save_capture, we've add()ed entries but haven't advance()d yet,
            // so the actual oldest entry is write_index + 1
            let index = (self.write_index + 1 + ix) % self.frames.len();
            if self.frames[index].is_empty() {
                continue;
            }

            let filename = path_tile_cache.join(format!("frame{:05}.ron", files_written));
            let mut output = File::create(filename).unwrap();
            output.write_all(b"// slice data\n").unwrap();
            output.write_all(b"[\n").unwrap();
            for item in &self.frames[index].slices {
                output.write_all(b"( transform:\n").unwrap();
                let transform =
                    ron::ser::to_string_pretty(
                        &item.local_to_world_transform, Default::default()).unwrap();
                output.write_all(transform.as_bytes()).unwrap();
                output.write_all(b",\n tile_cache:\n").unwrap();
                output.write_all(item.serialized_slice.as_bytes()).unwrap();
                output.write_all(b"\n),\n").unwrap();
            }
            output.write_all(b"]\n\n").unwrap();

            output.write_all(b"// @@@ chunk @@@\n\n").unwrap();

            output.write_all(b"// interning data\n").unwrap();
            output.write_all(self.frames[index].update_lists.to_ron().as_bytes()).unwrap();

            files_written = files_written + 1;
        }
    }
}

/// Represents the native surfaces created for a picture cache, if using
/// a native compositor. An opaque and alpha surface is always created,
/// but tiles are added to a surface based on current opacity. If the
/// calculated opacity of a tile changes, the tile is invalidated and
/// attached to a different native surface. This means that we don't
/// need to invalidate the entire surface if only some tiles are changing
/// opacity. It also means we can take advantage of opaque tiles on cache
/// slices where only some of the tiles are opaque. There is an assumption
/// that creating a native surface is cheap, and only when a tile is added
/// to a surface is there a significant cost. This assumption holds true
/// for the current native compositor implementations on Windows and Mac.
pub struct NativeSurface {
    /// Native surface for opaque tiles
    pub opaque: NativeSurfaceId,
    /// Native surface for alpha tiles
    pub alpha: NativeSurfaceId,
}

/// Hash key for an external native compositor surface
#[derive(PartialEq, Eq, Hash)]
pub struct ExternalNativeSurfaceKey {
    /// The YUV/RGB image keys that are used to draw this surface.
    pub image_keys: [ImageKey; 3],
    /// The current device size of the surface.
    pub size: DeviceIntSize,
    /// True if this is an 'external' compositor surface created via
    /// Compositor::create_external_surface.
    pub is_external_surface: bool,
}

/// Information about a native compositor surface cached between frames.
pub struct ExternalNativeSurface {
    /// If true, the surface was used this frame. Used for a simple form
    /// of GC to remove old surfaces.
    pub used_this_frame: bool,
    /// The native compositor surface handle
    pub native_surface_id: NativeSurfaceId,
    /// List of image keys, and current image generations, that are drawn in this surface.
    /// The image generations are used to check if the compositor surface is dirty and
    /// needs to be updated.
    pub image_dependencies: [ImageDependency; 3],
}

/// The key that identifies a tile cache instance. For now, it's simply the index of
/// the slice as it was created during scene building.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct SliceId(usize);

impl SliceId {
    pub fn new(index: usize) -> Self {
        SliceId(index)
    }
}

/// Information that is required to reuse or create a new tile cache. Created
/// during scene building and passed to the render backend / frame builder.
pub struct TileCacheParams {
    // Index of the slice (also effectively the key of the tile cache, though we use SliceId where that matters)
    pub slice: usize,
    // Flags describing content of this cache (e.g. scrollbars)
    pub slice_flags: SliceFlags,
    // The anchoring spatial node / scroll root
    pub spatial_node_index: SpatialNodeIndex,
    // Optional background color of this tilecache. If present, can be used as an optimization
    // to enable opaque blending and/or subpixel AA in more places.
    pub background_color: Option<ColorF>,
    // List of clips shared by all prims that are promoted to this tile cache
    pub shared_clips: Vec<ClipInstance>,
    // The clip chain handle representing `shared_clips`
    pub shared_clip_chain: ClipChainId,
    // Virtual surface sizes are always square, so this represents both the width and height
    pub virtual_surface_size: i32,
    // The number of compositor surfaces that are being requested for this tile cache.
    // This is only a suggestion - the tile cache will clamp this as a reasonable number
    // and only promote a limited number of surfaces.
    pub compositor_surface_count: usize,
}

/// Defines which sub-slice (effectively a z-index) a primitive exists on within
/// a picture cache instance.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct SubSliceIndex(u8);

impl SubSliceIndex {
    pub const DEFAULT: SubSliceIndex = SubSliceIndex(0);

    pub fn new(index: usize) -> Self {
        SubSliceIndex(index as u8)
    }

    /// Return true if this sub-slice is the primary sub-slice (for now, we assume
    /// that only the primary sub-slice may be opaque and support subpixel AA, for example).
    pub fn is_primary(&self) -> bool {
        self.0 == 0
    }
}

/// Wrapper struct around an external surface descriptor with a little more information
/// that the picture caching code needs.
pub struct CompositorSurface {
    // External surface descriptor used by compositing logic
    pub descriptor: ExternalSurfaceDescriptor,
    // The compositor surface rect + any intersecting prims. Later prims that intersect
    // with this must be added to the next sub-slice.
    prohibited_rect: PictureRect,
    // If the compositor surface content is opaque.
    pub is_opaque: bool,
}

/// A SubSlice represents a potentially overlapping set of tiles within a picture cache. Most
/// picture cache instances will have only a single sub-slice. The exception to this is when
/// a picture cache has compositor surfaces, in which case sub slices are used to interleave
/// content under or over the compositor surface(s).
pub struct SubSlice {
    /// Hash of tiles present in this picture.
    pub tiles: FastHashMap<TileOffset, Box<Tile>>,
    /// The allocated compositor surfaces for this picture cache. May be None if
    /// not using native compositor, or if the surface was destroyed and needs
    /// to be reallocated next time this surface contains valid tiles.
    pub native_surface: Option<NativeSurface>,
    /// List of compositor surfaces that have been promoted from primitives
    /// in this tile cache.
    pub compositor_surfaces: Vec<CompositorSurface>,
}

impl SubSlice {
    /// Construct a new sub-slice
    fn new() -> Self {
        SubSlice {
            tiles: FastHashMap::default(),
            native_surface: None,
            compositor_surfaces: Vec::new(),
        }
    }

    /// Reset the list of compositor surfaces that follow this sub-slice.
    /// Built per-frame, since APZ may change whether an image is suitable to be a compositor surface.
    fn reset(&mut self) {
        self.compositor_surfaces.clear();
    }

    /// Resize the tile grid to match a new tile bounds.
    /// Tiles that fall inside the new bounds are kept; the returned map holds
    /// any tiles that fell outside the new bounds (so the caller can retire them).
    fn resize(&mut self, new_tile_rect: TileRect) -> FastHashMap<TileOffset, Box<Tile>> {
        let mut old_tiles = mem::replace(&mut self.tiles, FastHashMap::default());
        self.tiles.reserve(new_tile_rect.size.area() as usize);

        for y in new_tile_rect.origin.y .. new_tile_rect.origin.y + new_tile_rect.size.height {
            for x in new_tile_rect.origin.x .. new_tile_rect.origin.x + new_tile_rect.size.width {
                let key = TileOffset::new(x, y);
                let tile = old_tiles
                    .remove(&key)
                    .unwrap_or_else(|| {
                        Box::new(Tile::new(key))
                    });
                self.tiles.insert(key, tile);
            }
        }

        old_tiles
    }
}

/// Represents a cache of tiles that make up a picture primitive.
pub struct TileCacheInstance {
    /// Index of the tile cache / slice for this frame builder. It's determined
    /// by the setup_picture_caching method during flattening, which splits the
    /// picture tree into multiple slices. It's used as a simple input to the tile
    /// keys. It does mean we invalidate tiles if a new layer gets inserted / removed
    /// between display lists - this seems very unlikely to occur on most pages, but
    /// can be revisited if we ever notice that.
    pub slice: usize,
    /// Propagated information about the slice
    pub slice_flags: SliceFlags,
    /// The currently selected tile size to use for this cache
    pub current_tile_size: DeviceIntSize,
    /// The list of sub-slices in this tile cache
    pub sub_slices: Vec<SubSlice>,
    /// The positioning node for this tile cache.
    pub spatial_node_index: SpatialNodeIndex,
    /// List of opacity bindings, with some extra information
    /// about whether they changed since last frame.
    opacity_bindings: FastHashMap<PropertyBindingId, OpacityBindingInfo>,
    /// Switch back and forth between old and new bindings hashmaps to avoid re-allocating.
    old_opacity_bindings: FastHashMap<PropertyBindingId, OpacityBindingInfo>,
    /// A helper to compare transforms between previous and current frame.
    spatial_node_comparer: SpatialNodeComparer,
    /// List of color bindings, with some extra information
    /// about whether they changed since last frame.
    color_bindings: FastHashMap<PropertyBindingId, ColorBindingInfo>,
    /// Switch back and forth between old and new bindings hashmaps to avoid re-allocating.
    old_color_bindings: FastHashMap<PropertyBindingId, ColorBindingInfo>,
    /// The current dirty region tracker for this picture.
    pub dirty_region: DirtyRegion,
    /// Current size of tiles in picture units.
    tile_size: PictureSize,
    /// Tile coords of the currently allocated grid.
    tile_rect: TileRect,
    /// Pre-calculated versions of the tile_rect above, used to speed up the
    /// calculations in get_tile_coords_for_rect.
    tile_bounds_p0: TileOffset,
    tile_bounds_p1: TileOffset,
    /// Local rect (unclipped) of the picture this cache covers.
    pub local_rect: PictureRect,
    /// The local clip rect, from the shared clips of this picture.
    pub local_clip_rect: PictureRect,
    /// The surface index that this tile cache will be drawn into.
    surface_index: SurfaceIndex,
    /// The background color from the renderer. If this is set opaque, we know it's
    /// fine to clear the tiles to this and allow subpixel text on the first slice.
    pub background_color: Option<ColorF>,
    /// Information about the calculated backdrop content of this cache.
    pub backdrop: BackdropInfo,
    /// The allowed subpixel mode for this surface, which depends on the detected
    /// opacity of the background.
    pub subpixel_mode: SubpixelMode,
    /// A list of clip handles that exist on every (top-level) primitive in this picture.
    /// It's often the case that these are root / fixed position clips. By handling them
    /// here, we can avoid applying them to the items, which reduces work, but more importantly
    /// reduces invalidations.
    pub shared_clips: Vec<ClipInstance>,
    /// The clip chain that represents the shared_clips above. Used to build the local
    /// clip rect for this tile cache.
    shared_clip_chain: ClipChainId,
    /// The current transform of the picture cache root spatial node
    root_transform: ScaleOffset,
    /// The number of frames until this cache next evaluates what tile size to use.
    /// If a picture rect size is regularly changing just around a size threshold,
    /// we don't want to constantly invalidate and reallocate different tile size
    /// configuration each frame.
    frames_until_size_eval: usize,
    /// The current fractional offset of the cached picture
    fract_offset: PictureVector2D,
    /// The current device fractional offset of the cached picture
    device_fract_offset: DeviceVector2D,
    /// For DirectComposition, virtual surfaces don't support negative coordinates. However,
    /// picture cache tile coordinates can be negative. To handle this, we apply an offset
    /// to each tile in DirectComposition. We want to change this as little as possible,
    /// to avoid invalidating tiles. However, if we have a picture cache tile coordinate
    /// which is outside the virtual surface bounds, we must change this to allow
    /// correct remapping of the coordinates passed to BeginDraw in DC.
    virtual_offset: DeviceIntPoint,
    /// keep around the hash map used as compare_cache to avoid reallocating it each
    /// frame.
    compare_cache: FastHashMap<PrimitiveComparisonKey, PrimitiveCompareResult>,
    /// The current device position of this cache. Used to set the compositor
    /// offset of the surface when building the visual tree.
    pub device_position: DevicePoint,
    /// The currently considered tile size override. Used to check if we should
    /// re-evaluate tile size, even if the frame timer hasn't expired.
    tile_size_override: Option<DeviceIntSize>,
    /// A cache of compositor surfaces that are retained between frames
    pub external_native_surface_cache: FastHashMap<ExternalNativeSurfaceKey, ExternalNativeSurface>,
    /// Current frame ID of this tile cache instance. Used for book-keeping / garbage collecting
    frame_id: FrameId,
}

// NOTE(review): presumably the outcome of attempting to promote a primitive to a
// compositor surface - confirm against the (not visible here) promotion call sites.
enum SurfacePromotionResult {
    Failed,
    Success {
        flip_y: bool,
    }
}

impl TileCacheInstance {
    pub fn new(params: TileCacheParams) -> Self {
        // Determine how many sub-slices we need. Clamp to an arbitrary limit to ensure
        // we don't create a huge number of OS compositor tiles and sub-slices.
        // The +1 accounts for the primary (bottom) sub-slice.
        let sub_slice_count = params.compositor_surface_count.min(MAX_COMPOSITOR_SURFACES) + 1;

        let mut sub_slices = Vec::with_capacity(sub_slice_count);
        for _ in 0 .. sub_slice_count {
            sub_slices.push(SubSlice::new());
        }

        TileCacheInstance {
            slice: params.slice,
            slice_flags: params.slice_flags,
            spatial_node_index: params.spatial_node_index,
            sub_slices,
            opacity_bindings: FastHashMap::default(),
            old_opacity_bindings: FastHashMap::default(),
            spatial_node_comparer: SpatialNodeComparer::new(),
            color_bindings: FastHashMap::default(),
            old_color_bindings: FastHashMap::default(),
            dirty_region: DirtyRegion::new(params.spatial_node_index),
            tile_size: PictureSize::zero(),
            tile_rect: TileRect::zero(),
            tile_bounds_p0: TileOffset::zero(),
            tile_bounds_p1: TileOffset::zero(),
            local_rect: PictureRect::zero(),
            local_clip_rect: PictureRect::zero(),
            surface_index: SurfaceIndex(0),
            background_color: params.background_color,
            backdrop: BackdropInfo::empty(),
            subpixel_mode: SubpixelMode::Allow,
            root_transform: ScaleOffset::identity(),
            shared_clips: params.shared_clips,
            shared_clip_chain: params.shared_clip_chain,
            current_tile_size: DeviceIntSize::zero(),
            frames_until_size_eval: 0,
            fract_offset: PictureVector2D::zero(),
            device_fract_offset: DeviceVector2D::zero(),
            // Default to centering the virtual offset in the middle of the DC virtual surface
            virtual_offset: DeviceIntPoint::new(
                params.virtual_surface_size / 2,
                params.virtual_surface_size / 2,
            ),
            compare_cache: FastHashMap::default(),
            device_position: DevicePoint::zero(),
            tile_size_override: None,
            external_native_surface_cache: FastHashMap::default(),
            frame_id: FrameId::INVALID,
        }
    }

    /// Return the total number of tiles allocated by this tile cache
    pub fn tile_count(&self) -> usize {
        self.tile_rect.size.area() as usize * self.sub_slices.len()
    }

    /// Reset this tile cache with the updated parameters from a new scene
    /// that has arrived. This allows the tile cache to be retained across
    /// new scenes.
    pub fn prepare_for_new_scene(
        &mut self,
        params: TileCacheParams,
        resource_cache: &mut ResourceCache,
    ) {
        // We should only receive updated state for matching slice key
        assert_eq!(self.slice, params.slice);

        // Determine how many sub-slices we need, based on how many compositor surface prims are
        // in the supplied primitive list.
        let required_sub_slice_count = params.compositor_surface_count.min(MAX_COMPOSITOR_SURFACES) + 1;

        if self.sub_slices.len() != required_sub_slice_count {
            // Zeroing the tile rect forces the tile grid to be rebuilt on the
            // next pre_update, since the sub-slice layout has changed.
            self.tile_rect = TileRect::zero();

            if self.sub_slices.len() > required_sub_slice_count {
                // Shrinking: split off the excess sub-slices and explicitly
                // release any native compositor resources they own, since
                // those are manually managed and won't be freed on drop.
                let old_sub_slices = self.sub_slices.split_off(required_sub_slice_count);

                for mut sub_slice in old_sub_slices {
                    for tile in sub_slice.tiles.values_mut() {
                        if let Some(TileSurface::Texture { descriptor: SurfaceTextureDescriptor::Native { ref mut id, .. }, .. }) = tile.surface {
                            if let Some(id) = id.take() {
                                resource_cache.destroy_compositor_tile(id);
                            }
                        }
                    }

                    if let Some(native_surface) = sub_slice.native_surface {
                        resource_cache.destroy_compositor_surface(native_surface.opaque);
                        resource_cache.destroy_compositor_surface(native_surface.alpha);
                    }
                }
            } else {
                // Growing: just append empty sub-slices until we have enough.
                while self.sub_slices.len() < required_sub_slice_count {
                    self.sub_slices.push(SubSlice::new());
                }
            }
        }

        // Store the parameters from the scene builder for this slice. Other
        // params in the tile cache are retained and reused, or are always
        // updated during pre/post_update.
        self.slice_flags = params.slice_flags;
        self.spatial_node_index = params.spatial_node_index;
        self.background_color = params.background_color;
        self.shared_clips = params.shared_clips;
        self.shared_clip_chain = params.shared_clip_chain;

        // Since the slice flags may have changed, ensure we re-evaluate the
        // appropriate tile size for this cache next update.
        self.frames_until_size_eval = 0;
    }

    /// Destroy any manually managed resources before this picture cache is
    /// destroyed, such as native compositor surfaces.
    pub fn destroy(
        self,
        resource_cache: &mut ResourceCache,
    ) {
        for sub_slice in self.sub_slices {
            if let Some(native_surface) = sub_slice.native_surface {
                resource_cache.destroy_compositor_surface(native_surface.opaque);
                resource_cache.destroy_compositor_surface(native_surface.alpha);
            }
        }

        for (_, external_surface) in self.external_native_surface_cache {
            resource_cache.destroy_compositor_surface(external_surface.native_surface_id)
        }
    }

    /// Get the tile coordinates for a given rectangle.
    /// Returns the (inclusive min, exclusive-ish max) tile offsets covering
    /// `rect`, clamped to the currently allocated tile grid bounds. Note that
    /// if `rect` lies entirely outside the grid, clamping collapses the range
    /// (p0 == p1 on that axis), which callers use to detect "not visible".
    fn get_tile_coords_for_rect(
        &self,
        rect: &PictureRect,
    ) -> (TileOffset, TileOffset) {
        // Get the tile coordinates in the picture space.
        let mut p0 = TileOffset::new(
            (rect.origin.x / self.tile_size.width).floor() as i32,
            (rect.origin.y / self.tile_size.height).floor() as i32,
        );

        let mut p1 = TileOffset::new(
            ((rect.origin.x + rect.size.width) / self.tile_size.width).ceil() as i32,
            ((rect.origin.y + rect.size.height) / self.tile_size.height).ceil() as i32,
        );

        // Clamp the tile coordinates here to avoid looping over irrelevant tiles later on.
        p0.x = clamp(p0.x, self.tile_bounds_p0.x, self.tile_bounds_p1.x);
        p0.y = clamp(p0.y, self.tile_bounds_p0.y, self.tile_bounds_p1.y);
        p1.x = clamp(p1.x, self.tile_bounds_p0.x, self.tile_bounds_p1.x);
        p1.y = clamp(p1.y, self.tile_bounds_p0.y, self.tile_bounds_p1.y);

        (p0, p1)
    }

    /// Update transforms, opacity, color bindings and tile rects.
    /// Returns the world-space culling rect: the union of the world rects
    /// of all tiles that are currently visible on screen.
    pub fn pre_update(
        &mut self,
        pic_rect: PictureRect,
        surface_index: SurfaceIndex,
        frame_context: &FrameVisibilityContext,
        frame_state: &mut FrameVisibilityState,
    ) -> WorldRect {
        self.surface_index = surface_index;
        self.local_rect = pic_rect;
        self.local_clip_rect = PictureRect::max_rect();

        for sub_slice in &mut self.sub_slices {
            sub_slice.reset();
        }

        // Reset the opaque rect + subpixel mode, as they are calculated
        // during the prim dependency checks.
        self.backdrop = BackdropInfo::empty();

        let pic_to_world_mapper = SpaceMapper::new_with_target(
            ROOT_SPATIAL_NODE_INDEX,
            self.spatial_node_index,
            frame_context.global_screen_world_rect,
            frame_context.spatial_tree,
        );

        // If there is a valid set of shared clips, build a clip chain instance for this,
        // which will provide a local clip rect. This is useful for establishing things
        // like whether the backdrop rect supplied by Gecko can be considered opaque.
        if self.shared_clip_chain != ClipChainId::NONE {
            let shared_clips = &mut frame_state.scratch.picture.clip_chain_ids;
            shared_clips.clear();

            let map_local_to_surface = SpaceMapper::new(
                self.spatial_node_index,
                pic_rect,
            );

            // Walk the shared clip chain, collecting each node id.
            let mut current_clip_chain_id = self.shared_clip_chain;
            while current_clip_chain_id != ClipChainId::NONE {
                shared_clips.push(current_clip_chain_id);
                let clip_chain_node = &frame_state.clip_store.clip_chain_nodes[current_clip_chain_id.0 as usize];
                current_clip_chain_id = clip_chain_node.parent_clip_chain_id;
            }

            frame_state.clip_store.set_active_clips(
                LayoutRect::max_rect(),
                self.spatial_node_index,
                map_local_to_surface.ref_spatial_node_index,
                &shared_clips,
                frame_context.spatial_tree,
                &mut frame_state.data_stores.clip,
            );

            let clip_chain_instance = frame_state.clip_store.build_clip_chain_instance(
                pic_rect.cast_unit(),
                &map_local_to_surface,
                &pic_to_world_mapper,
                frame_context.spatial_tree,
                frame_state.gpu_cache,
                frame_state.resource_cache,
                frame_context.global_device_pixel_scale,
                &frame_context.global_screen_world_rect,
                &mut frame_state.data_stores.clip,
                true,
                false,
            );

            // Ensure that if the entire picture cache is clipped out, the local
            // clip rect is zero. This makes sure we don't register any occluders
            // that are actually off-screen.
            self.local_clip_rect = clip_chain_instance.map_or(PictureRect::zero(), |clip_chain_instance| {
                clip_chain_instance.pic_clip_rect
            });
        }

        // Advance the current frame ID counter for this picture cache (must be done
        // after any retained prev state is taken above).
        self.frame_id.advance();

        // Notify the spatial node comparer that a new frame has started, and the
        // current reference spatial node for this tile cache.
        self.spatial_node_comparer.next_frame(self.spatial_node_index);

        // At the start of the frame, step through each current compositor surface
        // and mark it as unused. Later, this is used to free old compositor surfaces.
        // TODO(gw): In future, we might make this more sophisticated - for example,
        //           retaining them for >1 frame if unused, or retaining them in some
        //           kind of pool to reduce future allocations.
        for external_native_surface in self.external_native_surface_cache.values_mut() {
            external_native_surface.used_this_frame = false;
        }

        // Only evaluate what tile size to use fairly infrequently, so that we don't end
        // up constantly invalidating and reallocating tiles if the picture rect size is
        // changing near a threshold value.
        if self.frames_until_size_eval == 0 ||
           self.tile_size_override != frame_context.config.tile_size_override {

            // Work out what size tile is appropriate for this picture cache.
            let desired_tile_size = match frame_context.config.tile_size_override {
                Some(tile_size_override) => {
                    tile_size_override
                }
                None => {
                    if self.slice_flags.contains(SliceFlags::IS_SCROLLBAR) {
                        // Scrollbars get long, thin tiles along their axis.
                        if pic_rect.size.width <= pic_rect.size.height {
                            TILE_SIZE_SCROLLBAR_VERTICAL
                        } else {
                            TILE_SIZE_SCROLLBAR_HORIZONTAL
                        }
                    } else {
                        frame_state.resource_cache.texture_cache.default_picture_tile_size()
                    }
                }
            };

            // If the desired tile size has changed, then invalidate and drop any
            // existing tiles.
            if desired_tile_size != self.current_tile_size {
                for sub_slice in &mut self.sub_slices {
                    // Destroy any native surfaces on the tiles that will be dropped due
                    // to resizing.
                    if let Some(native_surface) = sub_slice.native_surface.take() {
                        frame_state.resource_cache.destroy_compositor_surface(native_surface.opaque);
                        frame_state.resource_cache.destroy_compositor_surface(native_surface.alpha);
                    }
                    sub_slice.tiles.clear();
                }
                self.tile_rect = TileRect::zero();
                self.current_tile_size = desired_tile_size;
            }

            // Reset counter until next evaluating the desired tile size. This is an
            // arbitrary value.
            self.frames_until_size_eval = 120;
            self.tile_size_override = frame_context.config.tile_size_override;
        }

        // Map an arbitrary point in picture space to world space, to work out
        // what the fractional translation is that's applied by this scroll root.
        // TODO(gw): I'm not 100% sure this is right. At least, in future, we should
        //           make a specific API for this, and/or enforce that the picture
        //           cache transform only includes scale and/or translation (we
        //           already ensure it doesn't have perspective).
        let world_origin = pic_to_world_mapper
            .map(&PictureRect::new(PicturePoint::zero(), PictureSize::new(1.0, 1.0)))
            .expect("bug: unable to map origin to world space")
            .origin;

        // Get the desired integer device coordinate
        let device_origin = world_origin * frame_context.global_device_pixel_scale;
        let desired_device_origin = device_origin.round();
        self.device_position = desired_device_origin;
        self.device_fract_offset = desired_device_origin - device_origin;

        // Unmap from device space to world space rect
        let ref_world_rect = WorldRect::new(
            desired_device_origin / frame_context.global_device_pixel_scale,
            WorldSize::new(1.0, 1.0),
        );

        // Unmap from world space to picture space; this should be the fractional offset
        // required in picture space to align in device space
        self.fract_offset = pic_to_world_mapper
            .unmap(&ref_world_rect)
            .expect("bug: unable to unmap ref world rect")
            .origin
            .to_vector();

        // Do a hacky diff of opacity binding values from the last frame. This is
        // used later on during tile invalidation tests.
        let current_properties = frame_context.scene_properties.float_properties();
        // Swap (rather than reallocate) so the previous frame's map becomes
        // the scratch "old" map for diffing.
        mem::swap(&mut self.opacity_bindings, &mut self.old_opacity_bindings);

        self.opacity_bindings.clear();
        for (id, value) in current_properties {
            let changed = match self.old_opacity_bindings.get(id) {
                Some(old_property) => !old_property.value.approx_eq(value),
                None => true,
            };
            self.opacity_bindings.insert(*id, OpacityBindingInfo {
                value: *value,
                changed,
            });
        }

        // Do a hacky diff of color binding values from the last frame. This is
        // used later on during tile invalidation tests.
        let current_properties = frame_context.scene_properties.color_properties();
        mem::swap(&mut self.color_bindings, &mut self.old_color_bindings);

        self.color_bindings.clear();
        for (id, value) in current_properties {
            let changed = match self.old_color_bindings.get(id) {
                Some(old_property) => old_property.value != (*value).into(),
                None => true,
            };
            self.color_bindings.insert(*id, ColorBindingInfo {
                value: (*value).into(),
                changed,
            });
        }

        let world_tile_size = WorldSize::new(
            self.current_tile_size.width as f32 / frame_context.global_device_pixel_scale.0,
            self.current_tile_size.height as f32 / frame_context.global_device_pixel_scale.0,
        );

        // We know that this is an exact rectangle, since we (for now) only support tile
        // caches where the scroll root is in the root coordinate system.
        let local_tile_rect = pic_to_world_mapper
            .unmap(&WorldRect::new(WorldPoint::zero(), world_tile_size))
            .expect("bug: unable to get local tile rect");

        self.tile_size = local_tile_rect.size;

        let screen_rect_in_pic_space = pic_to_world_mapper
            .unmap(&frame_context.global_screen_world_rect)
            .expect("unable to unmap screen rect");

        // Inflate the needed rect a bit, so that we retain tiles that we have drawn
        // but have just recently gone off-screen. This means that we avoid re-drawing
        // tiles if the user is scrolling up and down small amounts, at the cost of
        // a bit of extra texture memory.
        let desired_rect_in_pic_space = screen_rect_in_pic_space
            .inflate(0.0, 1.0 * self.tile_size.height);

        let needed_rect_in_pic_space = desired_rect_in_pic_space
            .intersection(&pic_rect)
            .unwrap_or_else(PictureRect::zero);

        let p0 = needed_rect_in_pic_space.origin;
        let p1 = needed_rect_in_pic_space.bottom_right();

        let x0 = (p0.x / local_tile_rect.size.width).floor() as i32;
        let x1 = (p1.x / local_tile_rect.size.width).ceil() as i32;

        let y0 = (p0.y / local_tile_rect.size.height).floor() as i32;
        let y1 = (p1.y / local_tile_rect.size.height).ceil() as i32;

        let x_tiles = x1 - x0;
        let y_tiles = y1 - y0;
        let new_tile_rect = TileRect::new(
            TileOffset::new(x0, y0),
            TileSize::new(x_tiles, y_tiles),
        );

        // Determine whether the current bounds of the tile grid will exceed the
        // bounds of the DC virtual surface, taking into account the current
        // virtual offset. If so, we need to invalidate all tiles, and set up
        // a new virtual offset, centered around the current tile grid.
        let virtual_surface_size = frame_context.config.compositor_kind.get_virtual_surface_size();
        // We only need to invalidate in this case if the underlying platform
        // uses virtual surfaces.
        if virtual_surface_size > 0 {
            // Get the extremities of the tile grid after virtual offset is applied
            let tx0 = self.virtual_offset.x + x0 * self.current_tile_size.width;
            let ty0 = self.virtual_offset.y + y0 * self.current_tile_size.height;
            let tx1 = self.virtual_offset.x + (x1+1) * self.current_tile_size.width;
            let ty1 = self.virtual_offset.y + (y1+1) * self.current_tile_size.height;

            let need_new_virtual_offset = tx0 < 0 ||
                                          ty0 < 0 ||
                                          tx1 >= virtual_surface_size ||
                                          ty1 >= virtual_surface_size;

            if need_new_virtual_offset {
                // Calculate a new virtual offset, centered around the middle of the
                // current tile grid. This means we won't need to invalidate and get
                // a new offset for a long time!
                self.virtual_offset = DeviceIntPoint::new(
                    (virtual_surface_size/2) - ((x0 + x1) / 2) * self.current_tile_size.width,
                    (virtual_surface_size/2) - ((y0 + y1) / 2) * self.current_tile_size.height,
                );

                // Invalidate all native tile surfaces. They will be re-allocated next time
                // they are scheduled to be rasterized.
                for sub_slice in &mut self.sub_slices {
                    for tile in sub_slice.tiles.values_mut() {
                        if let Some(TileSurface::Texture { descriptor: SurfaceTextureDescriptor::Native { ref mut id, .. }, .. }) = tile.surface {
                            if let Some(id) = id.take() {
                                frame_state.resource_cache.destroy_compositor_tile(id);
                                tile.surface = None;
                                // Invalidate the entire tile to force a redraw.
                                // TODO(gw): Add a new invalidation reason for virtual offset changing
                                tile.invalidate(None, InvalidationReason::CompositorKindChanged);
                            }
                        }
                    }

                    // Destroy the native virtual surfaces. They will be re-allocated next time a tile
                    // that references them is scheduled to draw.
                    if let Some(native_surface) = sub_slice.native_surface.take() {
                        frame_state.resource_cache.destroy_compositor_surface(native_surface.opaque);
                        frame_state.resource_cache.destroy_compositor_surface(native_surface.alpha);
                    }
                }
            }
        }

        // Rebuild the tile grid if the picture cache rect has changed.
        if new_tile_rect != self.tile_rect {
            for sub_slice in &mut self.sub_slices {
                let mut old_tiles = sub_slice.resize(new_tile_rect);

                // If any old tiles remain after the resize, the dirty rects are
                // no longer valid.
                if !old_tiles.is_empty() {
                    frame_state.composite_state.dirty_rects_are_valid = false;
                }

                // Any old tiles that remain after the loop above are going to be dropped. For
                // simple composite mode, the texture cache handle will expire and be collected
                // by the texture cache. For native compositor mode, we need to explicitly
                // invoke a callback to the client to destroy that surface.
                frame_state.composite_state.destroy_native_tiles(
                    old_tiles.values_mut(),
                    frame_state.resource_cache,
                );
            }
        }

        // This is duplicated information from tile_rect, but cached here to avoid
        // redundant calculations during get_tile_coords_for_rect
        self.tile_bounds_p0 = TileOffset::new(x0, y0);
        self.tile_bounds_p1 = TileOffset::new(x1, y1);
        self.tile_rect = new_tile_rect;

        let mut world_culling_rect = WorldRect::zero();

        let mut ctx = TilePreUpdateContext {
            pic_to_world_mapper,
            fract_offset: self.fract_offset,
            device_fract_offset: self.device_fract_offset,
            background_color: self.background_color,
            global_screen_world_rect: frame_context.global_screen_world_rect,
            tile_size: self.tile_size,
            frame_id: self.frame_id,
        };

        // Pre-update each tile
        for sub_slice in &mut self.sub_slices {
            for tile in sub_slice.tiles.values_mut() {
                tile.pre_update(&ctx);

                // Only include the tiles that are currently in view into the world culling
                // rect. This is a very important optimization for a couple of reasons:
                // (1) Primitives that intersect with tiles in the grid that are not currently
                //     visible can be skipped from primitive preparation, clip chain building
                //     and tile dependency updates.
                // (2) When we need to allocate an off-screen surface for a child picture (for
                //     example a CSS filter) we clip the size of the GPU surface to the world
                //     culling rect below (to ensure we draw enough of it to be sampled by any
                //     tiles that reference it). Making the world culling rect only affected
                //     by visible tiles (rather than the entire virtual tile display port) can
                //     result in allocating _much_ smaller GPU surfaces for cases where the
                //     true off-screen surface size is very large.
                if tile.is_visible {
                    world_culling_rect = world_culling_rect.union(&tile.world_tile_rect);
                }
            }

            // The background color can only be applied to the first sub-slice.
            ctx.background_color = None;
        }

        // If compositor mode is changed, need to drop all incompatible tiles.
        match frame_context.config.compositor_kind {
            CompositorKind::Draw { .. } => {
                // Draw compositor: native tile / surface resources are incompatible
                // and must be destroyed and invalidated.
                for sub_slice in &mut self.sub_slices {
                    for tile in sub_slice.tiles.values_mut() {
                        if let Some(TileSurface::Texture { descriptor: SurfaceTextureDescriptor::Native { ref mut id, .. }, .. }) = tile.surface {
                            if let Some(id) = id.take() {
                                frame_state.resource_cache.destroy_compositor_tile(id);
                            }
                            tile.surface = None;
                            // Invalidate the entire tile to force a redraw.
                            tile.invalidate(None, InvalidationReason::CompositorKindChanged);
                        }
                    }

                    if let Some(native_surface) = sub_slice.native_surface.take() {
                        frame_state.resource_cache.destroy_compositor_surface(native_surface.opaque);
                        frame_state.resource_cache.destroy_compositor_surface(native_surface.alpha);
                    }
                }

                for (_, external_surface) in self.external_native_surface_cache.drain() {
                    frame_state.resource_cache.destroy_compositor_surface(external_surface.native_surface_id)
                }
            }
            CompositorKind::Native { .. } => {
                // This could hit even when compositor mode is not changed,
                // then we need to check if there are incompatible tiles.
                for sub_slice in &mut self.sub_slices {
                    for tile in sub_slice.tiles.values_mut() {
                        if let Some(TileSurface::Texture { descriptor: SurfaceTextureDescriptor::TextureCache { .. }, .. }) = tile.surface {
                            tile.surface = None;
                            // Invalidate the entire tile to force a redraw.
                            tile.invalidate(None, InvalidationReason::CompositorKindChanged);
                        }
                    }
                }
            }
        }

        world_culling_rect
    }

    /// Determine whether a primitive may be promoted to its own compositor
    /// surface. Promotion fails if the primitive doesn't request it, the
    /// compositor-surface budget is exhausted, a mask clip applies, the
    /// primitive is not on the root tile cache, the transform is not a
    /// non-mirrored-in-x 2d scale/translation, or this slice is a blend
    /// container.
    fn can_promote_to_surface(
        &mut self,
        flags: PrimitiveFlags,
        prim_clip_chain: &ClipChainInstance,
        prim_spatial_node_index: SpatialNodeIndex,
        is_root_tile_cache: bool,
        sub_slice_index: usize,
        frame_context: &FrameVisibilityContext,
    ) -> SurfacePromotionResult {
        // Check if this primitive _wants_ to be promoted to a compositor surface.
        if !flags.contains(PrimitiveFlags::PREFER_COMPOSITOR_SURFACE) {
            return SurfacePromotionResult::Failed;
        }

        // For now, only support a small (arbitrary) number of compositor surfaces.
        if sub_slice_index == MAX_COMPOSITOR_SURFACES {
            return SurfacePromotionResult::Failed;
        }

        // If a complex clip is being applied to this primitive, it can't be
        // promoted directly to a compositor surface (we might be able to
        // do this in limited cases in future, some native compositors do
        // support rounded rect clips, for example)
        if prim_clip_chain.needs_mask {
            return SurfacePromotionResult::Failed;
        }

        // If not on the root picture cache, it has some kind of
        // complex effect (such as a filter, mix-blend-mode or 3d transform).
        if !is_root_tile_cache {
            return SurfacePromotionResult::Failed;
        }

        // The primitive-to-world transform must be an axis-aligned 2d
        // scale/translation, and must not mirror horizontally.
        let mapper : SpaceMapper<PicturePixel, WorldPixel> = SpaceMapper::new_with_target(
            ROOT_SPATIAL_NODE_INDEX,
            prim_spatial_node_index,
            frame_context.global_screen_world_rect,
            &frame_context.spatial_tree);
        let transform = mapper.get_transform();
        if !transform.is_2d_scale_translation() {
            return SurfacePromotionResult::Failed;
        }
        if transform.m11 < 0.0 {
            return SurfacePromotionResult::Failed;
        }

        if self.slice_flags.contains(SliceFlags::IS_BLEND_CONTAINER) {
            return SurfacePromotionResult::Failed;
        }

        // A negative vertical scale means the surface must be flipped in y.
        SurfacePromotionResult::Success {
            flip_y: transform.m22 < 0.0,
        }
    }

    /// Set up a compositor surface for a promoted YUV image primitive.
    /// Requests each plane's image, then defers to
    /// setup_compositor_surfaces_impl with a Yuv dependency. YUV surfaces
    /// are always treated as opaque. Returns false if composition is not
    /// available and the content path must be used instead.
    fn setup_compositor_surfaces_yuv(
        &mut self,
        sub_slice_index: usize,
        prim_info: &mut PrimitiveDependencyInfo,
        flags: PrimitiveFlags,
        local_prim_rect: LayoutRect,
        prim_spatial_node_index: SpatialNodeIndex,
        pic_clip_rect: PictureRect,
        frame_context: &FrameVisibilityContext,
        image_dependencies: &[ImageDependency;3],
        api_keys: &[ImageKey; 3],
        resource_cache: &mut ResourceCache,
        composite_state: &mut CompositeState,
        gpu_cache: &mut GpuCache,
        image_rendering: ImageRendering,
        color_depth: ColorDepth,
        color_space: YuvColorSpace,
        format: YuvFormat,
    ) -> bool {
        for &key in api_keys {
            // TODO: See comment in setup_compositor_surfaces_rgb.
            resource_cache.request_image(ImageRequest {
                    key,
                    rendering: image_rendering,
                    tile: None,
                },
                gpu_cache,
            );
        }

        self.setup_compositor_surfaces_impl(
            sub_slice_index,
            prim_info,
            flags,
            local_prim_rect,
            prim_spatial_node_index,
            pic_clip_rect,
            frame_context,
            ExternalSurfaceDependency::Yuv {
                image_dependencies: *image_dependencies,
                color_space,
                format,
                rescale: color_depth.rescaling_factor(),
            },
            api_keys,
            resource_cache,
            composite_state,
            image_rendering,
            true,
        )
    }

    /// Set up a compositor surface for a promoted RGB(A) image primitive.
    /// Opacity is taken from the image descriptor. Returns false if
    /// composition is not available and the content path must be used
    /// instead.
    fn setup_compositor_surfaces_rgb(
        &mut self,
        sub_slice_index: usize,
        prim_info: &mut PrimitiveDependencyInfo,
        flags: PrimitiveFlags,
        local_prim_rect: LayoutRect,
        prim_spatial_node_index: SpatialNodeIndex,
        pic_clip_rect: PictureRect,
        frame_context: &FrameVisibilityContext,
        image_dependency: ImageDependency,
        api_key: ImageKey,
        resource_cache: &mut ResourceCache,
        composite_state: &mut CompositeState,
        gpu_cache: &mut GpuCache,
        image_rendering: ImageRendering,
        flip_y: bool,
    ) -> bool {
        let mut api_keys = [ImageKey::DUMMY; 3];
        api_keys[0] = api_key;

        // TODO: The picture compositing code requires images promoted
        // into their own picture cache slices to be requested every
        // frame even if they are not visible. However the image updates
        // are only reached on the prepare pass for visible primitives.
        // So we make sure to trigger an image request when promoting
        // the image here.
        resource_cache.request_image(ImageRequest {
                key: api_key,
                rendering: image_rendering,
                tile: None,
            },
            gpu_cache,
        );

        let is_opaque = resource_cache.get_image_properties(api_key)
            .map_or(false, |properties| properties.descriptor.is_opaque());

        self.setup_compositor_surfaces_impl(
            sub_slice_index,
            prim_info,
            flags,
            local_prim_rect,
            prim_spatial_node_index,
            pic_clip_rect,
            frame_context,
            ExternalSurfaceDependency::Rgb {
                image_dependency,
                flip_y,
            },
            &api_keys,
            resource_cache,
            composite_state,
            image_rendering,
            is_opaque,
        )
    }

    // returns false if composition is not available for this surface,
    // and the non-compositor path should be used to draw it instead.
    fn setup_compositor_surfaces_impl(
        &mut self,
        sub_slice_index: usize,
        prim_info: &mut PrimitiveDependencyInfo,
        flags: PrimitiveFlags,
        local_prim_rect: LayoutRect,
        prim_spatial_node_index: SpatialNodeIndex,
        pic_clip_rect: PictureRect,
        frame_context: &FrameVisibilityContext,
        dependency: ExternalSurfaceDependency,
        api_keys: &[ImageKey; 3],
        resource_cache: &mut ResourceCache,
        composite_state: &mut CompositeState,
        image_rendering: ImageRendering,
        is_opaque: bool,
    ) -> bool {
        let map_local_to_surface = SpaceMapper::new_with_target(
            self.spatial_node_index,
            prim_spatial_node_index,
            self.local_rect,
            frame_context.spatial_tree,
        );

        // Map the primitive local rect into picture space.
        let prim_rect = match map_local_to_surface.map(&local_prim_rect) {
            Some(rect) => rect,
            // NOTE(review): returning true here reports composition as
            // available even though nothing was set up — presumably an
            // unmappable prim has nothing to draw either way; confirm.
            None => return true,
        };

        // If the rect is invalid, no need to create dependencies.
        if prim_rect.size.is_empty() {
            return true;
        }

        let pic_to_world_mapper = SpaceMapper::new_with_target(
            ROOT_SPATIAL_NODE_INDEX,
            self.spatial_node_index,
            frame_context.global_screen_world_rect,
            frame_context.spatial_tree,
        );

        let world_clip_rect = pic_to_world_mapper
            .map(&prim_info.prim_clip_box.to_rect())
            .expect("bug: unable to map clip to world space");

        // Off-screen primitives don't need a compositor surface this frame.
        let is_visible = world_clip_rect.intersects(&frame_context.global_screen_world_rect);
        if !is_visible {
            return true;
        }

        let world_rect = pic_to_world_mapper
            .map(&prim_rect)
            .expect("bug: unable to map the primitive to world space");
        let device_rect = (world_rect * frame_context.global_device_pixel_scale).round();

        // TODO(gw): Is there any case where if the primitive ends up on a fractional
        //           boundary we want to _skip_ promoting to a compositor surface and
        //           draw it as part of the content?
        let (surface_rect, transform) = match composite_state.compositor_kind {
            CompositorKind::Draw { .. } => {
                (device_rect, Transform3D::identity())
            }
            CompositorKind::Native { .. } => {
                // If we have a Native Compositor, then we can support doing the transformation
                // as part of compositing. Use the local prim rect for the external surface, and
                // compute the full local to device transform to provide to the compositor.
                let surface_to_world_mapper : SpaceMapper<PicturePixel, WorldPixel> = SpaceMapper::new_with_target(
                    ROOT_SPATIAL_NODE_INDEX,
                    prim_spatial_node_index,
                    frame_context.global_screen_world_rect,
                    frame_context.spatial_tree,
                );
                let prim_origin = Vector3D::new(local_prim_rect.origin.x, local_prim_rect.origin.y, 0.0);
                let world_to_device_scale = Transform3D::from_scale(frame_context.global_device_pixel_scale);
                let transform = surface_to_world_mapper.get_transform().pre_translate(prim_origin).then(&world_to_device_scale);

                (local_prim_rect.cast_unit(), transform)
            }
        };

        let clip_rect = (world_clip_rect * frame_context.global_device_pixel_scale).round();

        // Fall back to the content path for surfaces that exceed the
        // maximum supported compositor surface size.
        if surface_rect.size.width >= MAX_COMPOSITOR_SURFACES_SIZE ||
           surface_rect.size.height >= MAX_COMPOSITOR_SURFACES_SIZE {
            return false;
        }

        // If this primitive is an external image, and supports being used
        // directly by a native compositor, then lookup the external image id
        // so we can pass that through.
        let external_image_id = if flags.contains(PrimitiveFlags::SUPPORTS_EXTERNAL_COMPOSITOR_SURFACE) {
            resource_cache.get_image_properties(api_keys[0])
                .and_then(|properties| properties.external_image)
                .and_then(|image| Some(image.id))
        } else {
            None
        };

        // When using native compositing, we need to find an existing native surface
        // handle to use, or allocate a new one. For existing native surfaces, we can
        // also determine whether this needs to be updated, depending on whether the
        // image generation(s) of the planes have changed since last composite.
        let (native_surface_id, update_params) = match composite_state.compositor_kind {
            CompositorKind::Draw { .. } => {
                (None, None)
            }
            CompositorKind::Native { .. } => {
                let native_surface_size = surface_rect.size.round().to_i32();

                let key = ExternalNativeSurfaceKey {
                    image_keys: *api_keys,
                    size: native_surface_size,
                    is_external_surface: external_image_id.is_some(),
                };

                let native_surface = self.external_native_surface_cache
                    .entry(key)
                    .or_insert_with(|| {
                        // No existing surface, so allocate a new compositor surface.
                        let native_surface_id = match external_image_id {
                            Some(_external_image) => {
                                // If we have a suitable external image, then create an external
                                // surface to attach to.
                                resource_cache.create_compositor_external_surface(is_opaque)
                            }
                            None => {
                                // Otherwise create a normal compositor surface and a single
                                // compositor tile that covers the entire surface.
                                let native_surface_id =
                                    resource_cache.create_compositor_surface(
                                        DeviceIntPoint::zero(),
                                        native_surface_size,
                                        is_opaque,
                                    );

                                let tile_id = NativeTileId {
                                    surface_id: native_surface_id,
                                    x: 0,
                                    y: 0,
                                };
                                resource_cache.create_compositor_tile(tile_id);

                                native_surface_id
                            }
                        };

                        ExternalNativeSurface {
                            used_this_frame: true,
                            native_surface_id,
                            image_dependencies: [ImageDependency::INVALID; 3],
                        }
                    });

                // Mark that the surface is referenced this frame so that the
                // backing native surface handle isn't freed.
                native_surface.used_this_frame = true;

                let update_params = match external_image_id {
                    Some(external_image) => {
                        // If this is an external image surface, then there's no update
                        // to be done. Just attach the current external image to the surface
                        // and we're done.
                        resource_cache.attach_compositor_external_image(
                            native_surface.native_surface_id,
                            external_image,
                        );
                        None
                    }
                    None => {
                        // If the image dependencies match, there is no need to update
                        // the backing native surface.
                        match dependency {
                            ExternalSurfaceDependency::Yuv{ image_dependencies, .. } => {
                                if image_dependencies == native_surface.image_dependencies {
                                    None
                                } else {
                                    Some(native_surface_size)
                                }
                            },
                            ExternalSurfaceDependency::Rgb{ image_dependency, .. } => {
                                if image_dependency == native_surface.image_dependencies[0] {
                                    None
                                } else {
                                    Some(native_surface_size)
                                }
                            },
                        }
                    }
                };

                (Some(native_surface.native_surface_id), update_params)
            }
        };

        // For compositor surfaces, if we didn't find an earlier sub-slice to add to,
        // we know we can append to the current slice.
        assert!(sub_slice_index < self.sub_slices.len() - 1);
        let sub_slice = &mut self.sub_slices[sub_slice_index];

        // Each compositor surface allocates a unique z-id
        sub_slice.compositor_surfaces.push(CompositorSurface {
            prohibited_rect: pic_clip_rect,
            is_opaque,
            descriptor: ExternalSurfaceDescriptor {
                local_rect: prim_info.prim_clip_box.to_rect(),
                local_clip_rect: prim_info.prim_clip_box.to_rect(),
                dependency,
                image_rendering,
                device_rect,
                surface_rect,
                clip_rect,
                transform: transform.cast_unit(),
                z_id: ZBufferId::invalid(),
                native_surface_id,
                update_params,
            },
        });

        true
    }

    /// Update the dependencies for each tile for a given primitive instance.
    pub fn update_prim_dependencies(
        &mut self,
        prim_instance: &mut PrimitiveInstance,
        prim_spatial_node_index: SpatialNodeIndex,
        local_prim_rect: LayoutRect,
        frame_context: &FrameVisibilityContext,
        data_stores: &DataStores,
        clip_store: &ClipStore,
        pictures: &[PicturePrimitive],
        resource_cache: &mut ResourceCache,
        color_bindings: &ColorBindingStorage,
        surface_stack: &[SurfaceIndex],
        composite_state: &mut CompositeState,
        gpu_cache: &mut GpuCache,
        is_root_tile_cache: bool,
    ) {
        // This primitive exists on the last element on the current surface stack.
        profile_scope!("update_prim_dependencies");
        let prim_surface_index = *surface_stack.last().unwrap();
        let prim_clip_chain = &prim_instance.vis.clip_chain;

        // If the primitive is directly drawn onto this picture cache surface, then
        // the pic_clip_rect is in the same space. If not, we need to map it from
        // the surface space into the picture cache space.
let on_picture_surface = prim_surface_index == self.surface_index; let pic_clip_rect = if on_picture_surface { prim_clip_chain.pic_clip_rect } else { // We want to get the rect in the tile cache surface space that this primitive // occupies, in order to enable correct invalidation regions. Each surface // that exists in the chain between this primitive and the tile cache surface // may have an arbitrary inflation factor (for example, in the case of a series // of nested blur elements). To account for this, step through the current // surface stack, mapping the primitive rect into each surface space, including // the inflation factor from each intermediate surface. let mut current_pic_clip_rect = prim_clip_chain.pic_clip_rect; let mut current_spatial_node_index = frame_context .surfaces[prim_surface_index.0] .surface_spatial_node_index; for surface_index in surface_stack.iter().rev() { let surface = &frame_context.surfaces[surface_index.0]; let map_local_to_surface = SpaceMapper::new_with_target( surface.surface_spatial_node_index, current_spatial_node_index, surface.rect, frame_context.spatial_tree, ); // Map the rect into the parent surface, and inflate if this surface requires // it. If the rect can't be mapping (e.g. due to an invalid transform) then // just bail out from the dependencies and cull this primitive. current_pic_clip_rect = match map_local_to_surface.map(&current_pic_clip_rect) { Some(rect) => { rect.inflate(surface.inflation_factor, surface.inflation_factor) } None => { return; } }; current_spatial_node_index = surface.surface_spatial_node_index; } current_pic_clip_rect }; // Get the tile coordinates in the picture space. let (p0, p1) = self.get_tile_coords_for_rect(&pic_clip_rect); // If the primitive is outside the tiling rects, it's known to not // be visible. if p0.x == p1.x || p0.y == p1.y { return; } // Build the list of resources that this primitive has dependencies on. 
let mut prim_info = PrimitiveDependencyInfo::new( prim_instance.uid(), pic_clip_rect.to_box2d(), ); let mut sub_slice_index = self.sub_slices.len() - 1; // Only need to evaluate sub-slice regions if we have compositor surfaces present if sub_slice_index > 0 { // Find the first sub-slice we can add this primitive to (we want to add // prims to the primary surface if possible, so they get subpixel AA). for (i, sub_slice) in self.sub_slices.iter_mut().enumerate() { let mut intersects_prohibited_region = false; for surface in &mut sub_slice.compositor_surfaces { if pic_clip_rect.intersects(&surface.prohibited_rect) { surface.prohibited_rect = surface.prohibited_rect.union(&pic_clip_rect); intersects_prohibited_region = true; } } if !intersects_prohibited_region { sub_slice_index = i; break; } } } // Include the prim spatial node, if differs relative to cache root. if prim_spatial_node_index != self.spatial_node_index { prim_info.spatial_nodes.push(prim_spatial_node_index); } // If there was a clip chain, add any clip dependencies to the list for this tile. let clip_instances = &clip_store .clip_node_instances[prim_clip_chain.clips_range.to_range()]; for clip_instance in clip_instances { prim_info.clips.push(clip_instance.handle.uid()); // If the clip has the same spatial node, the relative transform // will always be the same, so there's no need to depend on it. if clip_instance.spatial_node_index != self.spatial_node_index && !prim_info.spatial_nodes.contains(&clip_instance.spatial_node_index) { prim_info.spatial_nodes.push(clip_instance.spatial_node_index); } } // Certain primitives may select themselves to be a backdrop candidate, which is // then applied below. let mut backdrop_candidate = None; // For pictures, we don't (yet) know the valid clip rect, so we can't correctly // use it to calculate the local bounding rect for the tiles. 
If we include them // then we may calculate a bounding rect that is too large, since it won't include // the clip bounds of the picture. Excluding them from the bounding rect here // fixes any correctness issues (the clips themselves are considered when we // consider the bounds of the primitives that are *children* of the picture), // however it does potentially result in some un-necessary invalidations of a // tile (in cases where the picture local rect affects the tile, but the clip // rect eventually means it doesn't affect that tile). // TODO(gw): Get picture clips earlier (during the initial picture traversal // pass) so that we can calculate these correctly. match prim_instance.kind { PrimitiveInstanceKind::Picture { pic_index,.. } => { // Pictures can depend on animated opacity bindings. let pic = &pictures[pic_index.0]; if let Some(PictureCompositeMode::Filter(Filter::Opacity(binding, _))) = pic.requested_composite_mode { prim_info.opacity_bindings.push(binding.into()); } } PrimitiveInstanceKind::Rectangle { data_handle, color_binding_index, .. } => { // Rectangles can only form a backdrop candidate if they are known opaque. // TODO(gw): We could resolve the opacity binding here, but the common // case for background rects is that they don't have animated opacity. let color = match data_stores.prim[data_handle].kind { PrimitiveTemplateKind::Rectangle { color, .. } => { frame_context.scene_properties.resolve_color(&color) } _ => unreachable!(), }; if color.a >= 1.0 { backdrop_candidate = Some(BackdropInfo { opaque_rect: pic_clip_rect, kind: Some(BackdropKind::Color { color }), }); } if color_binding_index != ColorBindingIndex::INVALID { prim_info.color_binding = Some(color_bindings[color_binding_index].into()); } } PrimitiveInstanceKind::Image { data_handle, ref mut is_compositor_surface, .. 
} => { let image_key = &data_stores.image[data_handle]; let image_data = &image_key.kind; let mut promote_to_surface = false; let mut promote_with_flip_y = false; match self.can_promote_to_surface(image_key.common.flags, prim_clip_chain, prim_spatial_node_index, is_root_tile_cache, sub_slice_index, frame_context) { SurfacePromotionResult::Failed => { } SurfacePromotionResult::Success{flip_y} => { promote_to_surface = true; promote_with_flip_y = flip_y; } } // Native OS compositors (DC and CA, at least) support premultiplied alpha // only. If we have an image that's not pre-multiplied alpha, we can't promote it. if image_data.alpha_type == AlphaType::Alpha { promote_to_surface = false; } if let Some(image_properties) = resource_cache.get_image_properties(image_data.key) { // For an image to be a possible opaque backdrop, it must: // - Have a valid, opaque image descriptor // - Not use tiling (since they can fail to draw) // - Not having any spacing / padding // - Have opaque alpha in the instance (flattened) color if image_properties.descriptor.is_opaque() && image_properties.tiling.is_none() && image_data.tile_spacing == LayoutSize::zero() && image_data.color.a >= 1.0 { backdrop_candidate = Some(BackdropInfo { opaque_rect: pic_clip_rect, kind: None, }); } } if promote_to_surface { promote_to_surface = self.setup_compositor_surfaces_rgb( sub_slice_index, &mut prim_info, image_key.common.flags, local_prim_rect, prim_spatial_node_index, pic_clip_rect, frame_context, ImageDependency { key: image_data.key, generation: resource_cache.get_image_generation(image_data.key), }, image_data.key, resource_cache, composite_state, gpu_cache, image_data.image_rendering, promote_with_flip_y, ); } *is_compositor_surface = promote_to_surface; if promote_to_surface { prim_instance.vis.state = VisibilityState::Culled; return; } else { prim_info.images.push(ImageDependency { key: image_data.key, generation: resource_cache.get_image_generation(image_data.key), }); } } 
PrimitiveInstanceKind::YuvImage { data_handle, ref mut is_compositor_surface, .. } => { let prim_data = &data_stores.yuv_image[data_handle]; let mut promote_to_surface = match self.can_promote_to_surface( prim_data.common.flags, prim_clip_chain, prim_spatial_node_index, is_root_tile_cache, sub_slice_index, frame_context) { SurfacePromotionResult::Failed => false, SurfacePromotionResult::Success{flip_y} => !flip_y, }; // TODO(gw): When we support RGBA images for external surfaces, we also // need to check if opaque (YUV images are implicitly opaque). // If this primitive is being promoted to a surface, construct an external // surface descriptor for use later during batching and compositing. We only // add the image keys for this primitive as a dependency if this is _not_ // a promoted surface, since we don't want the tiles to invalidate when the // video content changes, if it's a compositor surface! if promote_to_surface { // Build dependency for each YUV plane, with current image generation for // later detection of when the composited surface has changed. let mut image_dependencies = [ImageDependency::INVALID; 3]; for (key, dep) in prim_data.kind.yuv_key.iter().cloned().zip(image_dependencies.iter_mut()) { *dep = ImageDependency { key, generation: resource_cache.get_image_generation(key), } } promote_to_surface = self.setup_compositor_surfaces_yuv( sub_slice_index, &mut prim_info, prim_data.common.flags, local_prim_rect, prim_spatial_node_index, pic_clip_rect, frame_context, &image_dependencies, &prim_data.kind.yuv_key, resource_cache, composite_state, gpu_cache, prim_data.kind.image_rendering, prim_data.kind.color_depth, prim_data.kind.color_space, prim_data.kind.format, ); } // Store on the YUV primitive instance whether this is a promoted surface. // This is used by the batching code to determine whether to draw the // image to the content tiles, or just a transparent z-write. 
*is_compositor_surface = promote_to_surface; if promote_to_surface { prim_instance.vis.state = VisibilityState::Culled; return; } else { prim_info.images.extend( prim_data.kind.yuv_key.iter().map(|key| { ImageDependency { key: *key, generation: resource_cache.get_image_generation(*key), } }) ); } } PrimitiveInstanceKind::ImageBorder { data_handle, .. } => { let border_data = &data_stores.image_border[data_handle].kind; prim_info.images.push(ImageDependency { key: border_data.request.key, generation: resource_cache.get_image_generation(border_data.request.key), }); } PrimitiveInstanceKind::Clear { .. } => { backdrop_candidate = Some(BackdropInfo { opaque_rect: pic_clip_rect, kind: Some(BackdropKind::Clear), }); } PrimitiveInstanceKind::LinearGradient { data_handle, .. } | PrimitiveInstanceKind::CachedLinearGradient { data_handle, .. } => { let gradient_data = &data_stores.linear_grad[data_handle]; if gradient_data.stops_opacity.is_opaque && gradient_data.tile_spacing == LayoutSize::zero() { backdrop_candidate = Some(BackdropInfo { opaque_rect: pic_clip_rect, kind: None, }); } } PrimitiveInstanceKind::ConicGradient { data_handle, .. } => { let gradient_data = &data_stores.conic_grad[data_handle]; if gradient_data.stops_opacity.is_opaque && gradient_data.tile_spacing == LayoutSize::zero() { backdrop_candidate = Some(BackdropInfo { opaque_rect: pic_clip_rect, kind: None, }); } } PrimitiveInstanceKind::RadialGradient { data_handle, .. } => { let gradient_data = &data_stores.radial_grad[data_handle]; if gradient_data.stops_opacity.is_opaque && gradient_data.tile_spacing == LayoutSize::zero() { backdrop_candidate = Some(BackdropInfo { opaque_rect: pic_clip_rect, kind: None, }); } } PrimitiveInstanceKind::LineDecoration { .. } | PrimitiveInstanceKind::NormalBorder { .. } | PrimitiveInstanceKind::TextRun { .. } | PrimitiveInstanceKind::Backdrop { .. 
} => { // These don't contribute dependencies } }; // If this primitive considers itself a backdrop candidate, apply further // checks to see if it matches all conditions to be a backdrop. let mut vis_flags = PrimitiveVisibilityFlags::empty(); let sub_slice = &mut self.sub_slices[sub_slice_index]; if let Some(backdrop_candidate) = backdrop_candidate { let is_suitable_backdrop = match backdrop_candidate.kind { Some(BackdropKind::Clear) => { // Clear prims are special - they always end up in their own slice, // and always set the backdrop. In future, we hope to completely // remove clear prims, since they don't integrate with the compositing // system cleanly. true } Some(BackdropKind::Color { .. }) | None => { // Check a number of conditions to see if we can consider this // primitive as an opaque backdrop rect. Several of these are conservative // checks and could be relaxed in future. However, these checks // are quick and capture the common cases of background rects and images. // Specifically, we currently require: // - The primitive is on the main picture cache surface. // - Same coord system as picture cache (ensures rects are axis-aligned). // - No clip masks exist. 
let same_coord_system = { let prim_spatial_node = &frame_context.spatial_tree .spatial_nodes[prim_spatial_node_index.0 as usize]; let surface_spatial_node = &frame_context.spatial_tree .spatial_nodes[self.spatial_node_index.0 as usize]; prim_spatial_node.coordinate_system_id == surface_spatial_node.coordinate_system_id }; same_coord_system && on_picture_surface } }; if sub_slice_index == 0 && is_suitable_backdrop && sub_slice.compositor_surfaces.is_empty() && !prim_clip_chain.needs_mask { if backdrop_candidate.opaque_rect.contains_rect(&self.backdrop.opaque_rect) { self.backdrop.opaque_rect = backdrop_candidate.opaque_rect; } if let Some(kind) = backdrop_candidate.kind { if backdrop_candidate.opaque_rect.contains_rect(&self.local_rect) { // If we have a color backdrop, mark the visibility flags // of the primitive so it is skipped during batching (and // also clears any previous primitives). if let BackdropKind::Color { .. } = kind { vis_flags |= PrimitiveVisibilityFlags::IS_BACKDROP; } self.backdrop.kind = Some(kind); } } } } // Record any new spatial nodes in the used list. for spatial_node_index in &prim_info.spatial_nodes { self.spatial_node_comparer.register_used_transform( *spatial_node_index, self.frame_id, frame_context.spatial_tree, ); } // Truncate the lengths of dependency arrays to the max size we can handle. // Any arrays this size or longer will invalidate every frame. prim_info.clips.truncate(MAX_PRIM_SUB_DEPS); prim_info.opacity_bindings.truncate(MAX_PRIM_SUB_DEPS); prim_info.spatial_nodes.truncate(MAX_PRIM_SUB_DEPS); prim_info.images.truncate(MAX_PRIM_SUB_DEPS); // Normalize the tile coordinates before adding to tile dependencies. // For each affected tile, mark any of the primitive dependencies. for y in p0.y .. p1.y { for x in p0.x .. p1.x { // TODO(gw): Convert to 2d array temporarily to avoid hash lookups per-tile? 
let key = TileOffset::new(x, y); let tile = sub_slice.tiles.get_mut(&key).expect("bug: no tile"); tile.add_prim_dependency(&prim_info); } } prim_instance.vis.state = VisibilityState::Coarse { filter: BatchFilter { rect_in_pic_space: pic_clip_rect, sub_slice_index: SubSliceIndex::new(sub_slice_index), }, vis_flags, }; } /// Print debug information about this picture cache to a tree printer. fn print(&self) { // TODO(gw): This initial implementation is very basic - just printing // the picture cache state to stdout. In future, we can // make this dump each frame to a file, and produce a report // stating which frames had invalidations. This will allow // diff'ing the invalidation states in a visual tool. let mut pt = PrintTree::new("Picture Cache"); pt.new_level(format!("Slice {:?}", self.slice)); pt.add_item(format!("fract_offset: {:?}", self.fract_offset)); pt.add_item(format!("background_color: {:?}", self.background_color)); for (sub_slice_index, sub_slice) in self.sub_slices.iter().enumerate() { pt.new_level(format!("SubSlice {:?}", sub_slice_index)); for y in self.tile_bounds_p0.y .. self.tile_bounds_p1.y { for x in self.tile_bounds_p0.x .. self.tile_bounds_p1.x { let key = TileOffset::new(x, y); let tile = &sub_slice.tiles[&key]; tile.print(&mut pt); } } pt.end_level(); } pt.end_level(); } fn calculate_subpixel_mode(&self) -> SubpixelMode { let has_opaque_bg_color = self.background_color.map_or(false, |c| c.a >= 1.0); // If the overall tile cache is known opaque, subpixel AA is allowed everywhere if has_opaque_bg_color { return SubpixelMode::Allow; } // If we didn't find any valid opaque backdrop, no subpixel AA allowed if self.backdrop.opaque_rect.is_empty() { return SubpixelMode::Deny; } // If the opaque backdrop rect covers the entire tile cache surface, // we can allow subpixel AA anywhere, skipping the per-text-run tests // later on during primitive preparation. 
    if self.backdrop.opaque_rect.contains_rect(&self.local_rect) {
        return SubpixelMode::Allow;
    }

    // If none of the simple cases above match, we need test where we can support subpixel AA.
    // TODO(gw): In future, it may make sense to have > 1 inclusion rect,
    //           but this handles the common cases.
    // TODO(gw): If a text run gets animated such that it's moving in a way that is
    //           sometimes intersecting with the video rect, this can result in subpixel
    //           AA flicking on/off for that text run. It's probably very rare, but
    //           something we should handle in future.
    SubpixelMode::Conditional {
        allowed_rect: self.backdrop.opaque_rect,
    }
}

/// Apply any updates after prim dependency updates. This applies
/// any late tile invalidations, and sets up the dirty rect and
/// set of tile blits.
pub fn post_update(
    &mut self,
    frame_context: &FrameVisibilityContext,
    frame_state: &mut FrameVisibilityState,
) {
    self.dirty_region.reset(self.spatial_node_index);
    self.subpixel_mode = self.calculate_subpixel_mode();

    let map_pic_to_world = SpaceMapper::new_with_target(
        ROOT_SPATIAL_NODE_INDEX,
        self.spatial_node_index,
        frame_context.global_screen_world_rect,
        frame_context.spatial_tree,
    );

    // A simple GC of the native external surface cache, to remove and free any
    // surfaces that were not referenced during the update_prim_dependencies pass.
    // NOTE: this retain() has side effects — destroying the native surface and
    // invalidating the composite dirty rects for dropped entries.
    self.external_native_surface_cache.retain(|_, surface| {
        if !surface.used_this_frame {
            // If we removed an external surface, we need to mark the dirty rects as
            // invalid so a full composite occurs on the next frame.
            frame_state.composite_state.dirty_rects_are_valid = false;

            frame_state.resource_cache.destroy_compositor_surface(surface.native_surface_id);
        }

        surface.used_this_frame
    });

    // Detect if the picture cache was scrolled or scaled. In this case,
    // the device space dirty rects aren't applicable (until we properly
    // integrate with OS compositors that can handle scrolling slices).
    let root_transform = frame_context
        .spatial_tree
        .get_relative_transform(
            self.spatial_node_index,
            ROOT_SPATIAL_NODE_INDEX,
        );
    let root_transform = match root_transform {
        CoordinateSpaceMapping::Local => ScaleOffset::identity(),
        CoordinateSpaceMapping::ScaleOffset(scale_offset) => scale_offset,
        CoordinateSpaceMapping::Transform(..) => panic!("bug: picture caches don't support complex transforms"),
    };
    // Tolerance for detecting scroll / scale changes in the root transform.
    const EPSILON: f32 = 0.001;
    let root_translation_changed =
        !root_transform.offset.x.approx_eq_eps(&self.root_transform.offset.x, &EPSILON) ||
        !root_transform.offset.y.approx_eq_eps(&self.root_transform.offset.y, &EPSILON);
    let root_scale_changed =
        !root_transform.scale.x.approx_eq_eps(&self.root_transform.scale.x, &EPSILON) ||
        !root_transform.scale.y.approx_eq_eps(&self.root_transform.scale.y, &EPSILON);

    if root_translation_changed || root_scale_changed || frame_context.config.force_invalidation {
        self.root_transform = root_transform;
        frame_state.composite_state.dirty_rects_are_valid = false;
    }

    let pic_to_world_mapper = SpaceMapper::new_with_target(
        ROOT_SPATIAL_NODE_INDEX,
        self.spatial_node_index,
        frame_context.global_screen_world_rect,
        frame_context.spatial_tree,
    );

    let mut ctx = TilePostUpdateContext {
        pic_to_world_mapper,
        global_device_pixel_scale: frame_context.global_device_pixel_scale,
        local_clip_rect: self.local_clip_rect,
        backdrop: None,
        opacity_bindings: &self.opacity_bindings,
        color_bindings: &self.color_bindings,
        current_tile_size: self.current_tile_size,
        local_rect: self.local_rect,
        z_id: ZBufferId::invalid(),
        // A scale change invalidates all tiles (translation alone does not).
        invalidate_all: root_scale_changed || frame_context.config.force_invalidation,
    };

    let mut state = TilePostUpdateState {
        resource_cache: frame_state.resource_cache,
        composite_state: frame_state.composite_state,
        compare_cache: &mut self.compare_cache,
        spatial_node_comparer: &mut self.spatial_node_comparer,
    };

    // Step through each tile and invalidate if the dependencies have changed. Determine
    // the current opacity setting and whether it's changed.
    // NOTE: iteration is in reverse so z-ids are handed out back-to-front; the order
    // of z_generator.next() calls here defines final compositing order.
    for (i, sub_slice) in self.sub_slices.iter_mut().enumerate().rev() {
        // The backdrop is only relevant for the first sub-slice
        if i == 0 {
            ctx.backdrop = Some(self.backdrop);
        }

        for compositor_surface in sub_slice.compositor_surfaces.iter_mut().rev() {
            compositor_surface.descriptor.z_id = state.composite_state.z_generator.next();
        }

        ctx.z_id = state.composite_state.z_generator.next();

        for tile in sub_slice.tiles.values_mut() {
            tile.post_update(&ctx, &mut state, frame_context);
        }
    }

    // Register any opaque external compositor surfaces as potential occluders. This
    // is especially useful when viewing video in full-screen mode, as it is
    // able to occlude every background tile (avoiding allocation, rasterizion
    // and compositing).
    for sub_slice in &self.sub_slices {
        for compositor_surface in &sub_slice.compositor_surfaces {
            if compositor_surface.is_opaque {
                let local_surface_rect = compositor_surface
                    .descriptor
                    .local_rect
                    .intersection(&compositor_surface.descriptor.local_clip_rect)
                    .and_then(|r| {
                        r.intersection(&self.local_clip_rect)
                    });

                if let Some(local_surface_rect) = local_surface_rect {
                    let world_surface_rect = map_pic_to_world
                        .map(&local_surface_rect)
                        .expect("bug: unable to map external surface to world space");

                    frame_state.composite_state.register_occluder(
                        compositor_surface.descriptor.z_id,
                        world_surface_rect,
                    );
                }
            }
        }
    }

    // Register the opaque region of this tile cache as an occluder, which
    // is used later in the frame to occlude other tiles.
    if !self.backdrop.opaque_rect.is_empty() {
        let z_id_backdrop = frame_state.composite_state.z_generator.next();

        let backdrop_rect = self.backdrop.opaque_rect
            .intersection(&self.local_rect)
            .and_then(|r| {
                r.intersection(&self.local_clip_rect)
            });

        if let Some(backdrop_rect) = backdrop_rect {
            let world_backdrop_rect = map_pic_to_world
                .map(&backdrop_rect)
                .expect("bug: unable to map backdrop to world space");

            // Since we register the entire backdrop rect, use the opaque z-id for the
            // picture cache slice.
            frame_state.composite_state.register_occluder(
                z_id_backdrop,
                world_backdrop_rect,
            );
        }
    }
}
}

/// Scratch vectors retained between frames to avoid re-allocation
/// during picture traversal.
pub struct PictureScratchBuffer {
    surface_stack: Vec<SurfaceIndex>,
    clip_chain_ids: Vec<ClipChainId>,
}

impl Default for PictureScratchBuffer {
    fn default() -> Self {
        PictureScratchBuffer {
            surface_stack: Vec::new(),
            clip_chain_ids: Vec::new(),
        }
    }
}

impl PictureScratchBuffer {
    /// Clear both scratch vectors at the start of a frame (capacity is kept).
    pub fn begin_frame(&mut self) {
        self.surface_stack.clear();
        self.clip_chain_ids.clear();
    }

    /// Hand allocations back to the recycler.
    /// NOTE(review): only `surface_stack` is recycled here; `clip_chain_ids`
    /// is cleared in `begin_frame` but never recycled — confirm intentional.
    pub fn recycle(&mut self, recycler: &mut Recycler) {
        recycler.recycle_vec(&mut self.surface_stack);
    }
}

/// Maintains a stack of picture and surface information, that
/// is used during the initial picture traversal.
pub struct PictureUpdateState<'a> {
    surfaces: &'a mut Vec<SurfaceInfo>,
    surface_stack: Vec<SurfaceIndex>,
}

impl<'a> PictureUpdateState<'a> {
    /// Entry point: traverse the picture tree rooted at `pic_index`, updating
    /// surface configuration for every picture. The scratch surface stack is
    /// borrowed from `buffers` and returned to it afterwards.
    pub fn update_all(
        buffers: &mut PictureScratchBuffer,
        surfaces: &'a mut Vec<SurfaceInfo>,
        pic_index: PictureIndex,
        picture_primitives: &mut [PicturePrimitive],
        frame_context: &FrameBuildingContext,
        gpu_cache: &mut GpuCache,
        clip_store: &ClipStore,
        data_stores: &mut DataStores,
    ) {
        profile_scope!("UpdatePictures");
        profile_marker!("UpdatePictures");

        let mut state = PictureUpdateState {
            surfaces,
            surface_stack: buffers.surface_stack.take().cleared(),
        };

        // The root surface is always index 0.
        state.surface_stack.push(SurfaceIndex(0));

        state.update(
            pic_index,
            picture_primitives,
            frame_context,
            gpu_cache,
            clip_store,
            data_stores,
        );

        buffers.surface_stack = state.surface_stack.take();
    }

    /// Return the current surface
    fn current_surface(&self) -> &SurfaceInfo {
        &self.surfaces[self.surface_stack.last().unwrap().0]
    }

    /// Return the current surface (mutable)
    fn current_surface_mut(&mut self) -> &mut SurfaceInfo {
        &mut self.surfaces[self.surface_stack.last().unwrap().0]
    }

    /// Push a new surface onto the update stack.
fn push_surface( &mut self, surface: SurfaceInfo, ) -> SurfaceIndex { let surface_index = SurfaceIndex(self.surfaces.len()); self.surfaces.push(surface); self.surface_stack.push(surface_index); surface_index } /// Pop a surface on the way up the picture traversal fn pop_surface(&mut self) -> SurfaceIndex{ self.surface_stack.pop().unwrap() } /// Update a picture, determining surface configuration, /// rasterization roots, and (in future) whether there /// are cached surfaces that can be used by this picture. fn update( &mut self, pic_index: PictureIndex, picture_primitives: &mut [PicturePrimitive], frame_context: &FrameBuildingContext, gpu_cache: &mut GpuCache, clip_store: &ClipStore, data_stores: &mut DataStores, ) { if let Some(prim_list) = picture_primitives[pic_index.0].pre_update( self, frame_context, ) { for child_pic_index in &prim_list.child_pictures { self.update( *child_pic_index, picture_primitives, frame_context, gpu_cache, clip_store, data_stores, ); } picture_primitives[pic_index.0].post_update( prim_list, self, frame_context, data_stores, ); } } } #[derive(Debug, Copy, Clone, PartialEq)] #[cfg_attr(feature = "capture", derive(Serialize))] pub struct SurfaceIndex(pub usize); pub const ROOT_SURFACE_INDEX: SurfaceIndex = SurfaceIndex(0); /// Describes the render task configuration for a picture surface. #[derive(Debug)] pub enum SurfaceRenderTasks { /// The common type of surface is a single render task Simple(RenderTaskId), /// Some surfaces draw their content, and then have further tasks applied /// to that input (such as blur passes for shadows). These tasks have a root /// (the output of the surface), and a port (for attaching child task dependencies /// to the content). Chained { root_task_id: RenderTaskId, port_task_id: RenderTaskId }, /// Picture caches are a single surface consisting of multiple render /// tasks, one per tile with dirty content. Tiled(Vec<RenderTaskId>), } /// Information about an offscreen surface. 
For now, /// it contains information about the size and coordinate /// system of the surface. In the future, it will contain /// information about the contents of the surface, which /// will allow surfaces to be cached / retained between /// frames and display lists. #[derive(Debug)] pub struct SurfaceInfo { /// A local rect defining the size of this surface, in the /// coordinate system of the surface itself. pub rect: PictureRect, /// Part of the surface that we know to be opaque. pub opaque_rect: PictureRect, /// Helper structs for mapping local rects in different /// coordinate systems into the surface coordinates. pub map_local_to_surface: SpaceMapper<LayoutPixel, PicturePixel>, /// Defines the positioning node for the surface itself, /// and the rasterization root for this surface. pub raster_spatial_node_index: SpatialNodeIndex, pub surface_spatial_node_index: SpatialNodeIndex, /// This is set when the render task is created. pub render_tasks: Option<SurfaceRenderTasks>, /// How much the local surface rect should be inflated (for blur radii). pub inflation_factor: f32, /// The device pixel ratio specific to this surface. pub device_pixel_scale: DevicePixelScale, /// The scale factors of the surface to raster transform. 
pub scale_factors: (f32, f32), /// The allocated device rect for this surface pub device_rect: Option<DeviceRect>, } impl SurfaceInfo { pub fn new( surface_spatial_node_index: SpatialNodeIndex, raster_spatial_node_index: SpatialNodeIndex, inflation_factor: f32, world_rect: WorldRect, spatial_tree: &SpatialTree, device_pixel_scale: DevicePixelScale, scale_factors: (f32, f32), ) -> Self { let map_surface_to_world = SpaceMapper::new_with_target( ROOT_SPATIAL_NODE_INDEX, surface_spatial_node_index, world_rect, spatial_tree, ); let pic_bounds = map_surface_to_world .unmap(&map_surface_to_world.bounds) .unwrap_or_else(PictureRect::max_rect); let map_local_to_surface = SpaceMapper::new( surface_spatial_node_index, pic_bounds, ); SurfaceInfo { rect: PictureRect::zero(), opaque_rect: PictureRect::zero(), map_local_to_surface, render_tasks: None, raster_spatial_node_index, surface_spatial_node_index, inflation_factor, device_pixel_scale, scale_factors, device_rect: None, } } pub fn get_device_rect(&self) -> DeviceRect { self.device_rect.expect("bug: queried before surface was initialized") } } #[derive(Debug)] #[cfg_attr(feature = "capture", derive(Serialize))] pub struct RasterConfig { /// How this picture should be composited into /// the parent surface. pub composite_mode: PictureCompositeMode, /// Index to the surface descriptor for this /// picture. pub surface_index: SurfaceIndex, /// Whether this picture establishes a rasterization root. pub establishes_raster_root: bool, /// Scaling factor applied to fit within MAX_SURFACE_SIZE when /// establishing a raster root. /// Most code doesn't need to know about it, since it is folded /// into device_pixel_scale when the rendertask is set up. /// However e.g. text rasterization uses it to ensure consistent /// on-screen font size. pub root_scaling_factor: f32, /// The world rect of this picture clipped to the current culling /// rect. 
This is used for determining the size of the render /// target rect for this surface, and calculating raster scale /// factors. pub clipped_bounding_rect: WorldRect, } bitflags! { /// A set of flags describing why a picture may need a backing surface. #[cfg_attr(feature = "capture", derive(Serialize))] pub struct BlitReason: u32 { /// Mix-blend-mode on a child that requires isolation. const ISOLATE = 1; /// Clip node that _might_ require a surface. const CLIP = 2; /// Preserve-3D requires a surface for plane-splitting. const PRESERVE3D = 4; /// A backdrop that is reused which requires a surface. const BACKDROP = 8; } } /// Specifies how this Picture should be composited /// onto the target it belongs to. #[allow(dead_code)] #[derive(Debug, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] pub enum PictureCompositeMode { /// Apply CSS mix-blend-mode effect. MixBlend(MixBlendMode), /// Apply a CSS filter (except component transfer). Filter(Filter), /// Apply a component transfer filter. ComponentTransferFilter(FilterDataHandle), /// Draw to intermediate surface, copy straight across. This /// is used for CSS isolation, and plane splitting. Blit(BlitReason), /// Used to cache a picture as a series of tiles. 
TileCache { slice_id: SliceId, }, /// Apply an SVG filter SvgFilter(Vec<FilterPrimitive>, Vec<SFilterData>), } impl PictureCompositeMode { pub fn inflate_picture_rect(&self, picture_rect: PictureRect, scale_factors: (f32, f32)) -> PictureRect { let mut result_rect = picture_rect; match self { PictureCompositeMode::Filter(filter) => match filter { Filter::Blur(width, height) => { let width_factor = clamp_blur_radius(*width, scale_factors).ceil() * BLUR_SAMPLE_SCALE; let height_factor = clamp_blur_radius(*height, scale_factors).ceil() * BLUR_SAMPLE_SCALE; result_rect = picture_rect.inflate(width_factor, height_factor); }, Filter::DropShadows(shadows) => { let mut max_inflation: f32 = 0.0; for shadow in shadows { max_inflation = max_inflation.max(shadow.blur_radius); } max_inflation = clamp_blur_radius(max_inflation, scale_factors).ceil() * BLUR_SAMPLE_SCALE; result_rect = picture_rect.inflate(max_inflation, max_inflation); }, _ => {} } PictureCompositeMode::SvgFilter(primitives, _) => { let mut output_rects = Vec::with_capacity(primitives.len()); for (cur_index, primitive) in primitives.iter().enumerate() { let output_rect = match primitive.kind { FilterPrimitiveKind::Blur(ref primitive) => { let input = primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect); let width_factor = primitive.width.round() * BLUR_SAMPLE_SCALE; let height_factor = primitive.height.round() * BLUR_SAMPLE_SCALE; input.inflate(width_factor, height_factor) } FilterPrimitiveKind::DropShadow(ref primitive) => { let inflation_factor = primitive.shadow.blur_radius.ceil() * BLUR_SAMPLE_SCALE; let input = primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect); let shadow_rect = input.inflate(inflation_factor, inflation_factor); input.union(&shadow_rect.translate(primitive.shadow.offset * Scale::new(1.0))) } FilterPrimitiveKind::Blend(ref primitive) => { primitive.input1.to_index(cur_index).map(|index| 
output_rects[index]).unwrap_or(picture_rect) .union(&primitive.input2.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect)) } FilterPrimitiveKind::Composite(ref primitive) => { primitive.input1.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect) .union(&primitive.input2.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect)) } FilterPrimitiveKind::Identity(ref primitive) => primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect), FilterPrimitiveKind::Opacity(ref primitive) => primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect), FilterPrimitiveKind::ColorMatrix(ref primitive) => primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect), FilterPrimitiveKind::ComponentTransfer(ref primitive) => primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect), FilterPrimitiveKind::Offset(ref primitive) => { let input_rect = primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect); input_rect.translate(primitive.offset * Scale::new(1.0)) }, FilterPrimitiveKind::Flood(..) => picture_rect, }; output_rects.push(output_rect); result_rect = result_rect.union(&output_rect); } } _ => {}, } result_rect } } /// Enum value describing the place of a picture in a 3D context. #[derive(Clone, Debug)] #[cfg_attr(feature = "capture", derive(Serialize))] pub enum Picture3DContext<C> { /// The picture is not a part of 3D context sub-hierarchy. Out, /// The picture is a part of 3D context. In { /// Additional data per child for the case of this a root of 3D hierarchy. root_data: Option<Vec<C>>, /// The spatial node index of an "ancestor" element, i.e. one /// that establishes the transformed element's containing block. 
///
        /// See CSS spec draft for more details:
        /// https://drafts.csswg.org/css-transforms-2/#accumulated-3d-transformation-matrix-computation
        ancestor_index: SpatialNodeIndex,
    },
}

/// Information about a preserve-3D hierarchy child that has been plane-split
/// and ordered according to the view direction.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct OrderedPictureChild {
    pub anchor: PlaneSplitAnchor,
    pub spatial_node_index: SpatialNodeIndex,
    pub gpu_address: GpuCacheAddress,
}

bitflags! {
    /// A set of flags describing why a picture may need a backing surface.
    #[cfg_attr(feature = "capture", derive(Serialize))]
    pub struct ClusterFlags: u32 {
        /// Whether this cluster is visible when the position node is a backface.
        const IS_BACKFACE_VISIBLE = 1;
        /// This flag is set during the first pass picture traversal, depending on whether
        /// the cluster is visible or not. It's read during the second pass when primitives
        /// consult their owning clusters to see if the primitive itself is visible.
        const IS_VISIBLE = 2;
        /// Is a backdrop-filter cluster that requires special handling during post_update.
        const IS_BACKDROP_FILTER = 4;
    }
}

/// Descriptor for a cluster of primitives. For now, this is quite basic but will be
/// extended to handle more spatial clustering of primitives.
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct PrimitiveCluster {
    /// The positioning node for this cluster.
    pub spatial_node_index: SpatialNodeIndex,
    /// The bounding rect of the cluster, in the local space of the spatial node.
    /// This is used to quickly determine the overall bounding rect for a picture
    /// during the first picture traversal, which is needed for local scale
    /// determination, and render task size calculations.
    bounding_rect: LayoutRect,
    /// a part of the cluster that we know to be opaque if any. Does not always
    /// describe the entire opaque region, but all content within that rect must
    /// be opaque.
    pub opaque_rect: LayoutRect,
    /// The range of primitive instance indices associated with this cluster.
    pub prim_range: Range<usize>,
    /// Various flags / state for this cluster.
    pub flags: ClusterFlags,
}

impl PrimitiveCluster {
    /// Construct a new primitive cluster for a given positioning node.
    /// Rects start at zero and the instance range is empty, anchored at
    /// `first_instance_index`; both grow via `add_instance`.
    fn new(
        spatial_node_index: SpatialNodeIndex,
        flags: ClusterFlags,
        first_instance_index: usize,
    ) -> Self {
        PrimitiveCluster {
            bounding_rect: LayoutRect::zero(),
            opaque_rect: LayoutRect::zero(),
            spatial_node_index,
            flags,
            prim_range: first_instance_index..first_instance_index
        }
    }

    /// Return true if this cluster is compatible with the given params
    /// (same flags and same positioning node), i.e. a new instance with
    /// these params may be appended to it.
    pub fn is_compatible(
        &self,
        spatial_node_index: SpatialNodeIndex,
        flags: ClusterFlags,
    ) -> bool {
        self.flags == flags && self.spatial_node_index == spatial_node_index
    }

    /// The range of primitive instance indices owned by this cluster.
    pub fn prim_range(&self) -> Range<usize> {
        self.prim_range.clone()
    }

    /// Add a primitive instance to this cluster, at the start or end
    fn add_instance(
        &mut self,
        culling_rect: &LayoutRect,
        instance_index: usize,
    ) {
        // Instances must be appended contiguously at the end of the range.
        debug_assert_eq!(instance_index, self.prim_range.end);
        self.bounding_rect = self.bounding_rect.union(culling_rect);
        self.prim_range.end += 1;
    }
}

/// A list of primitive instances that are added to a picture
/// This ensures we can keep a list of primitives that
/// are pictures, for a fast initial traversal of the picture
/// tree without walking the instance list.
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct PrimitiveList {
    /// List of primitives grouped into clusters.
    pub clusters: Vec<PrimitiveCluster>,
    // Flat list of all primitive instances added to this picture
    // (clusters index into it via their prim_range).
    pub prim_instances: Vec<PrimitiveInstance>,
    // Indices of child picture primitives, recorded as they are added,
    // for the fast picture-tree traversal mentioned above.
    pub child_pictures: Vec<PictureIndex>,
    /// The number of preferred compositor surfaces that were found when
    /// adding prims to this list.
    pub compositor_surface_count: usize,
}

impl PrimitiveList {
    /// Construct an empty primitive list. This is
    /// just used during the take_context / restore_context
    /// borrow check dance, which will be removed as the
    /// picture traversal pass is completed.
    pub fn empty() -> Self {
        PrimitiveList {
            clusters: Vec::new(),
            prim_instances: Vec::new(),
            child_pictures: Vec::new(),
            compositor_surface_count: 0,
        }
    }

    /// Add a primitive instance to the end of the list
    pub fn add_prim(
        &mut self,
        prim_instance: PrimitiveInstance,
        prim_rect: LayoutRect,
        spatial_node_index: SpatialNodeIndex,
        prim_flags: PrimitiveFlags,
    ) {
        let mut flags = ClusterFlags::empty();

        // Pictures are always put into a new cluster, to make it faster to
        // iterate all pictures in a given primitive list.
        match prim_instance.kind {
            PrimitiveInstanceKind::Picture { pic_index, .. } => {
                self.child_pictures.push(pic_index);
            }
            PrimitiveInstanceKind::Backdrop { .. } => {
                flags.insert(ClusterFlags::IS_BACKDROP_FILTER);
            }
            _ => {}
        }

        if prim_flags.contains(PrimitiveFlags::IS_BACKFACE_VISIBLE) {
            flags.insert(ClusterFlags::IS_BACKFACE_VISIBLE);
        }

        if prim_flags.contains(PrimitiveFlags::PREFER_COMPOSITOR_SURFACE) {
            self.compositor_surface_count += 1;
        }

        // Cull rect for this prim: its rect clipped by its local clip rect
        // (empty if they don't intersect).
        let culling_rect = prim_instance.clip_set.local_clip_rect
            .intersection(&prim_rect)
            .unwrap_or_else(LayoutRect::zero);

        // Primitive lengths aren't evenly distributed among primitive lists:
        // We often have a large amount of single primitive lists, a
        // few below 20~30 primitives, and even fewer lists (maybe a couple)
        // in the multiple hundreds with nothing in between.
        // We can see in profiles that reallocating vectors while pushing
        // primitives is taking a large amount of the total scene build time,
        // so we take advantage of what we know about the length distributions
        // to go for an adapted vector growth pattern that avoids over-allocating
        // for the many small allocations while avoiding a lot of reallocation by
        // quickly converging to the common sizes.
// Rust's default vector growth strategy (when pushing elements one by one)
        // is to double the capacity every time.
        let prims_len = self.prim_instances.len();
        if prims_len == self.prim_instances.capacity() {
            // Jump straight to the common bucket sizes (32, then 512) before
            // falling back to doubling for the rare very large lists.
            let next_alloc = match prims_len {
                1 ..= 31 => 32 - prims_len,
                32 ..= 256 => 512 - prims_len,
                _ => prims_len * 2,
            };
            self.prim_instances.reserve(next_alloc);
        }

        let instance_index = prims_len;
        self.prim_instances.push(prim_instance);

        // Fast path: append to the last cluster when it has the same spatial
        // node and flags.
        if let Some(cluster) = self.clusters.last_mut() {
            if cluster.is_compatible(spatial_node_index, flags) {
                cluster.add_instance(&culling_rect, instance_index);
                return;
            }
        }

        // Same idea with clusters, using a different distribution.
        let clusters_len = self.clusters.len();
        if clusters_len == self.clusters.capacity() {
            let next_alloc = match clusters_len {
                1 ..= 15 => 16 - clusters_len,
                16 ..= 127 => 128 - clusters_len,
                _ => clusters_len * 2,
            };
            self.clusters.reserve(next_alloc);
        }

        // Otherwise start a new cluster containing just this instance.
        let mut cluster = PrimitiveCluster::new(
            spatial_node_index,
            flags,
            instance_index,
        );
        cluster.add_instance(&culling_rect, instance_index);
        self.clusters.push(cluster);
    }

    /// Returns true if there are no clusters (and thus primitives)
    pub fn is_empty(&self) -> bool {
        self.clusters.is_empty()
    }
}

/// Defines configuration options for a given picture primitive.
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct PictureOptions {
    /// If true, WR should inflate the bounding rect of primitives when
    /// using a filter effect that requires inflation.
    pub inflate_if_required: bool,
}

impl Default for PictureOptions {
    fn default() -> Self {
        PictureOptions {
            inflate_if_required: true,
        }
    }
}

/// A picture primitive: a grouping of primitives (and child pictures) that
/// may be composited via `requested_composite_mode`.
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct PicturePrimitive {
    /// List of primitives, and associated info for this picture.
    pub prim_list: PrimitiveList,

    // Transient per-frame state; not serialized in captures.
    #[cfg_attr(feature = "capture", serde(skip))]
    pub state: Option<PictureState>,

    /// If true, apply the local clip rect to primitive drawn
    /// in this picture.
    pub apply_local_clip_rect: bool,
    /// If false and transform ends up showing the back of the picture,
    /// it will be considered invisible.
    pub is_backface_visible: bool,

    // Main render task produced for this picture, if any (set during
    // take_context).
    pub primary_render_task_id: Option<RenderTaskId>,
    /// If a mix-blend-mode, contains the render task for
    /// the readback of the framebuffer that we use to sample
    /// from in the mix-blend-mode shader.
    /// For drop-shadow filter, this will store the original
    /// picture task which would be rendered on screen after
    /// blur pass.
    pub secondary_render_task_id: Option<RenderTaskId>,

    /// How this picture should be composited.
    /// If None, don't composite - just draw directly on parent surface.
    pub requested_composite_mode: Option<PictureCompositeMode>,

    // Resolved surface / compositing configuration, if this picture
    // establishes a surface.
    pub raster_config: Option<RasterConfig>,
    // This picture's role in a preserve-3D hierarchy, if any.
    pub context_3d: Picture3DContext<OrderedPictureChild>,

    // Optional cache handles for storing extra data
    // in the GPU cache, depending on the type of
    // picture.
    pub extra_gpu_data_handles: SmallVec<[GpuCacheHandle; 1]>,

    /// The spatial node index of this picture when it is
    /// composited into the parent picture.
    pub spatial_node_index: SpatialNodeIndex,

    /// The conservative local rect of this picture. It is
    /// built dynamically during the first picture traversal.
    /// It is composed of already snapped primitives.
    pub estimated_local_rect: LayoutRect,

    /// The local rect of this picture. It is built
    /// dynamically during the frame visibility update. It
    /// differs from the estimated_local_rect because it
    /// will not contain culled primitives, takes into
    /// account surface inflation and the whole clip chain.
    /// It is frequently the same, but may be quite
    /// different depending on how much was culled.
    pub precise_local_rect: LayoutRect,

    /// Store the state of the previous precise local rect
    /// for this picture. We need this in order to know when
    /// to invalidate segments / drop-shadow gpu cache handles.
    pub prev_precise_local_rect: LayoutRect,

    /// If false, this picture needs to (re)build segments
    /// if it supports segment rendering. This can occur
    /// if the local rect of the picture changes due to
    /// transform animation and/or scrolling.
    pub segments_are_valid: bool,

    /// The config options for this picture.
    pub options: PictureOptions,

    /// Set to true if we know for sure the picture is fully opaque.
    pub is_opaque: bool,
}

impl PicturePrimitive {
    /// Dump this picture (and, recursively, its child pictures) to the
    /// given tree printer for debugging.
    pub fn print<T: PrintTreePrinter>(
        &self,
        pictures: &[Self],
        self_index: PictureIndex,
        pt: &mut T,
    ) {
        pt.new_level(format!("{:?}", self_index));
        pt.add_item(format!("cluster_count: {:?}", self.prim_list.clusters.len()));
        pt.add_item(format!("estimated_local_rect: {:?}", self.estimated_local_rect));
        pt.add_item(format!("precise_local_rect: {:?}", self.precise_local_rect));
        pt.add_item(format!("spatial_node_index: {:?}", self.spatial_node_index));
        pt.add_item(format!("raster_config: {:?}", self.raster_config));
        pt.add_item(format!("requested_composite_mode: {:?}", self.requested_composite_mode));

        for child_pic_index in &self.prim_list.child_pictures {
            pictures[child_pic_index.0].print(pictures, *child_pic_index, pt);
        }

        pt.end_level();
    }

    /// Returns true if this picture supports segmented rendering.
    pub fn can_use_segments(&self) -> bool {
        match self.raster_config {
            // TODO(gw): Support brush segment rendering for filter and mix-blend
            //           shaders. It's possible this already works, but I'm just
            //           applying this optimization to Blit mode for now.
            Some(RasterConfig { composite_mode: PictureCompositeMode::MixBlend(..), .. }) |
            Some(RasterConfig { composite_mode: PictureCompositeMode::Filter(..), .. }) |
            Some(RasterConfig { composite_mode: PictureCompositeMode::ComponentTransferFilter(..), .. }) |
            Some(RasterConfig { composite_mode: PictureCompositeMode::TileCache { .. }, .. }) |
            Some(RasterConfig { composite_mode: PictureCompositeMode::SvgFilter(..), ..
}) |
            None => {
                false
            }
            // Segments are only used for pictures blitted purely due to
            // clipping.
            Some(RasterConfig { composite_mode: PictureCompositeMode::Blit(reason), ..}) => {
                reason == BlitReason::CLIP
            }
        }
    }

    /// Resolve animated property bindings against `properties` (currently
    /// only the opacity filter value), then report whether the picture is
    /// still visible with the resolved values.
    fn resolve_scene_properties(&mut self, properties: &SceneProperties) -> bool {
        match self.requested_composite_mode {
            Some(PictureCompositeMode::Filter(ref mut filter)) => {
                match *filter {
                    Filter::Opacity(ref binding, ref mut value) => {
                        *value = properties.resolve_float(binding);
                    }
                    _ => {}
                }

                filter.is_visible()
            }
            _ => true,
        }
    }

    /// A picture is considered invisible only when its composite filter
    /// reports itself invisible; every other mode is visible.
    pub fn is_visible(&self) -> bool {
        match self.requested_composite_mode {
            Some(PictureCompositeMode::Filter(ref filter)) => {
                filter.is_visible()
            }
            _ => true,
        }
    }

    // TODO(gw): We have the PictureOptions struct available. We
    //           should move some of the parameter list in this
    //           method to be part of the PictureOptions, and
    //           avoid adding new parameters here.
    /// Construct a new picture primitive. All per-frame / derived state
    /// (render task ids, raster config, rects, opacity) starts out
    /// empty / zeroed and is filled in by later traversal passes.
    pub fn new_image(
        requested_composite_mode: Option<PictureCompositeMode>,
        context_3d: Picture3DContext<OrderedPictureChild>,
        apply_local_clip_rect: bool,
        flags: PrimitiveFlags,
        prim_list: PrimitiveList,
        spatial_node_index: SpatialNodeIndex,
        options: PictureOptions,
    ) -> Self {
        PicturePrimitive {
            prim_list,
            state: None,
            primary_render_task_id: None,
            secondary_render_task_id: None,
            requested_composite_mode,
            raster_config: None,
            context_3d,
            extra_gpu_data_handles: SmallVec::new(),
            apply_local_clip_rect,
            is_backface_visible: flags.contains(PrimitiveFlags::IS_BACKFACE_VISIBLE),
            spatial_node_index,
            estimated_local_rect: LayoutRect::zero(),
            precise_local_rect: LayoutRect::zero(),
            prev_precise_local_rect: LayoutRect::zero(),
            options,
            segments_are_valid: false,
            is_opaque: false,
        }
    }

    pub fn take_context(
        &mut self,
        pic_index: PictureIndex,
        surface_spatial_node_index: SpatialNodeIndex,
        raster_spatial_node_index: SpatialNodeIndex,
        parent_surface_index: SurfaceIndex,
        parent_subpixel_mode: SubpixelMode,
        frame_state: &mut FrameBuildingState,
        frame_context: &FrameBuildingContext,
        scratch: &mut PrimitiveScratchBuffer,
        tile_cache_logger: &mut TileCacheLogger,
        tile_caches:
&mut FastHashMap<SliceId, Box<TileCacheInstance>>, ) -> Option<(PictureContext, PictureState, PrimitiveList)> { self.primary_render_task_id = None; self.secondary_render_task_id = None; if !self.is_visible() { return None; } profile_scope!("take_context"); // Extract the raster and surface spatial nodes from the raster // config, if this picture establishes a surface. Otherwise just // pass in the spatial node indices from the parent context. let (raster_spatial_node_index, surface_spatial_node_index, surface_index, inflation_factor) = match self.raster_config { Some(ref raster_config) => { let surface = &frame_state.surfaces[raster_config.surface_index.0]; ( surface.raster_spatial_node_index, self.spatial_node_index, raster_config.surface_index, surface.inflation_factor, ) } None => { ( raster_spatial_node_index, surface_spatial_node_index, parent_surface_index, 0.0, ) } }; let map_pic_to_world = SpaceMapper::new_with_target( ROOT_SPATIAL_NODE_INDEX, surface_spatial_node_index, frame_context.global_screen_world_rect, frame_context.spatial_tree, ); let pic_bounds = map_pic_to_world.unmap(&map_pic_to_world.bounds) .unwrap_or_else(PictureRect::max_rect); let map_local_to_pic = SpaceMapper::new( surface_spatial_node_index, pic_bounds, ); let (map_raster_to_world, map_pic_to_raster) = create_raster_mappers( surface_spatial_node_index, raster_spatial_node_index, frame_context.global_screen_world_rect, frame_context.spatial_tree, ); let plane_splitter = match self.context_3d { Picture3DContext::Out => { None } Picture3DContext::In { root_data: Some(_), .. } => { Some(PlaneSplitter::new()) } Picture3DContext::In { root_data: None, .. } => { None } }; match self.raster_config { Some(RasterConfig { surface_index, composite_mode: PictureCompositeMode::TileCache { slice_id }, .. 
}) => { let tile_cache = tile_caches.get_mut(&slice_id).unwrap(); let mut debug_info = SliceDebugInfo::new(); let mut surface_tasks = Vec::with_capacity(tile_cache.tile_count()); let mut surface_device_rect = DeviceRect::zero(); let device_pixel_scale = frame_state .surfaces[surface_index.0] .device_pixel_scale; // Get the overall world space rect of the picture cache. Used to clip // the tile rects below for occlusion testing to the relevant area. let world_clip_rect = map_pic_to_world .map(&tile_cache.local_clip_rect) .expect("bug: unable to map clip rect"); let device_clip_rect = (world_clip_rect * frame_context.global_device_pixel_scale).round(); for (sub_slice_index, sub_slice) in tile_cache.sub_slices.iter_mut().enumerate() { for tile in sub_slice.tiles.values_mut() { surface_device_rect = surface_device_rect.union(&tile.device_valid_rect); if tile.is_visible { // Get the world space rect that this tile will actually occupy on screem let device_draw_rect = device_clip_rect.intersection(&tile.device_valid_rect); // If that draw rect is occluded by some set of tiles in front of it, // then mark it as not visible and skip drawing. When it's not occluded // it will fail this test, and get rasterized by the render task setup // code below. match device_draw_rect { Some(device_draw_rect) => { // Only check for occlusion on visible tiles that are fixed position. if tile_cache.spatial_node_index == ROOT_SPATIAL_NODE_INDEX && frame_state.composite_state.occluders.is_tile_occluded(tile.z_id, device_draw_rect) { // If this tile has an allocated native surface, free it, since it's completely // occluded. We will need to re-allocate this surface if it becomes visible, // but that's likely to be rare (e.g. when there is no content display list // for a frame or two during a tab switch). let surface = tile.surface.as_mut().expect("no tile surface set!"); if let TileSurface::Texture { descriptor: SurfaceTextureDescriptor::Native { id, .. }, .. 
} = surface { if let Some(id) = id.take() { frame_state.resource_cache.destroy_compositor_tile(id); } } tile.is_visible = false; if frame_context.fb_config.testing { debug_info.tiles.insert( tile.tile_offset, TileDebugInfo::Occluded, ); } continue; } } None => { tile.is_visible = false; } } } // If we get here, we want to ensure that the surface remains valid in the texture // cache, _even if_ it's not visible due to clipping or being scrolled off-screen. // This ensures that we retain valid tiles that are off-screen, but still in the // display port of this tile cache instance. if let Some(TileSurface::Texture { descriptor, .. }) = tile.surface.as_ref() { if let SurfaceTextureDescriptor::TextureCache { ref handle, .. } = descriptor { frame_state.resource_cache.texture_cache.request( handle, frame_state.gpu_cache, ); } } // If the tile has been found to be off-screen / clipped, skip any further processing. if !tile.is_visible { if frame_context.fb_config.testing { debug_info.tiles.insert( tile.tile_offset, TileDebugInfo::Culled, ); } continue; } if frame_context.debug_flags.contains(DebugFlags::PICTURE_CACHING_DBG) { tile.root.draw_debug_rects( &map_pic_to_world, tile.is_opaque, tile.current_descriptor.local_valid_rect, scratch, frame_context.global_device_pixel_scale, ); let label_offset = DeviceVector2D::new( 20.0 + sub_slice_index as f32 * 20.0, 30.0 + sub_slice_index as f32 * 20.0, ); let tile_device_rect = tile.world_tile_rect * frame_context.global_device_pixel_scale; if tile_device_rect.size.height >= label_offset.y { let surface = tile.surface.as_ref().expect("no tile surface set!"); scratch.push_debug_string( tile_device_rect.origin + label_offset, debug_colors::RED, format!("{:?}: s={} is_opaque={} surface={} sub={}", tile.id, tile_cache.slice, tile.is_opaque, surface.kind(), sub_slice_index, ), ); } } if let TileSurface::Texture { descriptor, .. 
} = tile.surface.as_mut().unwrap() { match descriptor { SurfaceTextureDescriptor::TextureCache { ref handle, .. } => { // Invalidate if the backing texture was evicted. if frame_state.resource_cache.texture_cache.is_allocated(handle) { // Request the backing texture so it won't get evicted this frame. // We specifically want to mark the tile texture as used, even // if it's detected not visible below and skipped. This is because // we maintain the set of tiles we care about based on visibility // during pre_update. If a tile still exists after that, we are // assuming that it's either visible or we want to retain it for // a while in case it gets scrolled back onto screen soon. // TODO(gw): Consider switching to manual eviction policy? frame_state.resource_cache.texture_cache.request(handle, frame_state.gpu_cache); } else { // If the texture was evicted on a previous frame, we need to assume // that the entire tile rect is dirty. tile.invalidate(None, InvalidationReason::NoTexture); } } SurfaceTextureDescriptor::Native { id, .. } => { if id.is_none() { // There is no current surface allocation, so ensure the entire tile is invalidated tile.invalidate(None, InvalidationReason::NoSurface); } } } } // Ensure that the dirty rect doesn't extend outside the local valid rect. tile.local_dirty_rect = tile.local_dirty_rect .intersection(&tile.current_descriptor.local_valid_rect) .unwrap_or_else(PictureRect::zero); // Update the world/device dirty rect let world_dirty_rect = map_pic_to_world.map(&tile.local_dirty_rect).expect("bug"); let device_rect = (tile.world_tile_rect * frame_context.global_device_pixel_scale).round(); tile.device_dirty_rect = (world_dirty_rect * frame_context.global_device_pixel_scale) .round_out() .intersection(&device_rect) .unwrap_or_else(DeviceRect::zero); if tile.is_valid { if frame_context.fb_config.testing { debug_info.tiles.insert( tile.tile_offset, TileDebugInfo::Valid, ); } continue; } // Add this dirty rect to the dirty region tracker. 
This must be done outside the if statement below, // so that we include in the dirty region tiles that are handled by a background color only (no // surface allocation). tile_cache.dirty_region.add_dirty_region( tile.local_dirty_rect, SubSliceIndex::new(sub_slice_index), frame_context.spatial_tree, ); // Ensure that this texture is allocated. if let TileSurface::Texture { ref mut descriptor } = tile.surface.as_mut().unwrap() { match descriptor { SurfaceTextureDescriptor::TextureCache { ref mut handle } => { if !frame_state.resource_cache.texture_cache.is_allocated(handle) { frame_state.resource_cache.texture_cache.update_picture_cache( tile_cache.current_tile_size, handle, frame_state.gpu_cache, ); } } SurfaceTextureDescriptor::Native { id } => { if id.is_none() { // Allocate a native surface id if we're in native compositing mode, // and we don't have a surface yet (due to first frame, or destruction // due to tile size changing etc). if sub_slice.native_surface.is_none() { let opaque = frame_state .resource_cache .create_compositor_surface( tile_cache.virtual_offset, tile_cache.current_tile_size, true, ); let alpha = frame_state .resource_cache .create_compositor_surface( tile_cache.virtual_offset, tile_cache.current_tile_size, false, ); sub_slice.native_surface = Some(NativeSurface { opaque, alpha, }); } // Create the tile identifier and allocate it. 
let surface_id = if tile.is_opaque { sub_slice.native_surface.as_ref().unwrap().opaque } else { sub_slice.native_surface.as_ref().unwrap().alpha }; let tile_id = NativeTileId { surface_id, x: tile.tile_offset.x, y: tile.tile_offset.y, }; frame_state.resource_cache.create_compositor_tile(tile_id); *id = Some(tile_id); } } } let content_origin_f = tile.world_tile_rect.origin * device_pixel_scale; let content_origin = content_origin_f.round(); debug_assert!((content_origin_f.x - content_origin.x).abs() < 0.01); debug_assert!((content_origin_f.y - content_origin.y).abs() < 0.01); let surface = descriptor.resolve( frame_state.resource_cache, tile_cache.current_tile_size, ); let scissor_rect = tile.device_dirty_rect .translate(-device_rect.origin.to_vector()) .round() .to_i32(); let valid_rect = tile.device_valid_rect .translate(-device_rect.origin.to_vector()) .round() .to_i32(); let task_size = tile_cache.current_tile_size; let batch_filter = BatchFilter { rect_in_pic_space: tile.local_dirty_rect, sub_slice_index: SubSliceIndex::new(sub_slice_index), }; let render_task_id = frame_state.rg_builder.add().init( RenderTask::new( RenderTaskLocation::Static { surface: StaticRenderTaskSurface::PictureCache { surface, }, rect: task_size.into(), }, RenderTaskKind::new_picture( task_size, tile_cache.current_tile_size.to_f32(), pic_index, content_origin, surface_spatial_node_index, device_pixel_scale, Some(batch_filter), Some(scissor_rect), Some(valid_rect), ) ), ); surface_tasks.push(render_task_id); } if frame_context.fb_config.testing { debug_info.tiles.insert( tile.tile_offset, TileDebugInfo::Dirty(DirtyTileDebugInfo { local_valid_rect: tile.current_descriptor.local_valid_rect, local_dirty_rect: tile.local_dirty_rect, }), ); } // If the entire tile valid region is dirty, we can update the fract offset // at which the tile was rendered. 
if tile.device_dirty_rect.contains_rect(&tile.device_valid_rect) { tile.device_fract_offset = tile_cache.device_fract_offset; } // Now that the tile is valid, reset the dirty rect. tile.local_dirty_rect = PictureRect::zero(); tile.is_valid = true; } } // If invalidation debugging is enabled, dump the picture cache state to a tree printer. if frame_context.debug_flags.contains(DebugFlags::INVALIDATION_DBG) { tile_cache.print(); } // If testing mode is enabled, write some information about the current state // of this picture cache (made available in RenderResults). if frame_context.fb_config.testing { frame_state.composite_state .picture_cache_debug .slices .insert( tile_cache.slice, debug_info, ); } frame_state.init_surface_tiled( surface_index, surface_tasks, surface_device_rect, ); } Some(ref mut raster_config) => { let pic_rect = self.precise_local_rect.cast_unit(); let mut device_pixel_scale = frame_state .surfaces[raster_config.surface_index.0] .device_pixel_scale; let scale_factors = frame_state .surfaces[raster_config.surface_index.0] .scale_factors; // If the primitive has a filter that can sample with an offset, the clip rect has // to take it into account. let clip_inflation = match raster_config.composite_mode { PictureCompositeMode::Filter(Filter::DropShadows(ref shadows)) => { let mut max_offset = vec2(0.0, 0.0); let mut min_offset = vec2(0.0, 0.0); for shadow in shadows { let offset = layout_vector_as_picture_vector(shadow.offset); max_offset = max_offset.max(offset); min_offset = min_offset.min(offset); } // Get the shadow offsets in world space. let raster_min = map_pic_to_raster.map_vector(min_offset); let raster_max = map_pic_to_raster.map_vector(max_offset); let world_min = map_raster_to_world.map_vector(raster_min); let world_max = map_raster_to_world.map_vector(raster_max); // Grow the clip in the opposite direction of the shadow's offset. 
SideOffsets2D::from_vectors_outer( -world_max.max(vec2(0.0, 0.0)), -world_min.min(vec2(0.0, 0.0)), ) } _ => SideOffsets2D::zero(), }; let (mut clipped, mut unclipped) = match get_raster_rects( pic_rect, &map_pic_to_raster, &map_raster_to_world, raster_config.clipped_bounding_rect.outer_rect(clip_inflation), device_pixel_scale, ) { Some(info) => info, None => { return None } }; let transform = map_pic_to_raster.get_transform(); /// If the picture (raster_config) establishes a raster root, /// its requested resolution won't be clipped by the parent or /// viewport; so we need to make sure the requested resolution is /// "reasonable", ie. <= MAX_SURFACE_SIZE. If not, scale the /// picture down until it fits that limit. This results in a new /// device_rect, a new unclipped rect, and a new device_pixel_scale. /// /// Since the adjusted device_pixel_scale is passed into the /// RenderTask (and then the shader via RenderTaskData) this mostly /// works transparently, reusing existing support for variable DPI /// support. The on-the-fly scaling can be seen as on-the-fly, /// per-task DPI adjustment. Logical pixels are unaffected. /// /// The scaling factor is returned to the caller; blur radius, /// font size, etc. need to be scaled accordingly. 
fn adjust_scale_for_max_surface_size( raster_config: &RasterConfig, max_target_size: i32, pic_rect: PictureRect, map_pic_to_raster: &SpaceMapper<PicturePixel, RasterPixel>, map_raster_to_world: &SpaceMapper<RasterPixel, WorldPixel>, clipped_prim_bounding_rect: WorldRect, device_pixel_scale : &mut DevicePixelScale, device_rect: &mut DeviceRect, unclipped: &mut DeviceRect) -> Option<f32> { let limit = if raster_config.establishes_raster_root { MAX_SURFACE_SIZE } else { max_target_size as f32 }; if device_rect.size.width > limit || device_rect.size.height > limit { // round_out will grow by 1 integer pixel if origin is on a // fractional position, so keep that margin for error with -1: let scale = (limit as f32 - 1.0) / (f32::max(device_rect.size.width, device_rect.size.height)); *device_pixel_scale = *device_pixel_scale * Scale::new(scale); let new_device_rect = device_rect.to_f32() * Scale::new(scale); *device_rect = new_device_rect.round_out(); *unclipped = match get_raster_rects( pic_rect, &map_pic_to_raster, &map_raster_to_world, clipped_prim_bounding_rect, *device_pixel_scale ) { Some(info) => info.1, None => { return None } }; Some(scale) } else { None } } let primary_render_task_id; match raster_config.composite_mode { PictureCompositeMode::TileCache { .. } => { unreachable!("handled above"); } PictureCompositeMode::Filter(Filter::Blur(width, height)) => { let width_std_deviation = clamp_blur_radius(width, scale_factors) * device_pixel_scale.0; let height_std_deviation = clamp_blur_radius(height, scale_factors) * device_pixel_scale.0; let mut blur_std_deviation = DeviceSize::new( width_std_deviation * scale_factors.0, height_std_deviation * scale_factors.1 ); let mut device_rect = if self.options.inflate_if_required { let inflation_factor = frame_state.surfaces[raster_config.surface_index.0].inflation_factor; let inflation_factor = inflation_factor * device_pixel_scale.0; // The clipped field is the part of the picture that is visible // on screen. 
The unclipped field is the screen-space rect of // the complete picture, if no screen / clip-chain was applied // (this includes the extra space for blur region). To ensure // that we draw a large enough part of the picture to get correct // blur results, inflate that clipped area by the blur range, and // then intersect with the total screen rect, to minimize the // allocation size. clipped .inflate(inflation_factor * scale_factors.0, inflation_factor * scale_factors.1) .intersection(&unclipped) .unwrap() } else { clipped }; let mut original_size = device_rect.size; // Adjust the size to avoid introducing sampling errors during the down-scaling passes. // what would be even better is to rasterize the picture at the down-scaled size // directly. device_rect.size = BlurTask::adjusted_blur_source_size( device_rect.size, blur_std_deviation, ); if let Some(scale) = adjust_scale_for_max_surface_size( raster_config, frame_context.fb_config.max_target_size, pic_rect, &map_pic_to_raster, &map_raster_to_world, raster_config.clipped_bounding_rect, &mut device_pixel_scale, &mut device_rect, &mut unclipped, ) { blur_std_deviation = blur_std_deviation * scale; original_size = original_size.to_f32() * scale; raster_config.root_scaling_factor = scale; } let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &device_rect, device_pixel_scale, ); let task_size = device_rect.size.to_i32(); let picture_task_id = frame_state.rg_builder.add().init( RenderTask::new_dynamic( task_size, RenderTaskKind::new_picture( task_size, unclipped.size, pic_index, device_rect.origin, surface_spatial_node_index, device_pixel_scale, None, None, None, ) ).with_uv_rect_kind(uv_rect_kind) ); let blur_render_task_id = RenderTask::new_blur( blur_std_deviation, picture_task_id, frame_state.rg_builder, RenderTargetKind::Color, None, original_size.to_i32(), ); primary_render_task_id = Some(blur_render_task_id); frame_state.init_surface_chain( raster_config.surface_index, blur_render_task_id, 
picture_task_id, parent_surface_index, device_rect, ); } PictureCompositeMode::Filter(Filter::DropShadows(ref shadows)) => { let mut max_std_deviation = 0.0; for shadow in shadows { max_std_deviation = f32::max(max_std_deviation, shadow.blur_radius); } max_std_deviation = clamp_blur_radius(max_std_deviation, scale_factors) * device_pixel_scale.0; let max_blur_range = max_std_deviation * BLUR_SAMPLE_SCALE; // We cast clipped to f32 instead of casting unclipped to i32 // because unclipped can overflow an i32. let mut device_rect = clipped .inflate(max_blur_range * scale_factors.0, max_blur_range * scale_factors.1) .intersection(&unclipped) .unwrap(); device_rect.size = BlurTask::adjusted_blur_source_size( device_rect.size, DeviceSize::new( max_std_deviation * scale_factors.0, max_std_deviation * scale_factors.1 ), ); if let Some(scale) = adjust_scale_for_max_surface_size( raster_config, frame_context.fb_config.max_target_size, pic_rect, &map_pic_to_raster, &map_raster_to_world, raster_config.clipped_bounding_rect, &mut device_pixel_scale, &mut device_rect, &mut unclipped, ) { // std_dev adjusts automatically from using device_pixel_scale raster_config.root_scaling_factor = scale; } let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &device_rect, device_pixel_scale, ); let task_size = device_rect.size.to_i32(); let picture_task_id = frame_state.rg_builder.add().init( RenderTask::new_dynamic( task_size, RenderTaskKind::new_picture( task_size, unclipped.size, pic_index, device_rect.origin, surface_spatial_node_index, device_pixel_scale, None, None, None, ), ).with_uv_rect_kind(uv_rect_kind) ); // Add this content picture as a dependency of the parent surface, to // ensure it isn't free'd after the shadow uses it as an input. 
frame_state.add_child_render_task( parent_surface_index, picture_task_id, ); let mut blur_tasks = BlurTaskCache::default(); self.extra_gpu_data_handles.resize(shadows.len(), GpuCacheHandle::new()); let mut blur_render_task_id = picture_task_id; for shadow in shadows { let blur_radius = clamp_blur_radius(shadow.blur_radius, scale_factors) * device_pixel_scale.0; blur_render_task_id = RenderTask::new_blur( DeviceSize::new( blur_radius * scale_factors.0, blur_radius * scale_factors.1, ), picture_task_id, frame_state.rg_builder, RenderTargetKind::Color, Some(&mut blur_tasks), device_rect.size.to_i32(), ); } primary_render_task_id = Some(blur_render_task_id); self.secondary_render_task_id = Some(picture_task_id); frame_state.init_surface_chain( raster_config.surface_index, blur_render_task_id, picture_task_id, parent_surface_index, device_rect, ); } PictureCompositeMode::MixBlend(mode) if BlendMode::from_mix_blend_mode( mode, frame_context.fb_config.gpu_supports_advanced_blend, frame_context.fb_config.advanced_blend_is_coherent, frame_context.fb_config.dual_source_blending_is_enabled && frame_context.fb_config.dual_source_blending_is_supported, ).is_none() => { if let Some(scale) = adjust_scale_for_max_surface_size( raster_config, frame_context.fb_config.max_target_size, pic_rect, &map_pic_to_raster, &map_raster_to_world, raster_config.clipped_bounding_rect, &mut device_pixel_scale, &mut clipped, &mut unclipped, ) { raster_config.root_scaling_factor = scale; } let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &clipped, device_pixel_scale, ); let parent_surface = &frame_state.surfaces[parent_surface_index.0]; let parent_raster_spatial_node_index = parent_surface.raster_spatial_node_index; let parent_device_pixel_scale = parent_surface.device_pixel_scale; // Create a space mapper that will allow mapping from the local rect // of the mix-blend primitive into the space of the surface that we // need to read back from. 
Note that we use the parent's raster spatial // node here, so that we are in the correct device space of the parent // surface, whether it establishes a raster root or not. let map_pic_to_parent = SpaceMapper::new_with_target( parent_raster_spatial_node_index, self.spatial_node_index, RasterRect::max_rect(), // TODO(gw): May need a conservative estimate? frame_context.spatial_tree, ); let pic_in_raster_space = map_pic_to_parent .map(&pic_rect) .expect("bug: unable to map mix-blend content into parent"); // Apply device pixel ratio for parent surface to get into device // pixels for that surface. let backdrop_rect = raster_rect_to_device_pixels( pic_in_raster_space, parent_device_pixel_scale, ); let parent_surface_rect = parent_surface.get_device_rect(); // If there is no available parent surface to read back from (for example, if // the parent surface is affected by a clip that doesn't affect the child // surface), then create a dummy 16x16 readback. In future, we could alter // the composite mode of this primitive to skip the mix-blend, but for simplicity // we just create a dummy readback for now. let readback_task_id = match backdrop_rect.intersection(&parent_surface_rect) { Some(available_rect) => { // Calculate the UV coords necessary for the shader to sampler // from the primitive rect within the readback region. This is // 0..1 for aligned surfaces, but doing it this way allows // accurate sampling if the primitive bounds have fractional values. 
let backdrop_uv = calculate_uv_rect_kind( &pic_rect, &map_pic_to_parent.get_transform(), &available_rect, parent_device_pixel_scale, ); frame_state.rg_builder.add().init( RenderTask::new_dynamic( available_rect.size.to_i32(), RenderTaskKind::new_readback(Some(available_rect.origin)), ).with_uv_rect_kind(backdrop_uv) ) } None => { frame_state.rg_builder.add().init( RenderTask::new_dynamic( DeviceIntSize::new(16, 16), RenderTaskKind::new_readback(None), ) ) } }; frame_state.add_child_render_task( parent_surface_index, readback_task_id, ); self.secondary_render_task_id = Some(readback_task_id); let task_size = clipped.size.to_i32(); let render_task_id = frame_state.rg_builder.add().init( RenderTask::new_dynamic( task_size, RenderTaskKind::new_picture( task_size, unclipped.size, pic_index, clipped.origin, surface_spatial_node_index, device_pixel_scale, None, None, None, ) ).with_uv_rect_kind(uv_rect_kind) ); primary_render_task_id = Some(render_task_id); frame_state.init_surface( raster_config.surface_index, render_task_id, parent_surface_index, clipped, ); } PictureCompositeMode::Filter(..) 
=> { if let Some(scale) = adjust_scale_for_max_surface_size( raster_config, frame_context.fb_config.max_target_size, pic_rect, &map_pic_to_raster, &map_raster_to_world, raster_config.clipped_bounding_rect, &mut device_pixel_scale, &mut clipped, &mut unclipped, ) { raster_config.root_scaling_factor = scale; } let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &clipped, device_pixel_scale, ); let task_size = clipped.size.to_i32(); let render_task_id = frame_state.rg_builder.add().init( RenderTask::new_dynamic( task_size, RenderTaskKind::new_picture( task_size, unclipped.size, pic_index, clipped.origin, surface_spatial_node_index, device_pixel_scale, None, None, None, ) ).with_uv_rect_kind(uv_rect_kind) ); primary_render_task_id = Some(render_task_id); frame_state.init_surface( raster_config.surface_index, render_task_id, parent_surface_index, clipped, ); } PictureCompositeMode::ComponentTransferFilter(..) => { if let Some(scale) = adjust_scale_for_max_surface_size( raster_config, frame_context.fb_config.max_target_size, pic_rect, &map_pic_to_raster, &map_raster_to_world, raster_config.clipped_bounding_rect, &mut device_pixel_scale, &mut clipped, &mut unclipped, ) { raster_config.root_scaling_factor = scale; } let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &clipped, device_pixel_scale, ); let task_size = clipped.size.to_i32(); let render_task_id = frame_state.rg_builder.add().init( RenderTask::new_dynamic( task_size, RenderTaskKind::new_picture( task_size, unclipped.size, pic_index, clipped.origin, surface_spatial_node_index, device_pixel_scale, None, None, None, ) ).with_uv_rect_kind(uv_rect_kind) ); primary_render_task_id = Some(render_task_id); frame_state.init_surface( raster_config.surface_index, render_task_id, parent_surface_index, clipped, ); } PictureCompositeMode::MixBlend(..) 
| PictureCompositeMode::Blit(_) => { if let Some(scale) = adjust_scale_for_max_surface_size( raster_config, frame_context.fb_config.max_target_size, pic_rect, &map_pic_to_raster, &map_raster_to_world, raster_config.clipped_bounding_rect, &mut device_pixel_scale, &mut clipped, &mut unclipped, ) { raster_config.root_scaling_factor = scale; } let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &clipped, device_pixel_scale, ); let task_size = clipped.size.to_i32(); let render_task_id = frame_state.rg_builder.add().init( RenderTask::new_dynamic( task_size, RenderTaskKind::new_picture( task_size, unclipped.size, pic_index, clipped.origin, surface_spatial_node_index, device_pixel_scale, None, None, None, ) ).with_uv_rect_kind(uv_rect_kind) ); primary_render_task_id = Some(render_task_id); frame_state.init_surface( raster_config.surface_index, render_task_id, parent_surface_index, clipped, ); } PictureCompositeMode::SvgFilter(ref primitives, ref filter_datas) => { if let Some(scale) = adjust_scale_for_max_surface_size( raster_config, frame_context.fb_config.max_target_size, pic_rect, &map_pic_to_raster, &map_raster_to_world, raster_config.clipped_bounding_rect, &mut device_pixel_scale, &mut clipped, &mut unclipped, ) { raster_config.root_scaling_factor = scale; } let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &clipped, device_pixel_scale, ); let task_size = clipped.size.to_i32(); let picture_task_id = frame_state.rg_builder.add().init( RenderTask::new_dynamic( task_size, RenderTaskKind::new_picture( task_size, unclipped.size, pic_index, clipped.origin, surface_spatial_node_index, device_pixel_scale, None, None, None, ) ).with_uv_rect_kind(uv_rect_kind) ); let filter_task_id = RenderTask::new_svg_filter( primitives, filter_datas, frame_state.rg_builder, clipped.size.to_i32(), uv_rect_kind, picture_task_id, device_pixel_scale, ); primary_render_task_id = Some(filter_task_id); frame_state.init_surface_chain( raster_config.surface_index, 
filter_task_id, picture_task_id, parent_surface_index, clipped, ); } } self.primary_render_task_id = primary_render_task_id; // Update the device pixel ratio in the surface, in case it was adjusted due // to the surface being too large. This ensures the correct scale is available // in case it's used as input to a parent mix-blend-mode readback. frame_state .surfaces[raster_config.surface_index.0] .device_pixel_scale = device_pixel_scale; } None => {} }; #[cfg(feature = "capture")] { if frame_context.debug_flags.contains(DebugFlags::TILE_CACHE_LOGGING_DBG) { if let Some(PictureCompositeMode::TileCache { slice_id }) = self.requested_composite_mode { if let Some(ref tile_cache) = tile_caches.get(&slice_id) { // extract just the fields that we're interested in let mut tile_cache_tiny = TileCacheInstanceSerializer { slice: tile_cache.slice, tiles: FastHashMap::default(), background_color: tile_cache.background_color, fract_offset: tile_cache.fract_offset }; // TODO(gw): Debug output only writes the primary sub-slice for now for (key, tile) in &tile_cache.sub_slices.first().unwrap().tiles { tile_cache_tiny.tiles.insert(*key, TileSerializer { rect: tile.local_tile_rect, current_descriptor: tile.current_descriptor.clone(), device_fract_offset: tile.device_fract_offset, id: tile.id, root: tile.root.clone(), background_color: tile.background_color, invalidation_reason: tile.invalidation_reason.clone() }); } let text = ron::ser::to_string_pretty(&tile_cache_tiny, Default::default()).unwrap(); tile_cache_logger.add(text, map_pic_to_world.get_transform()); } } } } #[cfg(not(feature = "capture"))] { let _tile_cache_logger = tile_cache_logger; // unused variable fix } let state = PictureState { //TODO: check for MAX_CACHE_SIZE here? 
map_local_to_pic, map_pic_to_world, map_pic_to_raster, map_raster_to_world, plane_splitter, }; let mut dirty_region_count = 0; // If this is a picture cache, push the dirty region to ensure any // child primitives are culled and clipped to the dirty rect(s). if let Some(RasterConfig { composite_mode: PictureCompositeMode::TileCache { slice_id }, .. }) = self.raster_config { let dirty_region = tile_caches[&slice_id].dirty_region.clone(); frame_state.push_dirty_region(dirty_region); dirty_region_count += 1; } if inflation_factor > 0.0 { let inflated_region = frame_state.current_dirty_region().inflate( inflation_factor, frame_context.spatial_tree, ); frame_state.push_dirty_region(inflated_region); dirty_region_count += 1; } // Disallow subpixel AA if an intermediate surface is needed. // TODO(lsalzman): allow overriding parent if intermediate surface is opaque let subpixel_mode = match self.raster_config { Some(RasterConfig { ref composite_mode, .. }) => { let subpixel_mode = match composite_mode { PictureCompositeMode::TileCache { slice_id } => { tile_caches[&slice_id].subpixel_mode } PictureCompositeMode::Blit(..) | PictureCompositeMode::ComponentTransferFilter(..) | PictureCompositeMode::Filter(..) | PictureCompositeMode::MixBlend(..) | PictureCompositeMode::SvgFilter(..) => { // TODO(gw): We can take advantage of the same logic that // exists in the opaque rect detection for tile // caches, to allow subpixel text on other surfaces // that can be detected as opaque. 
SubpixelMode::Deny
                }
            };

            subpixel_mode
        }
        None => {
            // No intermediate surface: subpixel AA is unconditionally allowed here.
            SubpixelMode::Allow
        }
    };

    // Still disable subpixel AA if parent forbids it
    let subpixel_mode = match (parent_subpixel_mode, subpixel_mode) {
        (SubpixelMode::Allow, SubpixelMode::Allow) => {
            // Both parent and this surface unconditionally allow subpixel AA
            SubpixelMode::Allow
        }
        (SubpixelMode::Allow, SubpixelMode::Conditional { allowed_rect }) => {
            // Parent allows, but we are conditional subpixel AA
            SubpixelMode::Conditional {
                allowed_rect,
            }
        }
        (SubpixelMode::Conditional { allowed_rect }, SubpixelMode::Allow) => {
            // Propagate conditional subpixel mode to child pictures that allow subpixel AA
            SubpixelMode::Conditional {
                allowed_rect,
            }
        }
        (SubpixelMode::Conditional { .. }, SubpixelMode::Conditional { ..}) => {
            // Conditional mode is only ever produced for top-level picture caches,
            // which cannot nest, so this combination indicates a bug.
            unreachable!("bug: only top level picture caches have conditional subpixel");
        }
        (SubpixelMode::Deny, _) | (_, SubpixelMode::Deny) => {
            // Either parent or this surface explicitly deny subpixel, these take precedence
            SubpixelMode::Deny
        }
    };

    // Context handed to child primitive traversal for this picture.
    let context = PictureContext {
        pic_index,
        apply_local_clip_rect: self.apply_local_clip_rect,
        raster_spatial_node_index,
        surface_spatial_node_index,
        surface_index,
        dirty_region_count,
        subpixel_mode,
    };

    // Temporarily move the primitive list out of self; restore_context puts it back.
    let prim_list = mem::replace(&mut self.prim_list, PrimitiveList::empty());

    Some((context, state, prim_list))
}

/// Restore state after traversing this picture's children.
///
/// Pops any dirty regions pushed by `take_context` (one per
/// `context.dirty_region_count`), returns ownership of the primitive
/// list taken in `take_context`, and stashes the `PictureState` so a
/// later `take_state` / `prepare_for_render` can retrieve it.
pub fn restore_context(
    &mut self,
    prim_list: PrimitiveList,
    context: PictureContext,
    state: PictureState,
    frame_state: &mut FrameBuildingState,
) {
    // Pop any dirty regions this picture set
    for _ in 0 .. context.dirty_region_count {
        frame_state.pop_dirty_region();
    }

    self.prim_list = prim_list;
    self.state = Some(state);
}

/// Take ownership of the stored `PictureState`.
/// Panics if `restore_context` has not stored one — calling twice is a bug.
pub fn take_state(&mut self) -> PictureState {
    self.state.take().expect("bug: no state present!")
}

/// Add a primitive instance to the plane splitter. The function would generate
/// an appropriate polygon, clip it against the frustum, and register with the
/// given plane splitter.
/// Add one primitive's polygon to `splitter` for preserve-3d plane splitting.
///
/// The primitive's local rect (pre-clipped by its combined local clip) is
/// turned into a polygon, transformed to world space as needed, clipped
/// against `world_rect`, and registered with the splitter. Returns `false`
/// when the clip leaves nothing visible (nothing is added), `true` otherwise.
pub fn add_split_plane(
    splitter: &mut PlaneSplitter,
    spatial_tree: &SpatialTree,
    prim_spatial_node_index: SpatialNodeIndex,
    original_local_rect: LayoutRect,
    combined_local_clip_rect: &LayoutRect,
    world_rect: WorldRect,
    plane_split_anchor: PlaneSplitAnchor,
) -> bool {
    let transform = spatial_tree
        .get_world_transform(prim_spatial_node_index);
    let matrix = transform.clone().into_transform().cast();

    // Apply the local clip rect here, before splitting. This is
    // because the local clip rect can't be applied in the vertex
    // shader for split composites, since we are drawing polygons
    // rather that rectangles. The interpolation still works correctly
    // since we determine the UVs by doing a bilerp with a factor
    // from the original local rect.
    let local_rect = match original_local_rect
        .intersection(combined_local_clip_rect)
    {
        Some(rect) => rect.cast(),
        // Fully clipped out: nothing to split.
        None => return false,
    };
    let world_rect = world_rect.cast();

    match transform {
        CoordinateSpaceMapping::Local => {
            // Identity mapping: the local rect is already the polygon.
            let polygon = Polygon::from_rect(
                local_rect * Scale::new(1.0),
                plane_split_anchor,
            );
            splitter.add(polygon);
        }
        CoordinateSpaceMapping::ScaleOffset(scale_offset) if scale_offset.scale == Vector2D::new(1.0, 1.0) => {
            // Pure translation: transform via the precomputed matrix and
            // its (cheap) inverse rather than running the general clipper.
            let inv_matrix = scale_offset.inverse().to_transform().cast();
            let polygon = Polygon::from_transformed_rect_with_inverse(
                local_rect,
                &matrix,
                &inv_matrix,
                plane_split_anchor,
            ).unwrap();
            splitter.add(polygon);
        }
        CoordinateSpaceMapping::ScaleOffset(_) |
        CoordinateSpaceMapping::Transform(_) => {
            // General case: clip the transformed polygon against the
            // world rect; this may produce several sub-polygons.
            let mut clipper = Clipper::new();
            let results = clipper.clip_transformed(
                Polygon::from_rect(
                    local_rect,
                    plane_split_anchor,
                ),
                &matrix,
                Some(world_rect),
            );
            if let Ok(results) = results {
                for poly in results {
                    splitter.add(poly);
                }
            }
        }
    }

    true
}

/// Sort the accumulated preserve-3d planes back-to-front and record the
/// resulting draw order (plus per-plane GPU data) into this picture's
/// 3D-context root list. Panics if called on a picture that is not a
/// 3D context root.
pub fn resolve_split_planes(
    &mut self,
    splitter: &mut PlaneSplitter,
    gpu_cache: &mut GpuCache,
    spatial_tree: &SpatialTree,
) {
    let ordered = match self.context_3d {
        Picture3DContext::In { root_data: Some(ref mut list), ..
} => list,
        _ => panic!("Expected to find 3D context root"),
    };
    ordered.clear();

    // Process the accumulated split planes and order them for rendering.
    // Z axis is directed at the screen, `sort` is ascending, and we need back-to-front order.
    let sorted = splitter.sort(vec3(0.0, 0.0, 1.0));
    ordered.reserve(sorted.len());
    for poly in sorted {
        let cluster = &self.prim_list.clusters[poly.anchor.cluster_index];
        let spatial_node_index = cluster.spatial_node_index;
        // Map the world-space polygon corners back into the primitive's
        // local space; skip planes whose transform can't be inverted.
        let transform = match spatial_tree
            .get_world_transform(spatial_node_index)
            .inverse()
        {
            Some(transform) => transform.into_transform(),
            // logging this would be a bit too verbose
            None => continue,
        };

        let local_points = [
            transform.transform_point3d(poly.points[0].cast()),
            transform.transform_point3d(poly.points[1].cast()),
            transform.transform_point3d(poly.points[2].cast()),
            transform.transform_point3d(poly.points[3].cast()),
        ];

        // If any of the points are un-transformable, just drop this
        // plane from drawing.
        if local_points.iter().any(|p| p.is_none()) {
            continue;
        }

        let p0 = local_points[0].unwrap();
        let p1 = local_points[1].unwrap();
        let p2 = local_points[2].unwrap();
        let p3 = local_points[3].unwrap();
        // Pack the four local-space corners into two GPU blocks for the
        // split-composite shader.
        let gpu_blocks = [
            [p0.x, p0.y, p1.x, p1.y].into(),
            [p2.x, p2.y, p3.x, p3.y].into(),
        ];
        let gpu_handle = gpu_cache.push_per_frame_blocks(&gpu_blocks);
        let gpu_address = gpu_cache.get_address(&gpu_handle);

        ordered.push(OrderedPictureChild {
            anchor: poly.anchor,
            spatial_node_index,
            gpu_address,
        });
    }
}

/// Called during initial picture traversal, before we know the
/// bounding rect of children. It is possible to determine the
/// surface / raster config now though.
///
/// Returns the picture's primitive list (moved out of `self` for the
/// traversal; `post_update` restores it), or `None` when the picture is
/// invisible and its subtree should be skipped.
fn pre_update(
    &mut self,
    state: &mut PictureUpdateState,
    frame_context: &FrameBuildingContext,
) -> Option<PrimitiveList> {
    // Reset raster config in case we early out below.
    self.raster_config = None;

    // Resolve animation properties, and early out if the filter
    // properties make this picture invisible.
if !self.resolve_scene_properties(frame_context.scene_properties) {
        return None;
    }

    // For out-of-preserve-3d pictures, the backface visibility is determined by
    // the local transform only.
    // Note: we aren't taking the transform relative to the parent picture,
    // since picture tree can be more dense than the corresponding spatial tree.
    if !self.is_backface_visible {
        if let Picture3DContext::Out = self.context_3d {
            match frame_context.spatial_tree.get_local_visible_face(self.spatial_node_index) {
                VisibleFace::Front => {}
                VisibleFace::Back => return None,
            }
        }
    }

    // See if this picture actually needs a surface for compositing.
    // TODO(gw): FPC: Remove the actual / requested composite mode distinction.
    let actual_composite_mode = self.requested_composite_mode.clone();

    if let Some(composite_mode) = actual_composite_mode {
        // Retrieve the positioning node information for the parent surface.
        let parent_raster_node_index = state.current_surface().raster_spatial_node_index;
        let parent_device_pixel_scale = state.current_surface().device_pixel_scale;
        let surface_spatial_node_index = self.spatial_node_index;

        let surface_to_parent_transform = frame_context.spatial_tree
            .get_relative_transform(surface_spatial_node_index, parent_raster_node_index);

        // Check if there is perspective or if an SVG filter is applied, and thus whether a new
        // rasterization root should be established.
        let establishes_raster_root = match composite_mode {
            PictureCompositeMode::TileCache { .. } => {
                // Picture caches are special cased - they never need to establish a raster root. In future,
                // we will probably remove TileCache as a specific composite mode.
                false
            }
            PictureCompositeMode::SvgFilter(..) => {
                // Filters must be applied before transforms, to do this, we can mark this picture as establishing a raster root.
                true
            }
            PictureCompositeMode::MixBlend(..) |
            PictureCompositeMode::Filter(..) |
            PictureCompositeMode::ComponentTransferFilter(..) |
            PictureCompositeMode::Blit(..) => {
                // TODO(gw): As follow ups, individually move each of these composite modes to create raster roots.
                surface_to_parent_transform.is_perspective()
            }
        };

        let (raster_spatial_node_index, device_pixel_scale) = if establishes_raster_root {
            // If a raster root is established, this surface should be scaled based on the scale factors of the surface raster to parent raster transform.
            // This scaling helps ensure that the content in this surface does not become blurry or pixelated when composited in the parent surface.
            let scale_factors = surface_to_parent_transform.scale_factors();

            // Pick the largest scale factor of the transform for the scaling factor.
            // Currently, we ensure that the scaling factor is >= 1.0 as a smaller scale factor can result in blurry output.
            let scaling_factor = scale_factors.0.max(scale_factors.1).max(1.0);

            let device_pixel_scale = parent_device_pixel_scale * Scale::new(scaling_factor);
            (surface_spatial_node_index, device_pixel_scale)
        } else {
            // No raster root: inherit the parent's raster space and scale.
            (parent_raster_node_index, parent_device_pixel_scale)
        };

        let scale_factors = frame_context
            .spatial_tree
            .get_relative_transform(surface_spatial_node_index, raster_spatial_node_index)
            .scale_factors();

        // This inflation factor is to be applied to all primitives within the surface.
        // Only inflate if the caller hasn't already inflated the bounding rects for this filter.
        let mut inflation_factor = 0.0;
        if self.options.inflate_if_required {
            match composite_mode {
                PictureCompositeMode::Filter(Filter::Blur(width, height)) => {
                    let blur_radius = f32::max(clamp_blur_radius(width, scale_factors), clamp_blur_radius(height, scale_factors));
                    // The amount of extra space needed for primitives inside
                    // this picture to ensure the visibility check is correct.
                    inflation_factor = blur_radius * BLUR_SAMPLE_SCALE;
                }
                PictureCompositeMode::SvgFilter(ref primitives, _) => {
                    // Use the largest blur width/height found in any of the
                    // SVG filter's blur primitives.
                    let mut max = 0.0;
                    for primitive in primitives {
                        if let FilterPrimitiveKind::Blur(ref blur) = primitive.kind {
                            max = f32::max(max, blur.width);
                            max = f32::max(max, blur.height);
                        }
                    }
                    inflation_factor = clamp_blur_radius(max, scale_factors) * BLUR_SAMPLE_SCALE;
                }
                PictureCompositeMode::Filter(Filter::DropShadows(ref shadows)) => {
                    // TODO(gw): This is incorrect, since we don't consider the drop shadow
                    //           offset. However, fixing that is a larger task, so this is
                    //           an improvement on the current case (this at least works where
                    //           the offset of the drop-shadow is ~0, which is often true).

                    // Can't use max_by_key here since f32 isn't Ord
                    let mut max_blur_radius: f32 = 0.0;
                    for shadow in shadows {
                        max_blur_radius = max_blur_radius.max(shadow.blur_radius);
                    }

                    inflation_factor = clamp_blur_radius(max_blur_radius, scale_factors) * BLUR_SAMPLE_SCALE;
                }
                _ => {}
            }
        }

        // Create and push the surface; its index goes into the raster config
        // so later passes can find it.
        let surface = SurfaceInfo::new(
            surface_spatial_node_index,
            raster_spatial_node_index,
            inflation_factor,
            frame_context.global_screen_world_rect,
            &frame_context.spatial_tree,
            device_pixel_scale,
            scale_factors,
        );

        self.raster_config = Some(RasterConfig {
            composite_mode,
            establishes_raster_root,
            surface_index: state.push_surface(surface),
            root_scaling_factor: 1.0,
            clipped_bounding_rect: WorldRect::zero(),
        });
    }

    Some(mem::replace(&mut self.prim_list, PrimitiveList::empty()))
}

/// Called after updating child pictures during the initial
/// picture traversal.
fn post_update(
    &mut self,
    prim_list: PrimitiveList,
    state: &mut PictureUpdateState,
    frame_context: &FrameBuildingContext,
    data_stores: &mut DataStores,
) {
    // Restore the pictures list used during recursion.
    self.prim_list = prim_list;

    let surface = state.current_surface_mut();

    for cluster in &mut self.prim_list.clusters {
        cluster.flags.remove(ClusterFlags::IS_VISIBLE);

        // Skip the cluster if backface culled.
if !cluster.flags.contains(ClusterFlags::IS_BACKFACE_VISIBLE) {
            // For in-preserve-3d primitives and pictures, the backface visibility is
            // evaluated relative to the containing block.
            if let Picture3DContext::In { ancestor_index, .. } = self.context_3d {
                let mut face = VisibleFace::Front;
                frame_context.spatial_tree.get_relative_transform_with_face(
                    cluster.spatial_node_index,
                    ancestor_index,
                    Some(&mut face),
                );
                if face == VisibleFace::Back {
                    continue
                }
            }
        }

        // No point including this cluster if it can't be transformed
        let spatial_node = &frame_context
            .spatial_tree
            .spatial_nodes[cluster.spatial_node_index.0 as usize];
        if !spatial_node.invertible {
            continue;
        }

        // Update any primitives/cluster bounding rects that can only be done
        // with information available during frame building.
        if cluster.flags.contains(ClusterFlags::IS_BACKDROP_FILTER) {
            let backdrop_to_world_mapper = SpaceMapper::new_with_target(
                ROOT_SPATIAL_NODE_INDEX,
                cluster.spatial_node_index,
                LayoutRect::max_rect(),
                frame_context.spatial_tree,
            );
            for prim_instance in &mut self.prim_list.prim_instances[cluster.prim_range()] {
                match prim_instance.kind {
                    PrimitiveInstanceKind::Backdrop { data_handle, .. } => {
                        // The actual size and clip rect of this primitive are determined by computing the bounding
                        // box of the projected rect of the backdrop-filter element onto the backdrop.
                        let prim_data = &mut data_stores.backdrop[data_handle];
                        let spatial_node_index = prim_data.kind.spatial_node_index;

                        // We cannot use the relative transform between the backdrop and the element because
                        // that doesn't take into account any projection transforms that both spatial nodes are children of.
                        // Instead, we first project from the element to the world space and get a flattened 2D bounding rect
                        // in the screen space, we then map this rect from the world space to the backdrop space to get the
                        // proper bounding box where the backdrop-filter needs to be processed.
                        let prim_to_world_mapper = SpaceMapper::new_with_target(
                            ROOT_SPATIAL_NODE_INDEX,
                            spatial_node_index,
                            LayoutRect::max_rect(),
                            frame_context.spatial_tree,
                        );

                        // First map to the screen and get a flattened rect
                        let prim_rect = prim_to_world_mapper.map(&prim_data.kind.border_rect).unwrap_or_else(LayoutRect::zero);
                        // Backwards project the flattened rect onto the backdrop
                        let prim_rect = backdrop_to_world_mapper.unmap(&prim_rect).unwrap_or_else(LayoutRect::zero);

                        // TODO(aosmond): Is this safe? Updating the primitive size during
                        // frame building is usually problematic since scene building will cache
                        // the primitive information in the GPU already.
                        prim_data.common.prim_rect = prim_rect;

                        // Update the primitive instance to use the new size.
                        prim_instance.clip_set.local_clip_rect = prim_rect;

                        // Update the cluster bounding rect now that we have the backdrop rect.
                        cluster.bounding_rect = cluster.bounding_rect.union(&prim_rect);
                    }
                    _ => {
                        panic!("BUG: unexpected deferred primitive kind for cluster updates");
                    }
                }
            }
        }

        // Map the cluster bounding rect into the space of the surface, and
        // include it in the surface bounding rect.
        surface.map_local_to_surface.set_target_spatial_node(
            cluster.spatial_node_index,
            frame_context.spatial_tree,
        );

        // Mark the cluster visible, since it passed the invertible and
        // backface checks.
        cluster.flags.insert(ClusterFlags::IS_VISIBLE);
        if let Some(cluster_rect) = surface.map_local_to_surface.map(&cluster.bounding_rect) {
            surface.rect = surface.rect.union(&cluster_rect);
        }
    }

    // If this picture establishes a surface, then map the surface bounding
    // rect into the parent surface coordinate space, and propagate that up
    // to the parent.
    if let Some(ref mut raster_config) = self.raster_config {
        let surface = state.current_surface_mut();
        // Inflate the local bounding rect if required by the filter effect.
        if self.options.inflate_if_required {
            surface.rect = raster_config.composite_mode.inflate_picture_rect(surface.rect, surface.scale_factors);
        }

        let mut surface_rect = surface.rect * Scale::new(1.0);

        // Pop this surface from the stack
        let surface_index = state.pop_surface();
        debug_assert_eq!(surface_index, raster_config.surface_index);

        // Set the estimated and precise local rects. The precise local rect
        // may be changed again during frame visibility.
        self.estimated_local_rect = surface_rect;
        self.precise_local_rect = surface_rect;

        // Drop shadows draw both a content and shadow rect, so need to expand the local
        // rect of any surfaces to be composited in parent surfaces correctly.
        match raster_config.composite_mode {
            PictureCompositeMode::Filter(Filter::DropShadows(ref shadows)) => {
                for shadow in shadows {
                    let shadow_rect = self.estimated_local_rect.translate(shadow.offset);
                    surface_rect = surface_rect.union(&shadow_rect);
                }
            }
            _ => {}
        }

        // Propagate up to parent surface, now that we know this surface's static rect
        let parent_surface = state.current_surface_mut();
        parent_surface.map_local_to_surface.set_target_spatial_node(
            self.spatial_node_index,
            frame_context.spatial_tree,
        );
        if let Some(parent_surface_rect) = parent_surface
            .map_local_to_surface
            .map(&surface_rect)
        {
            parent_surface.rect = parent_surface.rect.union(&parent_surface_rect);
        }
    }
}

/// Prepare this picture for rendering: resolve any preserve-3d plane
/// splitting, and upload per-composite-mode extra data to the GPU cache.
/// Returns `true` (pictures are always considered prepared; the early
/// return covers pass-through pictures with no raster config).
pub fn prepare_for_render(
    &mut self,
    frame_context: &FrameBuildingContext,
    frame_state: &mut FrameBuildingState,
    data_stores: &mut DataStores,
) -> bool {
    let mut pic_state_for_children = self.take_state();

    if let Some(ref mut splitter) = pic_state_for_children.plane_splitter {
        self.resolve_split_planes(
            splitter,
            &mut frame_state.gpu_cache,
            &frame_context.spatial_tree,
        );
    }

    let raster_config = match self.raster_config {
        Some(ref mut raster_config) => raster_config,
        // Pass-through picture: nothing extra to upload.
        None => {
            return true
        }
    };

    // TODO(gw): Almost all of the Picture types below use extra_gpu_cache_data
    //           to store the same type of data. The exception is the filter
    //           with a ColorMatrix, which stores the color matrix here. It's
    //           probably worth tidying this code up to be a bit more consistent.
    //           Perhaps store the color matrix after the common data, even though
    //           it's not used by that shader.
    match raster_config.composite_mode {
        PictureCompositeMode::TileCache { .. } => {}
        PictureCompositeMode::Filter(Filter::Blur(..)) => {}
        PictureCompositeMode::Filter(Filter::DropShadows(ref shadows)) => {
            self.extra_gpu_data_handles.resize(shadows.len(), GpuCacheHandle::new());
            for (shadow, extra_handle) in shadows.iter().zip(self.extra_gpu_data_handles.iter_mut()) {
                if let Some(mut request) = frame_state.gpu_cache.request(extra_handle) {
                    // Basic brush primitive header is (see end of prepare_prim_for_render_inner in prim_store.rs)
                    //  [brush specific data]
                    //  [segment_rect, segment data]
                    let shadow_rect = self.precise_local_rect.translate(shadow.offset);

                    // ImageBrush colors
                    request.push(shadow.color.premultiplied());
                    request.push(PremultipliedColorF::WHITE);
                    request.push([
                        self.precise_local_rect.size.width,
                        self.precise_local_rect.size.height,
                        0.0,
                        0.0,
                    ]);

                    // segment rect / extra data
                    request.push(shadow_rect);
                    request.push([0.0, 0.0, 0.0, 0.0]);
                }
            }
        }
        PictureCompositeMode::Filter(ref filter) => {
            match *filter {
                Filter::ColorMatrix(ref m) => {
                    if self.extra_gpu_data_handles.is_empty() {
                        self.extra_gpu_data_handles.push(GpuCacheHandle::new());
                    }
                    if let Some(mut request) = frame_state.gpu_cache.request(&mut self.extra_gpu_data_handles[0]) {
                        // Upload the 5x4 color matrix, one row of 4 floats at a time.
                        for i in 0..5 {
                            request.push([m[i*4], m[i*4+1], m[i*4+2], m[i*4+3]]);
                        }
                    }
                }
                Filter::Flood(ref color) => {
                    if self.extra_gpu_data_handles.is_empty() {
                        self.extra_gpu_data_handles.push(GpuCacheHandle::new());
                    }
                    if let Some(mut request) = frame_state.gpu_cache.request(&mut self.extra_gpu_data_handles[0]) {
                        request.push(color.to_array());
                    }
                }
                _ => {}
            }
        }
        PictureCompositeMode::ComponentTransferFilter(handle) => {
            let filter_data = &mut data_stores.filter_data[handle];
filter_data.update(frame_state);
        }
        PictureCompositeMode::MixBlend(..) |
        PictureCompositeMode::Blit(_) |
        PictureCompositeMode::SvgFilter(..) => {}
    }

    true
}
}

// Calculate a single homogeneous screen-space UV for a picture.
fn calculate_screen_uv(
    local_pos: &PicturePoint,
    transform: &PictureToRasterTransform,
    rendered_rect: &DeviceRect,
    device_pixel_scale: DevicePixelScale,
) -> DeviceHomogeneousVector {
    // Keep the result homogeneous (divide by w deferred to the shader) so
    // perspective-transformed pictures interpolate correctly.
    let raster_pos = transform.transform_point2d_homogeneous(*local_pos);

    DeviceHomogeneousVector::new(
        (raster_pos.x * device_pixel_scale.0 - rendered_rect.origin.x * raster_pos.w) / rendered_rect.size.width,
        (raster_pos.y * device_pixel_scale.0 - rendered_rect.origin.y * raster_pos.w) / rendered_rect.size.height,
        0.0,
        raster_pos.w,
    )
}

// Calculate a UV rect within an image based on the screen space
// vertex positions of a picture.
fn calculate_uv_rect_kind(
    pic_rect: &PictureRect,
    transform: &PictureToRasterTransform,
    rendered_rect: &DeviceRect,
    device_pixel_scale: DevicePixelScale,
) -> UvRectKind {
    // Compute one homogeneous UV per corner of the picture rect.
    let top_left = calculate_screen_uv(
        &pic_rect.origin,
        transform,
        &rendered_rect,
        device_pixel_scale,
    );

    let top_right = calculate_screen_uv(
        &pic_rect.top_right(),
        transform,
        &rendered_rect,
        device_pixel_scale,
    );

    let bottom_left = calculate_screen_uv(
        &pic_rect.bottom_left(),
        transform,
        &rendered_rect,
        device_pixel_scale,
    );

    let bottom_right = calculate_screen_uv(
        &pic_rect.bottom_right(),
        transform,
        &rendered_rect,
        device_pixel_scale,
    );

    UvRectKind::Quad {
        top_left,
        top_right,
        bottom_left,
        bottom_right,
    }
}

/// Build the raster->world and picture->raster space mappers for a surface.
/// The picture->raster mapper is bounded by the world rect un-mapped into
/// raster space (falling back to the maximal rect if un-mapping fails).
fn create_raster_mappers(
    surface_spatial_node_index: SpatialNodeIndex,
    raster_spatial_node_index: SpatialNodeIndex,
    world_rect: WorldRect,
    spatial_tree: &SpatialTree,
) -> (SpaceMapper<RasterPixel, WorldPixel>, SpaceMapper<PicturePixel, RasterPixel>) {
    let map_raster_to_world = SpaceMapper::new_with_target(
        ROOT_SPATIAL_NODE_INDEX,
        raster_spatial_node_index,
        world_rect,
        spatial_tree,
    );

    let raster_bounds = map_raster_to_world.unmap(&world_rect)
        .unwrap_or_else(RasterRect::max_rect);

    let map_pic_to_raster = SpaceMapper::new_with_target(
        raster_spatial_node_index,
        surface_spatial_node_index,
        raster_bounds,
        spatial_tree,
    );

    (map_raster_to_world, map_pic_to_raster)
}

/// Build a `TransformKey` describing the relative transform between a
/// primitive's spatial node and the tile cache's spatial node.
fn get_transform_key(
    spatial_node_index: SpatialNodeIndex,
    cache_spatial_node_index: SpatialNodeIndex,
    spatial_tree: &SpatialTree,
) -> TransformKey {
    // Note: this is the only place where we don't know beforehand if the tile-affecting
    // spatial node is below or above the current picture.
    let transform = if cache_spatial_node_index >= spatial_node_index {
        spatial_tree
            .get_relative_transform(
                cache_spatial_node_index,
                spatial_node_index,
            )
    } else {
        spatial_tree
            .get_relative_transform(
                spatial_node_index,
                cache_spatial_node_index,
            )
    };
    transform.into()
}

/// A key for storing primitive comparison results during tile dependency tests.
#[derive(Debug, Copy, Clone, Eq, Hash, PartialEq)]
struct PrimitiveComparisonKey {
    prev_index: PrimitiveDependencyIndex,
    curr_index: PrimitiveDependencyIndex,
}

/// Information stored about an image dependency
#[derive(Debug, Copy, Clone, PartialEq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct ImageDependency {
    pub key: ImageKey,
    pub generation: ImageGeneration,
}

impl ImageDependency {
    // Sentinel value used before a real dependency is recorded.
    pub const INVALID: ImageDependency = ImageDependency {
        key: ImageKey::DUMMY,
        generation: ImageGeneration::INVALID,
    };
}

/// A helper struct to compare a primitive and all its sub-dependencies.
struct PrimitiveComparer<'a> {
    // Per-dependency-kind helpers that walk the prev/curr dependency arrays
    // in lockstep with the primitive lists.
    clip_comparer: CompareHelper<'a, ItemUid>,
    transform_comparer: CompareHelper<'a, SpatialNodeKey>,
    image_comparer: CompareHelper<'a, ImageDependency>,
    opacity_comparer: CompareHelper<'a, OpacityBinding>,
    color_comparer: CompareHelper<'a, ColorBinding>,
    resource_cache: &'a ResourceCache,
    spatial_node_comparer: &'a mut SpatialNodeComparer,
    opacity_bindings: &'a FastHashMap<PropertyBindingId, OpacityBindingInfo>,
    color_bindings: &'a FastHashMap<PropertyBindingId, ColorBindingInfo>,
}

impl<'a> PrimitiveComparer<'a> {
    /// Construct a comparer over the previous and current tile descriptors.
    fn new(
        prev: &'a TileDescriptor,
        curr: &'a TileDescriptor,
        resource_cache: &'a ResourceCache,
        spatial_node_comparer: &'a mut SpatialNodeComparer,
        opacity_bindings: &'a FastHashMap<PropertyBindingId, OpacityBindingInfo>,
        color_bindings: &'a FastHashMap<PropertyBindingId, ColorBindingInfo>,
    ) -> Self {
        let clip_comparer = CompareHelper::new(
            &prev.clips,
            &curr.clips,
        );
        let transform_comparer = CompareHelper::new(
            &prev.transforms,
            &curr.transforms,
        );
        let image_comparer = CompareHelper::new(
            &prev.images,
            &curr.images,
        );
        let opacity_comparer = CompareHelper::new(
            &prev.opacity_bindings,
            &curr.opacity_bindings,
        );
        let color_comparer = CompareHelper::new(
            &prev.color_bindings,
            &curr.color_bindings,
        );

        PrimitiveComparer {
            clip_comparer,
            transform_comparer,
            image_comparer,
            opacity_comparer,
            color_comparer,
            resource_cache,
            spatial_node_comparer,
            opacity_bindings,
            color_bindings,
        }
    }

    /// Rewind all sub-comparers to the start of their dependency arrays.
    fn reset(&mut self) {
        self.clip_comparer.reset();
        self.transform_comparer.reset();
        self.image_comparer.reset();
        self.opacity_comparer.reset();
        self.color_comparer.reset();
    }

    /// Skip over the previous-frame dependencies of `prim` without comparing.
    fn advance_prev(&mut self, prim: &PrimitiveDescriptor) {
        self.clip_comparer.advance_prev(prim.clip_dep_count);
        self.transform_comparer.advance_prev(prim.transform_dep_count);
        self.image_comparer.advance_prev(prim.image_dep_count);
        self.opacity_comparer.advance_prev(prim.opacity_binding_dep_count);
        self.color_comparer.advance_prev(prim.color_binding_dep_count);
    }

    /// Skip over the current-frame dependencies of `prim` without comparing.
    fn advance_curr(&mut self, prim: &PrimitiveDescriptor) {
        self.clip_comparer.advance_curr(prim.clip_dep_count);
        self.transform_comparer.advance_curr(prim.transform_dep_count);
        self.image_comparer.advance_curr(prim.image_dep_count);
        self.opacity_comparer.advance_curr(prim.opacity_binding_dep_count);
        self.color_comparer.advance_curr(prim.color_binding_dep_count);
    }

    /// Check if two primitive descriptors are the same.
    ///
    /// Returns the first difference found, checked in a fixed priority order:
    /// descriptor, clips, transforms, images, opacity bindings, color bindings.
    /// `opt_detail` (capture/replay builds) receives a detailed reason.
    fn compare_prim(
        &mut self,
        prev: &PrimitiveDescriptor,
        curr: &PrimitiveDescriptor,
        opt_detail: Option<&mut PrimitiveCompareResultDetail>,
    ) -> PrimitiveCompareResult {
        let resource_cache = self.resource_cache;
        let spatial_node_comparer = &mut self.spatial_node_comparer;
        let opacity_bindings = self.opacity_bindings;
        let color_bindings = self.color_bindings;

        // Check equality of the PrimitiveDescriptor
        if prev != curr {
            if let Some(detail) = opt_detail {
                *detail = PrimitiveCompareResultDetail::Descriptor{ old: *prev, new: *curr };
            }
            return PrimitiveCompareResult::Descriptor;
        }

        // Check if any of the clips this prim has are different.
        let mut clip_result = CompareHelperResult::Equal;
        if !self.clip_comparer.is_same(
            prev.clip_dep_count,
            curr.clip_dep_count,
            |prev, curr| {
                prev == curr
            },
            if opt_detail.is_some() { Some(&mut clip_result) } else { None }
        ) {
            if let Some(detail) = opt_detail { *detail = PrimitiveCompareResultDetail::Clip{ detail: clip_result }; }
            return PrimitiveCompareResult::Clip;
        }

        // Check if any of the transforms this prim has are different.
        let mut transform_result = CompareHelperResult::Equal;
        if !self.transform_comparer.is_same(
            prev.transform_dep_count,
            curr.transform_dep_count,
            |prev, curr| {
                spatial_node_comparer.are_transforms_equivalent(prev, curr)
            },
            if opt_detail.is_some() { Some(&mut transform_result) } else { None },
        ) {
            if let Some(detail) = opt_detail {
                *detail = PrimitiveCompareResultDetail::Transform{ detail: transform_result };
            }
            return PrimitiveCompareResult::Transform;
        }

        // Check if any of the images this prim has are different.
        let mut image_result = CompareHelperResult::Equal;
        if !self.image_comparer.is_same(
            prev.image_dep_count,
            curr.image_dep_count,
            |prev, curr| {
                // Equal only if the key/generation match AND the resource
                // cache still reports the same generation (content unchanged).
                prev == curr &&
                resource_cache.get_image_generation(curr.key) == curr.generation
            },
            if opt_detail.is_some() { Some(&mut image_result) } else { None },
        ) {
            if let Some(detail) = opt_detail {
                *detail = PrimitiveCompareResultDetail::Image{ detail: image_result };
            }
            return PrimitiveCompareResult::Image;
        }

        // Check if any of the opacity bindings this prim has are different.
        let mut bind_result = CompareHelperResult::Equal;
        if !self.opacity_comparer.is_same(
            prev.opacity_binding_dep_count,
            curr.opacity_binding_dep_count,
            |prev, curr| {
                if prev != curr {
                    return false;
                }

                // A dynamic binding is only equal if its value hasn't changed
                // since last frame; an unknown binding id counts as changed.
                if let OpacityBinding::Binding(id) = curr {
                    if opacity_bindings
                        .get(id)
                        .map_or(true, |info| info.changed) {
                        return false;
                    }
                }

                true
            },
            if opt_detail.is_some() { Some(&mut bind_result) } else { None },
        ) {
            if let Some(detail) = opt_detail {
                *detail = PrimitiveCompareResultDetail::OpacityBinding{ detail: bind_result };
            }
            return PrimitiveCompareResult::OpacityBinding;
        }

        // Check if any of the color bindings this prim has are different.
        let mut bind_result = CompareHelperResult::Equal;
        if !self.color_comparer.is_same(
            prev.color_binding_dep_count,
            curr.color_binding_dep_count,
            |prev, curr| {
                if prev != curr {
                    return false;
                }

                // Same rule as opacity bindings above.
                if let ColorBinding::Binding(id) = curr {
                    if color_bindings
                        .get(id)
                        .map_or(true, |info| info.changed) {
                        return false;
                    }
                }

                true
            },
            if opt_detail.is_some() { Some(&mut bind_result) } else { None },
        ) {
            if let Some(detail) = opt_detail {
                *detail = PrimitiveCompareResultDetail::ColorBinding{ detail: bind_result };
            }
            return PrimitiveCompareResult::ColorBinding;
        }

        PrimitiveCompareResult::Equal
    }
}

/// Details for a node in a quadtree that tracks dirty rects for a tile.
#[cfg_attr(any(feature="capture",feature="replay"), derive(Clone))]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum TileNodeKind {
    Leaf {
        /// The index buffer of primitives that affected this tile previous frame
        #[cfg_attr(any(feature = "capture", feature = "replay"), serde(skip))]
        prev_indices: Vec<PrimitiveDependencyIndex>,
        /// The index buffer of primitives that affect this tile on this frame
        #[cfg_attr(any(feature = "capture", feature = "replay"), serde(skip))]
        curr_indices: Vec<PrimitiveDependencyIndex>,
        /// A bitset of which of the last 64 frames have been dirty for this leaf.
        #[cfg_attr(any(feature = "capture", feature = "replay"), serde(skip))]
        dirty_tracker: u64,
        /// The number of frames since this node split or merged.
        #[cfg_attr(any(feature = "capture", feature = "replay"), serde(skip))]
        frames_since_modified: usize,
    },
    Node {
        /// The four children of this node
        children: Vec<TileNode>,
    },
}

/// The kind of modification that a tile wants to do
#[derive(Copy, Clone, PartialEq, Debug)]
enum TileModification {
    Split,
    Merge,
}

/// A node in the dirty rect tracking quadtree.
#[cfg_attr(any(feature="capture",feature="replay"), derive(Clone))]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct TileNode {
    /// Leaf or internal node
    pub kind: TileNodeKind,
    /// Rect of this node in the same space as the tile cache picture
    pub rect: PictureBox2D,
}

impl TileNode {
    /// Construct a new leaf node, with the given primitive dependency index buffer
    fn new_leaf(curr_indices: Vec<PrimitiveDependencyIndex>) -> Self {
        TileNode {
            kind: TileNodeKind::Leaf {
                prev_indices: Vec::new(),
                curr_indices,
                dirty_tracker: 0,
                frames_since_modified: 0,
            },
            // The real rect is assigned later via `clear`.
            rect: PictureBox2D::zero(),
        }
    }

    /// Draw debug information about this tile node
    ///
    /// Leaves are drawn red when dirty this frame, green when opaque,
    /// yellow otherwise; interior nodes just recurse.
    fn draw_debug_rects(
        &self,
        pic_to_world_mapper: &SpaceMapper<PicturePixel, WorldPixel>,
        is_opaque: bool,
        local_valid_rect: PictureRect,
        scratch: &mut PrimitiveScratchBuffer,
        global_device_pixel_scale: DevicePixelScale,
    ) {
        match self.kind {
            TileNodeKind::Leaf { dirty_tracker, .. } => {
                // Bit 0 of the tracker is "dirty this frame" (see `clear`,
                // which shifts the tracker left once per frame).
                let color = if (dirty_tracker & 1) != 0 {
                    debug_colors::RED
                } else if is_opaque {
                    debug_colors::GREEN
                } else {
                    debug_colors::YELLOW
                };

                if let Some(local_rect) = local_valid_rect.intersection(&self.rect.to_rect()) {
                    let world_rect = pic_to_world_mapper
                        .map(&local_rect)
                        .unwrap();
                    let device_rect = world_rect * global_device_pixel_scale;

                    let outer_color = color.scale_alpha(0.3);
                    let inner_color = outer_color.scale_alpha(0.5);
                    scratch.push_debug_rect(
                        device_rect.inflate(-3.0, -3.0),
                        outer_color,
                        inner_color
                    );
                }
            }
            TileNodeKind::Node { ref children, .. } => {
                for child in children.iter() {
                    child.draw_debug_rects(
                        pic_to_world_mapper,
                        is_opaque,
                        local_valid_rect,
                        scratch,
                        global_device_pixel_scale,
                    );
                }
            }
        }
    }

    /// Calculate the four child rects for a given node
    ///
    /// Order is: top-left, top-right, bottom-left, bottom-right.
    fn get_child_rects(
        rect: &PictureBox2D,
        result: &mut [PictureBox2D; 4],
    ) {
        let p0 = rect.min;
        let p1 = rect.max;
        let pc = p0 + rect.size() * 0.5;

        *result = [
            PictureBox2D::new(
                p0,
                pc,
            ),
            PictureBox2D::new(
                PicturePoint::new(pc.x, p0.y),
                PicturePoint::new(p1.x, pc.y),
            ),
            PictureBox2D::new(
                PicturePoint::new(p0.x, pc.y),
                PicturePoint::new(pc.x, p1.y),
            ),
            PictureBox2D::new(
                pc,
                p1,
            ),
        ];
    }

    /// Called during pre_update, to clear the current dependencies
    fn clear(
        &mut self,
        rect: PictureBox2D,
    ) {
        self.rect = rect;

        match self.kind {
            TileNodeKind::Leaf { ref mut prev_indices, ref mut curr_indices, ref mut dirty_tracker, ref mut frames_since_modified } => {
                // Swap current dependencies to be the previous frame
                mem::swap(prev_indices, curr_indices);
                curr_indices.clear();
                // Note that another frame has passed in the dirty bit trackers
                *dirty_tracker = *dirty_tracker << 1;
                *frames_since_modified += 1;
            }
            TileNodeKind::Node { ref mut children, .. } => {
                // Recompute child rects from the (possibly new) node rect and
                // recurse into each child.
                let mut child_rects = [PictureBox2D::zero(); 4];
                TileNode::get_child_rects(&rect, &mut child_rects);
                assert_eq!(child_rects.len(), children.len());

                for (child, rect) in children.iter_mut().zip(child_rects.iter()) {
                    child.clear(*rect);
                }
            }
        }
    }

    /// Add a primitive dependency to this node
    ///
    /// Leaves record the index; interior nodes forward it to every child
    /// whose rect intersects the primitive rect.
    fn add_prim(
        &mut self,
        index: PrimitiveDependencyIndex,
        prim_rect: &PictureBox2D,
    ) {
        match self.kind {
            TileNodeKind::Leaf { ref mut curr_indices, .. } => {
                curr_indices.push(index);
            }
            TileNodeKind::Node { ref mut children, .. } => {
                for child in children.iter_mut() {
                    if child.rect.intersects(prim_rect) {
                        child.add_prim(index, prim_rect);
                    }
                }
            }
        }
    }

    /// Apply a merge or split operation to this tile, if desired
    fn maybe_merge_or_split(
        &mut self,
        level: i32,
        curr_prims: &[PrimitiveDescriptor],
        max_split_levels: i32,
    ) {
        // Determine if this tile wants to split or merge
        let mut tile_mod = None;

        fn get_dirty_frames(
            dirty_tracker: u64,
            frames_since_modified: usize,
        ) -> Option<u32> {
            // Only consider splitting or merging at least 64 frames since we last changed
            if frames_since_modified > 64 {
                // Each bit in the tracker is a frame that was recently invalidated
                Some(dirty_tracker.count_ones())
            } else {
                None
            }
        }

        match self.kind {
            TileNodeKind::Leaf { dirty_tracker, frames_since_modified, .. } => {
                // Only consider splitting if the tree isn't too deep.
                if level < max_split_levels {
                    if let Some(dirty_frames) = get_dirty_frames(dirty_tracker, frames_since_modified) {
                        // If the tile has invalidated > 50% of the recent number of frames, split.
                        if dirty_frames > 32 {
                            tile_mod = Some(TileModification::Split);
                        }
                    }
                }
            }
            TileNodeKind::Node { ref children, .. } => {
                // There are two conditions that cause a node to merge its children:
                // (1) If _all_ the child nodes are constantly invalidating, then we are wasting
                //     CPU time tracking dependencies for each child, so merge them.
                // (2) If _none_ of the child nodes are recently invalid, then the page content
                //     has probably changed, and we no longer need to track fine grained dependencies here.

                let mut static_count = 0;
                let mut changing_count = 0;

                for child in children {
                    // Only consider merging nodes at the edge of the tree.
                    if let TileNodeKind::Leaf { dirty_tracker, frames_since_modified, .. } = child.kind {
                        if let Some(dirty_frames) = get_dirty_frames(dirty_tracker, frames_since_modified) {
                            if dirty_frames == 0 {
                                // Hasn't been invalidated for some time
                                static_count += 1;
                            } else if dirty_frames == 64 {
                                // Is constantly being invalidated
                                changing_count += 1;
                            }
                        }
                    }

                    // Only merge if all the child tiles are in agreement. Otherwise, we have some
                    // that are invalidating / static, and it's worthwhile tracking dependencies for
                    // them individually.
                    if static_count == 4 || changing_count == 4 {
                        tile_mod = Some(TileModification::Merge);
                    }
                }
            }
        }

        match tile_mod {
            Some(TileModification::Split) => {
                // To split a node, take the current dependency index buffer for this node, and
                // split it into child index buffers.
                let curr_indices = match self.kind {
                    TileNodeKind::Node { .. } => {
                        unreachable!("bug - only leaves can split");
                    }
                    TileNodeKind::Leaf { ref mut curr_indices, .. } => {
                        curr_indices.take()
                    }
                };

                let mut child_rects = [PictureBox2D::zero(); 4];
                TileNode::get_child_rects(&self.rect, &mut child_rects);

                let mut child_indices = [
                    Vec::new(),
                    Vec::new(),
                    Vec::new(),
                    Vec::new(),
                ];

                // Step through the index buffer, and add primitives to each of the children
                // that they intersect.
                for index in curr_indices {
                    let prim = &curr_prims[index.0 as usize];
                    for (child_rect, indices) in child_rects.iter().zip(child_indices.iter_mut()) {
                        if prim.prim_clip_box.intersects(child_rect) {
                            indices.push(index);
                        }
                    }
                }

                // Create the child nodes and switch from leaf -> node.
                let children = child_indices
                    .iter_mut()
                    .map(|i| TileNode::new_leaf(mem::replace(i, Vec::new())))
                    .collect();

                self.kind = TileNodeKind::Node {
                    children,
                };
            }
            Some(TileModification::Merge) => {
                // Construct a merged index buffer by collecting the dependency index buffers
                // from each child, and merging them into a de-duplicated index buffer.
                let merged_indices = match self.kind {
                    TileNodeKind::Node { ref mut children, .. } => {
                        let mut merged_indices = Vec::new();

                        for child in children.iter() {
                            let child_indices = match child.kind {
                                TileNodeKind::Leaf { ref curr_indices, .. } => {
                                    curr_indices
                                }
                                TileNodeKind::Node { .. } => {
                                    unreachable!("bug: child is not a leaf");
                                }
                            };

                            merged_indices.extend_from_slice(child_indices);
                        }

                        merged_indices.sort();
                        merged_indices.dedup();

                        merged_indices
                    }
                    TileNodeKind::Leaf { .. } => {
                        unreachable!("bug - trying to merge a leaf");
                    }
                };

                // Switch from a node to a leaf, with the combined index buffer
                self.kind = TileNodeKind::Leaf {
                    prev_indices: Vec::new(),
                    curr_indices: merged_indices,
                    dirty_tracker: 0,
                    frames_since_modified: 0,
                };
            }
            None => {
                // If this node didn't merge / split, then recurse into children
                // to see if they want to split / merge.
                if let TileNodeKind::Node { ref mut children, .. } = self.kind {
                    for child in children.iter_mut() {
                        child.maybe_merge_or_split(
                            level+1,
                            curr_prims,
                            max_split_levels,
                        );
                    }
                }
            }
        }
    }

    /// Update the dirty state of this node, building the overall dirty rect
    ///
    /// Walks the prev/curr index buffers in lockstep, comparing primitives
    /// via `prim_comparer`. On the first difference, this leaf's rect is
    /// added to `dirty_rect`, bit 0 of its dirty tracker is set, and the
    /// first invalidation reason is recorded.
    fn update_dirty_rects(
        &mut self,
        prev_prims: &[PrimitiveDescriptor],
        curr_prims: &[PrimitiveDescriptor],
        prim_comparer: &mut PrimitiveComparer,
        dirty_rect: &mut PictureBox2D,
        compare_cache: &mut FastHashMap<PrimitiveComparisonKey, PrimitiveCompareResult>,
        invalidation_reason: &mut Option<InvalidationReason>,
        frame_context: &FrameVisibilityContext,
    ) {
        match self.kind {
            TileNodeKind::Node { ref mut children, .. } => {
                for child in children.iter_mut() {
                    child.update_dirty_rects(
                        prev_prims,
                        curr_prims,
                        prim_comparer,
                        dirty_rect,
                        compare_cache,
                        invalidation_reason,
                        frame_context,
                    );
                }
            }
            TileNodeKind::Leaf { ref prev_indices, ref curr_indices, ref mut dirty_tracker, .. } => {
                // If the index buffers are of different length, they must be different
                if prev_indices.len() == curr_indices.len() {
                    let mut prev_i0 = 0;
                    let mut prev_i1 = 0;
                    prim_comparer.reset();

                    // Walk each index buffer, comparing primitives
                    for (prev_index, curr_index) in prev_indices.iter().zip(curr_indices.iter()) {
                        let i0 = prev_index.0 as usize;
                        let i1 = curr_index.0 as usize;

                        // Advance the dependency arrays for each primitive (this handles
                        // prims that may be skipped by these index buffers).
                        for i in prev_i0 .. i0 {
                            prim_comparer.advance_prev(&prev_prims[i]);
                        }
                        for i in prev_i1 .. i1 {
                            prim_comparer.advance_curr(&curr_prims[i]);
                        }

                        // Compare the primitives, caching the result in a hash map
                        // to save comparisons in other tree nodes.
                        let key = PrimitiveComparisonKey {
                            prev_index: *prev_index,
                            curr_index: *curr_index,
                        };

                        #[cfg(any(feature = "capture", feature = "replay"))]
                        let mut compare_detail = PrimitiveCompareResultDetail::Equal;
                        #[cfg(any(feature = "capture", feature = "replay"))]
                        let prim_compare_result_detail =
                            if frame_context.debug_flags.contains(DebugFlags::TILE_CACHE_LOGGING_DBG) {
                                Some(&mut compare_detail)
                            } else {
                                None
                            };

                        #[cfg(not(any(feature = "capture", feature = "replay")))]
                        let compare_detail = PrimitiveCompareResultDetail::Equal;
                        #[cfg(not(any(feature = "capture", feature = "replay")))]
                        let prim_compare_result_detail = None;

                        let prim_compare_result = *compare_cache
                            .entry(key)
                            .or_insert_with(|| {
                                let prev = &prev_prims[i0];
                                let curr = &curr_prims[i1];
                                prim_comparer.compare_prim(prev, curr, prim_compare_result_detail)
                            });

                        // If not the same, mark this node as dirty and update the dirty rect
                        if prim_compare_result != PrimitiveCompareResult::Equal {
                            if invalidation_reason.is_none() {
                                *invalidation_reason = Some(InvalidationReason::Content {
                                    prim_compare_result,
                                    prim_compare_result_detail: Some(compare_detail)
                                });
                            }
                            *dirty_rect = self.rect.union(dirty_rect);
                            *dirty_tracker = *dirty_tracker | 1;
                            break;
                        }

                        prev_i0 = i0;
                        prev_i1 = i1;
                    }
                } else {
                    if invalidation_reason.is_none() {
                        // if and only if tile logging is enabled, do the expensive step of
                        // converting indices back to ItemUids and allocating old and new vectors
                        // to store them in.
                        #[cfg(any(feature = "capture", feature = "replay"))]
                        {
                            if frame_context.debug_flags.contains(DebugFlags::TILE_CACHE_LOGGING_DBG) {
                                let old = prev_indices.iter().map( |i| prev_prims[i.0 as usize].prim_uid ).collect();
                                let new = curr_indices.iter().map( |i| curr_prims[i.0 as usize].prim_uid ).collect();
                                *invalidation_reason = Some(InvalidationReason::PrimCount {
                                                                old: Some(old),
                                                                new: Some(new) });
                            } else {
                                *invalidation_reason = Some(InvalidationReason::PrimCount {
                                                                old: None,
                                                                new: None });
                            }
                        }
                        #[cfg(not(any(feature = "capture", feature = "replay")))]
                        {
                            *invalidation_reason = Some(InvalidationReason::PrimCount {
                                                                old: None,
                                                                new: None });
                        }
                    }
                    *dirty_rect = self.rect.union(dirty_rect);
                    *dirty_tracker = *dirty_tracker | 1;
                }
            }
        }
    }
}

impl CompositeState {
    // A helper function to destroy all native surfaces for a given list of tiles
    pub fn destroy_native_tiles<'a, I: Iterator<Item = &'a mut Box<Tile>>>(
        &mut self,
        tiles_iter: I,
        resource_cache: &mut ResourceCache,
    ) {
        // Any old tiles that remain after the loop above are going to be dropped. For
        // simple composite mode, the texture cache handle will expire and be collected
        // by the texture cache. For native compositor mode, we need to explicitly
        // invoke a callback to the client to destroy that surface.
        if let CompositorKind::Native { .. } = self.compositor_kind {
            for tile in tiles_iter {
                // Only destroy native surfaces that have been allocated. It's
                // possible for display port tiles to be created that never
                // come on screen, and thus never get a native surface allocated.
                if let Some(TileSurface::Texture {
                    descriptor: SurfaceTextureDescriptor::Native { ref mut id, .. },
                    ..
}) = tile.surface { if let Some(id) = id.take() { resource_cache.destroy_compositor_tile(id); } } } } } } pub fn get_raster_rects( pic_rect: PictureRect, map_to_raster: &SpaceMapper<PicturePixel, RasterPixel>, map_to_world: &SpaceMapper<RasterPixel, WorldPixel>, prim_bounding_rect: WorldRect, device_pixel_scale: DevicePixelScale, ) -> Option<(DeviceRect, DeviceRect)> { let unclipped_raster_rect = map_to_raster.map(&pic_rect)?; let unclipped = raster_rect_to_device_pixels( unclipped_raster_rect, device_pixel_scale, ); let unclipped_world_rect = map_to_world.map(&unclipped_raster_rect)?; let clipped_world_rect = unclipped_world_rect.intersection(&prim_bounding_rect)?; // We don't have to be able to do the back-projection from world into raster. // Rendering only cares one way, so if that fails, we fall back to the full rect. let clipped_raster_rect = match map_to_world.unmap(&clipped_world_rect) { Some(rect) => rect.intersection(&unclipped_raster_rect)?, None => return Some((unclipped, unclipped)), }; let clipped = raster_rect_to_device_pixels( clipped_raster_rect, device_pixel_scale, ); // Ensure that we won't try to allocate a zero-sized clip render task. if clipped.is_empty() { return None; } Some((clipped, unclipped)) } Bug 1712590 - Don't request ImageKey::DUMMY in setup_compositor_surfaces_yuv. r=gfx-reviewers,bradwerth This avoids a bunch of warning-spam (and some hashmap lookups). Differential Revision: https://phabricator.services.mozilla.com/D115822 [ghsync] From https://hg.mozilla.org/mozilla-central/rev/d259a754c1110c501dd7f049cb2f2ebd3dfb7f1f /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! A picture represents a dynamically rendered image. //! //! # Overview //! //! Pictures consists of: //! //! - A number of primitives that are drawn onto the picture. //! 
- A composite operation describing how to composite this //! picture into its parent. //! - A configuration describing how to draw the primitives on //! this picture (e.g. in screen space or local space). //! //! The tree of pictures are generated during scene building. //! //! Depending on their composite operations pictures can be rendered into //! intermediate targets or folded into their parent picture. //! //! ## Picture caching //! //! Pictures can be cached to reduce the amount of rasterization happening per //! frame. //! //! When picture caching is enabled, the scene is cut into a small number of slices, //! typically: //! //! - content slice //! - UI slice //! - background UI slice which is hidden by the other two slices most of the time. //! //! Each of these slice is made up of fixed-size large tiles of 2048x512 pixels //! (or 128x128 for the UI slice). //! //! Tiles can be either cached rasterized content into a texture or "clear tiles" //! that contain only a solid color rectangle rendered directly during the composite //! pass. //! //! ## Invalidation //! //! Each tile keeps track of the elements that affect it, which can be: //! //! - primitives //! - clips //! - image keys //! - opacity bindings //! - transforms //! //! These dependency lists are built each frame and compared to the previous frame to //! see if the tile changed. //! //! The tile's primitive dependency information is organized in a quadtree, each node //! storing an index buffer of tile primitive dependencies. //! //! The union of the invalidated leaves of each quadtree produces a per-tile dirty rect //! which defines the scissor rect used when replaying the tile's drawing commands and //! can be used for partial present. //! //! ## Display List shape //! //! WR will first look for an iframe item in the root stacking context to apply //! picture caching to. If that's not found, it will apply to the entire root //! stacking context of the display list. 
Apart from that, the format of the //! display list is not important to picture caching. Each time a new scroll root //! is encountered, a new picture cache slice will be created. If the display //! list contains more than some arbitrary number of slices (currently 8), the //! content will all be squashed into a single slice, in order to save GPU memory //! and compositing performance. //! //! ## Compositor Surfaces //! //! Sometimes, a primitive would prefer to exist as a native compositor surface. //! This allows a large and/or regularly changing primitive (such as a video, or //! webgl canvas) to be updated each frame without invalidating the content of //! tiles, and can provide a significant performance win and battery saving. //! //! Since drawing a primitive as a compositor surface alters the ordering of //! primitives in a tile, we use 'overlay tiles' to ensure correctness. If a //! tile has a compositor surface, _and_ that tile has primitives that overlap //! the compositor surface rect, the tile switches to be drawn in alpha mode. //! //! We rely on only promoting compositor surfaces that are opaque primitives. //! With this assumption, the tile(s) that intersect the compositor surface get //! a 'cutout' in the rectangle where the compositor surface exists (not the //! entire tile), allowing that tile to be drawn as an alpha tile after the //! compositor surface. //! //! Tiles are only drawn in overlay mode if there is content that exists on top //! of the compositor surface. Otherwise, we can draw the tiles in the normal fast //! path before the compositor surface is drawn. Use of the per-tile valid and //! dirty rects ensure that we do a minimal amount of per-pixel work here to //! blend the overlay tile (this is not always optimal right now, but will be //! improved as a follow up). 
use api::{MixBlendMode, PremultipliedColorF, FilterPrimitiveKind}; use api::{PropertyBinding, PropertyBindingId, FilterPrimitive}; use api::{DebugFlags, ImageKey, ColorF, ColorU, PrimitiveFlags}; use api::{ImageRendering, ColorDepth, YuvColorSpace, YuvFormat, AlphaType}; use api::units::*; use crate::batch::BatchFilter; use crate::box_shadow::BLUR_SAMPLE_SCALE; use crate::clip::{ClipStore, ClipChainInstance, ClipChainId, ClipInstance}; use crate::spatial_tree::{ROOT_SPATIAL_NODE_INDEX, SpatialTree, CoordinateSpaceMapping, SpatialNodeIndex, VisibleFace }; use crate::composite::{CompositorKind, CompositeState, NativeSurfaceId, NativeTileId}; use crate::composite::{ExternalSurfaceDescriptor, ExternalSurfaceDependency}; use crate::debug_colors; use euclid::{vec2, vec3, Point2D, Scale, Size2D, Vector2D, Vector3D, Rect, Transform3D, SideOffsets2D}; use euclid::approxeq::ApproxEq; use crate::filterdata::SFilterData; use crate::intern::ItemUid; use crate::internal_types::{FastHashMap, FastHashSet, PlaneSplitter, Filter, PlaneSplitAnchor, TextureSource}; use crate::frame_builder::{FrameBuildingContext, FrameBuildingState, PictureState, PictureContext}; use crate::gpu_cache::{GpuCache, GpuCacheAddress, GpuCacheHandle}; use crate::gpu_types::{UvRectKind, ZBufferId}; use plane_split::{Clipper, Polygon, Splitter}; use crate::prim_store::{PrimitiveTemplateKind, PictureIndex, PrimitiveInstance, PrimitiveInstanceKind}; use crate::prim_store::{ColorBindingStorage, ColorBindingIndex, PrimitiveScratchBuffer}; use crate::print_tree::{PrintTree, PrintTreePrinter}; use crate::render_backend::{DataStores, FrameId}; use crate::render_task_graph::RenderTaskId; use crate::render_target::RenderTargetKind; use crate::render_task::{BlurTask, RenderTask, RenderTaskLocation, BlurTaskCache}; use crate::render_task::{StaticRenderTaskSurface, RenderTaskKind}; use crate::renderer::BlendMode; use crate::resource_cache::{ResourceCache, ImageGeneration, ImageRequest}; use crate::space::SpaceMapper; use 
crate::scene::SceneProperties; use smallvec::SmallVec; use std::{mem, u8, marker, u32}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::collections::hash_map::Entry; use std::ops::Range; use crate::texture_cache::TextureCacheHandle; use crate::util::{MaxRect, VecHelper, MatrixHelpers, Recycler, raster_rect_to_device_pixels, ScaleOffset}; use crate::filterdata::{FilterDataHandle}; use crate::tile_cache::{SliceDebugInfo, TileDebugInfo, DirtyTileDebugInfo}; use crate::visibility::{PrimitiveVisibilityFlags, FrameVisibilityContext}; use crate::visibility::{VisibilityState, FrameVisibilityState}; #[cfg(any(feature = "capture", feature = "replay"))] use ron; #[cfg(feature = "capture")] use crate::scene_builder_thread::InternerUpdates; #[cfg(any(feature = "capture", feature = "replay"))] use crate::intern::{Internable, UpdateList}; #[cfg(any(feature = "capture", feature = "replay"))] use crate::clip::{ClipIntern, PolygonIntern}; #[cfg(any(feature = "capture", feature = "replay"))] use crate::filterdata::FilterDataIntern; #[cfg(any(feature = "capture", feature = "replay"))] use api::PrimitiveKeyKind; #[cfg(any(feature = "capture", feature = "replay"))] use crate::prim_store::backdrop::Backdrop; #[cfg(any(feature = "capture", feature = "replay"))] use crate::prim_store::borders::{ImageBorder, NormalBorderPrim}; #[cfg(any(feature = "capture", feature = "replay"))] use crate::prim_store::gradient::{LinearGradient, RadialGradient, ConicGradient}; #[cfg(any(feature = "capture", feature = "replay"))] use crate::prim_store::image::{Image, YuvImage}; #[cfg(any(feature = "capture", feature = "replay"))] use crate::prim_store::line_dec::LineDecoration; #[cfg(any(feature = "capture", feature = "replay"))] use crate::prim_store::picture::Picture; #[cfg(any(feature = "capture", feature = "replay"))] use crate::prim_store::text_run::TextRun; #[cfg(feature = "capture")] use std::fs::File; #[cfg(feature = "capture")] use std::io::prelude::*; #[cfg(feature = "capture")] use 
std::path::PathBuf;
use crate::scene_building::{SliceFlags};

#[cfg(feature = "replay")]
// used by tileview so don't use an internal_types FastHashMap
use std::collections::HashMap;

// Maximum blur radius for blur filter (different than box-shadow blur).
// Taken from FilterNodeSoftware.cpp in Gecko.
pub const MAX_BLUR_RADIUS: f32 = 100.;

/// Specify whether a surface allows subpixel AA text rendering.
#[derive(Debug, Copy, Clone)]
pub enum SubpixelMode {
    /// This surface allows subpixel AA text
    Allow,
    /// Subpixel AA text cannot be drawn on this surface
    Deny,
    /// Subpixel AA can be drawn on this surface, if not intersecting
    /// with the excluded regions, and inside the allowed rect.
    Conditional {
        allowed_rect: PictureRect,
    },
}

/// A comparable transform matrix, that compares with epsilon checks.
#[derive(Debug, Clone)]
struct MatrixKey {
    // Row-major array of the 4x4 matrix components (see `to_array` below).
    m: [f32; 16],
}

impl PartialEq for MatrixKey {
    fn eq(&self, other: &Self) -> bool {
        const EPSILON: f32 = 0.001;

        // TODO(gw): It's possible that we may need to adjust the epsilon
        //           to be tighter on most of the matrix, except the
        //           translation parts?
        for (i, j) in self.m.iter().zip(other.m.iter()) {
            if !i.approx_eq_eps(j, &EPSILON) {
                return false;
            }
        }

        true
    }
}

/// A comparable / hashable version of a coordinate space mapping. Used to determine
/// if a transform dependency for a tile has changed.
#[derive(Debug, PartialEq, Clone)]
enum TransformKey {
    /// Identity mapping (same coordinate space).
    Local,
    /// Axis-aligned scale plus offset.
    ScaleOffset {
        scale_x: f32,
        scale_y: f32,
        offset_x: f32,
        offset_y: f32,
    },
    /// Full matrix transform (compared with epsilon via `MatrixKey`).
    Transform {
        m: MatrixKey,
    }
}

impl<Src, Dst> From<CoordinateSpaceMapping<Src, Dst>> for TransformKey {
    fn from(transform: CoordinateSpaceMapping<Src, Dst>) -> TransformKey {
        match transform {
            CoordinateSpaceMapping::Local => {
                TransformKey::Local
            }
            CoordinateSpaceMapping::ScaleOffset(ref scale_offset) => {
                TransformKey::ScaleOffset {
                    scale_x: scale_offset.scale.x,
                    scale_y: scale_offset.scale.y,
                    offset_x: scale_offset.offset.x,
                    offset_y: scale_offset.offset.y,
                }
            }
            CoordinateSpaceMapping::Transform(ref m) => {
                TransformKey::Transform {
                    m: MatrixKey {
                        m: m.to_array(),
                    },
                }
            }
        }
    }
}

/// Unit for tile coordinates.
#[derive(Hash, Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct TileCoordinate;

// Geometry types for tile coordinates.
pub type TileOffset = Point2D<i32, TileCoordinate>;
// TileSize type is also used in used in lib.rs and cbindgen picks the wrong one when
// generating headers.
/// cbindgen:ignore
pub type TileSize = Size2D<i32, TileCoordinate>;
pub type TileRect = Rect<i32, TileCoordinate>;

/// The maximum number of compositor surfaces that are allowed per picture cache. This
/// is an arbitrary number that should be enough for common cases, but low enough to
/// prevent performance and memory usage drastically degrading in pathological cases.
const MAX_COMPOSITOR_SURFACES: usize = 4;

/// The size in device pixels of a normal cached tile.
pub const TILE_SIZE_DEFAULT: DeviceIntSize = DeviceIntSize {
    width: 1024,
    height: 512,
    _unit: marker::PhantomData,
};

/// The size in device pixels of a tile for horizontal scroll bars
pub const TILE_SIZE_SCROLLBAR_HORIZONTAL: DeviceIntSize = DeviceIntSize {
    width: 1024,
    height: 32,
    _unit: marker::PhantomData,
};

/// The size in device pixels of a tile for vertical scroll bars
pub const TILE_SIZE_SCROLLBAR_VERTICAL: DeviceIntSize = DeviceIntSize {
    width: 32,
    height: 1024,
    _unit: marker::PhantomData,
};

/// The maximum size per axis of a surface,
///  in WorldPixel coordinates.
const MAX_SURFACE_SIZE: f32 = 4096.0;
/// Maximum size of a compositor surface.
const MAX_COMPOSITOR_SURFACES_SIZE: f32 = 8192.0;

/// The maximum number of sub-dependencies (e.g. clips, transforms) we can handle
/// per-primitive. If a primitive has more than this, it will invalidate every frame.
const MAX_PRIM_SUB_DEPS: usize = u8::MAX as usize;

/// Used to get unique tile IDs, even when the tile cache is
/// destroyed between display lists / scenes.
static NEXT_TILE_ID: AtomicUsize = AtomicUsize::new(0);

// Clamp an i32 into the inclusive range [low, high].
fn clamp(value: i32, low: i32, high: i32) -> i32 {
    value.max(low).min(high)
}

// Clamp an f32 into the inclusive range [low, high].
fn clampf(value: f32, low: f32, high: f32) -> f32 {
    value.max(low).min(high)
}

/// Clamps the blur radius depending on scale factors.
fn clamp_blur_radius(blur_radius: f32, scale_factors: (f32, f32)) -> f32 {
    // Clamping must occur after scale factors are applied, but scale factors are not applied
    // until later on. To clamp the blur radius, we first apply the scale factors and then clamp
    // and finally revert the scale factors.

    // TODO: the clamping should be done on a per-axis basis, but WR currently only supports
    // having a single value for both x and y blur.
    let largest_scale_factor = f32::max(scale_factors.0, scale_factors.1);
    let scaled_blur_radius = blur_radius * largest_scale_factor;

    if scaled_blur_radius > MAX_BLUR_RADIUS {
        MAX_BLUR_RADIUS / largest_scale_factor
    } else {
        // Return the original blur radius to avoid any rounding errors
        blur_radius
    }
}

/// An index into the prims array in a TileDescriptor.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct PrimitiveDependencyIndex(pub u32);

/// Information about the state of a binding.
#[derive(Debug)]
pub struct BindingInfo<T> {
    /// The current value retrieved from dynamic scene properties.
    value: T,
    /// True if it was changed (or is new) since the last frame build.
    changed: bool,
}

/// Information stored in a tile descriptor for a binding.
#[derive(Debug, PartialEq, Clone, Copy)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum Binding<T> {
    /// A static value baked into the display list.
    Value(T),
    /// A dynamic property binding, identified by its id.
    Binding(PropertyBindingId),
}

impl<T> From<PropertyBinding<T>> for Binding<T> {
    fn from(binding: PropertyBinding<T>) -> Binding<T> {
        match binding {
            PropertyBinding::Binding(key, _) => Binding::Binding(key.id),
            PropertyBinding::Value(value) => Binding::Value(value),
        }
    }
}

pub type OpacityBinding = Binding<f32>;
pub type OpacityBindingInfo = BindingInfo<f32>;

pub type ColorBinding = Binding<ColorU>;
pub type ColorBindingInfo = BindingInfo<ColorU>;

/// A dependency for a transform is defined by the spatial node index + frame it was used
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct SpatialNodeKey {
    spatial_node_index: SpatialNodeIndex,
    frame_id: FrameId,
}

/// A helper for comparing spatial nodes between frames.
The comparisons
/// are done by-value, so that if the shape of the spatial node tree
/// changes, invalidations aren't done simply due to the spatial node
/// index changing between display lists.
struct SpatialNodeComparer {
    /// The root spatial node index of the tile cache
    ref_spatial_node_index: SpatialNodeIndex,
    /// Maintains a map of currently active transform keys
    spatial_nodes: FastHashMap<SpatialNodeKey, TransformKey>,
    /// A cache of recent comparisons between prev and current spatial nodes
    compare_cache: FastHashMap<(SpatialNodeKey, SpatialNodeKey), bool>,
    /// A set of frames that we need to retain spatial node entries for
    referenced_frames: FastHashSet<FrameId>,
}

impl SpatialNodeComparer {
    /// Construct a new comparer
    fn new() -> Self {
        SpatialNodeComparer {
            ref_spatial_node_index: ROOT_SPATIAL_NODE_INDEX,
            spatial_nodes: FastHashMap::default(),
            compare_cache: FastHashMap::default(),
            referenced_frames: FastHashSet::default(),
        }
    }

    /// Advance to the next frame
    fn next_frame(
        &mut self,
        ref_spatial_node_index: SpatialNodeIndex,
    ) {
        // Drop any node information for unreferenced frames, to ensure that the
        // hashmap doesn't grow indefinitely!
        let referenced_frames = &self.referenced_frames;
        self.spatial_nodes.retain(|key, _| {
            referenced_frames.contains(&key.frame_id)
        });

        // Update the root spatial node for this comparer
        self.ref_spatial_node_index = ref_spatial_node_index;
        self.compare_cache.clear();
        self.referenced_frames.clear();
    }

    /// Register a transform that is used, and build the transform key for it if new.
    fn register_used_transform(
        &mut self,
        spatial_node_index: SpatialNodeIndex,
        frame_id: FrameId,
        spatial_tree: &SpatialTree,
    ) {
        let key = SpatialNodeKey {
            spatial_node_index,
            frame_id,
        };

        // Only compute the transform key the first time this (node, frame)
        // pair is seen; subsequent registrations are no-ops.
        if let Entry::Vacant(entry) = self.spatial_nodes.entry(key) {
            entry.insert(
                get_transform_key(
                    spatial_node_index,
                    self.ref_spatial_node_index,
                    spatial_tree,
                )
            );
        }
    }

    /// Return true if the transforms for two given spatial nodes are considered equivalent
    fn are_transforms_equivalent(
        &mut self,
        prev_spatial_node_key: &SpatialNodeKey,
        curr_spatial_node_key: &SpatialNodeKey,
    ) -> bool {
        let key = (*prev_spatial_node_key, *curr_spatial_node_key);
        let spatial_nodes = &self.spatial_nodes;

        // Memoize the by-value comparison. Assumes both keys were previously
        // registered via register_used_transform (indexing panics otherwise).
        *self.compare_cache
            .entry(key)
            .or_insert_with(|| {
                let prev = &spatial_nodes[&prev_spatial_node_key];
                let curr = &spatial_nodes[&curr_spatial_node_key];
                curr == prev
            })
    }

    /// Ensure that the comparer won't GC any nodes for a given frame id
    fn retain_for_frame(&mut self, frame_id: FrameId) {
        self.referenced_frames.insert(frame_id);
    }
}

// Immutable context passed to picture cache tiles during pre_update
struct TilePreUpdateContext {
    /// Maps from picture cache coords -> world space coords.
    pic_to_world_mapper: SpaceMapper<PicturePixel, WorldPixel>,

    /// The fractional position of the picture cache, which may
    /// require invalidation of all tiles.
    fract_offset: PictureVector2D,
    device_fract_offset: DeviceVector2D,

    /// The optional background color of the picture cache instance
    background_color: Option<ColorF>,

    /// The visible part of the screen in world coords.
    global_screen_world_rect: WorldRect,

    /// Current size of tiles in picture units.
    tile_size: PictureSize,

    /// The current frame id for this picture cache
    frame_id: FrameId,
}

// Immutable context passed to picture cache tiles during post_update
struct TilePostUpdateContext<'a> {
    /// Maps from picture cache coords -> world space coords.
    pic_to_world_mapper: SpaceMapper<PicturePixel, WorldPixel>,

    /// Global scale factor from world -> device pixels.
    // (continuation of TilePostUpdateContext fields)
    global_device_pixel_scale: DevicePixelScale,

    /// The local clip rect (in picture space) of the entire picture cache
    local_clip_rect: PictureRect,

    /// The calculated backdrop information for this cache instance.
    backdrop: Option<BackdropInfo>,

    /// Information about opacity bindings from the picture cache.
    opacity_bindings: &'a FastHashMap<PropertyBindingId, OpacityBindingInfo>,

    /// Information about color bindings from the picture cache.
    color_bindings: &'a FastHashMap<PropertyBindingId, ColorBindingInfo>,

    /// Current size in device pixels of tiles for this cache
    current_tile_size: DeviceIntSize,

    /// The local rect of the overall picture cache
    local_rect: PictureRect,

    /// Pre-allocated z-id to assign to tiles during post_update.
    z_id: ZBufferId,

    /// If true, the scale factor of the root transform for this picture
    /// cache changed, so we need to invalidate the tile and re-render.
    invalidate_all: bool,
}

// Mutable state passed to picture cache tiles during post_update
struct TilePostUpdateState<'a> {
    /// Allow access to the texture cache for requesting tiles
    resource_cache: &'a mut ResourceCache,

    /// Current configuration and setup for compositing all the picture cache tiles in renderer.
    composite_state: &'a mut CompositeState,

    /// A cache of comparison results to avoid re-computation during invalidation.
    compare_cache: &'a mut FastHashMap<PrimitiveComparisonKey, PrimitiveCompareResult>,

    /// Information about transform node differences from last frame.
    spatial_node_comparer: &'a mut SpatialNodeComparer,
}

/// Information about the dependencies of a single primitive instance.
struct PrimitiveDependencyInfo {
    /// Unique content identifier of the primitive.
    prim_uid: ItemUid,

    /// The (conservative) clipped area in picture space this primitive occupies.
    prim_clip_box: PictureBox2D,

    /// Image keys this primitive depends on.
    images: SmallVec<[ImageDependency; 8]>,

    /// Opacity bindings this primitive depends on.
    opacity_bindings: SmallVec<[OpacityBinding; 4]>,

    /// Color binding this primitive depends on.
    color_binding: Option<ColorBinding>,

    /// Clips that this primitive depends on.
    clips: SmallVec<[ItemUid; 8]>,

    /// Spatial nodes referenced by the clip dependencies of this primitive.
    spatial_nodes: SmallVec<[SpatialNodeIndex; 4]>,
}

impl PrimitiveDependencyInfo {
    /// Construct dependency info for a new primitive.
    fn new(
        prim_uid: ItemUid,
        prim_clip_box: PictureBox2D,
    ) -> Self {
        PrimitiveDependencyInfo {
            prim_uid,
            images: SmallVec::new(),
            opacity_bindings: SmallVec::new(),
            color_binding: None,
            prim_clip_box,
            clips: SmallVec::new(),
            spatial_nodes: SmallVec::new(),
        }
    }
}

/// A stable ID for a given tile, to help debugging. These are also used
/// as unique identifiers for tile surfaces when using a native compositor.
#[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Ord, Eq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct TileId(pub usize);

/// A descriptor for the kind of texture that a picture cache tile will
/// be drawn into.
#[derive(Debug)]
pub enum SurfaceTextureDescriptor {
    /// When using the WR compositor, the tile is drawn into an entry
    /// in the WR texture cache.
    TextureCache {
        handle: TextureCacheHandle
    },
    /// When using an OS compositor, the tile is drawn into a native
    /// surface identified by arbitrary id.
    Native {
        /// The arbitrary id of this tile.
        id: Option<NativeTileId>,
    },
}

/// This is the same as a `SurfaceTextureDescriptor` but has been resolved
/// into a texture cache handle (if appropriate) that can be used by the
/// batching and compositing code in the renderer.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum ResolvedSurfaceTexture {
    TextureCache {
        /// The texture ID to draw to.
        texture: TextureSource,
    },
    Native {
        /// The arbitrary id of this tile.
        // (continuation of ResolvedSurfaceTexture::Native fields)
        id: NativeTileId,
        /// The size of the tile in device pixels.
        size: DeviceIntSize,
    }
}

impl SurfaceTextureDescriptor {
    /// Create a resolved surface texture for this descriptor
    pub fn resolve(
        &self,
        resource_cache: &ResourceCache,
        size: DeviceIntSize,
    ) -> ResolvedSurfaceTexture {
        match self {
            SurfaceTextureDescriptor::TextureCache { handle } => {
                // Look up the current texture cache entry for this handle.
                let cache_item = resource_cache.texture_cache.get(handle);

                ResolvedSurfaceTexture::TextureCache {
                    texture: cache_item.texture_id,
                }
            }
            SurfaceTextureDescriptor::Native { id } => {
                ResolvedSurfaceTexture::Native {
                    id: id.expect("bug: native surface not allocated"),
                    size,
                }
            }
        }
    }
}

/// The backing surface for this tile.
#[derive(Debug)]
pub enum TileSurface {
    Texture {
        /// Descriptor for the surface that this tile draws into.
        descriptor: SurfaceTextureDescriptor,
    },
    Color {
        color: ColorF,
    },
    Clear,
}

impl TileSurface {
    // Short static name for each variant, used for debug printing.
    fn kind(&self) -> &'static str {
        match *self {
            TileSurface::Color { .. } => "Color",
            TileSurface::Texture { .. } => "Texture",
            TileSurface::Clear => "Clear",
        }
    }
}

/// Optional extra information returned by is_same when
/// logging is enabled.
#[derive(Debug, Copy, Clone, PartialEq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum CompareHelperResult<T> {
    /// Primitives match
    Equal,
    /// Counts differ
    Count {
        prev_count: u8,
        curr_count: u8,
    },
    /// Sentinel
    Sentinel,
    /// Two items are not equal
    NotEqual {
        prev: T,
        curr: T,
    },
    /// User callback returned true on item
    PredicateTrue {
        curr: T
    },
}

/// The result of a primitive dependency comparison. Size is a u8
/// since this is a hot path in the code, and keeping the data small
/// is a performance win.
#[derive(Debug, Copy, Clone, PartialEq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[repr(u8)]
pub enum PrimitiveCompareResult {
    /// Primitives match
    Equal,
    /// Something in the PrimitiveDescriptor was different
    Descriptor,
    /// The clip node content or spatial node changed
    Clip,
    /// The value of the transform changed
    Transform,
    /// An image dependency was dirty
    Image,
    /// The value of an opacity binding changed
    OpacityBinding,
    /// The value of a color binding changed
    ColorBinding,
}

/// A more detailed version of PrimitiveCompareResult used when
/// debug logging is enabled.
#[derive(Debug, Copy, Clone, PartialEq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum PrimitiveCompareResultDetail {
    /// Primitives match
    Equal,
    /// Something in the PrimitiveDescriptor was different
    Descriptor {
        old: PrimitiveDescriptor,
        new: PrimitiveDescriptor,
    },
    /// The clip node content or spatial node changed
    Clip {
        detail: CompareHelperResult<ItemUid>,
    },
    /// The value of the transform changed
    Transform {
        detail: CompareHelperResult<SpatialNodeKey>,
    },
    /// An image dependency was dirty
    Image {
        detail: CompareHelperResult<ImageDependency>,
    },
    /// The value of an opacity binding changed
    OpacityBinding {
        detail: CompareHelperResult<OpacityBinding>,
    },
    /// The value of a color binding changed
    ColorBinding {
        detail: CompareHelperResult<ColorBinding>,
    },
}

/// Debugging information about why a tile was invalidated
#[derive(Debug,Clone)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum InvalidationReason {
    /// The fractional offset changed
    FractionalOffset {
        old: DeviceVector2D,
        new: DeviceVector2D,
    },
    /// The background color changed
    BackgroundColor {
        old: Option<ColorF>,
        new: Option<ColorF>,
    },
    /// The opaque state of the backing native surface changed
    SurfaceOpacityChanged{ became_opaque: bool },
    /// There was no backing texture (evicted or never rendered)
    NoTexture,
    /// There was no backing native surface (never rendered, or recreated)
    NoSurface,
    /// The primitive count in the dependency list was different
    PrimCount {
        old: Option<Vec<ItemUid>>,
        new: Option<Vec<ItemUid>>,
    },
    /// The content of one of the primitives was different
    Content {
        /// What changed in the primitive that was different
        prim_compare_result: PrimitiveCompareResult,
        prim_compare_result_detail: Option<PrimitiveCompareResultDetail>,
    },
    // The compositor type changed
    CompositorKindChanged,
    // The valid region of the tile changed
    ValidRectChanged,
    // The overall scale of the picture cache changed
    ScaleChanged,
}

/// A minimal subset of Tile for debug capturing
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct TileSerializer {
    pub rect: PictureRect,
    pub current_descriptor: TileDescriptor,
    pub device_fract_offset: DeviceVector2D,
    pub id: TileId,
    pub root: TileNode,
    pub background_color: Option<ColorF>,
    pub invalidation_reason: Option<InvalidationReason>
}

/// A minimal subset of TileCacheInstance for debug capturing
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct TileCacheInstanceSerializer {
    pub slice: usize,
    pub tiles: FastHashMap<TileOffset, TileSerializer>,
    pub background_color: Option<ColorF>,
    pub fract_offset: PictureVector2D,
}

/// Information about a cached tile.
pub struct Tile {
    /// The grid position of this tile within the picture cache
    pub tile_offset: TileOffset,
    /// The current world rect of this tile.
    pub world_tile_rect: WorldRect,
    /// The current local rect of this tile.
    pub local_tile_rect: PictureRect,
    /// Same as local_tile_rect, but in min/max form as an optimization
    pub local_tile_box: PictureBox2D,
    /// The picture space dirty rect for this tile.
    local_dirty_rect: PictureRect,
    /// The device space dirty rect for this tile.
    /// TODO(gw): We have multiple dirty rects available due to the quadtree above. In future,
    ///           expose these as multiple dirty rects, which will help in some cases.
    pub device_dirty_rect: DeviceRect,
    /// Device space rect that contains valid pixels region of this tile.
    pub device_valid_rect: DeviceRect,
    /// Uniquely describes the content of this tile, in a way that can be
    /// (reasonably) efficiently hashed and compared.
    pub current_descriptor: TileDescriptor,
    /// The content descriptor for this tile from the previous frame.
    pub prev_descriptor: TileDescriptor,
    /// Handle to the backing surface for this tile.
    pub surface: Option<TileSurface>,
    /// If true, this tile is marked valid, and the existing texture
    /// cache handle can be used. Tiles are invalidated during the
    /// build_dirty_regions method.
    pub is_valid: bool,
    /// If true, this tile intersects with the currently visible screen
    /// rect, and will be drawn.
    pub is_visible: bool,
    /// The current fractional offset of the cache transform root. If this changes,
    /// all tiles need to be invalidated and redrawn, since snapping differences are
    /// likely to occur.
    device_fract_offset: DeviceVector2D,
    /// The tile id is stable between display lists and / or frames,
    /// if the tile is retained. Useful for debugging tile evictions.
    pub id: TileId,
    /// If true, the tile was determined to be opaque, which means blending
    /// can be disabled when drawing it.
    pub is_opaque: bool,
    /// Root node of the quadtree dirty rect tracker.
    root: TileNode,
    /// The last rendered background color on this tile.
    background_color: Option<ColorF>,
    /// The first reason the tile was invalidated this frame.
    invalidation_reason: Option<InvalidationReason>,
    /// The local space valid rect for all primitives that affect this tile.
    local_valid_rect: PictureBox2D,
    /// z-buffer id for this tile
    pub z_id: ZBufferId,
    /// The last frame this tile had its dependencies updated (dependency updating is
    /// skipped if a tile is off-screen).
    // Final field of the Tile struct (see field docs above in the struct body).
    pub last_updated_frame_id: FrameId,
}

impl Tile {
    /// Construct a new, invalid tile.
    fn new(tile_offset: TileOffset) -> Self {
        // Allocate a globally unique, monotonically increasing tile id.
        let id = TileId(NEXT_TILE_ID.fetch_add(1, Ordering::Relaxed));

        Tile {
            tile_offset,
            local_tile_rect: PictureRect::zero(),
            local_tile_box: PictureBox2D::zero(),
            world_tile_rect: WorldRect::zero(),
            device_valid_rect: DeviceRect::zero(),
            local_dirty_rect: PictureRect::zero(),
            device_dirty_rect: DeviceRect::zero(),
            surface: None,
            current_descriptor: TileDescriptor::new(),
            prev_descriptor: TileDescriptor::new(),
            is_valid: false,
            is_visible: false,
            device_fract_offset: DeviceVector2D::zero(),
            id,
            is_opaque: false,
            root: TileNode::new_leaf(Vec::new()),
            background_color: None,
            invalidation_reason: None,
            local_valid_rect: PictureBox2D::zero(),
            z_id: ZBufferId::invalid(),
            last_updated_frame_id: FrameId::INVALID,
        }
    }

    /// Print debug information about this tile to a tree printer.
    fn print(&self, pt: &mut dyn PrintTreePrinter) {
        pt.new_level(format!("Tile {:?}", self.id));
        pt.add_item(format!("local_tile_rect: {:?}", self.local_tile_rect));
        pt.add_item(format!("device_fract_offset: {:?}", self.device_fract_offset));
        pt.add_item(format!("background_color: {:?}", self.background_color));
        pt.add_item(format!("invalidation_reason: {:?}", self.invalidation_reason));
        self.current_descriptor.print(pt);
        pt.end_level();
    }

    /// Check if the content of the previous and current tile descriptors match
    fn update_dirty_rects(
        &mut self,
        ctx: &TilePostUpdateContext,
        state: &mut TilePostUpdateState,
        invalidation_reason: &mut Option<InvalidationReason>,
        frame_context: &FrameVisibilityContext,
    ) -> PictureRect {
        // Compare prev/current descriptors primitive-by-primitive, using the
        // cache's binding maps and transform comparer to resolve dependencies.
        let mut prim_comparer = PrimitiveComparer::new(
            &self.prev_descriptor,
            &self.current_descriptor,
            state.resource_cache,
            state.spatial_node_comparer,
            ctx.opacity_bindings,
            ctx.color_bindings,
        );

        // The quadtree accumulates the dirty region of any changed primitives.
        let mut dirty_rect = PictureBox2D::zero();
        self.root.update_dirty_rects(
            &self.prev_descriptor.prims,
            &self.current_descriptor.prims,
            &mut prim_comparer,
            &mut dirty_rect,
            state.compare_cache,
            invalidation_reason,
            frame_context,
        );

        dirty_rect.to_rect()
    }

    /// Invalidate a tile based on change in content. This
    /// must be called even if the tile is not currently
    /// visible on screen. We might be able to improve this
    /// later by changing how ComparableVec is used.
    fn update_content_validity(
        &mut self,
        ctx: &TilePostUpdateContext,
        state: &mut TilePostUpdateState,
        frame_context: &FrameVisibilityContext,
    ) {
        // Check if the contents of the primitives, clips, and
        // other dependencies are the same.
        state.compare_cache.clear();
        let mut invalidation_reason = None;
        let dirty_rect = self.update_dirty_rects(
            ctx,
            state,
            &mut invalidation_reason,
            frame_context,
        );
        if !dirty_rect.is_empty() {
            self.invalidate(
                Some(dirty_rect),
                invalidation_reason.expect("bug: no invalidation_reason"),
            );
        }
        if ctx.invalidate_all {
            // The root transform scale changed - everything must be redrawn.
            self.invalidate(None, InvalidationReason::ScaleChanged);
        }
        // TODO(gw): We can avoid invalidating the whole tile in some cases here,
        //           but it should be a fairly rare invalidation case.
        if self.current_descriptor.local_valid_rect != self.prev_descriptor.local_valid_rect {
            self.invalidate(None, InvalidationReason::ValidRectChanged);
            state.composite_state.dirty_rects_are_valid = false;
        }
    }

    /// Invalidate this tile. If `invalidation_rect` is None, the entire
    /// tile is invalidated.
    fn invalidate(
        &mut self,
        invalidation_rect: Option<PictureRect>,
        reason: InvalidationReason,
    ) {
        self.is_valid = false;

        match invalidation_rect {
            Some(rect) => {
                // Grow the accumulated dirty region by the new rect.
                self.local_dirty_rect = self.local_dirty_rect.union(&rect);
            }
            None => {
                // Whole-tile invalidation.
                self.local_dirty_rect = self.local_tile_rect;
            }
        }

        // Only the *first* invalidation reason per frame is recorded.
        if self.invalidation_reason.is_none() {
            self.invalidation_reason = Some(reason);
        }
    }

    /// Called during pre_update of a tile cache instance. Allows the
    /// tile to setup state before primitive dependency calculations.
    // Reset per-frame tile state and apply whole-tile invalidation checks
    // (fractional offset / background color) before dependencies are rebuilt.
    fn pre_update(
        &mut self,
        ctx: &TilePreUpdateContext,
    ) {
        // Ensure each tile is offset by the appropriate amount from the
        // origin, such that the content origin will be a whole number and
        // the snapping will be consistent.
        self.local_tile_rect = PictureRect::new(
            PicturePoint::new(
                self.tile_offset.x as f32 * ctx.tile_size.width + ctx.fract_offset.x,
                self.tile_offset.y as f32 * ctx.tile_size.height + ctx.fract_offset.y,
            ),
            ctx.tile_size,
        );
        self.local_tile_box = PictureBox2D::new(
            self.local_tile_rect.origin,
            self.local_tile_rect.bottom_right(),
        );
        self.local_valid_rect = PictureBox2D::zero();
        self.invalidation_reason = None;

        self.world_tile_rect = ctx.pic_to_world_mapper
            .map(&self.local_tile_rect)
            .expect("bug: map local tile rect");

        // Check if this tile is currently on screen.
        self.is_visible = self.world_tile_rect.intersects(&ctx.global_screen_world_rect);

        // If the tile isn't visible, early exit, skipping the normal set up to
        // validate dependencies. Instead, we will only compare the current tile
        // dependencies the next time it comes into view.
        if !self.is_visible {
            return;
        }

        // We may need to rerender if glyph subpixel positions have changed. Note
        // that we update the tile fract offset itself after we have completed
        // invalidation. This allows for other whole tile invalidation cases to
        // update the fract offset appropriately.
        let fract_delta = self.device_fract_offset - ctx.device_fract_offset;
        let fract_changed = fract_delta.x.abs() > 0.01 || fract_delta.y.abs() > 0.01;
        if fract_changed {
            self.invalidate(None, InvalidationReason::FractionalOffset {
                old: self.device_fract_offset,
                new: ctx.device_fract_offset });
        }

        if ctx.background_color != self.background_color {
            self.invalidate(None, InvalidationReason::BackgroundColor {
                old: self.background_color,
                new: ctx.background_color });
            self.background_color = ctx.background_color;
        }

        // Clear any dependencies so that when we rebuild them we
        // can compare if the tile has the same content.
        mem::swap(
            &mut self.current_descriptor,
            &mut self.prev_descriptor,
        );
        self.current_descriptor.clear();
        self.root.clear(self.local_tile_rect.to_box2d());

        // Since this tile is determined to be visible, it will get updated
        // dependencies, so update the frame id we are storing dependencies for.
        self.last_updated_frame_id = ctx.frame_id;
    }

    /// Add dependencies for a given primitive to this tile.
    fn add_prim_dependency(
        &mut self,
        info: &PrimitiveDependencyInfo,
    ) {
        // If this tile isn't currently visible, we don't want to update the dependencies
        // for this tile, as an optimization, since it won't be drawn anyway.
        if !self.is_visible {
            return;
        }

        // Incorporate the bounding rect of the primitive in the local valid rect
        // for this tile. This is used to minimize the size of the scissor rect
        // during rasterization and the draw rect during composition of partial tiles.
        self.local_valid_rect = self.local_valid_rect.union(&info.prim_clip_box);

        // Include any image keys this tile depends on.
        self.current_descriptor.images.extend_from_slice(&info.images);

        // Include any opacity bindings this primitive depends on.
        self.current_descriptor.opacity_bindings.extend_from_slice(&info.opacity_bindings);

        // Include any clip nodes that this primitive depends on.
        self.current_descriptor.clips.extend_from_slice(&info.clips);

        // Include any transforms that this primitive depends on.
        for spatial_node_index in &info.spatial_nodes {
            self.current_descriptor.transforms.push(
                SpatialNodeKey {
                    spatial_node_index: *spatial_node_index,
                    frame_id: self.last_updated_frame_id,
                }
            );
        }

        // Include any color bindings this primitive depends on.
        if info.color_binding.is_some() {
            self.current_descriptor.color_bindings.insert(
                self.current_descriptor.color_bindings.len(), info.color_binding.unwrap());
        }

        // TODO(gw): The prim_clip_rect can be impacted by the clip rect of the display port,
        //           which can cause invalidations when a new display list with changed
        //           display port is received.
To work around this, clamp the prim clip rect
        //           to the tile boundaries - if the clip hasn't affected the tile, then the
        //           changed clip can't affect the content of the primitive on this tile.
        //           In future, we could consider supplying the display port clip from Gecko
        //           in a different way (e.g. as a scroll frame clip) which still provides
        //           the desired clip for checkerboarding, but doesn't require this extra
        //           work below.
        // TODO(gw): This is a hot part of the code - we could probably optimize further by:
        //           - Using min/max instead of clamps below (if we guarantee the rects are well formed)
        let tile_p0 = self.local_tile_box.min;
        let tile_p1 = self.local_tile_box.max;

        let prim_clip_box = PictureBox2D::new(
            PicturePoint::new(
                clampf(info.prim_clip_box.min.x, tile_p0.x, tile_p1.x),
                clampf(info.prim_clip_box.min.y, tile_p0.y, tile_p1.y),
            ),
            PicturePoint::new(
                clampf(info.prim_clip_box.max.x, tile_p0.x, tile_p1.x),
                clampf(info.prim_clip_box.max.y, tile_p0.y, tile_p1.y),
            ),
        );

        // Update the tile descriptor, used for tile comparison during scene swaps.
        let prim_index = PrimitiveDependencyIndex(self.current_descriptor.prims.len() as u32);

        // We know that the casts below will never overflow because the array lengths are
        // truncated to MAX_PRIM_SUB_DEPS during update_prim_dependencies.
        debug_assert!(info.spatial_nodes.len() <= MAX_PRIM_SUB_DEPS);
        debug_assert!(info.clips.len() <= MAX_PRIM_SUB_DEPS);
        debug_assert!(info.images.len() <= MAX_PRIM_SUB_DEPS);
        debug_assert!(info.opacity_bindings.len() <= MAX_PRIM_SUB_DEPS);

        self.current_descriptor.prims.push(PrimitiveDescriptor {
            prim_uid: info.prim_uid,
            prim_clip_box,
            transform_dep_count: info.spatial_nodes.len() as u8,
            clip_dep_count: info.clips.len() as u8,
            image_dep_count: info.images.len() as u8,
            opacity_binding_dep_count: info.opacity_bindings.len() as u8,
            color_binding_dep_count: if info.color_binding.is_some() { 1 } else { 0 } as u8,
        });

        // Add this primitive to the dirty rect quadtree.
        self.root.add_prim(prim_index, &info.prim_clip_box);
    }

    /// Called during tile cache instance post_update. Allows invalidation and dirty
    /// rect calculation after primitive dependencies have been updated.
    fn post_update(
        &mut self,
        ctx: &TilePostUpdateContext,
        state: &mut TilePostUpdateState,
        frame_context: &FrameVisibilityContext,
    ) -> bool {
        // Register the frame id of this tile with the spatial node comparer, to ensure
        // that it doesn't GC any spatial nodes from the comparer that are referenced
        // by this tile. Must be done before we early exit below, so that we retain
        // spatial node info even for tiles that are currently not visible.
        state.spatial_node_comparer.retain_for_frame(self.last_updated_frame_id);

        // If tile is not visible, just early out from here - we don't update dependencies
        // so don't want to invalidate, merge, split etc. The tile won't need to be drawn
        // (and thus updated / invalidated) until it is on screen again.
        if !self.is_visible {
            return false;
        }

        // Calculate the overall valid rect for this tile.
        self.current_descriptor.local_valid_rect = self.local_valid_rect.to_rect();

        // TODO(gw): In theory, the local tile rect should always have an
        //           intersection with the overall picture rect. In practice,
        //           due to some accuracy issues with how fract_offset (and
        //           fp accuracy) are used in the calling method, this isn't
        //           always true. In this case, it's safe to set the local
        //           valid rect to zero, which means it will be clipped out
        //           and not affect the scene. In future, we should fix the
        //           accuracy issue above, so that this assumption holds, but
        //           it shouldn't have any noticeable effect on performance
        //           or memory usage (textures should never get allocated).
        // Clip the valid rect to both the overall picture rect and this tile's rect.
        self.current_descriptor.local_valid_rect = self.local_tile_rect
            .intersection(&ctx.local_rect)
            .and_then(|r| r.intersection(&self.current_descriptor.local_valid_rect))
            .unwrap_or_else(PictureRect::zero);

        // The device_valid_rect is referenced during `update_content_validity` so it
        // must be updated here first.
        let world_valid_rect = ctx.pic_to_world_mapper
            .map(&self.current_descriptor.local_valid_rect)
            .expect("bug: map local valid rect");

        // The device rect is guaranteed to be aligned on a device pixel - the round
        // is just to deal with float accuracy. However, the valid rect is not
        // always aligned to a device pixel. To handle this, round out to get all
        // required pixels, and intersect with the tile device rect.
        let device_rect = (self.world_tile_rect * ctx.global_device_pixel_scale).round();
        self.device_valid_rect = (world_valid_rect * ctx.global_device_pixel_scale)
            .round_out()
            .intersection(&device_rect)
            .unwrap_or_else(DeviceRect::zero);

        // Invalidate the tile based on the content changing.
        self.update_content_validity(ctx, state, frame_context);

        // If there are no primitives there is no need to draw or cache it.
        if self.current_descriptor.prims.is_empty() {
            // If there is a native compositor surface allocated for this (now empty) tile
            // it must be freed here, otherwise the stale tile with previous contents will
            // be composited. If the tile subsequently gets new primitives added to it, the
            // surface will be re-allocated when it's added to the composite draw list.
            if let Some(TileSurface::Texture { descriptor: SurfaceTextureDescriptor::Native { mut id, .. }, .. }) = self.surface.take() {
                if let Some(id) = id.take() {
                    state.resource_cache.destroy_compositor_tile(id);
                }
            }
            self.is_visible = false;
            return false;
        }

        // Check if this tile can be considered opaque. Opacity state must be updated only
        // after all early out checks have been performed. Otherwise, we might miss updating
        // the native surface next time this tile becomes visible.
        let clipped_rect = self.current_descriptor.local_valid_rect
            .intersection(&ctx.local_clip_rect)
            .unwrap_or_else(PictureRect::zero);

        // Opaque if either the background color is fully opaque, or the backdrop's
        // opaque rect covers the whole clipped valid area of this tile.
        let has_opaque_bg_color = self.background_color.map_or(false, |c| c.a >= 1.0);
        let has_opaque_backdrop = ctx.backdrop.map_or(false, |b| b.opaque_rect.contains_rect(&clipped_rect));
        let is_opaque = has_opaque_bg_color || has_opaque_backdrop;

        // Set the correct z_id for this tile
        self.z_id = ctx.z_id;

        if is_opaque != self.is_opaque {
            // If opacity changed, the native compositor surface and all tiles get invalidated.
            // (this does nothing if not using native compositor mode).
            // TODO(gw): This property probably changes very rarely, so it is OK to invalidate
            //           everything in this case. If it turns out that this isn't true, we could
            //           consider other options, such as per-tile opacity (natively supported
            //           on CoreAnimation, and supported if backed by non-virtual surfaces in
            //           DirectComposition).
            if let Some(TileSurface::Texture { descriptor: SurfaceTextureDescriptor::Native { ref mut id, .. }, .. }) = self.surface {
                if let Some(id) = id.take() {
                    state.resource_cache.destroy_compositor_tile(id);
                }
            }

            // Invalidate the entire tile to force a redraw.
            self.invalidate(None, InvalidationReason::SurfaceOpacityChanged { became_opaque: is_opaque });
            self.is_opaque = is_opaque;
        }

        // Check if the selected composite mode supports dirty rect updates. For Draw composite
        // mode, we can always update the content with smaller dirty rects, unless there is a
        // driver bug to workaround. For native composite mode, we can only use dirty rects if
        // the compositor supports partial surface updates.
        let (supports_dirty_rects, supports_simple_prims) = match state.composite_state.compositor_kind {
            CompositorKind::Draw { .. } => {
                (frame_context.config.gpu_supports_render_target_partial_update, true)
            }
            CompositorKind::Native { max_update_rects, .. } => {
                (max_update_rects > 0, false)
            }
        };

        // TODO(gw): Consider using smaller tiles and/or tile splits for
        //           native compositors that don't support dirty rects.
        if supports_dirty_rects {
            // Only allow splitting for normal content sized tiles
            if ctx.current_tile_size == state.resource_cache.texture_cache.default_picture_tile_size() {
                let max_split_level = 3;

                // Consider splitting / merging dirty regions
                self.root.maybe_merge_or_split(
                    0,
                    &self.current_descriptor.prims,
                    max_split_level,
                );
            }
        }

        // The dirty rect will be set correctly by now. If the underlying platform
        // doesn't support partial updates, and this tile isn't valid, force the dirty
        // rect to be the size of the entire tile.
        if !self.is_valid && !supports_dirty_rects {
            self.local_dirty_rect = self.local_tile_rect;
        }

        // See if this tile is a simple color, in which case we can just draw
        // it as a rect, and avoid allocating a texture surface and drawing it.
        // TODO(gw): Initial native compositor interface doesn't support simple
        //           color tiles. We can definitely support this in DC, so this
        //           should be added as a follow up.
        let is_simple_prim =
            ctx.backdrop.map_or(false, |b| b.kind.is_some()) &&
            self.current_descriptor.prims.len() == 1 &&
            self.is_opaque &&
            supports_simple_prims;

        // Set up the backing surface for this tile.
        let surface = if is_simple_prim {
            // If we determine the tile can be represented by a color, set the
            // surface unconditionally (this will drop any previously used
            // texture cache backing surface).
            match ctx.backdrop.unwrap().kind {
                Some(BackdropKind::Color { color }) => {
                    TileSurface::Color {
                        color,
                    }
                }
                Some(BackdropKind::Clear) => {
                    TileSurface::Clear
                }
                None => {
                    // This should be prevented by the is_simple_prim check above.
                    unreachable!();
                }
            }
        } else {
            // If this tile will be backed by a surface, we want to retain
            // the texture handle from the previous frame, if possible. If
            // the tile was previously a color, or not set, then just set
            // up a new texture cache handle.
match self.surface.take() { Some(TileSurface::Texture { descriptor }) => { // Reuse the existing descriptor and vis mask TileSurface::Texture { descriptor, } } Some(TileSurface::Color { .. }) | Some(TileSurface::Clear) | None => { // This is the case where we are constructing a tile surface that // involves drawing to a texture. Create the correct surface // descriptor depending on the compositing mode that will read // the output. let descriptor = match state.composite_state.compositor_kind { CompositorKind::Draw { .. } => { // For a texture cache entry, create an invalid handle that // will be allocated when update_picture_cache is called. SurfaceTextureDescriptor::TextureCache { handle: TextureCacheHandle::invalid(), } } CompositorKind::Native { .. } => { // Create a native surface surface descriptor, but don't allocate // a surface yet. The surface is allocated *after* occlusion // culling occurs, so that only visible tiles allocate GPU memory. SurfaceTextureDescriptor::Native { id: None, } } }; TileSurface::Texture { descriptor, } } } }; // Store the current surface backing info for use during batching. self.surface = Some(surface); true } } /// Defines a key that uniquely identifies a primitive instance. #[derive(Debug, Copy, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct PrimitiveDescriptor { /// Uniquely identifies the content of the primitive template. pub prim_uid: ItemUid, /// The clip rect for this primitive. Included here in /// dependencies since there is no entry in the clip chain /// dependencies for the local clip rect. pub prim_clip_box: PictureBox2D, /// The number of extra dependencies that this primitive has. 
    transform_dep_count: u8,
    image_dep_count: u8,
    opacity_binding_dep_count: u8,
    clip_dep_count: u8,
    color_binding_dep_count: u8,
}

impl PartialEq for PrimitiveDescriptor {
    fn eq(&self, other: &Self) -> bool {
        // Small tolerance so float noise in the clip box does not cause
        // spurious tile invalidation.
        const EPSILON: f32 = 0.001;

        if self.prim_uid != other.prim_uid {
            return false;
        }

        if !self.prim_clip_box.min.x.approx_eq_eps(&other.prim_clip_box.min.x, &EPSILON) {
            return false;
        }
        if !self.prim_clip_box.min.y.approx_eq_eps(&other.prim_clip_box.min.y, &EPSILON) {
            return false;
        }
        if !self.prim_clip_box.max.x.approx_eq_eps(&other.prim_clip_box.max.x, &EPSILON) {
            return false;
        }
        if !self.prim_clip_box.max.y.approx_eq_eps(&other.prim_clip_box.max.y, &EPSILON) {
            return false;
        }

        // NOTE(review): the *_dep_count fields are not part of equality here;
        // presumably the sub-dependency lists are compared separately via
        // CompareHelper — confirm against the invalidation code.
        true
    }
}

/// A small helper to compare two arrays of primitive dependencies.
struct CompareHelper<'a, T> where T: Copy {
    // Cursors into the current / previous dependency arrays.
    offset_curr: usize,
    offset_prev: usize,
    curr_items: &'a [T],
    prev_items: &'a [T],
}

impl<'a, T> CompareHelper<'a, T> where T: Copy + PartialEq {
    /// Construct a new compare helper for a current / previous set of dependency information.
    fn new(
        prev_items: &'a [T],
        curr_items: &'a [T],
    ) -> Self {
        CompareHelper {
            offset_curr: 0,
            offset_prev: 0,
            curr_items,
            prev_items,
        }
    }

    /// Reset the current position in the dependency array to the start
    fn reset(&mut self) {
        self.offset_prev = 0;
        self.offset_curr = 0;
    }

    /// Test if two sections of the dependency arrays are the same, by checking both
    /// item equality, and a user closure to see if the content of the item changed.
    ///
    /// The closure `f` receives `(prev, curr)` and returns true when the pair is
    /// considered equal; the first pair for which it returns false fails the
    /// comparison. If `opt_detail` is provided, the outcome is recorded in it.
    fn is_same<F>(
        &self,
        prev_count: u8,
        curr_count: u8,
        mut f: F,
        opt_detail: Option<&mut CompareHelperResult<T>>,
    ) -> bool where F: FnMut(&T, &T) -> bool {
        // If the number of items is different, trivial reject.
        if prev_count != curr_count {
            if let Some(detail) = opt_detail {
                *detail = CompareHelperResult::Count{ prev_count, curr_count };
            }
            return false;
        }
        // If both counts are 0, then no need to check these dependencies.
        if curr_count == 0 {
            if let Some(detail) = opt_detail {
                *detail = CompareHelperResult::Equal;
            }
            return true;
        }
        // If both counts are u8::MAX, this is a sentinel that we can't compare these
        // deps, so just trivial reject.
        if curr_count as usize == MAX_PRIM_SUB_DEPS {
            if let Some(detail) = opt_detail {
                *detail = CompareHelperResult::Sentinel;
            }
            return false;
        }

        let end_prev = self.offset_prev + prev_count as usize;
        let end_curr = self.offset_curr + curr_count as usize;

        let curr_items = &self.curr_items[self.offset_curr .. end_curr];
        let prev_items = &self.prev_items[self.offset_prev .. end_prev];

        for (curr, prev) in curr_items.iter().zip(prev_items.iter()) {
            if !f(prev, curr) {
                if let Some(detail) = opt_detail {
                    // Records the current item that failed the comparison.
                    *detail = CompareHelperResult::PredicateTrue{ curr: *curr };
                }
                return false;
            }
        }

        if let Some(detail) = opt_detail {
            *detail = CompareHelperResult::Equal;
        }
        true
    }

    // Advance the prev dependency array by a given amount
    fn advance_prev(&mut self, count: u8) {
        self.offset_prev += count as usize;
    }

    // Advance the current dependency array by a given amount
    fn advance_curr(&mut self, count: u8) {
        self.offset_curr += count as usize;
    }
}

/// Uniquely describes the content of this tile, in a way that can be
/// (reasonably) efficiently hashed and compared.
#[cfg_attr(any(feature="capture",feature="replay"), derive(Clone))]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct TileDescriptor {
    /// List of primitive instance unique identifiers. The uid is guaranteed
    /// to uniquely describe the content of the primitive template, while
    /// the other parameters describe the clip chain and instance params.
    pub prims: Vec<PrimitiveDescriptor>,

    /// List of clip node descriptors.
    clips: Vec<ItemUid>,

    /// List of image keys that this tile depends on.
    images: Vec<ImageDependency>,

    /// The set of opacity bindings that this tile depends on.
    // TODO(gw): Ugh, get rid of all opacity binding support!
    opacity_bindings: Vec<OpacityBinding>,

    /// List of the effects of transforms that we care about
    /// tracking for this tile.
    transforms: Vec<SpatialNodeKey>,

    /// Picture space rect that contains valid pixels region of this tile.
    local_valid_rect: PictureRect,

    /// List of the effects of color that we care about
    /// tracking for this tile.
    color_bindings: Vec<ColorBinding>,
}

impl TileDescriptor {
    /// Construct an empty descriptor (no dependencies, zero valid rect).
    fn new() -> Self {
        TileDescriptor {
            prims: Vec::new(),
            clips: Vec::new(),
            opacity_bindings: Vec::new(),
            images: Vec::new(),
            transforms: Vec::new(),
            local_valid_rect: PictureRect::zero(),
            color_bindings: Vec::new(),
        }
    }

    /// Print debug information about this tile descriptor to a tree printer.
    /// Debug aid only; empty dependency lists are skipped.
    fn print(&self, pt: &mut dyn PrintTreePrinter) {
        pt.new_level("current_descriptor".to_string());

        pt.new_level("prims".to_string());
        for prim in &self.prims {
            pt.new_level(format!("prim uid={}", prim.prim_uid.get_uid()));
            pt.add_item(format!("clip: p0={},{} p1={},{}",
                prim.prim_clip_box.min.x,
                prim.prim_clip_box.min.y,
                prim.prim_clip_box.max.x,
                prim.prim_clip_box.max.y,
            ));
            pt.add_item(format!("deps: t={} i={} o={} c={} color={}",
                prim.transform_dep_count,
                prim.image_dep_count,
                prim.opacity_binding_dep_count,
                prim.clip_dep_count,
                prim.color_binding_dep_count,
            ));
            pt.end_level();
        }
        pt.end_level();

        if !self.clips.is_empty() {
            pt.new_level("clips".to_string());
            for clip in &self.clips {
                pt.new_level(format!("clip uid={}", clip.get_uid()));
                pt.end_level();
            }
            pt.end_level();
        }

        if !self.images.is_empty() {
            pt.new_level("images".to_string());
            for info in &self.images {
                pt.new_level(format!("key={:?}", info.key));
                pt.add_item(format!("generation={:?}", info.generation));
                pt.end_level();
            }
            pt.end_level();
        }

        if !self.opacity_bindings.is_empty() {
            pt.new_level("opacity_bindings".to_string());
            for opacity_binding in &self.opacity_bindings {
                pt.new_level(format!("binding={:?}", opacity_binding));
                pt.end_level();
            }
            pt.end_level();
        }

        if !self.transforms.is_empty() {
            pt.new_level("transforms".to_string());
            for transform in &self.transforms {
                pt.new_level(format!("spatial_node={:?}", transform));
                pt.end_level();
            }
            pt.end_level();
        }

        if !self.color_bindings.is_empty() {
            pt.new_level("color_bindings".to_string());
            for color_binding in &self.color_bindings {
                pt.new_level(format!("binding={:?}", color_binding));
                pt.end_level();
            }
            pt.end_level();
        }

        pt.end_level();
    }

    /// Clear the dependency information for a tile, when the dependencies
    /// are being rebuilt.
    fn clear(&mut self) {
        self.prims.clear();
        self.clips.clear();
        self.opacity_bindings.clear();
        self.images.clear();
        self.transforms.clear();
        self.local_valid_rect = PictureRect::zero();
        self.color_bindings.clear();
    }
}

/// Represents the dirty region of a tile cache picture.
#[derive(Clone)]
pub struct DirtyRegion {
    /// The individual filters that make up this region.
    pub filters: Vec<BatchFilter>,

    /// The overall dirty rect, a combination of dirty_rects
    pub combined: WorldRect,

    /// Spatial node of the picture cache this region represents
    spatial_node_index: SpatialNodeIndex,
}

impl DirtyRegion {
    /// Construct a new dirty region tracker.
    pub fn new(
        spatial_node_index: SpatialNodeIndex,
    ) -> Self {
        DirtyRegion {
            // Capacity 16 looks like a heuristic to avoid reallocation for
            // typical filter counts — TODO(review) confirm.
            filters: Vec::with_capacity(16),
            combined: WorldRect::zero(),
            spatial_node_index,
        }
    }

    /// Reset the dirty regions back to empty
    pub fn reset(
        &mut self,
        spatial_node_index: SpatialNodeIndex,
    ) {
        self.filters.clear();
        self.combined = WorldRect::zero();
        self.spatial_node_index = spatial_node_index;
    }

    /// Add a dirty region to the tracker.
    // NOTE(review): an earlier doc comment claimed this returns a visibility
    // mask, but the current signature returns nothing — stale sentence removed.
    pub fn add_dirty_region(
        &mut self,
        rect_in_pic_space: PictureRect,
        sub_slice_index: SubSliceIndex,
        spatial_tree: &SpatialTree,
    ) {
        let map_pic_to_world = SpaceMapper::new_with_target(
            ROOT_SPATIAL_NODE_INDEX,
            self.spatial_node_index,
            WorldRect::max_rect(),
            spatial_tree,
        );

        let world_rect = map_pic_to_world
            .map(&rect_in_pic_space)
            .expect("bug");

        // Include this in the overall dirty rect
        self.combined = self.combined.union(&world_rect);

        self.filters.push(BatchFilter {
            rect_in_pic_space,
            sub_slice_index,
        });
    }

    /// Return a copy of this region with every filter rect (and the combined
    /// rect) inflated by `inflate_amount` in picture space.
    // TODO(gw): This returns a heap allocated object. Perhaps we can simplify this
    //           logic? Although - it's only used very rarely so it may not be an issue.
    pub fn inflate(
        &self,
        inflate_amount: f32,
        spatial_tree: &SpatialTree,
    ) -> DirtyRegion {
        let map_pic_to_world = SpaceMapper::new_with_target(
            ROOT_SPATIAL_NODE_INDEX,
            self.spatial_node_index,
            WorldRect::max_rect(),
            spatial_tree,
        );

        let mut filters = Vec::with_capacity(self.filters.len());
        let mut combined = WorldRect::zero();

        for filter in &self.filters {
            let rect_in_pic_space = filter.rect_in_pic_space.inflate(inflate_amount, inflate_amount);

            let world_rect = map_pic_to_world
                .map(&rect_in_pic_space)
                .expect("bug");

            combined = combined.union(&world_rect);

            filters.push(BatchFilter {
                rect_in_pic_space,
                sub_slice_index: filter.sub_slice_index,
            });
        }

        DirtyRegion {
            filters,
            combined,
            spatial_node_index: self.spatial_node_index,
        }
    }
}

/// The kind of backdrop a slice was determined to have.
#[derive(Debug, Copy, Clone)]
pub enum BackdropKind {
    Color {
        color: ColorF,
    },
    Clear,
}

/// Stores information about the calculated opaque backdrop of this slice.
#[derive(Debug, Copy, Clone)]
pub struct BackdropInfo {
    /// The picture space rectangle that is known to be opaque. This is used
    /// to determine where subpixel AA can be used, and where alpha blending
    /// can be disabled.
    pub opaque_rect: PictureRect,
    /// Kind of the backdrop
    pub kind: Option<BackdropKind>,
}

impl BackdropInfo {
    /// An empty backdrop: zero opaque rect and no known kind.
    fn empty() -> Self {
        BackdropInfo {
            opaque_rect: PictureRect::zero(),
            kind: None,
        }
    }
}

/// One logged tile-cache slice: its ron serialization plus the transform
/// needed to place it in world space.
#[derive(Clone)]
pub struct TileCacheLoggerSlice {
    pub serialized_slice: String,
    pub local_to_world_transform: Transform3D<f32, PicturePixel, WorldPixel>,
}

// Generates TileCacheLoggerUpdateLists with one field per interner (the macro
// is invoked below via crate::enumerate_interners!).
#[cfg(any(feature = "capture", feature = "replay"))]
macro_rules! declare_tile_cache_logger_updatelists {
    ( $( $name:ident : $ty:ty, )+ ) => {
        #[cfg_attr(feature = "capture", derive(Serialize))]
        #[cfg_attr(feature = "replay", derive(Deserialize))]
        struct TileCacheLoggerUpdateListsSerializer {
            pub ron_string: Vec<String>,
        }

        pub struct TileCacheLoggerUpdateLists {
            $(
                /// Generate storage, one per interner.
                /// the tuple is a workaround to avoid the need for multiple
                /// fields that start with $name (macro concatenation).
                /// the string is .ron serialized updatelist at capture time;
                /// the updates is the list of DataStore updates (avoid UpdateList
                /// due to Default() requirements on the Keys) reconstructed at
                /// load time.
                pub $name: (Vec<String>, Vec<UpdateList<<$ty as Internable>::Key>>),
            )+
        }

        impl TileCacheLoggerUpdateLists {
            pub fn new() -> Self {
                TileCacheLoggerUpdateLists {
                    $(
                        $name : ( Vec::new(), Vec::new() ),
                    )+
                }
            }

            /// serialize all interners in updates to .ron
            #[cfg(feature = "capture")]
            fn serialize_updates(
                &mut self,
                updates: &InternerUpdates
            ) {
                $(
                    self.$name.0.push(ron::ser::to_string_pretty(&updates.$name, Default::default()).unwrap());
                )+
            }

            fn is_empty(&self) -> bool {
                $(
                    if !self.$name.0.is_empty() { return false; }
                )+
                true
            }

            #[cfg(feature = "capture")]
            fn to_ron(&self) -> String {
                let mut serializer = TileCacheLoggerUpdateListsSerializer { ron_string: Vec::new() };
                $(
                    serializer.ron_string.push(
                        ron::ser::to_string_pretty(&self.$name.0, Default::default()).unwrap());
                )+
                ron::ser::to_string_pretty(&serializer, Default::default()).unwrap()
            }

            /// Deserialize from the .ron text produced by to_ron. On parse
            /// failure, logs and leaves self unchanged.
            #[cfg(feature = "replay")]
            pub fn from_ron(&mut self, text: &str) {
                let serializer : TileCacheLoggerUpdateListsSerializer =
                    match ron::de::from_str(&text) {
                        Ok(data) => { data }
                        Err(e) => {
                            println!("ERROR: failed to deserialize updatelist: {:?}\n{:?}", &text, e);
                            return;
                        }
                    };
                let mut index = 0;
                $(
                    let ron_lists : Vec<String> = ron::de::from_str(&serializer.ron_string[index]).unwrap();
                    self.$name.1 = ron_lists.iter()
                                            .map( |list| ron::de::from_str(&list).unwrap() )
                                            .collect();
                    index = index + 1;
                )+
                // error: value assigned to `index` is never read
                let _ = index;
            }

            /// helper method to add a stringified version of all interned keys into
            /// a lookup table based on ItemUid.  Use strings as a form of type erasure
            /// so all UpdateLists can go into a single map.
            /// Then during analysis, when we see an invalidation reason due to
            /// "ItemUid such and such was added to the tile primitive list", the lookup
            /// allows mapping that back into something readable.
            #[cfg(feature = "replay")]
            pub fn insert_in_lookup(
                        &mut self,
                        itemuid_to_string: &mut HashMap<ItemUid, String>)
            {
                $(
                    {
                        for list in &self.$name.1 {
                            for insertion in &list.insertions {
                                itemuid_to_string.insert(
                                    insertion.uid,
                                    format!("{:?}", insertion.value));
                            }
                        }
                    }
                )+
            }
        }
    }
}

#[cfg(any(feature = "capture", feature = "replay"))]
crate::enumerate_interners!(declare_tile_cache_logger_updatelists);

// Stub implementation when neither capture nor replay is enabled.
#[cfg(not(any(feature = "capture", feature = "replay")))]
pub struct TileCacheLoggerUpdateLists {
}

#[cfg(not(any(feature = "capture", feature = "replay")))]
impl TileCacheLoggerUpdateLists {
    pub fn new() -> Self { TileCacheLoggerUpdateLists {} }
    fn is_empty(&self) -> bool { true }
}

/// Log tile cache activity for one single frame.
/// Also stores the commands sent to the interning data_stores
/// so we can see which items were created or destroyed this frame,
/// and correlate that with tile invalidation activity.
pub struct TileCacheLoggerFrame {
    /// slices in the frame, one per take_context call
    pub slices: Vec<TileCacheLoggerSlice>,
    /// interning activity
    pub update_lists: TileCacheLoggerUpdateLists
}

impl TileCacheLoggerFrame {
    pub fn new() -> Self {
        TileCacheLoggerFrame {
            slices: Vec::new(),
            update_lists: TileCacheLoggerUpdateLists::new()
        }
    }

    pub fn is_empty(&self) -> bool {
        self.slices.is_empty() && self.update_lists.is_empty()
    }
}

/// Log tile cache activity whenever anything happens in take_context.
pub struct TileCacheLogger { /// next write pointer pub write_index : usize, /// ron serialization of tile caches; pub frames: Vec<TileCacheLoggerFrame> } impl TileCacheLogger { pub fn new( num_frames: usize ) -> Self { let mut frames = Vec::with_capacity(num_frames); for _i in 0..num_frames { // no Clone so no resize frames.push(TileCacheLoggerFrame::new()); } TileCacheLogger { write_index: 0, frames } } pub fn is_enabled(&self) -> bool { !self.frames.is_empty() } #[cfg(feature = "capture")] pub fn add( &mut self, serialized_slice: String, local_to_world_transform: Transform3D<f32, PicturePixel, WorldPixel> ) { if !self.is_enabled() { return; } self.frames[self.write_index].slices.push( TileCacheLoggerSlice { serialized_slice, local_to_world_transform }); } #[cfg(feature = "capture")] pub fn serialize_updates(&mut self, updates: &InternerUpdates) { if !self.is_enabled() { return; } self.frames[self.write_index].update_lists.serialize_updates(updates); } /// see if anything was written in this frame, and if so, /// advance the write index in a circular way and clear the /// recorded string. pub fn advance(&mut self) { if !self.is_enabled() || self.frames[self.write_index].is_empty() { return; } self.write_index = self.write_index + 1; if self.write_index >= self.frames.len() { self.write_index = 0; } self.frames[self.write_index] = TileCacheLoggerFrame::new(); } #[cfg(feature = "capture")] pub fn save_capture( &self, root: &PathBuf ) { if !self.is_enabled() { return; } use std::fs; info!("saving tile cache log"); let path_tile_cache = root.join("tile_cache"); if !path_tile_cache.is_dir() { fs::create_dir(&path_tile_cache).unwrap(); } let mut files_written = 0; for ix in 0..self.frames.len() { // ...and start with write_index, since that's the oldest entry // that we're about to overwrite. 
However when we get to // save_capture, we've add()ed entries but haven't advance()d yet, // so the actual oldest entry is write_index + 1 let index = (self.write_index + 1 + ix) % self.frames.len(); if self.frames[index].is_empty() { continue; } let filename = path_tile_cache.join(format!("frame{:05}.ron", files_written)); let mut output = File::create(filename).unwrap(); output.write_all(b"// slice data\n").unwrap(); output.write_all(b"[\n").unwrap(); for item in &self.frames[index].slices { output.write_all(b"( transform:\n").unwrap(); let transform = ron::ser::to_string_pretty( &item.local_to_world_transform, Default::default()).unwrap(); output.write_all(transform.as_bytes()).unwrap(); output.write_all(b",\n tile_cache:\n").unwrap(); output.write_all(item.serialized_slice.as_bytes()).unwrap(); output.write_all(b"\n),\n").unwrap(); } output.write_all(b"]\n\n").unwrap(); output.write_all(b"// @@@ chunk @@@\n\n").unwrap(); output.write_all(b"// interning data\n").unwrap(); output.write_all(self.frames[index].update_lists.to_ron().as_bytes()).unwrap(); files_written = files_written + 1; } } } /// Represents the native surfaces created for a picture cache, if using /// a native compositor. An opaque and alpha surface is always created, /// but tiles are added to a surface based on current opacity. If the /// calculated opacity of a tile changes, the tile is invalidated and /// attached to a different native surface. This means that we don't /// need to invalidate the entire surface if only some tiles are changing /// opacity. It also means we can take advantage of opaque tiles on cache /// slices where only some of the tiles are opaque. There is an assumption /// that creating a native surface is cheap, and only when a tile is added /// to a surface is there a significant cost. This assumption holds true /// for the current native compositor implementations on Windows and Mac. 
pub struct NativeSurface {
    /// Native surface for opaque tiles
    pub opaque: NativeSurfaceId,
    /// Native surface for alpha tiles
    pub alpha: NativeSurfaceId,
}

/// Hash key for an external native compositor surface
#[derive(PartialEq, Eq, Hash)]
pub struct ExternalNativeSurfaceKey {
    /// The YUV/RGB image keys that are used to draw this surface.
    pub image_keys: [ImageKey; 3],
    /// The current device size of the surface.
    pub size: DeviceIntSize,
    /// True if this is an 'external' compositor surface created via
    /// Compositor::create_external_surface.
    pub is_external_surface: bool,
}

/// Information about a native compositor surface cached between frames.
pub struct ExternalNativeSurface {
    /// If true, the surface was used this frame. Used for a simple form
    /// of GC to remove old surfaces.
    pub used_this_frame: bool,
    /// The native compositor surface handle
    pub native_surface_id: NativeSurfaceId,
    /// List of image keys, and current image generations, that are drawn in this surface.
    /// The image generations are used to check if the compositor surface is dirty and
    /// needs to be updated.
    pub image_dependencies: [ImageDependency; 3],
}

/// The key that identifies a tile cache instance. For now, it's simply the index of
/// the slice as it was created during scene building.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct SliceId(usize);

impl SliceId {
    pub fn new(index: usize) -> Self {
        SliceId(index)
    }
}

/// Information that is required to reuse or create a new tile cache. Created
/// during scene building and passed to the render backend / frame builder.
pub struct TileCacheParams {
    // Index of the slice (also effectively the key of the tile cache, though we use SliceId where that matters)
    pub slice: usize,
    // Flags describing content of this cache (e.g. scrollbars)
    pub slice_flags: SliceFlags,
    // The anchoring spatial node / scroll root
    pub spatial_node_index: SpatialNodeIndex,
    // Optional background color of this tilecache. If present, can be used as an optimization
    // to enable opaque blending and/or subpixel AA in more places.
    pub background_color: Option<ColorF>,
    // List of clips shared by all prims that are promoted to this tile cache
    pub shared_clips: Vec<ClipInstance>,
    // The clip chain handle representing `shared_clips`
    pub shared_clip_chain: ClipChainId,
    // Virtual surface sizes are always square, so this represents both the width and height
    pub virtual_surface_size: i32,
    // The number of compositor surfaces that are being requested for this tile cache.
    // This is only a suggestion - the tile cache will clamp this as a reasonable number
    // and only promote a limited number of surfaces.
    pub compositor_surface_count: usize,
}

/// Defines which sub-slice (effectively a z-index) a primitive exists on within
/// a picture cache instance.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct SubSliceIndex(u8);

impl SubSliceIndex {
    pub const DEFAULT: SubSliceIndex = SubSliceIndex(0);

    pub fn new(index: usize) -> Self {
        SubSliceIndex(index as u8)
    }

    /// Return true if this sub-slice is the primary sub-slice (for now, we assume
    /// that only the primary sub-slice may be opaque and support subpixel AA, for example).
    pub fn is_primary(&self) -> bool {
        self.0 == 0
    }
}

/// Wrapper struct around an external surface descriptor with a little more information
/// that the picture caching code needs.
pub struct CompositorSurface {
    // External surface descriptor used by compositing logic
    pub descriptor: ExternalSurfaceDescriptor,
    // The compositor surface rect + any intersecting prims. Later prims that intersect
    // with this must be added to the next sub-slice.
    prohibited_rect: PictureRect,
    // If the compositor surface content is opaque.
    pub is_opaque: bool,
}

/// A SubSlice represents a potentially overlapping set of tiles within a picture cache. Most
/// picture cache instances will have only a single sub-slice. The exception to this is when
/// a picture cache has compositor surfaces, in which case sub slices are used to interleave
/// content under or over the compositor surface(s).
pub struct SubSlice {
    /// Hash of tiles present in this picture.
    pub tiles: FastHashMap<TileOffset, Box<Tile>>,
    /// The allocated compositor surfaces for this picture cache. May be None if
    /// not using native compositor, or if the surface was destroyed and needs
    /// to be reallocated next time this surface contains valid tiles.
    pub native_surface: Option<NativeSurface>,
    /// List of compositor surfaces that have been promoted from primitives
    /// in this tile cache.
    pub compositor_surfaces: Vec<CompositorSurface>,
}

impl SubSlice {
    /// Construct a new sub-slice
    fn new() -> Self {
        SubSlice {
            tiles: FastHashMap::default(),
            native_surface: None,
            compositor_surfaces: Vec::new(),
        }
    }

    /// Reset the list of compositor surfaces that follow this sub-slice.
    /// Built per-frame, since APZ may change whether an image is suitable to be a compositor surface.
    fn reset(&mut self) {
        self.compositor_surfaces.clear();
    }

    /// Resize the tile grid to match a new tile bounds.
    ///
    /// Tiles that fall within the new bounds are kept; the returned map
    /// contains the previous tiles that are no longer within the new bounds.
    fn resize(&mut self, new_tile_rect: TileRect) -> FastHashMap<TileOffset, Box<Tile>> {
        let mut old_tiles = mem::replace(&mut self.tiles, FastHashMap::default());
        self.tiles.reserve(new_tile_rect.size.area() as usize);

        for y in new_tile_rect.origin.y .. new_tile_rect.origin.y + new_tile_rect.size.height {
            for x in new_tile_rect.origin.x .. new_tile_rect.origin.x + new_tile_rect.size.width {
                let key = TileOffset::new(x, y);
                // Reuse the existing tile at this offset if there was one,
                // otherwise allocate a fresh tile.
                let tile = old_tiles
                    .remove(&key)
                    .unwrap_or_else(|| {
                        Box::new(Tile::new(key))
                    });
                self.tiles.insert(key, tile);
            }
        }

        old_tiles
    }
}

/// Represents a cache of tiles that make up a picture primitives.
pub struct TileCacheInstance {
    /// Index of the tile cache / slice for this frame builder. It's determined
    /// by the setup_picture_caching method during flattening, which splits the
    /// picture tree into multiple slices. It's used as a simple input to the tile
    /// keys. It does mean we invalidate tiles if a new layer gets inserted / removed
    /// between display lists - this seems very unlikely to occur on most pages, but
    /// can be revisited if we ever notice that.
    pub slice: usize,
    /// Propagated information about the slice
    pub slice_flags: SliceFlags,
    /// The currently selected tile size to use for this cache
    pub current_tile_size: DeviceIntSize,
    /// The list of sub-slices in this tile cache
    pub sub_slices: Vec<SubSlice>,
    /// The positioning node for this tile cache.
    pub spatial_node_index: SpatialNodeIndex,
    /// List of opacity bindings, with some extra information
    /// about whether they changed since last frame.
    opacity_bindings: FastHashMap<PropertyBindingId, OpacityBindingInfo>,
    /// Switch back and forth between old and new bindings hashmaps to avoid re-allocating.
    old_opacity_bindings: FastHashMap<PropertyBindingId, OpacityBindingInfo>,
    /// A helper to compare transforms between previous and current frame.
    spatial_node_comparer: SpatialNodeComparer,
    /// List of color bindings, with some extra information
    /// about whether they changed since last frame.
    color_bindings: FastHashMap<PropertyBindingId, ColorBindingInfo>,
    /// Switch back and forth between old and new bindings hashmaps to avoid re-allocating.
    old_color_bindings: FastHashMap<PropertyBindingId, ColorBindingInfo>,
    /// The current dirty region tracker for this picture.
    pub dirty_region: DirtyRegion,
    /// Current size of tiles in picture units.
    tile_size: PictureSize,
    /// Tile coords of the currently allocated grid.
    tile_rect: TileRect,
    /// Pre-calculated versions of the tile_rect above, used to speed up the
    /// calculations in get_tile_coords_for_rect.
    tile_bounds_p0: TileOffset,
    tile_bounds_p1: TileOffset,
    /// Local rect (unclipped) of the picture this cache covers.
    pub local_rect: PictureRect,
    /// The local clip rect, from the shared clips of this picture.
    pub local_clip_rect: PictureRect,
    /// The surface index that this tile cache will be drawn into.
    surface_index: SurfaceIndex,
    /// The background color from the renderer. If this is set opaque, we know it's
    /// fine to clear the tiles to this and allow subpixel text on the first slice.
    pub background_color: Option<ColorF>,
    /// Information about the calculated backdrop content of this cache.
    pub backdrop: BackdropInfo,
    /// The allowed subpixel mode for this surface, which depends on the detected
    /// opacity of the background.
    pub subpixel_mode: SubpixelMode,
    /// A list of clip handles that exist on every (top-level) primitive in this picture.
    /// It's often the case that these are root / fixed position clips. By handling them
    /// here, we can avoid applying them to the items, which reduces work, but more importantly
    /// reduces invalidations.
    pub shared_clips: Vec<ClipInstance>,
    /// The clip chain that represents the shared_clips above. Used to build the local
    /// clip rect for this tile cache.
    shared_clip_chain: ClipChainId,
    /// The current transform of the picture cache root spatial node
    root_transform: ScaleOffset,
    /// The number of frames until this cache next evaluates what tile size to use.
    /// If a picture rect size is regularly changing just around a size threshold,
    /// we don't want to constantly invalidate and reallocate different tile size
    /// configuration each frame.
    frames_until_size_eval: usize,
    /// The current fractional offset of the cached picture
    fract_offset: PictureVector2D,
    /// The current device fractional offset of the cached picture
    device_fract_offset: DeviceVector2D,
    /// For DirectComposition, virtual surfaces don't support negative coordinates. However,
    /// picture cache tile coordinates can be negative. To handle this, we apply an offset
    /// to each tile in DirectComposition. We want to change this as little as possible,
    /// to avoid invalidating tiles. However, if we have a picture cache tile coordinate
    /// which is outside the virtual surface bounds, we must change this to allow
    /// correct remapping of the coordinates passed to BeginDraw in DC.
    virtual_offset: DeviceIntPoint,
    /// keep around the hash map used as compare_cache to avoid reallocating it each
    /// frame.
    compare_cache: FastHashMap<PrimitiveComparisonKey, PrimitiveCompareResult>,
    /// The current device position of this cache. Used to set the compositor
    /// offset of the surface when building the visual tree.
    pub device_position: DevicePoint,
    /// The currently considered tile size override. Used to check if we should
    /// re-evaluate tile size, even if the frame timer hasn't expired.
    tile_size_override: Option<DeviceIntSize>,
    /// A cache of compositor surfaces that are retained between frames
    pub external_native_surface_cache: FastHashMap<ExternalNativeSurfaceKey, ExternalNativeSurface>,
    /// Current frame ID of this tile cache instance. Used for book-keeping / garbage collecting
    frame_id: FrameId,
}

/// Outcome of attempting to promote a primitive to a compositor surface.
enum SurfacePromotionResult {
    Failed,
    Success {
        flip_y: bool,
    }
}

impl TileCacheInstance {
    /// Construct a tile cache instance from the parameters produced during
    /// scene building. All tile-grid state starts empty / zeroed; the grid is
    /// sized later when the cache is updated for a frame.
    pub fn new(params: TileCacheParams) -> Self {
        // Determine how many sub-slices we need. Clamp to an arbitrary limit to ensure
        // we don't create a huge number of OS compositor tiles and sub-slices.
        // The extra +1 accounts for the base sub-slice in addition to the
        // requested compositor surfaces.
        let sub_slice_count = params.compositor_surface_count.min(MAX_COMPOSITOR_SURFACES) + 1;

        let mut sub_slices = Vec::with_capacity(sub_slice_count);
        for _ in 0 .. sub_slice_count {
            sub_slices.push(SubSlice::new());
        }

        TileCacheInstance {
            slice: params.slice,
            slice_flags: params.slice_flags,
            spatial_node_index: params.spatial_node_index,
            sub_slices,
            opacity_bindings: FastHashMap::default(),
            old_opacity_bindings: FastHashMap::default(),
            spatial_node_comparer: SpatialNodeComparer::new(),
            color_bindings: FastHashMap::default(),
            old_color_bindings: FastHashMap::default(),
            dirty_region: DirtyRegion::new(params.spatial_node_index),
            tile_size: PictureSize::zero(),
            tile_rect: TileRect::zero(),
            tile_bounds_p0: TileOffset::zero(),
            tile_bounds_p1: TileOffset::zero(),
            local_rect: PictureRect::zero(),
            local_clip_rect: PictureRect::zero(),
            surface_index: SurfaceIndex(0),
            background_color: params.background_color,
            backdrop: BackdropInfo::empty(),
            subpixel_mode: SubpixelMode::Allow,
            root_transform: ScaleOffset::identity(),
            shared_clips: params.shared_clips,
            shared_clip_chain: params.shared_clip_chain,
            current_tile_size: DeviceIntSize::zero(),
            frames_until_size_eval: 0,
            fract_offset: PictureVector2D::zero(),
            device_fract_offset: DeviceVector2D::zero(),
            // Default to centering the virtual offset in the middle of the DC virtual surface
            virtual_offset: DeviceIntPoint::new(
                params.virtual_surface_size / 2,
                params.virtual_surface_size / 2,
            ),
            compare_cache: FastHashMap::default(),
            device_position: DevicePoint::zero(),
            tile_size_override: None,
            external_native_surface_cache: FastHashMap::default(),
            frame_id: FrameId::INVALID,
        }
    }

    /// Return the total number of tiles allocated by this tile cache
    pub fn tile_count(&self) -> usize {
        self.tile_rect.size.area() as usize * self.sub_slices.len()
    }

    /// Reset this tile cache with the updated parameters from a new scene
    /// that has arrived. This allows the tile cache to be retained across
    /// new scenes.
pub fn prepare_for_new_scene( &mut self, params: TileCacheParams, resource_cache: &mut ResourceCache, ) { // We should only receive updated state for matching slice key assert_eq!(self.slice, params.slice); // Determine how many sub-slices we need, based on how many compositor surface prims are // in the supplied primitive list. let required_sub_slice_count = params.compositor_surface_count.min(MAX_COMPOSITOR_SURFACES) + 1; if self.sub_slices.len() != required_sub_slice_count { self.tile_rect = TileRect::zero(); if self.sub_slices.len() > required_sub_slice_count { let old_sub_slices = self.sub_slices.split_off(required_sub_slice_count); for mut sub_slice in old_sub_slices { for tile in sub_slice.tiles.values_mut() { if let Some(TileSurface::Texture { descriptor: SurfaceTextureDescriptor::Native { ref mut id, .. }, .. }) = tile.surface { if let Some(id) = id.take() { resource_cache.destroy_compositor_tile(id); } } } if let Some(native_surface) = sub_slice.native_surface { resource_cache.destroy_compositor_surface(native_surface.opaque); resource_cache.destroy_compositor_surface(native_surface.alpha); } } } else { while self.sub_slices.len() < required_sub_slice_count { self.sub_slices.push(SubSlice::new()); } } } // Store the parameters from the scene builder for this slice. Other // params in the tile cache are retained and reused, or are always // updated during pre/post_update. self.slice_flags = params.slice_flags; self.spatial_node_index = params.spatial_node_index; self.background_color = params.background_color; self.shared_clips = params.shared_clips; self.shared_clip_chain = params.shared_clip_chain; // Since the slice flags may have changed, ensure we re-evaluate the // appropriate tile size for this cache next update. self.frames_until_size_eval = 0; } /// Destroy any manually managed resources before this picture cache is /// destroyed, such as native compositor surfaces. 
pub fn destroy(
    self,
    resource_cache: &mut ResourceCache,
) {
    // Release the paired opaque/alpha native surfaces owned by each sub-slice.
    for sub_slice in self.sub_slices {
        if let Some(native_surface) = sub_slice.native_surface {
            resource_cache.destroy_compositor_surface(native_surface.opaque);
            resource_cache.destroy_compositor_surface(native_surface.alpha);
        }
    }

    // Release any retained external compositor surfaces (e.g. promoted video).
    for (_, external_surface) in self.external_native_surface_cache {
        resource_cache.destroy_compositor_surface(external_surface.native_surface_id)
    }
}

/// Get the tile coordinates for a given rectangle.
/// Returns an inclusive min / exclusive max tile offset pair, clamped to the
/// currently allocated grid bounds; an empty range on either axis means the
/// rect doesn't touch any allocated tiles.
fn get_tile_coords_for_rect(
    &self,
    rect: &PictureRect,
) -> (TileOffset, TileOffset) {
    // Get the tile coordinates in the picture space.
    let mut p0 = TileOffset::new(
        (rect.origin.x / self.tile_size.width).floor() as i32,
        (rect.origin.y / self.tile_size.height).floor() as i32,
    );

    let mut p1 = TileOffset::new(
        ((rect.origin.x + rect.size.width) / self.tile_size.width).ceil() as i32,
        ((rect.origin.y + rect.size.height) / self.tile_size.height).ceil() as i32,
    );

    // Clamp the tile coordinates here to avoid looping over irrelevant tiles later on.
    p0.x = clamp(p0.x, self.tile_bounds_p0.x, self.tile_bounds_p1.x);
    p0.y = clamp(p0.y, self.tile_bounds_p0.y, self.tile_bounds_p1.y);
    p1.x = clamp(p1.x, self.tile_bounds_p0.x, self.tile_bounds_p1.x);
    p1.y = clamp(p1.y, self.tile_bounds_p0.y, self.tile_bounds_p1.y);

    (p0, p1)
}

/// Update transforms, opacity, color bindings and tile rects.
/// Returns the world-space culling rect covering the currently visible tiles.
pub fn pre_update(
    &mut self,
    pic_rect: PictureRect,
    surface_index: SurfaceIndex,
    frame_context: &FrameVisibilityContext,
    frame_state: &mut FrameVisibilityState,
) -> WorldRect {
    self.surface_index = surface_index;
    self.local_rect = pic_rect;
    self.local_clip_rect = PictureRect::max_rect();

    // Clear per-frame state on each sub-slice before dependencies are rebuilt.
    for sub_slice in &mut self.sub_slices {
        sub_slice.reset();
    }

    // Reset the opaque rect + subpixel mode, as they are calculated
    // during the prim dependency checks.
    self.backdrop = BackdropInfo::empty();

    // Mapper from this cache's picture space to world space, reused throughout.
    let pic_to_world_mapper = SpaceMapper::new_with_target(
        ROOT_SPATIAL_NODE_INDEX,
        self.spatial_node_index,
        frame_context.global_screen_world_rect,
        frame_context.spatial_tree,
    );

    // If there is a valid set of shared clips, build a clip chain instance for this,
    // which will provide a local clip rect. This is useful for establishing things
    // like whether the backdrop rect supplied by Gecko can be considered opaque.
    if self.shared_clip_chain != ClipChainId::NONE {
        // Borrow scratch storage to collect the chain of shared clip ids.
        let shared_clips = &mut frame_state.scratch.picture.clip_chain_ids;
        shared_clips.clear();

        let map_local_to_surface = SpaceMapper::new(
            self.spatial_node_index,
            pic_rect,
        );

        // Walk the clip chain from this cache's shared clip to the root.
        let mut current_clip_chain_id = self.shared_clip_chain;
        while current_clip_chain_id != ClipChainId::NONE {
            shared_clips.push(current_clip_chain_id);
            let clip_chain_node = &frame_state.clip_store.clip_chain_nodes[current_clip_chain_id.0 as usize];
            current_clip_chain_id = clip_chain_node.parent_clip_chain_id;
        }

        frame_state.clip_store.set_active_clips(
            LayoutRect::max_rect(),
            self.spatial_node_index,
            map_local_to_surface.ref_spatial_node_index,
            &shared_clips,
            frame_context.spatial_tree,
            &mut frame_state.data_stores.clip,
        );

        let clip_chain_instance = frame_state.clip_store.build_clip_chain_instance(
            pic_rect.cast_unit(),
            &map_local_to_surface,
            &pic_to_world_mapper,
            frame_context.spatial_tree,
            frame_state.gpu_cache,
            frame_state.resource_cache,
            frame_context.global_device_pixel_scale,
            &frame_context.global_screen_world_rect,
            &mut frame_state.data_stores.clip,
            true,
            false,
        );

        // Ensure that if the entire picture cache is clipped out, the local
        // clip rect is zero. This makes sure we don't register any occluders
        // that are actually off-screen.
        self.local_clip_rect = clip_chain_instance.map_or(PictureRect::zero(), |clip_chain_instance| {
            clip_chain_instance.pic_clip_rect
        });
    }

    // Advance the current frame ID counter for this picture cache (must be done
    // after any retained prev state is taken above).
    self.frame_id.advance();

    // Notify the spatial node comparer that a new frame has started, and the
    // current reference spatial node for this tile cache.
    self.spatial_node_comparer.next_frame(self.spatial_node_index);

    // At the start of the frame, step through each current compositor surface
    // and mark it as unused. Later, this is used to free old compositor surfaces.
    // TODO(gw): In future, we might make this more sophisticated - for example,
    //           retaining them for >1 frame if unused, or retaining them in some
    //           kind of pool to reduce future allocations.
    for external_native_surface in self.external_native_surface_cache.values_mut() {
        external_native_surface.used_this_frame = false;
    }

    // Only evaluate what tile size to use fairly infrequently, so that we don't end
    // up constantly invalidating and reallocating tiles if the picture rect size is
    // changing near a threshold value.
    if self.frames_until_size_eval == 0 || self.tile_size_override != frame_context.config.tile_size_override {
        // Work out what size tile is appropriate for this picture cache.
        let desired_tile_size = match frame_context.config.tile_size_override {
            Some(tile_size_override) => {
                tile_size_override
            }
            None => {
                if self.slice_flags.contains(SliceFlags::IS_SCROLLBAR) {
                    // Scrollbar slices get narrow tiles matching their orientation.
                    if pic_rect.size.width <= pic_rect.size.height {
                        TILE_SIZE_SCROLLBAR_VERTICAL
                    } else {
                        TILE_SIZE_SCROLLBAR_HORIZONTAL
                    }
                } else {
                    frame_state.resource_cache.texture_cache.default_picture_tile_size()
                }
            }
        };

        // If the desired tile size has changed, then invalidate and drop any
        // existing tiles.
        if desired_tile_size != self.current_tile_size {
            for sub_slice in &mut self.sub_slices {
                // Destroy any native surfaces on the tiles that will be dropped due
                // to resizing.
if let Some(native_surface) = sub_slice.native_surface.take() {
                    frame_state.resource_cache.destroy_compositor_surface(native_surface.opaque);
                    frame_state.resource_cache.destroy_compositor_surface(native_surface.alpha);
                }
                // Dropping the tiles releases texture-cache backed surfaces implicitly.
                sub_slice.tiles.clear();
            }
            self.tile_rect = TileRect::zero();
            self.current_tile_size = desired_tile_size;
        }

        // Reset counter until next evaluating the desired tile size. This is an
        // arbitrary value.
        self.frames_until_size_eval = 120;
        self.tile_size_override = frame_context.config.tile_size_override;
    }

    // Map an arbitrary point in picture space to world space, to work out
    // what the fractional translation is that's applied by this scroll root.
    // TODO(gw): I'm not 100% sure this is right. At least, in future, we should
    //           make a specific API for this, and/or enforce that the picture
    //           cache transform only includes scale and/or translation (we
    //           already ensure it doesn't have perspective).
    let world_origin = pic_to_world_mapper
        .map(&PictureRect::new(PicturePoint::zero(), PictureSize::new(1.0, 1.0)))
        .expect("bug: unable to map origin to world space")
        .origin;

    // Get the desired integer device coordinate
    let device_origin = world_origin * frame_context.global_device_pixel_scale;
    let desired_device_origin = device_origin.round();
    self.device_position = desired_device_origin;
    // Fractional part that must be compensated for to keep tiles pixel-snapped.
    self.device_fract_offset = desired_device_origin - device_origin;

    // Unmap from device space to world space rect
    let ref_world_rect = WorldRect::new(
        desired_device_origin / frame_context.global_device_pixel_scale,
        WorldSize::new(1.0, 1.0),
    );

    // Unmap from world space to picture space; this should be the fractional offset
    // required in picture space to align in device space
    self.fract_offset = pic_to_world_mapper
        .unmap(&ref_world_rect)
        .expect("bug: unable to unmap ref world rect")
        .origin
        .to_vector();

    // Do a hacky diff of opacity binding values from the last frame. This is
    // used later on during tile invalidation tests.
    let current_properties = frame_context.scene_properties.float_properties();
    // Swap rather than reallocate: the old map becomes scratch for next frame.
    mem::swap(&mut self.opacity_bindings, &mut self.old_opacity_bindings);

    self.opacity_bindings.clear();
    for (id, value) in current_properties {
        let changed = match self.old_opacity_bindings.get(id) {
            Some(old_property) => !old_property.value.approx_eq(value),
            None => true,
        };
        self.opacity_bindings.insert(*id, OpacityBindingInfo {
            value: *value,
            changed,
        });
    }

    // Do a hacky diff of color binding values from the last frame. This is
    // used later on during tile invalidation tests.
    let current_properties = frame_context.scene_properties.color_properties();
    mem::swap(&mut self.color_bindings, &mut self.old_color_bindings);

    self.color_bindings.clear();
    for (id, value) in current_properties {
        let changed = match self.old_color_bindings.get(id) {
            Some(old_property) => old_property.value != (*value).into(),
            None => true,
        };
        self.color_bindings.insert(*id, ColorBindingInfo {
            value: (*value).into(),
            changed,
        });
    }

    let world_tile_size = WorldSize::new(
        self.current_tile_size.width as f32 / frame_context.global_device_pixel_scale.0,
        self.current_tile_size.height as f32 / frame_context.global_device_pixel_scale.0,
    );

    // We know that this is an exact rectangle, since we (for now) only support tile
    // caches where the scroll root is in the root coordinate system.
    let local_tile_rect = pic_to_world_mapper
        .unmap(&WorldRect::new(WorldPoint::zero(), world_tile_size))
        .expect("bug: unable to get local tile rect");

    self.tile_size = local_tile_rect.size;

    let screen_rect_in_pic_space = pic_to_world_mapper
        .unmap(&frame_context.global_screen_world_rect)
        .expect("unable to unmap screen rect");

    // Inflate the needed rect a bit, so that we retain tiles that we have drawn
    // but have just recently gone off-screen. This means that we avoid re-drawing
    // tiles if the user is scrolling up and down small amounts, at the cost of
    // a bit of extra texture memory.
    let desired_rect_in_pic_space = screen_rect_in_pic_space
        .inflate(0.0, 1.0 * self.tile_size.height);

    let needed_rect_in_pic_space = desired_rect_in_pic_space
        .intersection(&pic_rect)
        .unwrap_or_else(PictureRect::zero);

    let p0 = needed_rect_in_pic_space.origin;
    let p1 = needed_rect_in_pic_space.bottom_right();

    // Convert the needed rect into an integer tile grid (floor min / ceil max).
    let x0 = (p0.x / local_tile_rect.size.width).floor() as i32;
    let x1 = (p1.x / local_tile_rect.size.width).ceil() as i32;
    let y0 = (p0.y / local_tile_rect.size.height).floor() as i32;
    let y1 = (p1.y / local_tile_rect.size.height).ceil() as i32;

    let x_tiles = x1 - x0;
    let y_tiles = y1 - y0;
    let new_tile_rect = TileRect::new(
        TileOffset::new(x0, y0),
        TileSize::new(x_tiles, y_tiles),
    );

    // Determine whether the current bounds of the tile grid will exceed the
    // bounds of the DC virtual surface, taking into account the current
    // virtual offset. If so, we need to invalidate all tiles, and set up
    // a new virtual offset, centered around the current tile grid.
    let virtual_surface_size = frame_context.config.compositor_kind.get_virtual_surface_size();
    // We only need to invalidate in this case if the underlying platform
    // uses virtual surfaces.
    if virtual_surface_size > 0 {
        // Get the extremities of the tile grid after virtual offset is applied
        let tx0 = self.virtual_offset.x + x0 * self.current_tile_size.width;
        let ty0 = self.virtual_offset.y + y0 * self.current_tile_size.height;
        let tx1 = self.virtual_offset.x + (x1+1) * self.current_tile_size.width;
        let ty1 = self.virtual_offset.y + (y1+1) * self.current_tile_size.height;

        let need_new_virtual_offset = tx0 < 0 ||
                                      ty0 < 0 ||
                                      tx1 >= virtual_surface_size ||
                                      ty1 >= virtual_surface_size;

        if need_new_virtual_offset {
            // Calculate a new virtual offset, centered around the middle of the
            // current tile grid. This means we won't need to invalidate and get
            // a new offset for a long time!
self.virtual_offset = DeviceIntPoint::new(
                (virtual_surface_size/2) - ((x0 + x1) / 2) * self.current_tile_size.width,
                (virtual_surface_size/2) - ((y0 + y1) / 2) * self.current_tile_size.height,
            );

            // Invalidate all native tile surfaces. They will be re-allocated next time
            // they are scheduled to be rasterized.
            for sub_slice in &mut self.sub_slices {
                for tile in sub_slice.tiles.values_mut() {
                    if let Some(TileSurface::Texture { descriptor: SurfaceTextureDescriptor::Native { ref mut id, .. }, .. }) = tile.surface {
                        if let Some(id) = id.take() {
                            frame_state.resource_cache.destroy_compositor_tile(id);
                            // NOTE(review): surface clearing / invalidation only happens
                            // when a native tile id was actually allocated here, unlike
                            // the compositor-kind cleanup below which always clears —
                            // confirm this asymmetry is intended.
                            tile.surface = None;
                            // Invalidate the entire tile to force a redraw.
                            // TODO(gw): Add a new invalidation reason for virtual offset changing
                            tile.invalidate(None, InvalidationReason::CompositorKindChanged);
                        }
                    }
                }

                // Destroy the native virtual surfaces. They will be re-allocated next time a tile
                // that references them is scheduled to draw.
                if let Some(native_surface) = sub_slice.native_surface.take() {
                    frame_state.resource_cache.destroy_compositor_surface(native_surface.opaque);
                    frame_state.resource_cache.destroy_compositor_surface(native_surface.alpha);
                }
            }
        }
    }

    // Rebuild the tile grid if the picture cache rect has changed.
    if new_tile_rect != self.tile_rect {
        for sub_slice in &mut self.sub_slices {
            let mut old_tiles = sub_slice.resize(new_tile_rect);

            // If any old tiles remain after the resize, the cached dirty rects
            // are no longer valid.
            if !old_tiles.is_empty() {
                frame_state.composite_state.dirty_rects_are_valid = false;
            }

            // Any old tiles that remain after the loop above are going to be dropped. For
            // simple composite mode, the texture cache handle will expire and be collected
            // by the texture cache. For native compositor mode, we need to explicitly
            // invoke a callback to the client to destroy that surface.
            frame_state.composite_state.destroy_native_tiles(
                old_tiles.values_mut(),
                frame_state.resource_cache,
            );
        }
    }

    // This is duplicated information from tile_rect, but cached here to avoid
    // redundant calculations during get_tile_coords_for_rect
    self.tile_bounds_p0 = TileOffset::new(x0, y0);
    self.tile_bounds_p1 = TileOffset::new(x1, y1);
    self.tile_rect = new_tile_rect;

    let mut world_culling_rect = WorldRect::zero();

    let mut ctx = TilePreUpdateContext {
        pic_to_world_mapper,
        fract_offset: self.fract_offset,
        device_fract_offset: self.device_fract_offset,
        background_color: self.background_color,
        global_screen_world_rect: frame_context.global_screen_world_rect,
        tile_size: self.tile_size,
        frame_id: self.frame_id,
    };

    // Pre-update each tile
    for sub_slice in &mut self.sub_slices {
        for tile in sub_slice.tiles.values_mut() {
            tile.pre_update(&ctx);

            // Only include the tiles that are currently in view into the world culling
            // rect. This is a very important optimization for a couple of reasons:
            // (1) Primitives that intersect with tiles in the grid that are not currently
            //     visible can be skipped from primitive preparation, clip chain building
            //     and tile dependency updates.
            // (2) When we need to allocate an off-screen surface for a child picture (for
            //     example a CSS filter) we clip the size of the GPU surface to the world
            //     culling rect below (to ensure we draw enough of it to be sampled by any
            //     tiles that reference it). Making the world culling rect only affected
            //     by visible tiles (rather than the entire virtual tile display port) can
            //     result in allocating _much_ smaller GPU surfaces for cases where the
            //     true off-screen surface size is very large.
            if tile.is_visible {
                world_culling_rect = world_culling_rect.union(&tile.world_tile_rect);
            }
        }

        // The background color can only be applied to the first sub-slice.
        ctx.background_color = None;
    }

    // If compositor mode is changed, need to drop all incompatible tiles.
    match frame_context.config.compositor_kind {
        CompositorKind::Draw { .. } => {
            // Draw mode: any native-compositor backed tiles/surfaces are incompatible
            // and must be destroyed and invalidated.
            for sub_slice in &mut self.sub_slices {
                for tile in sub_slice.tiles.values_mut() {
                    if let Some(TileSurface::Texture { descriptor: SurfaceTextureDescriptor::Native { ref mut id, .. }, .. }) = tile.surface {
                        if let Some(id) = id.take() {
                            frame_state.resource_cache.destroy_compositor_tile(id);
                        }
                        tile.surface = None;
                        // Invalidate the entire tile to force a redraw.
                        tile.invalidate(None, InvalidationReason::CompositorKindChanged);
                    }
                }

                if let Some(native_surface) = sub_slice.native_surface.take() {
                    frame_state.resource_cache.destroy_compositor_surface(native_surface.opaque);
                    frame_state.resource_cache.destroy_compositor_surface(native_surface.alpha);
                }
            }

            for (_, external_surface) in self.external_native_surface_cache.drain() {
                frame_state.resource_cache.destroy_compositor_surface(external_surface.native_surface_id)
            }
        }
        CompositorKind::Native { .. } => {
            // This could hit even when compositor mode is not changed,
            // then we need to check if there are incompatible tiles.
            for sub_slice in &mut self.sub_slices {
                for tile in sub_slice.tiles.values_mut() {
                    if let Some(TileSurface::Texture { descriptor: SurfaceTextureDescriptor::TextureCache { .. }, .. }) = tile.surface {
                        tile.surface = None;
                        // Invalidate the entire tile to force a redraw.
                        tile.invalidate(None, InvalidationReason::CompositorKindChanged);
                    }
                }
            }
        }
    }

    world_culling_rect
}

/// Decide whether a primitive with the given flags / clip / spatial node can be
/// promoted to an OS compositor surface in this tile cache.
fn can_promote_to_surface(
    &mut self,
    flags: PrimitiveFlags,
    prim_clip_chain: &ClipChainInstance,
    prim_spatial_node_index: SpatialNodeIndex,
    is_root_tile_cache: bool,
    sub_slice_index: usize,
    frame_context: &FrameVisibilityContext,
) -> SurfacePromotionResult {
    // Check if this primitive _wants_ to be promoted to a compositor surface.
    if !flags.contains(PrimitiveFlags::PREFER_COMPOSITOR_SURFACE) {
        return SurfacePromotionResult::Failed;
    }

    // For now, only support a small (arbitrary) number of compositor surfaces.
if sub_slice_index == MAX_COMPOSITOR_SURFACES {
        return SurfacePromotionResult::Failed;
    }

    // If a complex clip is being applied to this primitive, it can't be
    // promoted directly to a compositor surface (we might be able to
    // do this in limited cases in future, some native compositors do
    // support rounded rect clips, for example)
    if prim_clip_chain.needs_mask {
        return SurfacePromotionResult::Failed;
    }

    // If not on the root picture cache, it has some kind of
    // complex effect (such as a filter, mix-blend-mode or 3d transform).
    if !is_root_tile_cache {
        return SurfacePromotionResult::Failed;
    }

    let mapper : SpaceMapper<PicturePixel, WorldPixel> = SpaceMapper::new_with_target(
        ROOT_SPATIAL_NODE_INDEX,
        prim_spatial_node_index,
        frame_context.global_screen_world_rect,
        &frame_context.spatial_tree);

    // Only axis-aligned 2d scale/translation transforms can be handled by
    // compositor surfaces; horizontal mirroring (m11 < 0) is also rejected.
    let transform = mapper.get_transform();
    if !transform.is_2d_scale_translation() {
        return SurfacePromotionResult::Failed;
    }
    if transform.m11 < 0.0 {
        return SurfacePromotionResult::Failed;
    }

    if self.slice_flags.contains(SliceFlags::IS_BLEND_CONTAINER) {
        return SurfacePromotionResult::Failed;
    }

    // Vertical mirroring is allowed, but the compositor must flip the surface.
    SurfacePromotionResult::Success {
        flip_y: transform.m22 < 0.0,
    }
}

/// Set up a promoted compositor surface backed by YUV planes. Requests the
/// plane images and delegates the shared work to setup_compositor_surfaces_impl.
/// Returns false if composition is not available and the content path must be used.
fn setup_compositor_surfaces_yuv(
    &mut self,
    sub_slice_index: usize,
    prim_info: &mut PrimitiveDependencyInfo,
    flags: PrimitiveFlags,
    local_prim_rect: LayoutRect,
    prim_spatial_node_index: SpatialNodeIndex,
    pic_clip_rect: PictureRect,
    frame_context: &FrameVisibilityContext,
    image_dependencies: &[ImageDependency;3],
    api_keys: &[ImageKey; 3],
    resource_cache: &mut ResourceCache,
    composite_state: &mut CompositeState,
    gpu_cache: &mut GpuCache,
    image_rendering: ImageRendering,
    color_depth: ColorDepth,
    color_space: YuvColorSpace,
    format: YuvFormat,
) -> bool {
    for &key in api_keys {
        if key != ImageKey::DUMMY {
            // TODO: See comment in setup_compositor_surfaces_rgb.
            resource_cache.request_image(ImageRequest {
                    key,
                    rendering: image_rendering,
                    tile: None,
                },
                gpu_cache,
            );
        }
    }

    self.setup_compositor_surfaces_impl(
        sub_slice_index,
        prim_info,
        flags,
        local_prim_rect,
        prim_spatial_node_index,
        pic_clip_rect,
        frame_context,
        ExternalSurfaceDependency::Yuv {
            image_dependencies: *image_dependencies,
            color_space,
            format,
            rescale: color_depth.rescaling_factor(),
        },
        api_keys,
        resource_cache,
        composite_state,
        image_rendering,
        // YUV video surfaces are always treated as opaque.
        true,
    )
}

/// Set up a promoted compositor surface backed by a single RGB(A) image.
/// Returns false if composition is not available and the content path must be used.
fn setup_compositor_surfaces_rgb(
    &mut self,
    sub_slice_index: usize,
    prim_info: &mut PrimitiveDependencyInfo,
    flags: PrimitiveFlags,
    local_prim_rect: LayoutRect,
    prim_spatial_node_index: SpatialNodeIndex,
    pic_clip_rect: PictureRect,
    frame_context: &FrameVisibilityContext,
    image_dependency: ImageDependency,
    api_key: ImageKey,
    resource_cache: &mut ResourceCache,
    composite_state: &mut CompositeState,
    gpu_cache: &mut GpuCache,
    image_rendering: ImageRendering,
    flip_y: bool,
) -> bool {
    let mut api_keys = [ImageKey::DUMMY; 3];
    api_keys[0] = api_key;

    // TODO: The picture compositing code requires images promoted
    // into their own picture cache slices to be requested every
    // frame even if they are not visible. However the image updates
    // are only reached on the prepare pass for visible primitives.
    // So we make sure to trigger an image request when promoting
    // the image here.
    resource_cache.request_image(ImageRequest {
            key: api_key,
            rendering: image_rendering,
            tile: None,
        },
        gpu_cache,
    );

    // Opacity is derived from the image descriptor; unknown images are
    // conservatively treated as non-opaque.
    let is_opaque = resource_cache.get_image_properties(api_key)
        .map_or(false, |properties| properties.descriptor.is_opaque());

    self.setup_compositor_surfaces_impl(
        sub_slice_index,
        prim_info,
        flags,
        local_prim_rect,
        prim_spatial_node_index,
        pic_clip_rect,
        frame_context,
        ExternalSurfaceDependency::Rgb {
            image_dependency,
            flip_y,
        },
        &api_keys,
        resource_cache,
        composite_state,
        image_rendering,
        is_opaque,
    )
}

// returns false if composition is not available for this surface,
// and the non-compositor path should be used to draw it instead.
fn setup_compositor_surfaces_impl(
    &mut self,
    sub_slice_index: usize,
    prim_info: &mut PrimitiveDependencyInfo,
    flags: PrimitiveFlags,
    local_prim_rect: LayoutRect,
    prim_spatial_node_index: SpatialNodeIndex,
    pic_clip_rect: PictureRect,
    frame_context: &FrameVisibilityContext,
    dependency: ExternalSurfaceDependency,
    api_keys: &[ImageKey; 3],
    resource_cache: &mut ResourceCache,
    composite_state: &mut CompositeState,
    image_rendering: ImageRendering,
    is_opaque: bool,
) -> bool {
    let map_local_to_surface = SpaceMapper::new_with_target(
        self.spatial_node_index,
        prim_spatial_node_index,
        self.local_rect,
        frame_context.spatial_tree,
    );

    // Map the primitive local rect into picture space.
    // NOTE: returning true here means "handled, no compositor surface needed",
    // not an error — an unmappable prim simply creates no dependencies.
    let prim_rect = match map_local_to_surface.map(&local_prim_rect) {
        Some(rect) => rect,
        None => return true,
    };

    // If the rect is invalid, no need to create dependencies.
if prim_rect.size.is_empty() {
        return true;
    }

    let pic_to_world_mapper = SpaceMapper::new_with_target(
        ROOT_SPATIAL_NODE_INDEX,
        self.spatial_node_index,
        frame_context.global_screen_world_rect,
        frame_context.spatial_tree,
    );

    let world_clip_rect = pic_to_world_mapper
        .map(&prim_info.prim_clip_box.to_rect())
        .expect("bug: unable to map clip to world space");

    // Entirely off-screen primitives don't need a compositor surface.
    let is_visible = world_clip_rect.intersects(&frame_context.global_screen_world_rect);
    if !is_visible {
        return true;
    }

    let world_rect = pic_to_world_mapper
        .map(&prim_rect)
        .expect("bug: unable to map the primitive to world space");
    let device_rect = (world_rect * frame_context.global_device_pixel_scale).round();

    // TODO(gw): Is there any case where if the primitive ends up on a fractional
    //           boundary we want to _skip_ promoting to a compositor surface and
    //           draw it as part of the content?
    let (surface_rect, transform) = match composite_state.compositor_kind {
        CompositorKind::Draw { .. } => {
            (device_rect, Transform3D::identity())
        }
        CompositorKind::Native { .. } => {
            // If we have a Native Compositor, then we can support doing the transformation
            // as part of compositing. Use the local prim rect for the external surface, and
            // compute the full local to device transform to provide to the compositor.
            let surface_to_world_mapper : SpaceMapper<PicturePixel, WorldPixel> = SpaceMapper::new_with_target(
                ROOT_SPATIAL_NODE_INDEX,
                prim_spatial_node_index,
                frame_context.global_screen_world_rect,
                frame_context.spatial_tree,
            );
            let prim_origin = Vector3D::new(local_prim_rect.origin.x, local_prim_rect.origin.y, 0.0);
            let world_to_device_scale = Transform3D::from_scale(frame_context.global_device_pixel_scale);
            let transform = surface_to_world_mapper.get_transform().pre_translate(prim_origin).then(&world_to_device_scale);

            (local_prim_rect.cast_unit(), transform)
        }
    };

    let clip_rect = (world_clip_rect * frame_context.global_device_pixel_scale).round();

    // Surfaces larger than the platform limit can't be promoted; returning false
    // makes the caller fall back to the regular content (non-compositor) path.
    if surface_rect.size.width >= MAX_COMPOSITOR_SURFACES_SIZE ||
       surface_rect.size.height >= MAX_COMPOSITOR_SURFACES_SIZE {
        return false;
    }

    // If this primitive is an external image, and supports being used
    // directly by a native compositor, then lookup the external image id
    // so we can pass that through.
    let external_image_id = if flags.contains(PrimitiveFlags::SUPPORTS_EXTERNAL_COMPOSITOR_SURFACE) {
        resource_cache.get_image_properties(api_keys[0])
            .and_then(|properties| properties.external_image)
            .and_then(|image| Some(image.id))
    } else {
        None
    };

    // When using native compositing, we need to find an existing native surface
    // handle to use, or allocate a new one. For existing native surfaces, we can
    // also determine whether this needs to be updated, depending on whether the
    // image generation(s) of the planes have changed since last composite.
    let (native_surface_id, update_params) = match composite_state.compositor_kind {
        CompositorKind::Draw { .. } => {
            (None, None)
        }
        CompositorKind::Native { .. } => {
            let native_surface_size = surface_rect.size.round().to_i32();

            // Surfaces are keyed on the plane image keys, the surface size, and
            // whether they attach to an external image.
            let key = ExternalNativeSurfaceKey {
                image_keys: *api_keys,
                size: native_surface_size,
                is_external_surface: external_image_id.is_some(),
            };

            let native_surface = self.external_native_surface_cache
                .entry(key)
                .or_insert_with(|| {
                    // No existing surface, so allocate a new compositor surface.
                    let native_surface_id = match external_image_id {
                        Some(_external_image) => {
                            // If we have a suitable external image, then create an external
                            // surface to attach to.
                            resource_cache.create_compositor_external_surface(is_opaque)
                        }
                        None => {
                            // Otherwise create a normal compositor surface and a single
                            // compositor tile that covers the entire surface.
                            let native_surface_id =
                                resource_cache.create_compositor_surface(
                                    DeviceIntPoint::zero(),
                                    native_surface_size,
                                    is_opaque,
                                );

                            let tile_id = NativeTileId {
                                surface_id: native_surface_id,
                                x: 0,
                                y: 0,
                            };
                            resource_cache.create_compositor_tile(tile_id);

                            native_surface_id
                        }
                    };

                    ExternalNativeSurface {
                        used_this_frame: true,
                        // INVALID dependencies guarantee the first frame updates the surface.
                        native_surface_id,
                        image_dependencies: [ImageDependency::INVALID; 3],
                    }
                });

            // Mark that the surface is referenced this frame so that the
            // backing native surface handle isn't freed.
            native_surface.used_this_frame = true;

            let update_params = match external_image_id {
                Some(external_image) => {
                    // If this is an external image surface, then there's no update
                    // to be done. Just attach the current external image to the surface
                    // and we're done.
                    resource_cache.attach_compositor_external_image(
                        native_surface.native_surface_id,
                        external_image,
                    );
                    None
                }
                None => {
                    // If the image dependencies match, there is no need to update
                    // the backing native surface.
                    match dependency {
                        ExternalSurfaceDependency::Yuv{ image_dependencies, .. } => {
                            if image_dependencies == native_surface.image_dependencies {
                                None
                            } else {
                                Some(native_surface_size)
                            }
                        },
                        ExternalSurfaceDependency::Rgb{ image_dependency, .. } => {
                            if image_dependency == native_surface.image_dependencies[0] {
                                None
                            } else {
                                Some(native_surface_size)
                            }
                        },
                    }
                }
            };

            (Some(native_surface.native_surface_id), update_params)
        }
    };

    // For compositor surfaces, if we didn't find an earlier sub-slice to add to,
    // we know we can append to the current slice.
    assert!(sub_slice_index < self.sub_slices.len() - 1);
    let sub_slice = &mut self.sub_slices[sub_slice_index];

    // Each compositor surface allocates a unique z-id
    sub_slice.compositor_surfaces.push(CompositorSurface {
        prohibited_rect: pic_clip_rect,
        is_opaque,
        descriptor: ExternalSurfaceDescriptor {
            local_rect: prim_info.prim_clip_box.to_rect(),
            local_clip_rect: prim_info.prim_clip_box.to_rect(),
            dependency,
            image_rendering,
            device_rect,
            surface_rect,
            clip_rect,
            transform: transform.cast_unit(),
            // The real z-id is assigned later when the visual tree is built.
            z_id: ZBufferId::invalid(),
            native_surface_id,
            update_params,
        },
    });

    true
}

/// Update the dependencies for each tile for a given primitive instance.
pub fn update_prim_dependencies(
    &mut self,
    prim_instance: &mut PrimitiveInstance,
    prim_spatial_node_index: SpatialNodeIndex,
    local_prim_rect: LayoutRect,
    frame_context: &FrameVisibilityContext,
    data_stores: &DataStores,
    clip_store: &ClipStore,
    pictures: &[PicturePrimitive],
    resource_cache: &mut ResourceCache,
    color_bindings: &ColorBindingStorage,
    surface_stack: &[SurfaceIndex],
    composite_state: &mut CompositeState,
    gpu_cache: &mut GpuCache,
    is_root_tile_cache: bool,
) {
    // This primitive exists on the last element on the current surface stack.
    profile_scope!("update_prim_dependencies");
    let prim_surface_index = *surface_stack.last().unwrap();
    let prim_clip_chain = &prim_instance.vis.clip_chain;

    // If the primitive is directly drawn onto this picture cache surface, then
    // the pic_clip_rect is in the same space. If not, we need to map it from
    // the surface space into the picture cache space.
    let on_picture_surface = prim_surface_index == self.surface_index;
    let pic_clip_rect = if on_picture_surface {
        prim_clip_chain.pic_clip_rect
    } else {
        // We want to get the rect in the tile cache surface space that this primitive
        // occupies, in order to enable correct invalidation regions. Each surface
        // that exists in the chain between this primitive and the tile cache surface
        // may have an arbitrary inflation factor (for example, in the case of a series
        // of nested blur elements). To account for this, step through the current
        // surface stack, mapping the primitive rect into each surface space, including
        // the inflation factor from each intermediate surface.
        let mut current_pic_clip_rect = prim_clip_chain.pic_clip_rect;
        let mut current_spatial_node_index = frame_context
            .surfaces[prim_surface_index.0]
            .surface_spatial_node_index;

        // Walk from the innermost surface outwards towards the tile cache.
        for surface_index in surface_stack.iter().rev() {
            let surface = &frame_context.surfaces[surface_index.0];

            let map_local_to_surface = SpaceMapper::new_with_target(
                surface.surface_spatial_node_index,
                current_spatial_node_index,
                surface.rect,
                frame_context.spatial_tree,
            );

            // Map the rect into the parent surface, and inflate if this surface requires
            // it. If the rect can't be mapped (e.g. due to an invalid transform) then
            // just bail out from the dependencies and cull this primitive.
            current_pic_clip_rect = match map_local_to_surface.map(&current_pic_clip_rect) {
                Some(rect) => {
                    rect.inflate(surface.inflation_factor, surface.inflation_factor)
                }
                None => {
                    return;
                }
            };

            current_spatial_node_index = surface.surface_spatial_node_index;
        }

        current_pic_clip_rect
    };

    // Get the tile coordinates in the picture space.
    let (p0, p1) = self.get_tile_coords_for_rect(&pic_clip_rect);

    // If the primitive is outside the tiling rects, it's known to not
    // be visible.
    if p0.x == p1.x || p0.y == p1.y {
        return;
    }

    // Build the list of resources that this primitive has dependencies on.
    let mut prim_info = PrimitiveDependencyInfo::new(
        prim_instance.uid(),
        pic_clip_rect.to_box2d(),
    );

    // Default to the last (bottom-most) sub-slice.
    let mut sub_slice_index = self.sub_slices.len() - 1;

    // Only need to evaluate sub-slice regions if we have compositor surfaces present
    if sub_slice_index > 0 {
        // Find the first sub-slice we can add this primitive to (we want to add
        // prims to the primary surface if possible, so they get subpixel AA).
        for (i, sub_slice) in self.sub_slices.iter_mut().enumerate() {
            let mut intersects_prohibited_region = false;

            for surface in &mut sub_slice.compositor_surfaces {
                if pic_clip_rect.intersects(&surface.prohibited_rect) {
                    // Grow the prohibited region so later prims that overlap this
                    // one are also pushed to a lower sub-slice.
                    surface.prohibited_rect = surface.prohibited_rect.union(&pic_clip_rect);
                    intersects_prohibited_region = true;
                }
            }

            if !intersects_prohibited_region {
                sub_slice_index = i;
                break;
            }
        }
    }

    // Include the prim spatial node, if differs relative to cache root.
    if prim_spatial_node_index != self.spatial_node_index {
        prim_info.spatial_nodes.push(prim_spatial_node_index);
    }

    // If there was a clip chain, add any clip dependencies to the list for this tile.
    let clip_instances = &clip_store
        .clip_node_instances[prim_clip_chain.clips_range.to_range()];
    for clip_instance in clip_instances {
        prim_info.clips.push(clip_instance.handle.uid());

        // If the clip has the same spatial node, the relative transform
        // will always be the same, so there's no need to depend on it.
        if clip_instance.spatial_node_index != self.spatial_node_index
            && !prim_info.spatial_nodes.contains(&clip_instance.spatial_node_index) {
            prim_info.spatial_nodes.push(clip_instance.spatial_node_index);
        }
    }

    // Certain primitives may select themselves to be a backdrop candidate, which is
    // then applied below.
    let mut backdrop_candidate = None;

    // For pictures, we don't (yet) know the valid clip rect, so we can't correctly
    // use it to calculate the local bounding rect for the tiles.
    // If we include them
    // then we may calculate a bounding rect that is too large, since it won't include
    // the clip bounds of the picture. Excluding them from the bounding rect here
    // fixes any correctness issues (the clips themselves are considered when we
    // consider the bounds of the primitives that are *children* of the picture),
    // however it does potentially result in some un-necessary invalidations of a
    // tile (in cases where the picture local rect affects the tile, but the clip
    // rect eventually means it doesn't affect that tile).
    // TODO(gw): Get picture clips earlier (during the initial picture traversal
    //           pass) so that we can calculate these correctly.
    match prim_instance.kind {
        PrimitiveInstanceKind::Picture { pic_index,.. } => {
            // Pictures can depend on animated opacity bindings.
            let pic = &pictures[pic_index.0];
            if let Some(PictureCompositeMode::Filter(Filter::Opacity(binding, _))) = pic.requested_composite_mode {
                prim_info.opacity_bindings.push(binding.into());
            }
        }
        PrimitiveInstanceKind::Rectangle { data_handle, color_binding_index, .. } => {
            // Rectangles can only form a backdrop candidate if they are known opaque.
            // TODO(gw): We could resolve the opacity binding here, but the common
            //           case for background rects is that they don't have animated opacity.
            let color = match data_stores.prim[data_handle].kind {
                PrimitiveTemplateKind::Rectangle { color, .. } => {
                    frame_context.scene_properties.resolve_color(&color)
                }
                _ => unreachable!(),
            };
            if color.a >= 1.0 {
                backdrop_candidate = Some(BackdropInfo {
                    opaque_rect: pic_clip_rect,
                    kind: Some(BackdropKind::Color { color }),
                });
            }

            // Depend on the color binding so animated colors invalidate the tile.
            if color_binding_index != ColorBindingIndex::INVALID {
                prim_info.color_binding = Some(color_bindings[color_binding_index].into());
            }
        }
        PrimitiveInstanceKind::Image { data_handle, ref mut is_compositor_surface, ..
        } => {
            let image_key = &data_stores.image[data_handle];
            let image_data = &image_key.kind;

            // Decide whether this image can be drawn as a native compositor surface.
            let mut promote_to_surface = false;
            let mut promote_with_flip_y = false;
            match self.can_promote_to_surface(image_key.common.flags,
                                     prim_clip_chain,
                                     prim_spatial_node_index,
                                     is_root_tile_cache,
                                     sub_slice_index,
                                     frame_context) {
                SurfacePromotionResult::Failed => {
                }
                SurfacePromotionResult::Success{flip_y} => {
                    promote_to_surface = true;
                    promote_with_flip_y = flip_y;
                }
            }

            // Native OS compositors (DC and CA, at least) support premultiplied alpha
            // only. If we have an image that's not pre-multiplied alpha, we can't promote it.
            if image_data.alpha_type == AlphaType::Alpha {
                promote_to_surface = false;
            }

            if let Some(image_properties) = resource_cache.get_image_properties(image_data.key) {
                // For an image to be a possible opaque backdrop, it must:
                // - Have a valid, opaque image descriptor
                // - Not use tiling (since they can fail to draw)
                // - Not have any spacing / padding
                // - Have opaque alpha in the instance (flattened) color
                if image_properties.descriptor.is_opaque() &&
                   image_properties.tiling.is_none() &&
                   image_data.tile_spacing == LayoutSize::zero() &&
                   image_data.color.a >= 1.0 {
                    backdrop_candidate = Some(BackdropInfo {
                        opaque_rect: pic_clip_rect,
                        kind: None,
                    });
                }
            }

            if promote_to_surface {
                // Promotion can still fail here (e.g. surface limits), so the
                // result is fed back into promote_to_surface.
                promote_to_surface = self.setup_compositor_surfaces_rgb(
                    sub_slice_index,
                    &mut prim_info,
                    image_key.common.flags,
                    local_prim_rect,
                    prim_spatial_node_index,
                    pic_clip_rect,
                    frame_context,
                    ImageDependency {
                        key: image_data.key,
                        generation: resource_cache.get_image_generation(image_data.key),
                    },
                    image_data.key,
                    resource_cache,
                    composite_state,
                    gpu_cache,
                    image_data.image_rendering,
                    promote_with_flip_y,
                );
            }

            *is_compositor_surface = promote_to_surface;

            if promote_to_surface {
                // Promoted surfaces are composited natively; cull from tile content
                // and do NOT add image deps, so video frames don't invalidate tiles.
                prim_instance.vis.state = VisibilityState::Culled;
                return;
            } else {
                prim_info.images.push(ImageDependency {
                    key: image_data.key,
                    generation: resource_cache.get_image_generation(image_data.key),
                });
            }
        }
        PrimitiveInstanceKind::YuvImage { data_handle, ref mut is_compositor_surface, .. } => {
            let prim_data = &data_stores.yuv_image[data_handle];
            // Note: unlike the RGB path, a flip_y requirement disqualifies YUV promotion.
            let mut promote_to_surface = match self.can_promote_to_surface(
                                        prim_data.common.flags,
                                        prim_clip_chain,
                                        prim_spatial_node_index,
                                        is_root_tile_cache,
                                        sub_slice_index,
                                        frame_context) {
                SurfacePromotionResult::Failed => false,
                SurfacePromotionResult::Success{flip_y} => !flip_y,
            };

            // TODO(gw): When we support RGBA images for external surfaces, we also
            //           need to check if opaque (YUV images are implicitly opaque).

            // If this primitive is being promoted to a surface, construct an external
            // surface descriptor for use later during batching and compositing. We only
            // add the image keys for this primitive as a dependency if this is _not_
            // a promoted surface, since we don't want the tiles to invalidate when the
            // video content changes, if it's a compositor surface!
            if promote_to_surface {
                // Build dependency for each YUV plane, with current image generation for
                // later detection of when the composited surface has changed.
                let mut image_dependencies = [ImageDependency::INVALID; 3];
                for (key, dep) in prim_data.kind.yuv_key.iter().cloned().zip(image_dependencies.iter_mut()) {
                    *dep = ImageDependency {
                        key,
                        generation: resource_cache.get_image_generation(key),
                    }
                }

                promote_to_surface = self.setup_compositor_surfaces_yuv(
                    sub_slice_index,
                    &mut prim_info,
                    prim_data.common.flags,
                    local_prim_rect,
                    prim_spatial_node_index,
                    pic_clip_rect,
                    frame_context,
                    &image_dependencies,
                    &prim_data.kind.yuv_key,
                    resource_cache,
                    composite_state,
                    gpu_cache,
                    prim_data.kind.image_rendering,
                    prim_data.kind.color_depth,
                    prim_data.kind.color_space,
                    prim_data.kind.format,
                );
            }

            // Store on the YUV primitive instance whether this is a promoted surface.
            // This is used by the batching code to determine whether to draw the
            // image to the content tiles, or just a transparent z-write.
            *is_compositor_surface = promote_to_surface;

            if promote_to_surface {
                // Composited natively: cull from tile content, no image deps added.
                prim_instance.vis.state = VisibilityState::Culled;
                return;
            } else {
                prim_info.images.extend(
                    prim_data.kind.yuv_key.iter().map(|key| {
                        ImageDependency {
                            key: *key,
                            generation: resource_cache.get_image_generation(*key),
                        }
                    })
                );
            }
        }
        PrimitiveInstanceKind::ImageBorder { data_handle, .. } => {
            let border_data = &data_stores.image_border[data_handle].kind;
            prim_info.images.push(ImageDependency {
                key: border_data.request.key,
                generation: resource_cache.get_image_generation(border_data.request.key),
            });
        }
        PrimitiveInstanceKind::Clear { .. } => {
            // Clear prims always register themselves as a backdrop candidate.
            backdrop_candidate = Some(BackdropInfo {
                opaque_rect: pic_clip_rect,
                kind: Some(BackdropKind::Clear),
            });
        }
        PrimitiveInstanceKind::LinearGradient { data_handle, .. }
        | PrimitiveInstanceKind::CachedLinearGradient { data_handle, .. } => {
            // Gradients with fully-opaque stops and no tile spacing are opaque.
            let gradient_data = &data_stores.linear_grad[data_handle];
            if gradient_data.stops_opacity.is_opaque
                && gradient_data.tile_spacing == LayoutSize::zero()
            {
                backdrop_candidate = Some(BackdropInfo {
                    opaque_rect: pic_clip_rect,
                    kind: None,
                });
            }
        }
        PrimitiveInstanceKind::ConicGradient { data_handle, .. } => {
            let gradient_data = &data_stores.conic_grad[data_handle];
            if gradient_data.stops_opacity.is_opaque
                && gradient_data.tile_spacing == LayoutSize::zero()
            {
                backdrop_candidate = Some(BackdropInfo {
                    opaque_rect: pic_clip_rect,
                    kind: None,
                });
            }
        }
        PrimitiveInstanceKind::RadialGradient { data_handle, .. } => {
            let gradient_data = &data_stores.radial_grad[data_handle];
            if gradient_data.stops_opacity.is_opaque
                && gradient_data.tile_spacing == LayoutSize::zero()
            {
                backdrop_candidate = Some(BackdropInfo {
                    opaque_rect: pic_clip_rect,
                    kind: None,
                });
            }
        }
        PrimitiveInstanceKind::LineDecoration { .. }
        | PrimitiveInstanceKind::NormalBorder { .. }
        | PrimitiveInstanceKind::TextRun { .. }
        | PrimitiveInstanceKind::Backdrop { ..
        } => {
            // These don't contribute dependencies
        }
    };

    // If this primitive considers itself a backdrop candidate, apply further
    // checks to see if it matches all conditions to be a backdrop.
    let mut vis_flags = PrimitiveVisibilityFlags::empty();

    let sub_slice = &mut self.sub_slices[sub_slice_index];

    if let Some(backdrop_candidate) = backdrop_candidate {
        let is_suitable_backdrop = match backdrop_candidate.kind {
            Some(BackdropKind::Clear) => {
                // Clear prims are special - they always end up in their own slice,
                // and always set the backdrop. In future, we hope to completely
                // remove clear prims, since they don't integrate with the compositing
                // system cleanly.
                true
            }
            Some(BackdropKind::Color { .. }) | None => {
                // Check a number of conditions to see if we can consider this
                // primitive as an opaque backdrop rect. Several of these are conservative
                // checks and could be relaxed in future. However, these checks
                // are quick and capture the common cases of background rects and images.
                // Specifically, we currently require:
                //  - The primitive is on the main picture cache surface.
                //  - Same coord system as picture cache (ensures rects are axis-aligned).
                //  - No clip masks exist.
                let same_coord_system = {
                    let prim_spatial_node = &frame_context.spatial_tree
                        .spatial_nodes[prim_spatial_node_index.0 as usize];
                    let surface_spatial_node = &frame_context.spatial_tree
                        .spatial_nodes[self.spatial_node_index.0 as usize];

                    prim_spatial_node.coordinate_system_id ==
                        surface_spatial_node.coordinate_system_id
                };

                same_coord_system && on_picture_surface
            }
        };

        // Only the primary sub-slice (with no compositor surfaces and no clip
        // masks) can accept an opaque backdrop.
        if sub_slice_index == 0
            && is_suitable_backdrop
            && sub_slice.compositor_surfaces.is_empty()
            && !prim_clip_chain.needs_mask
        {
            if backdrop_candidate.opaque_rect.contains_rect(&self.backdrop.opaque_rect) {
                self.backdrop.opaque_rect = backdrop_candidate.opaque_rect;
            }

            if let Some(kind) = backdrop_candidate.kind {
                if backdrop_candidate.opaque_rect.contains_rect(&self.local_rect) {
                    // If we have a color backdrop, mark the visibility flags
                    // of the primitive so it is skipped during batching (and
                    // also clears any previous primitives).
                    if let BackdropKind::Color { .. } = kind {
                        vis_flags |= PrimitiveVisibilityFlags::IS_BACKDROP;
                    }

                    self.backdrop.kind = Some(kind);
                }
            }
        }
    }

    // Record any new spatial nodes in the used list.
    for spatial_node_index in &prim_info.spatial_nodes {
        self.spatial_node_comparer.register_used_transform(
            *spatial_node_index,
            self.frame_id,
            frame_context.spatial_tree,
        );
    }

    // Truncate the lengths of dependency arrays to the max size we can handle.
    // Any arrays this size or longer will invalidate every frame.
    prim_info.clips.truncate(MAX_PRIM_SUB_DEPS);
    prim_info.opacity_bindings.truncate(MAX_PRIM_SUB_DEPS);
    prim_info.spatial_nodes.truncate(MAX_PRIM_SUB_DEPS);
    prim_info.images.truncate(MAX_PRIM_SUB_DEPS);

    // Normalize the tile coordinates before adding to tile dependencies.
    // For each affected tile, mark any of the primitive dependencies.
    for y in p0.y .. p1.y {
        for x in p0.x .. p1.x {
            // TODO(gw): Convert to 2d array temporarily to avoid hash lookups per-tile?
            let key = TileOffset::new(x, y);
            let tile = sub_slice.tiles.get_mut(&key).expect("bug: no tile");

            tile.add_prim_dependency(&prim_info);
        }
    }

    // Record coarse visibility; fine-grained visibility is resolved later.
    prim_instance.vis.state = VisibilityState::Coarse {
        filter: BatchFilter {
            rect_in_pic_space: pic_clip_rect,
            sub_slice_index: SubSliceIndex::new(sub_slice_index),
        },
        vis_flags,
    };
}

/// Print debug information about this picture cache to a tree printer.
fn print(&self) {
    // TODO(gw): This initial implementation is very basic - just printing
    //           the picture cache state to stdout. In future, we can
    //           make this dump each frame to a file, and produce a report
    //           stating which frames had invalidations. This will allow
    //           diff'ing the invalidation states in a visual tool.
    let mut pt = PrintTree::new("Picture Cache");

    pt.new_level(format!("Slice {:?}", self.slice));

    pt.add_item(format!("fract_offset: {:?}", self.fract_offset));
    pt.add_item(format!("background_color: {:?}", self.background_color));

    for (sub_slice_index, sub_slice) in self.sub_slices.iter().enumerate() {
        pt.new_level(format!("SubSlice {:?}", sub_slice_index));

        for y in self.tile_bounds_p0.y .. self.tile_bounds_p1.y {
            for x in self.tile_bounds_p0.x .. self.tile_bounds_p1.x {
                let key = TileOffset::new(x, y);
                let tile = &sub_slice.tiles[&key];
                tile.print(&mut pt);
            }
        }

        pt.end_level();
    }

    pt.end_level();
}

/// Decide whether subpixel AA can be used for text in this tile cache,
/// based on whether an opaque backdrop is known to sit behind the text.
fn calculate_subpixel_mode(&self) -> SubpixelMode {
    let has_opaque_bg_color = self.background_color.map_or(false, |c| c.a >= 1.0);

    // If the overall tile cache is known opaque, subpixel AA is allowed everywhere
    if has_opaque_bg_color {
        return SubpixelMode::Allow;
    }

    // If we didn't find any valid opaque backdrop, no subpixel AA allowed
    if self.backdrop.opaque_rect.is_empty() {
        return SubpixelMode::Deny;
    }

    // If the opaque backdrop rect covers the entire tile cache surface,
    // we can allow subpixel AA anywhere, skipping the per-text-run tests
    // later on during primitive preparation.
    if self.backdrop.opaque_rect.contains_rect(&self.local_rect) {
        return SubpixelMode::Allow;
    }

    // If none of the simple cases above match, we need test where we can support subpixel AA.
    // TODO(gw): In future, it may make sense to have > 1 inclusion rect,
    //           but this handles the common cases.
    // TODO(gw): If a text run gets animated such that it's moving in a way that is
    //           sometimes intersecting with the video rect, this can result in subpixel
    //           AA flicking on/off for that text run. It's probably very rare, but
    //           something we should handle in future.
    SubpixelMode::Conditional {
        allowed_rect: self.backdrop.opaque_rect,
    }
}

/// Apply any updates after prim dependency updates. This applies
/// any late tile invalidations, and sets up the dirty rect and
/// set of tile blits.
pub fn post_update(
    &mut self,
    frame_context: &FrameVisibilityContext,
    frame_state: &mut FrameVisibilityState,
) {
    self.dirty_region.reset(self.spatial_node_index);
    self.subpixel_mode = self.calculate_subpixel_mode();

    let map_pic_to_world = SpaceMapper::new_with_target(
        ROOT_SPATIAL_NODE_INDEX,
        self.spatial_node_index,
        frame_context.global_screen_world_rect,
        frame_context.spatial_tree,
    );

    // A simple GC of the native external surface cache, to remove and free any
    // surfaces that were not referenced during the update_prim_dependencies pass.
    self.external_native_surface_cache.retain(|_, surface| {
        if !surface.used_this_frame {
            // If we removed an external surface, we need to mark the dirty rects as
            // invalid so a full composite occurs on the next frame.
            frame_state.composite_state.dirty_rects_are_valid = false;

            frame_state.resource_cache.destroy_compositor_surface(surface.native_surface_id);
        }

        surface.used_this_frame
    });

    // Detect if the picture cache was scrolled or scaled. In this case,
    // the device space dirty rects aren't applicable (until we properly
    // integrate with OS compositors that can handle scrolling slices).
    let root_transform = frame_context
        .spatial_tree
        .get_relative_transform(
            self.spatial_node_index,
            ROOT_SPATIAL_NODE_INDEX,
        );
    // Picture cache slices only support simple scale/offset transforms
    // relative to the root; anything else is a bug upstream.
    let root_transform = match root_transform {
        CoordinateSpaceMapping::Local => ScaleOffset::identity(),
        CoordinateSpaceMapping::ScaleOffset(scale_offset) => scale_offset,
        CoordinateSpaceMapping::Transform(..) => panic!("bug: picture caches don't support complex transforms"),
    };

    // Epsilon-compare against last frame's transform to detect scroll / zoom.
    const EPSILON: f32 = 0.001;
    let root_translation_changed =
        !root_transform.offset.x.approx_eq_eps(&self.root_transform.offset.x, &EPSILON) ||
        !root_transform.offset.y.approx_eq_eps(&self.root_transform.offset.y, &EPSILON);
    let root_scale_changed =
        !root_transform.scale.x.approx_eq_eps(&self.root_transform.scale.x, &EPSILON) ||
        !root_transform.scale.y.approx_eq_eps(&self.root_transform.scale.y, &EPSILON);

    if root_translation_changed || root_scale_changed || frame_context.config.force_invalidation {
        self.root_transform = root_transform;
        frame_state.composite_state.dirty_rects_are_valid = false;
    }

    let pic_to_world_mapper = SpaceMapper::new_with_target(
        ROOT_SPATIAL_NODE_INDEX,
        self.spatial_node_index,
        frame_context.global_screen_world_rect,
        frame_context.spatial_tree,
    );

    // Shared, per-slice context for the per-tile post_update calls below.
    let mut ctx = TilePostUpdateContext {
        pic_to_world_mapper,
        global_device_pixel_scale: frame_context.global_device_pixel_scale,
        local_clip_rect: self.local_clip_rect,
        backdrop: None,
        opacity_bindings: &self.opacity_bindings,
        color_bindings: &self.color_bindings,
        current_tile_size: self.current_tile_size,
        local_rect: self.local_rect,
        z_id: ZBufferId::invalid(),
        // A scale change invalidates all tile content, not just dirty rects.
        invalidate_all: root_scale_changed || frame_context.config.force_invalidation,
    };

    let mut state = TilePostUpdateState {
        resource_cache: frame_state.resource_cache,
        composite_state: frame_state.composite_state,
        compare_cache: &mut self.compare_cache,
        spatial_node_comparer: &mut self.spatial_node_comparer,
    };

    // Step through each tile and invalidate if the dependencies have changed. Determine
    // the current opacity setting and whether it's changed.
    // Iterate sub-slices back-to-front so z-ids are allocated in draw order.
    for (i, sub_slice) in self.sub_slices.iter_mut().enumerate().rev() {
        // The backdrop is only relevant for the first sub-slice
        if i == 0 {
            ctx.backdrop = Some(self.backdrop);
        }

        for compositor_surface in sub_slice.compositor_surfaces.iter_mut().rev() {
            compositor_surface.descriptor.z_id = state.composite_state.z_generator.next();
        }

        ctx.z_id = state.composite_state.z_generator.next();

        for tile in sub_slice.tiles.values_mut() {
            tile.post_update(&ctx, &mut state, frame_context);
        }
    }

    // Register any opaque external compositor surfaces as potential occluders. This
    // is especially useful when viewing video in full-screen mode, as it is
    // able to occlude every background tile (avoiding allocation, rasterization
    // and compositing).
    for sub_slice in &self.sub_slices {
        for compositor_surface in &sub_slice.compositor_surfaces {
            if compositor_surface.is_opaque {
                let local_surface_rect = compositor_surface
                    .descriptor
                    .local_rect
                    .intersection(&compositor_surface.descriptor.local_clip_rect)
                    .and_then(|r| {
                        r.intersection(&self.local_clip_rect)
                    });

                if let Some(local_surface_rect) = local_surface_rect {
                    let world_surface_rect = map_pic_to_world
                        .map(&local_surface_rect)
                        .expect("bug: unable to map external surface to world space");

                    frame_state.composite_state.register_occluder(
                        compositor_surface.descriptor.z_id,
                        world_surface_rect,
                    );
                }
            }
        }
    }

    // Register the opaque region of this tile cache as an occluder, which
    // is used later in the frame to occlude other tiles.
    if !self.backdrop.opaque_rect.is_empty() {
        let z_id_backdrop = frame_state.composite_state.z_generator.next();

        let backdrop_rect = self.backdrop.opaque_rect
            .intersection(&self.local_rect)
            .and_then(|r| {
                r.intersection(&self.local_clip_rect)
            });

        if let Some(backdrop_rect) = backdrop_rect {
            let world_backdrop_rect = map_pic_to_world
                .map(&backdrop_rect)
                .expect("bug: unable to map backdrop to world space");

            // Since we register the entire backdrop rect, use the opaque z-id for the
            // picture cache slice.
            frame_state.composite_state.register_occluder(
                z_id_backdrop,
                world_backdrop_rect,
            );
        }
    }
}
}

/// Scratch buffers reused across frames by the picture traversal, to avoid
/// re-allocating per-frame working storage.
pub struct PictureScratchBuffer {
    surface_stack: Vec<SurfaceIndex>,
    clip_chain_ids: Vec<ClipChainId>,
}

impl Default for PictureScratchBuffer {
    fn default() -> Self {
        PictureScratchBuffer {
            surface_stack: Vec::new(),
            clip_chain_ids: Vec::new(),
        }
    }
}

impl PictureScratchBuffer {
    /// Clear the scratch buffers at the start of a frame, retaining capacity.
    pub fn begin_frame(&mut self) {
        self.surface_stack.clear();
        self.clip_chain_ids.clear();
    }

    /// Hand buffers back to the recycler for capacity management.
    // NOTE(review): only surface_stack is recycled here; clip_chain_ids is
    // not — confirm whether that is intentional.
    pub fn recycle(&mut self, recycler: &mut Recycler) {
        recycler.recycle_vec(&mut self.surface_stack);
    }
}

/// Maintains a stack of picture and surface information, that
/// is used during the initial picture traversal.
pub struct PictureUpdateState<'a> {
    surfaces: &'a mut Vec<SurfaceInfo>,
    surface_stack: Vec<SurfaceIndex>,
}

impl<'a> PictureUpdateState<'a> {
    /// Run the picture pre/post update traversal over the whole picture tree,
    /// starting at `pic_index` (the root), populating `surfaces`.
    pub fn update_all(
        buffers: &mut PictureScratchBuffer,
        surfaces: &'a mut Vec<SurfaceInfo>,
        pic_index: PictureIndex,
        picture_primitives: &mut [PicturePrimitive],
        frame_context: &FrameBuildingContext,
        gpu_cache: &mut GpuCache,
        clip_store: &ClipStore,
        data_stores: &mut DataStores,
    ) {
        profile_scope!("UpdatePictures");
        profile_marker!("UpdatePictures");

        let mut state = PictureUpdateState {
            surfaces,
            surface_stack: buffers.surface_stack.take().cleared(),
        };

        // The traversal starts with the root surface on the stack.
        state.surface_stack.push(SurfaceIndex(0));

        state.update(
            pic_index,
            picture_primitives,
            frame_context,
            gpu_cache,
            clip_store,
            data_stores,
        );

        // Return the (now empty) stack storage to the scratch buffer for reuse.
        buffers.surface_stack = state.surface_stack.take();
    }

    /// Return the current surface
    fn current_surface(&self) -> &SurfaceInfo {
        &self.surfaces[self.surface_stack.last().unwrap().0]
    }

    /// Return the current surface (mutable)
    fn current_surface_mut(&mut self) -> &mut SurfaceInfo {
        &mut self.surfaces[self.surface_stack.last().unwrap().0]
    }

    /// Push a new surface onto the update stack.
    fn push_surface(
        &mut self,
        surface: SurfaceInfo,
    ) -> SurfaceIndex {
        let surface_index = SurfaceIndex(self.surfaces.len());
        self.surfaces.push(surface);
        self.surface_stack.push(surface_index);
        surface_index
    }

    /// Pop a surface on the way up the picture traversal
    fn pop_surface(&mut self) -> SurfaceIndex {
        self.surface_stack.pop().unwrap()
    }

    /// Update a picture, determining surface configuration,
    /// rasterization roots, and (in future) whether there
    /// are cached surfaces that can be used by this picture.
    fn update(
        &mut self,
        pic_index: PictureIndex,
        picture_primitives: &mut [PicturePrimitive],
        frame_context: &FrameBuildingContext,
        gpu_cache: &mut GpuCache,
        clip_store: &ClipStore,
        data_stores: &mut DataStores,
    ) {
        // pre_update returns the prim list only when traversal should recurse.
        if let Some(prim_list) = picture_primitives[pic_index.0].pre_update(
            self,
            frame_context,
        ) {
            // Depth-first recursion into child pictures.
            for child_pic_index in &prim_list.child_pictures {
                self.update(
                    *child_pic_index,
                    picture_primitives,
                    frame_context,
                    gpu_cache,
                    clip_store,
                    data_stores,
                );
            }

            picture_primitives[pic_index.0].post_update(
                prim_list,
                self,
                frame_context,
                data_stores,
            );
        }
    }
}

#[derive(Debug, Copy, Clone, PartialEq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct SurfaceIndex(pub usize);

pub const ROOT_SURFACE_INDEX: SurfaceIndex = SurfaceIndex(0);

/// Describes the render task configuration for a picture surface.
#[derive(Debug)]
pub enum SurfaceRenderTasks {
    /// The common type of surface is a single render task
    Simple(RenderTaskId),
    /// Some surfaces draw their content, and then have further tasks applied
    /// to that input (such as blur passes for shadows). These tasks have a root
    /// (the output of the surface), and a port (for attaching child task dependencies
    /// to the content).
    Chained { root_task_id: RenderTaskId, port_task_id: RenderTaskId },
    /// Picture caches are a single surface consisting of multiple render
    /// tasks, one per tile with dirty content.
    Tiled(Vec<RenderTaskId>),
}

/// Information about an offscreen surface.
/// For now,
/// it contains information about the size and coordinate
/// system of the surface. In the future, it will contain
/// information about the contents of the surface, which
/// will allow surfaces to be cached / retained between
/// frames and display lists.
#[derive(Debug)]
pub struct SurfaceInfo {
    /// A local rect defining the size of this surface, in the
    /// coordinate system of the surface itself.
    pub rect: PictureRect,
    /// Part of the surface that we know to be opaque.
    pub opaque_rect: PictureRect,
    /// Helper structs for mapping local rects in different
    /// coordinate systems into the surface coordinates.
    pub map_local_to_surface: SpaceMapper<LayoutPixel, PicturePixel>,
    /// Defines the positioning node for the surface itself,
    /// and the rasterization root for this surface.
    pub raster_spatial_node_index: SpatialNodeIndex,
    pub surface_spatial_node_index: SpatialNodeIndex,
    /// This is set when the render task is created.
    pub render_tasks: Option<SurfaceRenderTasks>,
    /// How much the local surface rect should be inflated (for blur radii).
    pub inflation_factor: f32,
    /// The device pixel ratio specific to this surface.
    pub device_pixel_scale: DevicePixelScale,
    /// The scale factors of the surface to raster transform.
    pub scale_factors: (f32, f32),
    /// The allocated device rect for this surface
    pub device_rect: Option<DeviceRect>,
}

impl SurfaceInfo {
    /// Construct a surface with empty rects; `rect`, `render_tasks` and
    /// `device_rect` are filled in later during frame building.
    pub fn new(
        surface_spatial_node_index: SpatialNodeIndex,
        raster_spatial_node_index: SpatialNodeIndex,
        inflation_factor: f32,
        world_rect: WorldRect,
        spatial_tree: &SpatialTree,
        device_pixel_scale: DevicePixelScale,
        scale_factors: (f32, f32),
    ) -> Self {
        let map_surface_to_world = SpaceMapper::new_with_target(
            ROOT_SPATIAL_NODE_INDEX,
            surface_spatial_node_index,
            world_rect,
            spatial_tree,
        );

        // Unmap the world bounds back into picture space to get a conservative
        // culling rect for this surface; fall back to max_rect if unmappable.
        let pic_bounds = map_surface_to_world
            .unmap(&map_surface_to_world.bounds)
            .unwrap_or_else(PictureRect::max_rect);

        let map_local_to_surface = SpaceMapper::new(
            surface_spatial_node_index,
            pic_bounds,
        );

        SurfaceInfo {
            rect: PictureRect::zero(),
            opaque_rect: PictureRect::zero(),
            map_local_to_surface,
            render_tasks: None,
            raster_spatial_node_index,
            surface_spatial_node_index,
            inflation_factor,
            device_pixel_scale,
            scale_factors,
            device_rect: None,
        }
    }

    /// Return the allocated device rect. Panics if queried before the surface
    /// has been initialized during frame building.
    pub fn get_device_rect(&self) -> DeviceRect {
        self.device_rect.expect("bug: queried before surface was initialized")
    }
}

#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct RasterConfig {
    /// How this picture should be composited into
    /// the parent surface.
    pub composite_mode: PictureCompositeMode,
    /// Index to the surface descriptor for this
    /// picture.
    pub surface_index: SurfaceIndex,
    /// Whether this picture establishes a rasterization root.
    pub establishes_raster_root: bool,
    /// Scaling factor applied to fit within MAX_SURFACE_SIZE when
    /// establishing a raster root.
    /// Most code doesn't need to know about it, since it is folded
    /// into device_pixel_scale when the rendertask is set up.
    /// However e.g. text rasterization uses it to ensure consistent
    /// on-screen font size.
    pub root_scaling_factor: f32,
    /// The world rect of this picture clipped to the current culling
    /// rect.
    /// This is used for determining the size of the render
    /// target rect for this surface, and calculating raster scale
    /// factors.
    pub clipped_bounding_rect: WorldRect,
}

bitflags! {
    /// A set of flags describing why a picture may need a backing surface.
    #[cfg_attr(feature = "capture", derive(Serialize))]
    pub struct BlitReason: u32 {
        /// Mix-blend-mode on a child that requires isolation.
        const ISOLATE = 1;
        /// Clip node that _might_ require a surface.
        const CLIP = 2;
        /// Preserve-3D requires a surface for plane-splitting.
        const PRESERVE3D = 4;
        /// A backdrop that is reused which requires a surface.
        const BACKDROP = 8;
    }
}

/// Specifies how this Picture should be composited
/// onto the target it belongs to.
#[allow(dead_code)]
#[derive(Debug, Clone)]
#[cfg_attr(feature = "capture", derive(Serialize))]
pub enum PictureCompositeMode {
    /// Apply CSS mix-blend-mode effect.
    MixBlend(MixBlendMode),
    /// Apply a CSS filter (except component transfer).
    Filter(Filter),
    /// Apply a component transfer filter.
    ComponentTransferFilter(FilterDataHandle),
    /// Draw to intermediate surface, copy straight across. This
    /// is used for CSS isolation, and plane splitting.
    Blit(BlitReason),
    /// Used to cache a picture as a series of tiles.
    TileCache {
        slice_id: SliceId,
    },
    /// Apply an SVG filter
    SvgFilter(Vec<FilterPrimitive>, Vec<SFilterData>),
}

impl PictureCompositeMode {
    /// Return `picture_rect` inflated by however much this composite mode can
    /// draw outside its input (e.g. blur kernels and drop-shadow offsets).
    /// Modes that don't paint outside their input return the rect unchanged.
    pub fn inflate_picture_rect(&self, picture_rect: PictureRect, scale_factors: (f32, f32)) -> PictureRect {
        let mut result_rect = picture_rect;
        match self {
            PictureCompositeMode::Filter(filter) => match filter {
                Filter::Blur(width, height) => {
                    let width_factor = clamp_blur_radius(*width, scale_factors).ceil() * BLUR_SAMPLE_SCALE;
                    let height_factor = clamp_blur_radius(*height, scale_factors).ceil() * BLUR_SAMPLE_SCALE;
                    result_rect = picture_rect.inflate(width_factor, height_factor);
                },
                Filter::DropShadows(shadows) => {
                    // Inflate by the largest blur radius among all shadows.
                    let mut max_inflation: f32 = 0.0;
                    for shadow in shadows {
                        max_inflation = max_inflation.max(shadow.blur_radius);
                    }
                    max_inflation = clamp_blur_radius(max_inflation, scale_factors).ceil() * BLUR_SAMPLE_SCALE;
                    result_rect = picture_rect.inflate(max_inflation, max_inflation);
                },
                _ => {}
            }
            PictureCompositeMode::SvgFilter(primitives, _) => {
                // Propagate rects through the SVG filter primitive graph; each
                // primitive's output rect is derived from its input(s).
                let mut output_rects = Vec::with_capacity(primitives.len());
                for (cur_index, primitive) in primitives.iter().enumerate() {
                    let output_rect = match primitive.kind {
                        FilterPrimitiveKind::Blur(ref primitive) => {
                            let input = primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect);
                            let width_factor = primitive.width.round() * BLUR_SAMPLE_SCALE;
                            let height_factor = primitive.height.round() * BLUR_SAMPLE_SCALE;
                            input.inflate(width_factor, height_factor)
                        }
                        FilterPrimitiveKind::DropShadow(ref primitive) => {
                            let inflation_factor = primitive.shadow.blur_radius.ceil() * BLUR_SAMPLE_SCALE;
                            let input = primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect);
                            let shadow_rect = input.inflate(inflation_factor, inflation_factor);
                            input.union(&shadow_rect.translate(primitive.shadow.offset * Scale::new(1.0)))
                        }
                        FilterPrimitiveKind::Blend(ref primitive) => {
                            primitive.input1.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect)
                                .union(&primitive.input2.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect))
                        }
                        FilterPrimitiveKind::Composite(ref primitive) => {
                            primitive.input1.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect)
                                .union(&primitive.input2.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect))
                        }
                        FilterPrimitiveKind::Identity(ref primitive) =>
                            primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect),
                        FilterPrimitiveKind::Opacity(ref primitive) =>
                            primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect),
                        FilterPrimitiveKind::ColorMatrix(ref primitive) =>
                            primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect),
                        FilterPrimitiveKind::ComponentTransfer(ref primitive) =>
                            primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect),
                        FilterPrimitiveKind::Offset(ref primitive) => {
                            let input_rect = primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect);
                            input_rect.translate(primitive.offset * Scale::new(1.0))
                        },
                        FilterPrimitiveKind::Flood(..) => picture_rect,
                    };
                    output_rects.push(output_rect);
                    result_rect = result_rect.union(&output_rect);
                }
            }
            _ => {},
        }
        result_rect
    }
}

/// Enum value describing the place of a picture in a 3D context.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
pub enum Picture3DContext<C> {
    /// The picture is not a part of 3D context sub-hierarchy.
    Out,
    /// The picture is a part of 3D context.
    In {
        /// Additional data per child for the case where this is a root of 3D hierarchy.
        root_data: Option<Vec<C>>,
        /// The spatial node index of an "ancestor" element, i.e. one
        /// that establishes the transformed element's containing block.
        ///
        /// See CSS spec draft for more details:
        /// https://drafts.csswg.org/css-transforms-2/#accumulated-3d-transformation-matrix-computation
        ancestor_index: SpatialNodeIndex,
    },
}

/// Information about a preserve-3D hierarchy child that has been plane-split
/// and ordered according to the view direction.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct OrderedPictureChild {
    /// Identifies which plane-split this child came from.
    pub anchor: PlaneSplitAnchor,
    /// Positioning node of the child picture.
    pub spatial_node_index: SpatialNodeIndex,
    /// GPU cache location for this child's per-split data.
    pub gpu_address: GpuCacheAddress,
}

bitflags! {
    /// A set of flags describing why a picture may need a backing surface.
    #[cfg_attr(feature = "capture", derive(Serialize))]
    pub struct ClusterFlags: u32 {
        /// Whether this cluster is visible when the position node is a backface.
        const IS_BACKFACE_VISIBLE = 1;
        /// This flag is set during the first pass picture traversal, depending on whether
        /// the cluster is visible or not. It's read during the second pass when primitives
        /// consult their owning clusters to see if the primitive itself is visible.
        const IS_VISIBLE = 2;
        /// Is a backdrop-filter cluster that requires special handling during post_update.
        const IS_BACKDROP_FILTER = 4;
    }
}

/// Descriptor for a cluster of primitives. For now, this is quite basic but will be
/// extended to handle more spatial clustering of primitives.
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct PrimitiveCluster {
    /// The positioning node for this cluster.
    pub spatial_node_index: SpatialNodeIndex,
    /// The bounding rect of the cluster, in the local space of the spatial node.
    /// This is used to quickly determine the overall bounding rect for a picture
    /// during the first picture traversal, which is needed for local scale
    /// determination, and render task size calculations.
    bounding_rect: LayoutRect,
    /// a part of the cluster that we know to be opaque if any. Does not always
    /// describe the entire opaque region, but all content within that rect must
    /// be opaque.
    pub opaque_rect: LayoutRect,
    /// The range of primitive instance indices associated with this cluster.
    pub prim_range: Range<usize>,
    /// Various flags / state for this cluster.
    pub flags: ClusterFlags,
}

impl PrimitiveCluster {
    /// Construct a new primitive cluster for a given positioning node.
    /// The cluster starts with an empty bounding rect and an empty
    /// (start == end) prim range anchored at `first_instance_index`.
    fn new(
        spatial_node_index: SpatialNodeIndex,
        flags: ClusterFlags,
        first_instance_index: usize,
    ) -> Self {
        PrimitiveCluster {
            bounding_rect: LayoutRect::zero(),
            opaque_rect: LayoutRect::zero(),
            spatial_node_index,
            flags,
            // Empty range: add_instance grows `end` as prims are appended.
            prim_range: first_instance_index..first_instance_index
        }
    }

    /// Return true if this cluster is compatible with the given params
    /// (same flags and same positioning node), i.e. a new primitive with
    /// these params can be appended to this cluster.
    pub fn is_compatible(
        &self,
        spatial_node_index: SpatialNodeIndex,
        flags: ClusterFlags,
    ) -> bool {
        self.flags == flags && self.spatial_node_index == spatial_node_index
    }

    /// The range of indices into the owning list's `prim_instances`
    /// covered by this cluster.
    pub fn prim_range(&self) -> Range<usize> {
        self.prim_range.clone()
    }

    /// Add a primitive instance to this cluster, at the start or end
    /// (callers must append contiguously: `instance_index` has to equal
    /// the current end of the range, see the debug_assert).
    fn add_instance(
        &mut self,
        culling_rect: &LayoutRect,
        instance_index: usize,
    ) {
        debug_assert_eq!(instance_index, self.prim_range.end);
        self.bounding_rect = self.bounding_rect.union(culling_rect);
        self.prim_range.end += 1;
    }
}

/// A list of primitive instances that are added to a picture
/// This ensures we can keep a list of primitives that
/// are pictures, for a fast initial traversal of the picture
/// tree without walking the instance list.
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct PrimitiveList {
    /// List of primitives grouped into clusters.
    pub clusters: Vec<PrimitiveCluster>,
    /// Flat list of the instances; clusters index into this via prim_range.
    pub prim_instances: Vec<PrimitiveInstance>,
    /// Indices of child pictures found while adding prims (see add_prim).
    pub child_pictures: Vec<PictureIndex>,
    /// The number of preferred compositor surfaces that were found when
    /// adding prims to this list.
    pub compositor_surface_count: usize,
}

impl PrimitiveList {
    /// Construct an empty primitive list. This is
    /// just used during the take_context / restore_context
    /// borrow check dance, which will be removed as the
    /// picture traversal pass is completed.
    pub fn empty() -> Self {
        PrimitiveList {
            clusters: Vec::new(),
            prim_instances: Vec::new(),
            child_pictures: Vec::new(),
            compositor_surface_count: 0,
        }
    }

    /// Add a primitive instance to the end of the list.
    /// Appends to the last cluster when compatible (same spatial node and
    /// flags), otherwise starts a new cluster. Also records child pictures
    /// and counts prims that prefer a compositor surface.
    pub fn add_prim(
        &mut self,
        prim_instance: PrimitiveInstance,
        prim_rect: LayoutRect,
        spatial_node_index: SpatialNodeIndex,
        prim_flags: PrimitiveFlags,
    ) {
        let mut flags = ClusterFlags::empty();

        // Pictures are always put into a new cluster, to make it faster to
        // iterate all pictures in a given primitive list.
        match prim_instance.kind {
            PrimitiveInstanceKind::Picture { pic_index, .. } => {
                self.child_pictures.push(pic_index);
            }
            PrimitiveInstanceKind::Backdrop { .. } => {
                flags.insert(ClusterFlags::IS_BACKDROP_FILTER);
            }
            _ => {}
        }

        if prim_flags.contains(PrimitiveFlags::IS_BACKFACE_VISIBLE) {
            flags.insert(ClusterFlags::IS_BACKFACE_VISIBLE);
        }

        if prim_flags.contains(PrimitiveFlags::PREFER_COMPOSITOR_SURFACE) {
            self.compositor_surface_count += 1;
        }

        // The rect used for cluster bounds: the prim rect restricted to its
        // local clip (zero rect if they don't intersect).
        let culling_rect = prim_instance.clip_set.local_clip_rect
            .intersection(&prim_rect)
            .unwrap_or_else(LayoutRect::zero);

        // Primitive lengths aren't evenly distributed among primitive lists:
        // We often have a large amount of single primitive lists, a
        // few below 20~30 primitives, and even fewer lists (maybe a couple)
        // in the multiple hundreds with nothing in between.
        // We can see in profiles that reallocating vectors while pushing
        // primitives is taking a large amount of the total scene build time,
        // so we take advantage of what we know about the length distributions
        // to go for an adapted vector growth pattern that avoids over-allocating
        // for the many small allocations while avoiding a lot of reallocation by
        // quickly converging to the common sizes.
// Rust's default vector growth strategy (when pushing elements one by one) // is to double the capacity every time. let prims_len = self.prim_instances.len(); if prims_len == self.prim_instances.capacity() { let next_alloc = match prims_len { 1 ..= 31 => 32 - prims_len, 32 ..= 256 => 512 - prims_len, _ => prims_len * 2, }; self.prim_instances.reserve(next_alloc); } let instance_index = prims_len; self.prim_instances.push(prim_instance); if let Some(cluster) = self.clusters.last_mut() { if cluster.is_compatible(spatial_node_index, flags) { cluster.add_instance(&culling_rect, instance_index); return; } } // Same idea with clusters, using a different distribution. let clusters_len = self.clusters.len(); if clusters_len == self.clusters.capacity() { let next_alloc = match clusters_len { 1 ..= 15 => 16 - clusters_len, 16 ..= 127 => 128 - clusters_len, _ => clusters_len * 2, }; self.clusters.reserve(next_alloc); } let mut cluster = PrimitiveCluster::new( spatial_node_index, flags, instance_index, ); cluster.add_instance(&culling_rect, instance_index); self.clusters.push(cluster); } /// Returns true if there are no clusters (and thus primitives) pub fn is_empty(&self) -> bool { self.clusters.is_empty() } } /// Defines configuration options for a given picture primitive. #[cfg_attr(feature = "capture", derive(Serialize))] pub struct PictureOptions { /// If true, WR should inflate the bounding rect of primitives when /// using a filter effect that requires inflation. pub inflate_if_required: bool, } impl Default for PictureOptions { fn default() -> Self { PictureOptions { inflate_if_required: true, } } } #[cfg_attr(feature = "capture", derive(Serialize))] pub struct PicturePrimitive { /// List of primitives, and associated info for this picture. pub prim_list: PrimitiveList, #[cfg_attr(feature = "capture", serde(skip))] pub state: Option<PictureState>, /// If true, apply the local clip rect to primitive drawn /// in this picture. 
    pub apply_local_clip_rect: bool,

    /// If false and transform ends up showing the back of the picture,
    /// it will be considered invisible.
    pub is_backface_visible: bool,

    /// The main render task produced for this picture, if any
    /// (set during take_context).
    pub primary_render_task_id: Option<RenderTaskId>,
    /// If a mix-blend-mode, contains the render task for
    /// the readback of the framebuffer that we use to sample
    /// from in the mix-blend-mode shader.
    /// For drop-shadow filter, this will store the original
    /// picture task which would be rendered on screen after
    /// blur pass.
    pub secondary_render_task_id: Option<RenderTaskId>,

    /// How this picture should be composited.
    /// If None, don't composite - just draw directly on parent surface.
    pub requested_composite_mode: Option<PictureCompositeMode>,

    /// Raster/surface configuration, present when this picture
    /// establishes a surface.
    pub raster_config: Option<RasterConfig>,
    /// This picture's role in a preserve-3D context, if any.
    pub context_3d: Picture3DContext<OrderedPictureChild>,

    // Optional cache handles for storing extra data
    // in the GPU cache, depending on the type of
    // picture.
    pub extra_gpu_data_handles: SmallVec<[GpuCacheHandle; 1]>,

    /// The spatial node index of this picture when it is
    /// composited into the parent picture.
    pub spatial_node_index: SpatialNodeIndex,

    /// The conservative local rect of this picture. It is
    /// built dynamically during the first picture traversal.
    /// It is composed of already snapped primitives.
    pub estimated_local_rect: LayoutRect,

    /// The local rect of this picture. It is built
    /// dynamically during the frame visibility update. It
    /// differs from the estimated_local_rect because it
    /// will not contain culled primitives, takes into
    /// account surface inflation and the whole clip chain.
    /// It is frequently the same, but may be quite
    /// different depending on how much was culled.
    pub precise_local_rect: LayoutRect,

    /// Store the state of the previous precise local rect
    /// for this picture. We need this in order to know when
    /// to invalidate segments / drop-shadow gpu cache handles.
    pub prev_precise_local_rect: LayoutRect,

    /// If false, this picture needs to (re)build segments
    /// if it supports segment rendering. This can occur
    /// if the local rect of the picture changes due to
    /// transform animation and/or scrolling.
    pub segments_are_valid: bool,

    /// The config options for this picture.
    pub options: PictureOptions,

    /// Set to true if we know for sure the picture is fully opaque.
    pub is_opaque: bool,
}

impl PicturePrimitive {
    /// Debug helper: recursively dump this picture (and its child
    /// pictures) to the given tree printer.
    pub fn print<T: PrintTreePrinter>(
        &self,
        pictures: &[Self],
        self_index: PictureIndex,
        pt: &mut T,
    ) {
        pt.new_level(format!("{:?}", self_index));
        pt.add_item(format!("cluster_count: {:?}", self.prim_list.clusters.len()));
        pt.add_item(format!("estimated_local_rect: {:?}", self.estimated_local_rect));
        pt.add_item(format!("precise_local_rect: {:?}", self.precise_local_rect));
        pt.add_item(format!("spatial_node_index: {:?}", self.spatial_node_index));
        pt.add_item(format!("raster_config: {:?}", self.raster_config));
        pt.add_item(format!("requested_composite_mode: {:?}", self.requested_composite_mode));

        // Recurse into child pictures so the whole subtree is printed.
        for child_pic_index in &self.prim_list.child_pictures {
            pictures[child_pic_index.0].print(pictures, *child_pic_index, pt);
        }

        pt.end_level();
    }

    /// Returns true if this picture supports segmented rendering.
    pub fn can_use_segments(&self) -> bool {
        match self.raster_config {
            // TODO(gw): Support brush segment rendering for filter and mix-blend
            //           shaders. It's possible this already works, but I'm just
            //           applying this optimization to Blit mode for now.
            Some(RasterConfig {
                composite_mode: PictureCompositeMode::MixBlend(..),
                ..
            }) |
            Some(RasterConfig {
                composite_mode: PictureCompositeMode::Filter(..),
                ..
            }) |
            Some(RasterConfig {
                composite_mode: PictureCompositeMode::ComponentTransferFilter(..),
                ..
            }) |
            Some(RasterConfig {
                composite_mode: PictureCompositeMode::TileCache { .. },
                ..
            }) |
            Some(RasterConfig {
                composite_mode: PictureCompositeMode::SvgFilter(..),
                ..
            }) |
            None => {
                false
            }
            // Only Blit-for-clip surfaces use segment rendering today.
            Some(RasterConfig { composite_mode: PictureCompositeMode::Blit(reason), ..}) => {
                reason == BlitReason::CLIP
            }
        }
    }

    /// Resolve animated scene properties on this picture's filter (currently
    /// only the Opacity binding) and return whether the picture is still
    /// visible with the resolved value.
    fn resolve_scene_properties(&mut self, properties: &SceneProperties) -> bool {
        match self.requested_composite_mode {
            Some(PictureCompositeMode::Filter(ref mut filter)) => {
                match *filter {
                    Filter::Opacity(ref binding, ref mut value) => {
                        // Write the resolved animated value back into the filter.
                        *value = properties.resolve_float(binding);
                    }
                    _ => {}
                }
                filter.is_visible()
            }
            // Pictures without a filter composite mode are always visible.
            _ => true,
        }
    }

    /// Non-mutating visibility check: false only when a filter composite
    /// mode reports itself invisible (e.g. zero opacity).
    pub fn is_visible(&self) -> bool {
        match self.requested_composite_mode {
            Some(PictureCompositeMode::Filter(ref filter)) => {
                filter.is_visible()
            }
            _ => true,
        }
    }

    // TODO(gw): We have the PictureOptions struct available. We
    //           should move some of the parameter list in this
    //           method to be part of the PictureOptions, and
    //           avoid adding new parameters here.
    /// Construct a new picture primitive. Rects, render task ids and the
    /// raster config start out empty/None and are filled in during frame
    /// building.
    pub fn new_image(
        requested_composite_mode: Option<PictureCompositeMode>,
        context_3d: Picture3DContext<OrderedPictureChild>,
        apply_local_clip_rect: bool,
        flags: PrimitiveFlags,
        prim_list: PrimitiveList,
        spatial_node_index: SpatialNodeIndex,
        options: PictureOptions,
    ) -> Self {
        PicturePrimitive {
            prim_list,
            state: None,
            primary_render_task_id: None,
            secondary_render_task_id: None,
            requested_composite_mode,
            raster_config: None,
            context_3d,
            extra_gpu_data_handles: SmallVec::new(),
            apply_local_clip_rect,
            is_backface_visible: flags.contains(PrimitiveFlags::IS_BACKFACE_VISIBLE),
            spatial_node_index,
            estimated_local_rect: LayoutRect::zero(),
            precise_local_rect: LayoutRect::zero(),
            prev_precise_local_rect: LayoutRect::zero(),
            options,
            segments_are_valid: false,
            is_opaque: false,
        }
    }

    pub fn take_context(
        &mut self,
        pic_index: PictureIndex,
        surface_spatial_node_index: SpatialNodeIndex,
        raster_spatial_node_index: SpatialNodeIndex,
        parent_surface_index: SurfaceIndex,
        parent_subpixel_mode: SubpixelMode,
        frame_state: &mut FrameBuildingState,
        frame_context: &FrameBuildingContext,
        scratch: &mut PrimitiveScratchBuffer,
        tile_cache_logger: &mut TileCacheLogger,
        tile_caches:
&mut FastHashMap<SliceId, Box<TileCacheInstance>>, ) -> Option<(PictureContext, PictureState, PrimitiveList)> { self.primary_render_task_id = None; self.secondary_render_task_id = None; if !self.is_visible() { return None; } profile_scope!("take_context"); // Extract the raster and surface spatial nodes from the raster // config, if this picture establishes a surface. Otherwise just // pass in the spatial node indices from the parent context. let (raster_spatial_node_index, surface_spatial_node_index, surface_index, inflation_factor) = match self.raster_config { Some(ref raster_config) => { let surface = &frame_state.surfaces[raster_config.surface_index.0]; ( surface.raster_spatial_node_index, self.spatial_node_index, raster_config.surface_index, surface.inflation_factor, ) } None => { ( raster_spatial_node_index, surface_spatial_node_index, parent_surface_index, 0.0, ) } }; let map_pic_to_world = SpaceMapper::new_with_target( ROOT_SPATIAL_NODE_INDEX, surface_spatial_node_index, frame_context.global_screen_world_rect, frame_context.spatial_tree, ); let pic_bounds = map_pic_to_world.unmap(&map_pic_to_world.bounds) .unwrap_or_else(PictureRect::max_rect); let map_local_to_pic = SpaceMapper::new( surface_spatial_node_index, pic_bounds, ); let (map_raster_to_world, map_pic_to_raster) = create_raster_mappers( surface_spatial_node_index, raster_spatial_node_index, frame_context.global_screen_world_rect, frame_context.spatial_tree, ); let plane_splitter = match self.context_3d { Picture3DContext::Out => { None } Picture3DContext::In { root_data: Some(_), .. } => { Some(PlaneSplitter::new()) } Picture3DContext::In { root_data: None, .. } => { None } }; match self.raster_config { Some(RasterConfig { surface_index, composite_mode: PictureCompositeMode::TileCache { slice_id }, .. 
}) => { let tile_cache = tile_caches.get_mut(&slice_id).unwrap(); let mut debug_info = SliceDebugInfo::new(); let mut surface_tasks = Vec::with_capacity(tile_cache.tile_count()); let mut surface_device_rect = DeviceRect::zero(); let device_pixel_scale = frame_state .surfaces[surface_index.0] .device_pixel_scale; // Get the overall world space rect of the picture cache. Used to clip // the tile rects below for occlusion testing to the relevant area. let world_clip_rect = map_pic_to_world .map(&tile_cache.local_clip_rect) .expect("bug: unable to map clip rect"); let device_clip_rect = (world_clip_rect * frame_context.global_device_pixel_scale).round(); for (sub_slice_index, sub_slice) in tile_cache.sub_slices.iter_mut().enumerate() { for tile in sub_slice.tiles.values_mut() { surface_device_rect = surface_device_rect.union(&tile.device_valid_rect); if tile.is_visible { // Get the world space rect that this tile will actually occupy on screem let device_draw_rect = device_clip_rect.intersection(&tile.device_valid_rect); // If that draw rect is occluded by some set of tiles in front of it, // then mark it as not visible and skip drawing. When it's not occluded // it will fail this test, and get rasterized by the render task setup // code below. match device_draw_rect { Some(device_draw_rect) => { // Only check for occlusion on visible tiles that are fixed position. if tile_cache.spatial_node_index == ROOT_SPATIAL_NODE_INDEX && frame_state.composite_state.occluders.is_tile_occluded(tile.z_id, device_draw_rect) { // If this tile has an allocated native surface, free it, since it's completely // occluded. We will need to re-allocate this surface if it becomes visible, // but that's likely to be rare (e.g. when there is no content display list // for a frame or two during a tab switch). let surface = tile.surface.as_mut().expect("no tile surface set!"); if let TileSurface::Texture { descriptor: SurfaceTextureDescriptor::Native { id, .. }, .. 
} = surface { if let Some(id) = id.take() { frame_state.resource_cache.destroy_compositor_tile(id); } } tile.is_visible = false; if frame_context.fb_config.testing { debug_info.tiles.insert( tile.tile_offset, TileDebugInfo::Occluded, ); } continue; } } None => { tile.is_visible = false; } } } // If we get here, we want to ensure that the surface remains valid in the texture // cache, _even if_ it's not visible due to clipping or being scrolled off-screen. // This ensures that we retain valid tiles that are off-screen, but still in the // display port of this tile cache instance. if let Some(TileSurface::Texture { descriptor, .. }) = tile.surface.as_ref() { if let SurfaceTextureDescriptor::TextureCache { ref handle, .. } = descriptor { frame_state.resource_cache.texture_cache.request( handle, frame_state.gpu_cache, ); } } // If the tile has been found to be off-screen / clipped, skip any further processing. if !tile.is_visible { if frame_context.fb_config.testing { debug_info.tiles.insert( tile.tile_offset, TileDebugInfo::Culled, ); } continue; } if frame_context.debug_flags.contains(DebugFlags::PICTURE_CACHING_DBG) { tile.root.draw_debug_rects( &map_pic_to_world, tile.is_opaque, tile.current_descriptor.local_valid_rect, scratch, frame_context.global_device_pixel_scale, ); let label_offset = DeviceVector2D::new( 20.0 + sub_slice_index as f32 * 20.0, 30.0 + sub_slice_index as f32 * 20.0, ); let tile_device_rect = tile.world_tile_rect * frame_context.global_device_pixel_scale; if tile_device_rect.size.height >= label_offset.y { let surface = tile.surface.as_ref().expect("no tile surface set!"); scratch.push_debug_string( tile_device_rect.origin + label_offset, debug_colors::RED, format!("{:?}: s={} is_opaque={} surface={} sub={}", tile.id, tile_cache.slice, tile.is_opaque, surface.kind(), sub_slice_index, ), ); } } if let TileSurface::Texture { descriptor, .. 
} = tile.surface.as_mut().unwrap() { match descriptor { SurfaceTextureDescriptor::TextureCache { ref handle, .. } => { // Invalidate if the backing texture was evicted. if frame_state.resource_cache.texture_cache.is_allocated(handle) { // Request the backing texture so it won't get evicted this frame. // We specifically want to mark the tile texture as used, even // if it's detected not visible below and skipped. This is because // we maintain the set of tiles we care about based on visibility // during pre_update. If a tile still exists after that, we are // assuming that it's either visible or we want to retain it for // a while in case it gets scrolled back onto screen soon. // TODO(gw): Consider switching to manual eviction policy? frame_state.resource_cache.texture_cache.request(handle, frame_state.gpu_cache); } else { // If the texture was evicted on a previous frame, we need to assume // that the entire tile rect is dirty. tile.invalidate(None, InvalidationReason::NoTexture); } } SurfaceTextureDescriptor::Native { id, .. } => { if id.is_none() { // There is no current surface allocation, so ensure the entire tile is invalidated tile.invalidate(None, InvalidationReason::NoSurface); } } } } // Ensure that the dirty rect doesn't extend outside the local valid rect. tile.local_dirty_rect = tile.local_dirty_rect .intersection(&tile.current_descriptor.local_valid_rect) .unwrap_or_else(PictureRect::zero); // Update the world/device dirty rect let world_dirty_rect = map_pic_to_world.map(&tile.local_dirty_rect).expect("bug"); let device_rect = (tile.world_tile_rect * frame_context.global_device_pixel_scale).round(); tile.device_dirty_rect = (world_dirty_rect * frame_context.global_device_pixel_scale) .round_out() .intersection(&device_rect) .unwrap_or_else(DeviceRect::zero); if tile.is_valid { if frame_context.fb_config.testing { debug_info.tiles.insert( tile.tile_offset, TileDebugInfo::Valid, ); } continue; } // Add this dirty rect to the dirty region tracker. 
This must be done outside the if statement below, // so that we include in the dirty region tiles that are handled by a background color only (no // surface allocation). tile_cache.dirty_region.add_dirty_region( tile.local_dirty_rect, SubSliceIndex::new(sub_slice_index), frame_context.spatial_tree, ); // Ensure that this texture is allocated. if let TileSurface::Texture { ref mut descriptor } = tile.surface.as_mut().unwrap() { match descriptor { SurfaceTextureDescriptor::TextureCache { ref mut handle } => { if !frame_state.resource_cache.texture_cache.is_allocated(handle) { frame_state.resource_cache.texture_cache.update_picture_cache( tile_cache.current_tile_size, handle, frame_state.gpu_cache, ); } } SurfaceTextureDescriptor::Native { id } => { if id.is_none() { // Allocate a native surface id if we're in native compositing mode, // and we don't have a surface yet (due to first frame, or destruction // due to tile size changing etc). if sub_slice.native_surface.is_none() { let opaque = frame_state .resource_cache .create_compositor_surface( tile_cache.virtual_offset, tile_cache.current_tile_size, true, ); let alpha = frame_state .resource_cache .create_compositor_surface( tile_cache.virtual_offset, tile_cache.current_tile_size, false, ); sub_slice.native_surface = Some(NativeSurface { opaque, alpha, }); } // Create the tile identifier and allocate it. 
let surface_id = if tile.is_opaque { sub_slice.native_surface.as_ref().unwrap().opaque } else { sub_slice.native_surface.as_ref().unwrap().alpha }; let tile_id = NativeTileId { surface_id, x: tile.tile_offset.x, y: tile.tile_offset.y, }; frame_state.resource_cache.create_compositor_tile(tile_id); *id = Some(tile_id); } } } let content_origin_f = tile.world_tile_rect.origin * device_pixel_scale; let content_origin = content_origin_f.round(); debug_assert!((content_origin_f.x - content_origin.x).abs() < 0.01); debug_assert!((content_origin_f.y - content_origin.y).abs() < 0.01); let surface = descriptor.resolve( frame_state.resource_cache, tile_cache.current_tile_size, ); let scissor_rect = tile.device_dirty_rect .translate(-device_rect.origin.to_vector()) .round() .to_i32(); let valid_rect = tile.device_valid_rect .translate(-device_rect.origin.to_vector()) .round() .to_i32(); let task_size = tile_cache.current_tile_size; let batch_filter = BatchFilter { rect_in_pic_space: tile.local_dirty_rect, sub_slice_index: SubSliceIndex::new(sub_slice_index), }; let render_task_id = frame_state.rg_builder.add().init( RenderTask::new( RenderTaskLocation::Static { surface: StaticRenderTaskSurface::PictureCache { surface, }, rect: task_size.into(), }, RenderTaskKind::new_picture( task_size, tile_cache.current_tile_size.to_f32(), pic_index, content_origin, surface_spatial_node_index, device_pixel_scale, Some(batch_filter), Some(scissor_rect), Some(valid_rect), ) ), ); surface_tasks.push(render_task_id); } if frame_context.fb_config.testing { debug_info.tiles.insert( tile.tile_offset, TileDebugInfo::Dirty(DirtyTileDebugInfo { local_valid_rect: tile.current_descriptor.local_valid_rect, local_dirty_rect: tile.local_dirty_rect, }), ); } // If the entire tile valid region is dirty, we can update the fract offset // at which the tile was rendered. 
if tile.device_dirty_rect.contains_rect(&tile.device_valid_rect) { tile.device_fract_offset = tile_cache.device_fract_offset; } // Now that the tile is valid, reset the dirty rect. tile.local_dirty_rect = PictureRect::zero(); tile.is_valid = true; } } // If invalidation debugging is enabled, dump the picture cache state to a tree printer. if frame_context.debug_flags.contains(DebugFlags::INVALIDATION_DBG) { tile_cache.print(); } // If testing mode is enabled, write some information about the current state // of this picture cache (made available in RenderResults). if frame_context.fb_config.testing { frame_state.composite_state .picture_cache_debug .slices .insert( tile_cache.slice, debug_info, ); } frame_state.init_surface_tiled( surface_index, surface_tasks, surface_device_rect, ); } Some(ref mut raster_config) => { let pic_rect = self.precise_local_rect.cast_unit(); let mut device_pixel_scale = frame_state .surfaces[raster_config.surface_index.0] .device_pixel_scale; let scale_factors = frame_state .surfaces[raster_config.surface_index.0] .scale_factors; // If the primitive has a filter that can sample with an offset, the clip rect has // to take it into account. let clip_inflation = match raster_config.composite_mode { PictureCompositeMode::Filter(Filter::DropShadows(ref shadows)) => { let mut max_offset = vec2(0.0, 0.0); let mut min_offset = vec2(0.0, 0.0); for shadow in shadows { let offset = layout_vector_as_picture_vector(shadow.offset); max_offset = max_offset.max(offset); min_offset = min_offset.min(offset); } // Get the shadow offsets in world space. let raster_min = map_pic_to_raster.map_vector(min_offset); let raster_max = map_pic_to_raster.map_vector(max_offset); let world_min = map_raster_to_world.map_vector(raster_min); let world_max = map_raster_to_world.map_vector(raster_max); // Grow the clip in the opposite direction of the shadow's offset. 
SideOffsets2D::from_vectors_outer( -world_max.max(vec2(0.0, 0.0)), -world_min.min(vec2(0.0, 0.0)), ) } _ => SideOffsets2D::zero(), }; let (mut clipped, mut unclipped) = match get_raster_rects( pic_rect, &map_pic_to_raster, &map_raster_to_world, raster_config.clipped_bounding_rect.outer_rect(clip_inflation), device_pixel_scale, ) { Some(info) => info, None => { return None } }; let transform = map_pic_to_raster.get_transform(); /// If the picture (raster_config) establishes a raster root, /// its requested resolution won't be clipped by the parent or /// viewport; so we need to make sure the requested resolution is /// "reasonable", ie. <= MAX_SURFACE_SIZE. If not, scale the /// picture down until it fits that limit. This results in a new /// device_rect, a new unclipped rect, and a new device_pixel_scale. /// /// Since the adjusted device_pixel_scale is passed into the /// RenderTask (and then the shader via RenderTaskData) this mostly /// works transparently, reusing existing support for variable DPI /// support. The on-the-fly scaling can be seen as on-the-fly, /// per-task DPI adjustment. Logical pixels are unaffected. /// /// The scaling factor is returned to the caller; blur radius, /// font size, etc. need to be scaled accordingly. 
fn adjust_scale_for_max_surface_size( raster_config: &RasterConfig, max_target_size: i32, pic_rect: PictureRect, map_pic_to_raster: &SpaceMapper<PicturePixel, RasterPixel>, map_raster_to_world: &SpaceMapper<RasterPixel, WorldPixel>, clipped_prim_bounding_rect: WorldRect, device_pixel_scale : &mut DevicePixelScale, device_rect: &mut DeviceRect, unclipped: &mut DeviceRect) -> Option<f32> { let limit = if raster_config.establishes_raster_root { MAX_SURFACE_SIZE } else { max_target_size as f32 }; if device_rect.size.width > limit || device_rect.size.height > limit { // round_out will grow by 1 integer pixel if origin is on a // fractional position, so keep that margin for error with -1: let scale = (limit as f32 - 1.0) / (f32::max(device_rect.size.width, device_rect.size.height)); *device_pixel_scale = *device_pixel_scale * Scale::new(scale); let new_device_rect = device_rect.to_f32() * Scale::new(scale); *device_rect = new_device_rect.round_out(); *unclipped = match get_raster_rects( pic_rect, &map_pic_to_raster, &map_raster_to_world, clipped_prim_bounding_rect, *device_pixel_scale ) { Some(info) => info.1, None => { return None } }; Some(scale) } else { None } } let primary_render_task_id; match raster_config.composite_mode { PictureCompositeMode::TileCache { .. } => { unreachable!("handled above"); } PictureCompositeMode::Filter(Filter::Blur(width, height)) => { let width_std_deviation = clamp_blur_radius(width, scale_factors) * device_pixel_scale.0; let height_std_deviation = clamp_blur_radius(height, scale_factors) * device_pixel_scale.0; let mut blur_std_deviation = DeviceSize::new( width_std_deviation * scale_factors.0, height_std_deviation * scale_factors.1 ); let mut device_rect = if self.options.inflate_if_required { let inflation_factor = frame_state.surfaces[raster_config.surface_index.0].inflation_factor; let inflation_factor = inflation_factor * device_pixel_scale.0; // The clipped field is the part of the picture that is visible // on screen. 
The unclipped field is the screen-space rect of // the complete picture, if no screen / clip-chain was applied // (this includes the extra space for blur region). To ensure // that we draw a large enough part of the picture to get correct // blur results, inflate that clipped area by the blur range, and // then intersect with the total screen rect, to minimize the // allocation size. clipped .inflate(inflation_factor * scale_factors.0, inflation_factor * scale_factors.1) .intersection(&unclipped) .unwrap() } else { clipped }; let mut original_size = device_rect.size; // Adjust the size to avoid introducing sampling errors during the down-scaling passes. // what would be even better is to rasterize the picture at the down-scaled size // directly. device_rect.size = BlurTask::adjusted_blur_source_size( device_rect.size, blur_std_deviation, ); if let Some(scale) = adjust_scale_for_max_surface_size( raster_config, frame_context.fb_config.max_target_size, pic_rect, &map_pic_to_raster, &map_raster_to_world, raster_config.clipped_bounding_rect, &mut device_pixel_scale, &mut device_rect, &mut unclipped, ) { blur_std_deviation = blur_std_deviation * scale; original_size = original_size.to_f32() * scale; raster_config.root_scaling_factor = scale; } let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &device_rect, device_pixel_scale, ); let task_size = device_rect.size.to_i32(); let picture_task_id = frame_state.rg_builder.add().init( RenderTask::new_dynamic( task_size, RenderTaskKind::new_picture( task_size, unclipped.size, pic_index, device_rect.origin, surface_spatial_node_index, device_pixel_scale, None, None, None, ) ).with_uv_rect_kind(uv_rect_kind) ); let blur_render_task_id = RenderTask::new_blur( blur_std_deviation, picture_task_id, frame_state.rg_builder, RenderTargetKind::Color, None, original_size.to_i32(), ); primary_render_task_id = Some(blur_render_task_id); frame_state.init_surface_chain( raster_config.surface_index, blur_render_task_id, 
picture_task_id, parent_surface_index, device_rect, ); } PictureCompositeMode::Filter(Filter::DropShadows(ref shadows)) => { let mut max_std_deviation = 0.0; for shadow in shadows { max_std_deviation = f32::max(max_std_deviation, shadow.blur_radius); } max_std_deviation = clamp_blur_radius(max_std_deviation, scale_factors) * device_pixel_scale.0; let max_blur_range = max_std_deviation * BLUR_SAMPLE_SCALE; // We cast clipped to f32 instead of casting unclipped to i32 // because unclipped can overflow an i32. let mut device_rect = clipped .inflate(max_blur_range * scale_factors.0, max_blur_range * scale_factors.1) .intersection(&unclipped) .unwrap(); device_rect.size = BlurTask::adjusted_blur_source_size( device_rect.size, DeviceSize::new( max_std_deviation * scale_factors.0, max_std_deviation * scale_factors.1 ), ); if let Some(scale) = adjust_scale_for_max_surface_size( raster_config, frame_context.fb_config.max_target_size, pic_rect, &map_pic_to_raster, &map_raster_to_world, raster_config.clipped_bounding_rect, &mut device_pixel_scale, &mut device_rect, &mut unclipped, ) { // std_dev adjusts automatically from using device_pixel_scale raster_config.root_scaling_factor = scale; } let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &device_rect, device_pixel_scale, ); let task_size = device_rect.size.to_i32(); let picture_task_id = frame_state.rg_builder.add().init( RenderTask::new_dynamic( task_size, RenderTaskKind::new_picture( task_size, unclipped.size, pic_index, device_rect.origin, surface_spatial_node_index, device_pixel_scale, None, None, None, ), ).with_uv_rect_kind(uv_rect_kind) ); // Add this content picture as a dependency of the parent surface, to // ensure it isn't free'd after the shadow uses it as an input. 
frame_state.add_child_render_task( parent_surface_index, picture_task_id, ); let mut blur_tasks = BlurTaskCache::default(); self.extra_gpu_data_handles.resize(shadows.len(), GpuCacheHandle::new()); let mut blur_render_task_id = picture_task_id; for shadow in shadows { let blur_radius = clamp_blur_radius(shadow.blur_radius, scale_factors) * device_pixel_scale.0; blur_render_task_id = RenderTask::new_blur( DeviceSize::new( blur_radius * scale_factors.0, blur_radius * scale_factors.1, ), picture_task_id, frame_state.rg_builder, RenderTargetKind::Color, Some(&mut blur_tasks), device_rect.size.to_i32(), ); } primary_render_task_id = Some(blur_render_task_id); self.secondary_render_task_id = Some(picture_task_id); frame_state.init_surface_chain( raster_config.surface_index, blur_render_task_id, picture_task_id, parent_surface_index, device_rect, ); } PictureCompositeMode::MixBlend(mode) if BlendMode::from_mix_blend_mode( mode, frame_context.fb_config.gpu_supports_advanced_blend, frame_context.fb_config.advanced_blend_is_coherent, frame_context.fb_config.dual_source_blending_is_enabled && frame_context.fb_config.dual_source_blending_is_supported, ).is_none() => { if let Some(scale) = adjust_scale_for_max_surface_size( raster_config, frame_context.fb_config.max_target_size, pic_rect, &map_pic_to_raster, &map_raster_to_world, raster_config.clipped_bounding_rect, &mut device_pixel_scale, &mut clipped, &mut unclipped, ) { raster_config.root_scaling_factor = scale; } let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &clipped, device_pixel_scale, ); let parent_surface = &frame_state.surfaces[parent_surface_index.0]; let parent_raster_spatial_node_index = parent_surface.raster_spatial_node_index; let parent_device_pixel_scale = parent_surface.device_pixel_scale; // Create a space mapper that will allow mapping from the local rect // of the mix-blend primitive into the space of the surface that we // need to read back from. 
Note that we use the parent's raster spatial // node here, so that we are in the correct device space of the parent // surface, whether it establishes a raster root or not. let map_pic_to_parent = SpaceMapper::new_with_target( parent_raster_spatial_node_index, self.spatial_node_index, RasterRect::max_rect(), // TODO(gw): May need a conservative estimate? frame_context.spatial_tree, ); let pic_in_raster_space = map_pic_to_parent .map(&pic_rect) .expect("bug: unable to map mix-blend content into parent"); // Apply device pixel ratio for parent surface to get into device // pixels for that surface. let backdrop_rect = raster_rect_to_device_pixels( pic_in_raster_space, parent_device_pixel_scale, ); let parent_surface_rect = parent_surface.get_device_rect(); // If there is no available parent surface to read back from (for example, if // the parent surface is affected by a clip that doesn't affect the child // surface), then create a dummy 16x16 readback. In future, we could alter // the composite mode of this primitive to skip the mix-blend, but for simplicity // we just create a dummy readback for now. let readback_task_id = match backdrop_rect.intersection(&parent_surface_rect) { Some(available_rect) => { // Calculate the UV coords necessary for the shader to sampler // from the primitive rect within the readback region. This is // 0..1 for aligned surfaces, but doing it this way allows // accurate sampling if the primitive bounds have fractional values. 
let backdrop_uv = calculate_uv_rect_kind( &pic_rect, &map_pic_to_parent.get_transform(), &available_rect, parent_device_pixel_scale, ); frame_state.rg_builder.add().init( RenderTask::new_dynamic( available_rect.size.to_i32(), RenderTaskKind::new_readback(Some(available_rect.origin)), ).with_uv_rect_kind(backdrop_uv) ) } None => { frame_state.rg_builder.add().init( RenderTask::new_dynamic( DeviceIntSize::new(16, 16), RenderTaskKind::new_readback(None), ) ) } }; frame_state.add_child_render_task( parent_surface_index, readback_task_id, ); self.secondary_render_task_id = Some(readback_task_id); let task_size = clipped.size.to_i32(); let render_task_id = frame_state.rg_builder.add().init( RenderTask::new_dynamic( task_size, RenderTaskKind::new_picture( task_size, unclipped.size, pic_index, clipped.origin, surface_spatial_node_index, device_pixel_scale, None, None, None, ) ).with_uv_rect_kind(uv_rect_kind) ); primary_render_task_id = Some(render_task_id); frame_state.init_surface( raster_config.surface_index, render_task_id, parent_surface_index, clipped, ); } PictureCompositeMode::Filter(..) 
=> { if let Some(scale) = adjust_scale_for_max_surface_size( raster_config, frame_context.fb_config.max_target_size, pic_rect, &map_pic_to_raster, &map_raster_to_world, raster_config.clipped_bounding_rect, &mut device_pixel_scale, &mut clipped, &mut unclipped, ) { raster_config.root_scaling_factor = scale; } let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &clipped, device_pixel_scale, ); let task_size = clipped.size.to_i32(); let render_task_id = frame_state.rg_builder.add().init( RenderTask::new_dynamic( task_size, RenderTaskKind::new_picture( task_size, unclipped.size, pic_index, clipped.origin, surface_spatial_node_index, device_pixel_scale, None, None, None, ) ).with_uv_rect_kind(uv_rect_kind) ); primary_render_task_id = Some(render_task_id); frame_state.init_surface( raster_config.surface_index, render_task_id, parent_surface_index, clipped, ); } PictureCompositeMode::ComponentTransferFilter(..) => { if let Some(scale) = adjust_scale_for_max_surface_size( raster_config, frame_context.fb_config.max_target_size, pic_rect, &map_pic_to_raster, &map_raster_to_world, raster_config.clipped_bounding_rect, &mut device_pixel_scale, &mut clipped, &mut unclipped, ) { raster_config.root_scaling_factor = scale; } let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &clipped, device_pixel_scale, ); let task_size = clipped.size.to_i32(); let render_task_id = frame_state.rg_builder.add().init( RenderTask::new_dynamic( task_size, RenderTaskKind::new_picture( task_size, unclipped.size, pic_index, clipped.origin, surface_spatial_node_index, device_pixel_scale, None, None, None, ) ).with_uv_rect_kind(uv_rect_kind) ); primary_render_task_id = Some(render_task_id); frame_state.init_surface( raster_config.surface_index, render_task_id, parent_surface_index, clipped, ); } PictureCompositeMode::MixBlend(..) 
| PictureCompositeMode::Blit(_) => { if let Some(scale) = adjust_scale_for_max_surface_size( raster_config, frame_context.fb_config.max_target_size, pic_rect, &map_pic_to_raster, &map_raster_to_world, raster_config.clipped_bounding_rect, &mut device_pixel_scale, &mut clipped, &mut unclipped, ) { raster_config.root_scaling_factor = scale; } let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &clipped, device_pixel_scale, ); let task_size = clipped.size.to_i32(); let render_task_id = frame_state.rg_builder.add().init( RenderTask::new_dynamic( task_size, RenderTaskKind::new_picture( task_size, unclipped.size, pic_index, clipped.origin, surface_spatial_node_index, device_pixel_scale, None, None, None, ) ).with_uv_rect_kind(uv_rect_kind) ); primary_render_task_id = Some(render_task_id); frame_state.init_surface( raster_config.surface_index, render_task_id, parent_surface_index, clipped, ); } PictureCompositeMode::SvgFilter(ref primitives, ref filter_datas) => { if let Some(scale) = adjust_scale_for_max_surface_size( raster_config, frame_context.fb_config.max_target_size, pic_rect, &map_pic_to_raster, &map_raster_to_world, raster_config.clipped_bounding_rect, &mut device_pixel_scale, &mut clipped, &mut unclipped, ) { raster_config.root_scaling_factor = scale; } let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &clipped, device_pixel_scale, ); let task_size = clipped.size.to_i32(); let picture_task_id = frame_state.rg_builder.add().init( RenderTask::new_dynamic( task_size, RenderTaskKind::new_picture( task_size, unclipped.size, pic_index, clipped.origin, surface_spatial_node_index, device_pixel_scale, None, None, None, ) ).with_uv_rect_kind(uv_rect_kind) ); let filter_task_id = RenderTask::new_svg_filter( primitives, filter_datas, frame_state.rg_builder, clipped.size.to_i32(), uv_rect_kind, picture_task_id, device_pixel_scale, ); primary_render_task_id = Some(filter_task_id); frame_state.init_surface_chain( raster_config.surface_index, 
filter_task_id, picture_task_id, parent_surface_index, clipped, ); } } self.primary_render_task_id = primary_render_task_id; // Update the device pixel ratio in the surface, in case it was adjusted due // to the surface being too large. This ensures the correct scale is available // in case it's used as input to a parent mix-blend-mode readback. frame_state .surfaces[raster_config.surface_index.0] .device_pixel_scale = device_pixel_scale; } None => {} }; #[cfg(feature = "capture")] { if frame_context.debug_flags.contains(DebugFlags::TILE_CACHE_LOGGING_DBG) { if let Some(PictureCompositeMode::TileCache { slice_id }) = self.requested_composite_mode { if let Some(ref tile_cache) = tile_caches.get(&slice_id) { // extract just the fields that we're interested in let mut tile_cache_tiny = TileCacheInstanceSerializer { slice: tile_cache.slice, tiles: FastHashMap::default(), background_color: tile_cache.background_color, fract_offset: tile_cache.fract_offset }; // TODO(gw): Debug output only writes the primary sub-slice for now for (key, tile) in &tile_cache.sub_slices.first().unwrap().tiles { tile_cache_tiny.tiles.insert(*key, TileSerializer { rect: tile.local_tile_rect, current_descriptor: tile.current_descriptor.clone(), device_fract_offset: tile.device_fract_offset, id: tile.id, root: tile.root.clone(), background_color: tile.background_color, invalidation_reason: tile.invalidation_reason.clone() }); } let text = ron::ser::to_string_pretty(&tile_cache_tiny, Default::default()).unwrap(); tile_cache_logger.add(text, map_pic_to_world.get_transform()); } } } } #[cfg(not(feature = "capture"))] { let _tile_cache_logger = tile_cache_logger; // unused variable fix } let state = PictureState { //TODO: check for MAX_CACHE_SIZE here? 
map_local_to_pic, map_pic_to_world, map_pic_to_raster, map_raster_to_world, plane_splitter, }; let mut dirty_region_count = 0; // If this is a picture cache, push the dirty region to ensure any // child primitives are culled and clipped to the dirty rect(s). if let Some(RasterConfig { composite_mode: PictureCompositeMode::TileCache { slice_id }, .. }) = self.raster_config { let dirty_region = tile_caches[&slice_id].dirty_region.clone(); frame_state.push_dirty_region(dirty_region); dirty_region_count += 1; } if inflation_factor > 0.0 { let inflated_region = frame_state.current_dirty_region().inflate( inflation_factor, frame_context.spatial_tree, ); frame_state.push_dirty_region(inflated_region); dirty_region_count += 1; } // Disallow subpixel AA if an intermediate surface is needed. // TODO(lsalzman): allow overriding parent if intermediate surface is opaque let subpixel_mode = match self.raster_config { Some(RasterConfig { ref composite_mode, .. }) => { let subpixel_mode = match composite_mode { PictureCompositeMode::TileCache { slice_id } => { tile_caches[&slice_id].subpixel_mode } PictureCompositeMode::Blit(..) | PictureCompositeMode::ComponentTransferFilter(..) | PictureCompositeMode::Filter(..) | PictureCompositeMode::MixBlend(..) | PictureCompositeMode::SvgFilter(..) => { // TODO(gw): We can take advantage of the same logic that // exists in the opaque rect detection for tile // caches, to allow subpixel text on other surfaces // that can be detected as opaque. 
SubpixelMode::Deny } }; subpixel_mode } None => { SubpixelMode::Allow } }; // Still disable subpixel AA if parent forbids it let subpixel_mode = match (parent_subpixel_mode, subpixel_mode) { (SubpixelMode::Allow, SubpixelMode::Allow) => { // Both parent and this surface unconditionally allow subpixel AA SubpixelMode::Allow } (SubpixelMode::Allow, SubpixelMode::Conditional { allowed_rect }) => { // Parent allows, but we are conditional subpixel AA SubpixelMode::Conditional { allowed_rect, } } (SubpixelMode::Conditional { allowed_rect }, SubpixelMode::Allow) => { // Propagate conditional subpixel mode to child pictures that allow subpixel AA SubpixelMode::Conditional { allowed_rect, } } (SubpixelMode::Conditional { .. }, SubpixelMode::Conditional { ..}) => { unreachable!("bug: only top level picture caches have conditional subpixel"); } (SubpixelMode::Deny, _) | (_, SubpixelMode::Deny) => { // Either parent or this surface explicitly deny subpixel, these take precedence SubpixelMode::Deny } }; let context = PictureContext { pic_index, apply_local_clip_rect: self.apply_local_clip_rect, raster_spatial_node_index, surface_spatial_node_index, surface_index, dirty_region_count, subpixel_mode, }; let prim_list = mem::replace(&mut self.prim_list, PrimitiveList::empty()); Some((context, state, prim_list)) } pub fn restore_context( &mut self, prim_list: PrimitiveList, context: PictureContext, state: PictureState, frame_state: &mut FrameBuildingState, ) { // Pop any dirty regions this picture set for _ in 0 .. context.dirty_region_count { frame_state.pop_dirty_region(); } self.prim_list = prim_list; self.state = Some(state); } pub fn take_state(&mut self) -> PictureState { self.state.take().expect("bug: no state present!") } /// Add a primitive instance to the plane splitter. The function would generate /// an appropriate polygon, clip it against the frustum, and register with the /// given plane splitter. 
/// Returns `false` when the primitive is clipped out entirely by the
/// combined local clip rect (nothing is registered with the splitter);
/// otherwise registers one or more (possibly frustum-clipped) polygons
/// and returns `true`.
pub fn add_split_plane(
    splitter: &mut PlaneSplitter,
    spatial_tree: &SpatialTree,
    prim_spatial_node_index: SpatialNodeIndex,
    original_local_rect: LayoutRect,
    combined_local_clip_rect: &LayoutRect,
    world_rect: WorldRect,
    plane_split_anchor: PlaneSplitAnchor,
) -> bool {
    let transform = spatial_tree
        .get_world_transform(prim_spatial_node_index);
    let matrix = transform.clone().into_transform().cast();

    // Apply the local clip rect here, before splitting. This is
    // because the local clip rect can't be applied in the vertex
    // shader for split composites, since we are drawing polygons
    // rather than rectangles. The interpolation still works correctly
    // since we determine the UVs by doing a bilerp with a factor
    // from the original local rect.
    let local_rect = match original_local_rect
        .intersection(combined_local_clip_rect) {
        Some(rect) => rect.cast(),
        // Fully clipped out: nothing to split.
        None => return false,
    };
    let world_rect = world_rect.cast();

    match transform {
        CoordinateSpaceMapping::Local => {
            // Identity mapping: the rect can be added directly.
            let polygon = Polygon::from_rect(
                local_rect * Scale::new(1.0),
                plane_split_anchor,
            );
            splitter.add(polygon);
        }
        CoordinateSpaceMapping::ScaleOffset(scale_offset) if scale_offset.scale == Vector2D::new(1.0, 1.0) => {
            // Pure translation (scale of exactly 1): transform the rect
            // with the matrix and its precomputed inverse.
            let inv_matrix = scale_offset.inverse().to_transform().cast();
            let polygon = Polygon::from_transformed_rect_with_inverse(
                local_rect,
                &matrix,
                &inv_matrix,
                plane_split_anchor,
            ).unwrap();
            splitter.add(polygon);
        }
        CoordinateSpaceMapping::ScaleOffset(_) |
        CoordinateSpaceMapping::Transform(_) => {
            // General case: clip the transformed polygon against the
            // world rect (view frustum) before adding the pieces.
            let mut clipper = Clipper::new();
            let results = clipper.clip_transformed(
                Polygon::from_rect(
                    local_rect,
                    plane_split_anchor,
                ),
                &matrix,
                Some(world_rect),
            );
            if let Ok(results) = results {
                for poly in results {
                    splitter.add(poly);
                }
            }
        }
    }

    true
}

/// Sort the accumulated split-plane polygons back-to-front and record
/// the resulting draw order (plus per-polygon GPU cache data) into the
/// 3D context root's `ordered` list.
///
/// Panics if this picture is not a 3D context root (no `root_data`).
pub fn resolve_split_planes(
    &mut self,
    splitter: &mut PlaneSplitter,
    gpu_cache: &mut GpuCache,
    spatial_tree: &SpatialTree,
) {
    let ordered = match self.context_3d {
        Picture3DContext::In { root_data: Some(ref mut list), .. } => list,
        _ => panic!("Expected to find 3D context root"),
    };
    ordered.clear();

    // Process the accumulated split planes and order them for rendering.
    // Z axis is directed at the screen, `sort` is ascending, and we need back-to-front order.
    let sorted = splitter.sort(vec3(0.0, 0.0, 1.0));
    ordered.reserve(sorted.len());
    for poly in sorted {
        let cluster = &self.prim_list.clusters[poly.anchor.cluster_index];
        let spatial_node_index = cluster.spatial_node_index;
        // Map the world-space polygon points back into local space.
        let transform = match spatial_tree
            .get_world_transform(spatial_node_index)
            .inverse() {
            Some(transform) => transform.into_transform(),
            // logging this would be a bit too verbose
            None => continue,
        };

        let local_points = [
            transform.transform_point3d(poly.points[0].cast()),
            transform.transform_point3d(poly.points[1].cast()),
            transform.transform_point3d(poly.points[2].cast()),
            transform.transform_point3d(poly.points[3].cast()),
        ];

        // If any of the points are un-transformable, just drop this
        // plane from drawing.
        if local_points.iter().any(|p| p.is_none()) {
            continue;
        }

        let p0 = local_points[0].unwrap();
        let p1 = local_points[1].unwrap();
        let p2 = local_points[2].unwrap();
        let p3 = local_points[3].unwrap();
        // Pack the four local-space corners into two GPU blocks.
        let gpu_blocks = [
            [p0.x, p0.y, p1.x, p1.y].into(),
            [p2.x, p2.y, p3.x, p3.y].into(),
        ];
        let gpu_handle = gpu_cache.push_per_frame_blocks(&gpu_blocks);
        let gpu_address = gpu_cache.get_address(&gpu_handle);

        ordered.push(OrderedPictureChild {
            anchor: poly.anchor,
            spatial_node_index,
            gpu_address,
        });
    }
}

/// Called during initial picture traversal, before we know the
/// bounding rect of children. It is possible to determine the
/// surface / raster config now though.
///
/// Returns `None` when the picture resolves to invisible (filter
/// properties or backface culling); otherwise returns the primitive
/// list, taken out of `self` for the duration of the recursion.
fn pre_update(
    &mut self,
    state: &mut PictureUpdateState,
    frame_context: &FrameBuildingContext,
) -> Option<PrimitiveList> {
    // Reset raster config in case we early out below.
    self.raster_config = None;

    // Resolve animation properties, and early out if the filter
    // properties make this picture invisible.
    if !self.resolve_scene_properties(frame_context.scene_properties) {
        return None;
    }

    // For out-of-preserve-3d pictures, the backface visibility is determined by
    // the local transform only.
    // Note: we aren't taking the transform relative to the parent picture,
    // since picture tree can be more dense than the corresponding spatial tree.
    if !self.is_backface_visible {
        if let Picture3DContext::Out = self.context_3d {
            match frame_context.spatial_tree.get_local_visible_face(self.spatial_node_index) {
                VisibleFace::Front => {}
                VisibleFace::Back => return None,
            }
        }
    }

    // See if this picture actually needs a surface for compositing.
    // TODO(gw): FPC: Remove the actual / requested composite mode distinction.
    let actual_composite_mode = self.requested_composite_mode.clone();

    if let Some(composite_mode) = actual_composite_mode {
        // Retrieve the positioning node information for the parent surface.
        let parent_raster_node_index = state.current_surface().raster_spatial_node_index;
        let parent_device_pixel_scale = state.current_surface().device_pixel_scale;
        let surface_spatial_node_index = self.spatial_node_index;

        let surface_to_parent_transform = frame_context.spatial_tree
            .get_relative_transform(surface_spatial_node_index, parent_raster_node_index);

        // Check if there is perspective or if an SVG filter is applied, and thus whether a new
        // rasterization root should be established.
        let establishes_raster_root = match composite_mode {
            PictureCompositeMode::TileCache { .. } => {
                // Picture caches are special cased - they never need to establish a raster root. In future,
                // we will probably remove TileCache as a specific composite mode.
                false
            }
            PictureCompositeMode::SvgFilter(..) => {
                // Filters must be applied before transforms, to do this, we can mark this picture as establishing a raster root.
                true
            }
            PictureCompositeMode::MixBlend(..) |
            PictureCompositeMode::Filter(..) |
            PictureCompositeMode::ComponentTransferFilter(..) |
            PictureCompositeMode::Blit(..) => {
                // TODO(gw): As follow ups, individually move each of these composite modes to create raster roots.
                surface_to_parent_transform.is_perspective()
            }
        };

        let (raster_spatial_node_index, device_pixel_scale) = if establishes_raster_root {
            // If a raster root is established, this surface should be scaled based on the scale factors of the surface raster to parent raster transform.
            // This scaling helps ensure that the content in this surface does not become blurry or pixelated when composited in the parent surface.
            let scale_factors = surface_to_parent_transform.scale_factors();

            // Pick the largest scale factor of the transform for the scaling factor.
            // Currently, we ensure that the scaling factor is >= 1.0 as a smaller scale factor can result in blurry output.
            let scaling_factor = scale_factors.0.max(scale_factors.1).max(1.0);

            let device_pixel_scale = parent_device_pixel_scale * Scale::new(scaling_factor);
            (surface_spatial_node_index, device_pixel_scale)
        } else {
            // No raster root: inherit the parent's raster space and scale.
            (parent_raster_node_index, parent_device_pixel_scale)
        };

        let scale_factors = frame_context
            .spatial_tree
            .get_relative_transform(surface_spatial_node_index, raster_spatial_node_index)
            .scale_factors();

        // This inflation factor is to be applied to all primitives within the surface.
        // Only inflate if the caller hasn't already inflated the bounding rects for this filter.
        let mut inflation_factor = 0.0;
        if self.options.inflate_if_required {
            match composite_mode {
                PictureCompositeMode::Filter(Filter::Blur(width, height)) => {
                    let blur_radius = f32::max(clamp_blur_radius(width, scale_factors), clamp_blur_radius(height, scale_factors));
                    // The amount of extra space needed for primitives inside
                    // this picture to ensure the visibility check is correct.
                    inflation_factor = blur_radius * BLUR_SAMPLE_SCALE;
                }
                PictureCompositeMode::SvgFilter(ref primitives, _) => {
                    // Use the largest blur radius found in any of the filter primitives.
                    let mut max = 0.0;
                    for primitive in primitives {
                        if let FilterPrimitiveKind::Blur(ref blur) = primitive.kind {
                            max = f32::max(max, blur.width);
                            max = f32::max(max, blur.height);
                        }
                    }
                    inflation_factor = clamp_blur_radius(max, scale_factors) * BLUR_SAMPLE_SCALE;
                }
                PictureCompositeMode::Filter(Filter::DropShadows(ref shadows)) => {
                    // TODO(gw): This is incorrect, since we don't consider the drop shadow
                    //           offset. However, fixing that is a larger task, so this is
                    //           an improvement on the current case (this at least works where
                    //           the offset of the drop-shadow is ~0, which is often true).

                    // Can't use max_by_key here since f32 isn't Ord
                    let mut max_blur_radius: f32 = 0.0;
                    for shadow in shadows {
                        max_blur_radius = max_blur_radius.max(shadow.blur_radius);
                    }

                    inflation_factor = clamp_blur_radius(max_blur_radius, scale_factors) * BLUR_SAMPLE_SCALE;
                }
                _ => {}
            }
        }

        let surface = SurfaceInfo::new(
            surface_spatial_node_index,
            raster_spatial_node_index,
            inflation_factor,
            frame_context.global_screen_world_rect,
            &frame_context.spatial_tree,
            device_pixel_scale,
            scale_factors,
        );

        // Push the new surface; post_update pops it and checks the index matches.
        self.raster_config = Some(RasterConfig {
            composite_mode,
            establishes_raster_root,
            surface_index: state.push_surface(surface),
            root_scaling_factor: 1.0,
            clipped_bounding_rect: WorldRect::zero(),
        });
    }

    Some(mem::replace(&mut self.prim_list, PrimitiveList::empty()))
}

/// Called after updating child pictures during the initial
/// picture traversal.
///
/// Restores `prim_list`, marks visible clusters, accumulates cluster
/// rects into the current surface rect, and (if this picture owns a
/// surface) pops that surface and propagates its rect to the parent.
fn post_update(
    &mut self,
    prim_list: PrimitiveList,
    state: &mut PictureUpdateState,
    frame_context: &FrameBuildingContext,
    data_stores: &mut DataStores,
) {
    // Restore the pictures list used during recursion.
    self.prim_list = prim_list;

    let surface = state.current_surface_mut();

    for cluster in &mut self.prim_list.clusters {
        cluster.flags.remove(ClusterFlags::IS_VISIBLE);

        // Skip the cluster if backface culled.
        if !cluster.flags.contains(ClusterFlags::IS_BACKFACE_VISIBLE) {
            // For in-preserve-3d primitives and pictures, the backface visibility is
            // evaluated relative to the containing block.
            if let Picture3DContext::In { ancestor_index, .. } = self.context_3d {
                let mut face = VisibleFace::Front;
                frame_context.spatial_tree.get_relative_transform_with_face(
                    cluster.spatial_node_index,
                    ancestor_index,
                    Some(&mut face),
                );
                if face == VisibleFace::Back {
                    continue
                }
            }
        }

        // No point including this cluster if it can't be transformed
        let spatial_node = &frame_context
            .spatial_tree
            .spatial_nodes[cluster.spatial_node_index.0 as usize];
        if !spatial_node.invertible {
            continue;
        }

        // Update any primitives/cluster bounding rects that can only be done
        // with information available during frame building.
        if cluster.flags.contains(ClusterFlags::IS_BACKDROP_FILTER) {
            let backdrop_to_world_mapper = SpaceMapper::new_with_target(
                ROOT_SPATIAL_NODE_INDEX,
                cluster.spatial_node_index,
                LayoutRect::max_rect(),
                frame_context.spatial_tree,
            );

            for prim_instance in &mut self.prim_list.prim_instances[cluster.prim_range()] {
                match prim_instance.kind {
                    PrimitiveInstanceKind::Backdrop { data_handle, .. } => {
                        // The actual size and clip rect of this primitive are determined by computing the bounding
                        // box of the projected rect of the backdrop-filter element onto the backdrop.
                        let prim_data = &mut data_stores.backdrop[data_handle];
                        let spatial_node_index = prim_data.kind.spatial_node_index;

                        // We cannot use the relative transform between the backdrop and the element because
                        // that doesn't take into account any projection transforms that both spatial nodes are children of.
                        // Instead, we first project from the element to the world space and get a flattened 2D bounding rect
                        // in the screen space, we then map this rect from the world space to the backdrop space to get the
                        // proper bounding box where the backdrop-filter needs to be processed.

                        let prim_to_world_mapper = SpaceMapper::new_with_target(
                            ROOT_SPATIAL_NODE_INDEX,
                            spatial_node_index,
                            LayoutRect::max_rect(),
                            frame_context.spatial_tree,
                        );

                        // First map to the screen and get a flattened rect
                        let prim_rect = prim_to_world_mapper.map(&prim_data.kind.border_rect).unwrap_or_else(LayoutRect::zero);
                        // Backwards project the flattened rect onto the backdrop
                        let prim_rect = backdrop_to_world_mapper.unmap(&prim_rect).unwrap_or_else(LayoutRect::zero);

                        // TODO(aosmond): Is this safe? Updating the primitive size during
                        // frame building is usually problematic since scene building will cache
                        // the primitive information in the GPU already.
                        prim_data.common.prim_rect = prim_rect;
                        prim_instance.clip_set.local_clip_rect = prim_rect;

                        // Update the cluster bounding rect now that we have the backdrop rect.
                        cluster.bounding_rect = cluster.bounding_rect.union(&prim_rect);
                    }
                    _ => {
                        panic!("BUG: unexpected deferred primitive kind for cluster updates");
                    }
                }
            }
        }

        // Map the cluster bounding rect into the space of the surface, and
        // include it in the surface bounding rect.
        surface.map_local_to_surface.set_target_spatial_node(
            cluster.spatial_node_index,
            frame_context.spatial_tree,
        );

        // Mark the cluster visible, since it passed the invertible and
        // backface checks.
        cluster.flags.insert(ClusterFlags::IS_VISIBLE);
        if let Some(cluster_rect) = surface.map_local_to_surface.map(&cluster.bounding_rect) {
            surface.rect = surface.rect.union(&cluster_rect);
        }
    }

    // If this picture establishes a surface, then map the surface bounding
    // rect into the parent surface coordinate space, and propagate that up
    // to the parent.
    if let Some(ref mut raster_config) = self.raster_config {
        let surface = state.current_surface_mut();
        // Inflate the local bounding rect if required by the filter effect.
        if self.options.inflate_if_required {
            surface.rect = raster_config.composite_mode.inflate_picture_rect(surface.rect, surface.scale_factors);
        }

        let mut surface_rect = surface.rect * Scale::new(1.0);

        // Pop this surface from the stack
        let surface_index = state.pop_surface();
        debug_assert_eq!(surface_index, raster_config.surface_index);

        // Set the estimated and precise local rects. The precise local rect
        // may be changed again during frame visibility.
        self.estimated_local_rect = surface_rect;
        self.precise_local_rect = surface_rect;

        // Drop shadows draw both a content and shadow rect, so need to expand the local
        // rect of any surfaces to be composited in parent surfaces correctly.
        match raster_config.composite_mode {
            PictureCompositeMode::Filter(Filter::DropShadows(ref shadows)) => {
                for shadow in shadows {
                    let shadow_rect = self.estimated_local_rect.translate(shadow.offset);
                    surface_rect = surface_rect.union(&shadow_rect);
                }
            }
            _ => {}
        }

        // Propagate up to parent surface, now that we know this surface's static rect
        let parent_surface = state.current_surface_mut();
        parent_surface.map_local_to_surface.set_target_spatial_node(
            self.spatial_node_index,
            frame_context.spatial_tree,
        );
        if let Some(parent_surface_rect) = parent_surface
            .map_local_to_surface
            .map(&surface_rect)
        {
            parent_surface.rect = parent_surface.rect.union(&parent_surface_rect);
        }
    }
}

/// Resolve any pending plane splits and upload composite-mode-specific
/// data to the GPU cache (drop-shadow brush data, color matrices,
/// flood colors, component-transfer filter data).
///
/// NOTE(review): every path visible here returns `true` — confirm
/// whether callers actually branch on the returned bool.
pub fn prepare_for_render(
    &mut self,
    frame_context: &FrameBuildingContext,
    frame_state: &mut FrameBuildingState,
    data_stores: &mut DataStores,
) -> bool {
    let mut pic_state_for_children = self.take_state();

    if let Some(ref mut splitter) = pic_state_for_children.plane_splitter {
        self.resolve_split_planes(
            splitter,
            &mut frame_state.gpu_cache,
            &frame_context.spatial_tree,
        );
    }

    // Pictures without a raster config have nothing further to prepare.
    let raster_config = match self.raster_config {
        Some(ref mut raster_config) => raster_config,
        None => {
            return true
        }
    };

    // TODO(gw): Almost all of the Picture types below use extra_gpu_cache_data
    //           to store the same type of data. The exception is the filter
    //           with a ColorMatrix, which stores the color matrix here. It's
    //           probably worth tidying this code up to be a bit more consistent.
    //           Perhaps store the color matrix after the common data, even though
    //           it's not used by that shader.

    match raster_config.composite_mode {
        PictureCompositeMode::TileCache { .. } => {}
        PictureCompositeMode::Filter(Filter::Blur(..)) => {}
        PictureCompositeMode::Filter(Filter::DropShadows(ref shadows)) => {
            // One GPU cache handle per shadow.
            self.extra_gpu_data_handles.resize(shadows.len(), GpuCacheHandle::new());
            for (shadow, extra_handle) in shadows.iter().zip(self.extra_gpu_data_handles.iter_mut()) {
                if let Some(mut request) = frame_state.gpu_cache.request(extra_handle) {
                    // Basic brush primitive header is (see end of prepare_prim_for_render_inner in prim_store.rs)
                    //  [brush specific data]
                    //  [segment_rect, segment data]
                    let shadow_rect = self.precise_local_rect.translate(shadow.offset);

                    // ImageBrush colors
                    request.push(shadow.color.premultiplied());
                    request.push(PremultipliedColorF::WHITE);
                    request.push([
                        self.precise_local_rect.size.width,
                        self.precise_local_rect.size.height,
                        0.0,
                        0.0,
                    ]);

                    // segment rect / extra data
                    request.push(shadow_rect);
                    request.push([0.0, 0.0, 0.0, 0.0]);
                }
            }
        }
        PictureCompositeMode::Filter(ref filter) => {
            match *filter {
                Filter::ColorMatrix(ref m) => {
                    // 5x4 color matrix, uploaded as five vec4 rows.
                    if self.extra_gpu_data_handles.is_empty() {
                        self.extra_gpu_data_handles.push(GpuCacheHandle::new());
                    }
                    if let Some(mut request) = frame_state.gpu_cache.request(&mut self.extra_gpu_data_handles[0]) {
                        for i in 0..5 {
                            request.push([m[i*4], m[i*4+1], m[i*4+2], m[i*4+3]]);
                        }
                    }
                }
                Filter::Flood(ref color) => {
                    if self.extra_gpu_data_handles.is_empty() {
                        self.extra_gpu_data_handles.push(GpuCacheHandle::new());
                    }
                    if let Some(mut request) = frame_state.gpu_cache.request(&mut self.extra_gpu_data_handles[0]) {
                        request.push(color.to_array());
                    }
                }
                _ => {}
            }
        }
        PictureCompositeMode::ComponentTransferFilter(handle) => {
            let filter_data = &mut data_stores.filter_data[handle];
            filter_data.update(frame_state);
        }
        PictureCompositeMode::MixBlend(..) |
        PictureCompositeMode::Blit(_) |
        PictureCompositeMode::SvgFilter(..) => {}
    }

    true
}
} // presumably closes the enclosing impl block (its header is above this chunk)

// Calculate a single homogeneous screen-space UV for a picture.
// The result carries the homogeneous w so perspective-correct
// interpolation can be done by the consumer.
fn calculate_screen_uv(
    local_pos: &PicturePoint,
    transform: &PictureToRasterTransform,
    rendered_rect: &DeviceRect,
    device_pixel_scale: DevicePixelScale,
) -> DeviceHomogeneousVector {
    let raster_pos = transform.transform_point2d_homogeneous(*local_pos);

    DeviceHomogeneousVector::new(
        (raster_pos.x * device_pixel_scale.0 - rendered_rect.origin.x * raster_pos.w) / rendered_rect.size.width,
        (raster_pos.y * device_pixel_scale.0 - rendered_rect.origin.y * raster_pos.w) / rendered_rect.size.height,
        0.0,
        raster_pos.w,
    )
}

// Calculate a UV rect within an image based on the screen space
// vertex positions of a picture.
fn calculate_uv_rect_kind(
    pic_rect: &PictureRect,
    transform: &PictureToRasterTransform,
    rendered_rect: &DeviceRect,
    device_pixel_scale: DevicePixelScale,
) -> UvRectKind {
    // One homogeneous UV per corner of the picture rect.
    let top_left = calculate_screen_uv(
        &pic_rect.origin,
        transform,
        &rendered_rect,
        device_pixel_scale,
    );

    let top_right = calculate_screen_uv(
        &pic_rect.top_right(),
        transform,
        &rendered_rect,
        device_pixel_scale,
    );

    let bottom_left = calculate_screen_uv(
        &pic_rect.bottom_left(),
        transform,
        &rendered_rect,
        device_pixel_scale,
    );

    let bottom_right = calculate_screen_uv(
        &pic_rect.bottom_right(),
        transform,
        &rendered_rect,
        device_pixel_scale,
    );

    UvRectKind::Quad {
        top_left,
        top_right,
        bottom_left,
        bottom_right,
    }
}

// Build the raster->world and picture->raster space mappers for a surface.
// The picture->raster mapper is bounded by the world rect unmapped into
// raster space (falling back to the maximal rect if unmapping fails).
fn create_raster_mappers(
    surface_spatial_node_index: SpatialNodeIndex,
    raster_spatial_node_index: SpatialNodeIndex,
    world_rect: WorldRect,
    spatial_tree: &SpatialTree,
) -> (SpaceMapper<RasterPixel, WorldPixel>, SpaceMapper<PicturePixel, RasterPixel>) {
    let map_raster_to_world = SpaceMapper::new_with_target(
        ROOT_SPATIAL_NODE_INDEX,
        raster_spatial_node_index,
        world_rect,
        spatial_tree,
    );

    let raster_bounds = map_raster_to_world.unmap(&world_rect)
        .unwrap_or_else(RasterRect::max_rect);

    let map_pic_to_raster = SpaceMapper::new_with_target(
        raster_spatial_node_index,
        surface_spatial_node_index,
        raster_bounds,
        spatial_tree,
    );

    (map_raster_to_world, map_pic_to_raster)
}

// Compute a TransformKey relating a spatial node and a tile cache's
// spatial node, ordering the query so the lower node is always the
// second argument.
fn get_transform_key(
    spatial_node_index: SpatialNodeIndex,
    cache_spatial_node_index: SpatialNodeIndex,
    spatial_tree: &SpatialTree,
) -> TransformKey {
    // Note: this is the only place where we don't know beforehand if the tile-affecting
    // spatial node is below or above the current picture.
    let transform = if cache_spatial_node_index >= spatial_node_index {
        spatial_tree
            .get_relative_transform(
                cache_spatial_node_index,
                spatial_node_index,
            )
    } else {
        spatial_tree
            .get_relative_transform(
                spatial_node_index,
                cache_spatial_node_index,
            )
    };

    transform.into()
}

/// A key for storing primitive comparison results during tile dependency tests.
#[derive(Debug, Copy, Clone, Eq, Hash, PartialEq)]
struct PrimitiveComparisonKey {
    prev_index: PrimitiveDependencyIndex,
    curr_index: PrimitiveDependencyIndex,
}

/// Information stored about an image dependency
#[derive(Debug, Copy, Clone, PartialEq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct ImageDependency {
    pub key: ImageKey,
    pub generation: ImageGeneration,
}

impl ImageDependency {
    // Sentinel value used before a real key/generation is known.
    pub const INVALID: ImageDependency = ImageDependency {
        key: ImageKey::DUMMY,
        generation: ImageGeneration::INVALID,
    };
}

/// A helper struct to compare a primitive and all its sub-dependencies.
struct PrimitiveComparer<'a> {
    // One streaming comparer per dependency kind; each walks the prev/curr
    // dependency arrays in lock-step with the primitive index buffers.
    clip_comparer: CompareHelper<'a, ItemUid>,
    transform_comparer: CompareHelper<'a, SpatialNodeKey>,
    image_comparer: CompareHelper<'a, ImageDependency>,
    opacity_comparer: CompareHelper<'a, OpacityBinding>,
    color_comparer: CompareHelper<'a, ColorBinding>,
    // Used to check whether an image dependency's generation is still current.
    resource_cache: &'a ResourceCache,
    // Resolves whether two spatial node keys map to equivalent transforms.
    spatial_node_comparer: &'a mut SpatialNodeComparer,
    // Current binding states, used to detect changed animated values.
    opacity_bindings: &'a FastHashMap<PropertyBindingId, OpacityBindingInfo>,
    color_bindings: &'a FastHashMap<PropertyBindingId, ColorBindingInfo>,
}

impl<'a> PrimitiveComparer<'a> {
    /// Construct a comparer over the dependency lists of two tile descriptors
    /// (previous frame vs current frame).
    fn new(
        prev: &'a TileDescriptor,
        curr: &'a TileDescriptor,
        resource_cache: &'a ResourceCache,
        spatial_node_comparer: &'a mut SpatialNodeComparer,
        opacity_bindings: &'a FastHashMap<PropertyBindingId, OpacityBindingInfo>,
        color_bindings: &'a FastHashMap<PropertyBindingId, ColorBindingInfo>,
    ) -> Self {
        let clip_comparer = CompareHelper::new(
            &prev.clips,
            &curr.clips,
        );

        let transform_comparer = CompareHelper::new(
            &prev.transforms,
            &curr.transforms,
        );

        let image_comparer = CompareHelper::new(
            &prev.images,
            &curr.images,
        );

        let opacity_comparer = CompareHelper::new(
            &prev.opacity_bindings,
            &curr.opacity_bindings,
        );

        let color_comparer = CompareHelper::new(
            &prev.color_bindings,
            &curr.color_bindings,
        );

        PrimitiveComparer {
            clip_comparer,
            transform_comparer,
            image_comparer,
            opacity_comparer,
            color_comparer,
            resource_cache,
            spatial_node_comparer,
            opacity_bindings,
            color_bindings,
        }
    }

    /// Rewind all sub-comparers to the start of their dependency arrays.
    fn reset(&mut self) {
        self.clip_comparer.reset();
        self.transform_comparer.reset();
        self.image_comparer.reset();
        self.opacity_comparer.reset();
        self.color_comparer.reset();
    }

    /// Skip past the previous-frame dependencies of a primitive that is not
    /// being compared (keeps the streaming cursors in sync).
    fn advance_prev(&mut self, prim: &PrimitiveDescriptor) {
        self.clip_comparer.advance_prev(prim.clip_dep_count);
        self.transform_comparer.advance_prev(prim.transform_dep_count);
        self.image_comparer.advance_prev(prim.image_dep_count);
        self.opacity_comparer.advance_prev(prim.opacity_binding_dep_count);
        self.color_comparer.advance_prev(prim.color_binding_dep_count);
    }

    /// Skip past the current-frame dependencies of a primitive that is not
    /// being compared (keeps the streaming cursors in sync).
    fn advance_curr(&mut self, prim: &PrimitiveDescriptor) {
        self.clip_comparer.advance_curr(prim.clip_dep_count);
        self.transform_comparer.advance_curr(prim.transform_dep_count);
        self.image_comparer.advance_curr(prim.image_dep_count);
        self.opacity_comparer.advance_curr(prim.opacity_binding_dep_count);
        self.color_comparer.advance_curr(prim.color_binding_dep_count);
    }

    /// Check if two primitive descriptors are the same.
    ///
    /// Returns the first difference found, checking cheapest-first:
    /// descriptor, clips, transforms, images, opacity bindings, color
    /// bindings. `opt_detail`, when provided, receives an expanded
    /// description of that difference (used by tile-cache logging).
    fn compare_prim(
        &mut self,
        prev: &PrimitiveDescriptor,
        curr: &PrimitiveDescriptor,
        opt_detail: Option<&mut PrimitiveCompareResultDetail>,
    ) -> PrimitiveCompareResult {
        let resource_cache = self.resource_cache;
        let spatial_node_comparer = &mut self.spatial_node_comparer;
        let opacity_bindings = self.opacity_bindings;
        let color_bindings = self.color_bindings;

        // Check equality of the PrimitiveDescriptor
        if prev != curr {
            if let Some(detail) = opt_detail {
                *detail = PrimitiveCompareResultDetail::Descriptor{ old: *prev, new: *curr };
            }
            return PrimitiveCompareResult::Descriptor;
        }

        // Check if any of the clips this prim has are different.
        let mut clip_result = CompareHelperResult::Equal;
        if !self.clip_comparer.is_same(
            prev.clip_dep_count,
            curr.clip_dep_count,
            |prev, curr| {
                prev == curr
            },
            if opt_detail.is_some() { Some(&mut clip_result) } else { None }
        ) {
            if let Some(detail) = opt_detail {
                *detail = PrimitiveCompareResultDetail::Clip{ detail: clip_result };
            }
            return PrimitiveCompareResult::Clip;
        }

        // Check if any of the transforms this prim has are different.
        let mut transform_result = CompareHelperResult::Equal;
        if !self.transform_comparer.is_same(
            prev.transform_dep_count,
            curr.transform_dep_count,
            |prev, curr| {
                spatial_node_comparer.are_transforms_equivalent(prev, curr)
            },
            if opt_detail.is_some() { Some(&mut transform_result) } else { None },
        ) {
            if let Some(detail) = opt_detail {
                *detail = PrimitiveCompareResultDetail::Transform{ detail: transform_result };
            }
            return PrimitiveCompareResult::Transform;
        }

        // Check if any of the images this prim has are different.
        // An image differs if its key changed OR its cached generation is stale.
        let mut image_result = CompareHelperResult::Equal;
        if !self.image_comparer.is_same(
            prev.image_dep_count,
            curr.image_dep_count,
            |prev, curr| {
                prev == curr &&
                resource_cache.get_image_generation(curr.key) == curr.generation
            },
            if opt_detail.is_some() { Some(&mut image_result) } else { None },
        ) {
            if let Some(detail) = opt_detail {
                *detail = PrimitiveCompareResultDetail::Image{ detail: image_result };
            }
            return PrimitiveCompareResult::Image;
        }

        // Check if any of the opacity bindings this prim has are different.
        // A binding whose id is missing from the map is treated as changed.
        let mut bind_result = CompareHelperResult::Equal;
        if !self.opacity_comparer.is_same(
            prev.opacity_binding_dep_count,
            curr.opacity_binding_dep_count,
            |prev, curr| {
                if prev != curr {
                    return false;
                }

                if let OpacityBinding::Binding(id) = curr {
                    if opacity_bindings
                        .get(id)
                        .map_or(true, |info| info.changed) {
                        return false;
                    }
                }

                true
            },
            if opt_detail.is_some() { Some(&mut bind_result) } else { None },
        ) {
            if let Some(detail) = opt_detail {
                *detail = PrimitiveCompareResultDetail::OpacityBinding{ detail: bind_result };
            }
            return PrimitiveCompareResult::OpacityBinding;
        }

        // Check if any of the color bindings this prim has are different.
        // Same rule as opacity: unknown or changed binding ids invalidate.
        let mut bind_result = CompareHelperResult::Equal;
        if !self.color_comparer.is_same(
            prev.color_binding_dep_count,
            curr.color_binding_dep_count,
            |prev, curr| {
                if prev != curr {
                    return false;
                }

                if let ColorBinding::Binding(id) = curr {
                    if color_bindings
                        .get(id)
                        .map_or(true, |info| info.changed) {
                        return false;
                    }
                }

                true
            },
            if opt_detail.is_some() { Some(&mut bind_result) } else { None },
        ) {
            if let Some(detail) = opt_detail {
                *detail = PrimitiveCompareResultDetail::ColorBinding{ detail: bind_result };
            }
            return PrimitiveCompareResult::ColorBinding;
        }

        PrimitiveCompareResult::Equal
    }
}

/// Details for a node in a quadtree that tracks dirty rects for a tile.
#[cfg_attr(any(feature="capture",feature="replay"), derive(Clone))]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum TileNodeKind {
    Leaf {
        /// The index buffer of primitives that affected this tile previous frame
        #[cfg_attr(any(feature = "capture", feature = "replay"), serde(skip))]
        prev_indices: Vec<PrimitiveDependencyIndex>,
        /// The index buffer of primitives that affect this tile on this frame
        #[cfg_attr(any(feature = "capture", feature = "replay"), serde(skip))]
        curr_indices: Vec<PrimitiveDependencyIndex>,
        /// A bitset of which of the last 64 frames have been dirty for this leaf.
        #[cfg_attr(any(feature = "capture", feature = "replay"), serde(skip))]
        dirty_tracker: u64,
        /// The number of frames since this node split or merged.
        #[cfg_attr(any(feature = "capture", feature = "replay"), serde(skip))]
        frames_since_modified: usize,
    },
    Node {
        /// The four children of this node
        children: Vec<TileNode>,
    },
}

/// The kind of modification that a tile wants to do
#[derive(Copy, Clone, PartialEq, Debug)]
enum TileModification {
    Split,
    Merge,
}

/// A node in the dirty rect tracking quadtree.
#[cfg_attr(any(feature="capture",feature="replay"), derive(Clone))]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct TileNode {
    /// Leaf or internal node
    pub kind: TileNodeKind,

    /// Rect of this node in the same space as the tile cache picture
    pub rect: PictureBox2D,
}

impl TileNode {
    /// Construct a new leaf node, with the given primitive dependency index buffer
    fn new_leaf(curr_indices: Vec<PrimitiveDependencyIndex>) -> Self {
        TileNode {
            kind: TileNodeKind::Leaf {
                prev_indices: Vec::new(),
                curr_indices,
                dirty_tracker: 0,
                frames_since_modified: 0,
            },
            // Placeholder; the real rect is assigned by `clear` each frame.
            rect: PictureBox2D::zero(),
        }
    }

    /// Draw debug information about this tile node
    ///
    /// Leaves are outlined red when dirty this frame (bit 0 of the tracker),
    /// green when opaque, yellow otherwise; internal nodes just recurse.
    fn draw_debug_rects(
        &self,
        pic_to_world_mapper: &SpaceMapper<PicturePixel, WorldPixel>,
        is_opaque: bool,
        local_valid_rect: PictureRect,
        scratch: &mut PrimitiveScratchBuffer,
        global_device_pixel_scale: DevicePixelScale,
    ) {
        match self.kind {
            TileNodeKind::Leaf { dirty_tracker, .. } => {
                let color = if (dirty_tracker & 1) != 0 {
                    debug_colors::RED
                } else if is_opaque {
                    debug_colors::GREEN
                } else {
                    debug_colors::YELLOW
                };

                if let Some(local_rect) = local_valid_rect.intersection(&self.rect.to_rect()) {
                    let world_rect = pic_to_world_mapper
                        .map(&local_rect)
                        .unwrap();
                    let device_rect = world_rect * global_device_pixel_scale;

                    let outer_color = color.scale_alpha(0.3);
                    let inner_color = outer_color.scale_alpha(0.5);
                    // Deflate slightly so adjacent node outlines stay distinguishable.
                    scratch.push_debug_rect(
                        device_rect.inflate(-3.0, -3.0),
                        outer_color,
                        inner_color
                    );
                }
            }
            TileNodeKind::Node { ref children, .. } => {
                for child in children.iter() {
                    child.draw_debug_rects(
                        pic_to_world_mapper,
                        is_opaque,
                        local_valid_rect,
                        scratch,
                        global_device_pixel_scale,
                    );
                }
            }
        }
    }

    /// Calculate the four child rects for a given node
    /// (quadrant order: top-left, top-right, bottom-left, bottom-right).
    fn get_child_rects(
        rect: &PictureBox2D,
        result: &mut [PictureBox2D; 4],
    ) {
        let p0 = rect.min;
        let p1 = rect.max;
        let pc = p0 + rect.size() * 0.5;

        *result = [
            PictureBox2D::new(
                p0,
                pc,
            ),
            PictureBox2D::new(
                PicturePoint::new(pc.x, p0.y),
                PicturePoint::new(p1.x, pc.y),
            ),
            PictureBox2D::new(
                PicturePoint::new(p0.x, pc.y),
                PicturePoint::new(pc.x, p1.y),
            ),
            PictureBox2D::new(
                pc,
                p1,
            ),
        ];
    }

    /// Called during pre_update, to clear the current dependencies
    /// and roll the per-frame bookkeeping forward one frame.
    fn clear(
        &mut self,
        rect: PictureBox2D,
    ) {
        self.rect = rect;

        match self.kind {
            TileNodeKind::Leaf { ref mut prev_indices, ref mut curr_indices, ref mut dirty_tracker, ref mut frames_since_modified } => {
                // Swap current dependencies to be the previous frame
                mem::swap(prev_indices, curr_indices);
                curr_indices.clear();
                // Note that another frame has passed in the dirty bit trackers
                *dirty_tracker = *dirty_tracker << 1;
                *frames_since_modified += 1;
            }
            TileNodeKind::Node { ref mut children, .. } => {
                let mut child_rects = [PictureBox2D::zero(); 4];
                TileNode::get_child_rects(&rect, &mut child_rects);
                assert_eq!(child_rects.len(), children.len());

                for (child, rect) in children.iter_mut().zip(child_rects.iter()) {
                    child.clear(*rect);
                }
            }
        }
    }

    /// Add a primitive dependency to this node
    /// (recursing into every child whose rect the primitive intersects).
    fn add_prim(
        &mut self,
        index: PrimitiveDependencyIndex,
        prim_rect: &PictureBox2D,
    ) {
        match self.kind {
            TileNodeKind::Leaf { ref mut curr_indices, .. } => {
                curr_indices.push(index);
            }
            TileNodeKind::Node { ref mut children, .. } => {
                for child in children.iter_mut() {
                    if child.rect.intersects(prim_rect) {
                        child.add_prim(index, prim_rect);
                    }
                }
            }
        }
    }

    /// Apply a merge or split operation to this tile, if desired
    fn maybe_merge_or_split(
        &mut self,
        level: i32,
        curr_prims: &[PrimitiveDescriptor],
        max_split_levels: i32,
    ) {
        // Determine if this tile wants to split or merge
        let mut tile_mod = None;

        // Returns how many of the recently tracked frames were dirty, or None
        // if the node changed shape too recently for the stats to be meaningful.
        fn get_dirty_frames(
            dirty_tracker: u64,
            frames_since_modified: usize,
        ) -> Option<u32> {
            // Only consider splitting or merging if at least 64 frames have
            // passed since we last changed shape.
            if frames_since_modified > 64 {
                // Each bit in the tracker is a frame that was recently invalidated
                Some(dirty_tracker.count_ones())
            } else {
                None
            }
        }

        match self.kind {
            TileNodeKind::Leaf { dirty_tracker, frames_since_modified, .. } => {
                // Only consider splitting if the tree isn't too deep.
                if level < max_split_levels {
                    if let Some(dirty_frames) = get_dirty_frames(dirty_tracker, frames_since_modified) {
                        // If the tile has invalidated > 50% of the recent number of frames, split.
                        if dirty_frames > 32 {
                            tile_mod = Some(TileModification::Split);
                        }
                    }
                }
            }
            TileNodeKind::Node { ref children, .. } => {
                // There's two conditions that cause a node to merge its children:
                // (1) If _all_ the child nodes are constantly invalidating, then we are wasting
                //     CPU time tracking dependencies for each child, so merge them.
                // (2) If _none_ of the child nodes are recently invalid, then the page content
                //     has probably changed, and we no longer need to track fine grained dependencies here.

                let mut static_count = 0;
                let mut changing_count = 0;

                for child in children {
                    // Only consider merging nodes at the edge of the tree.
                    if let TileNodeKind::Leaf { dirty_tracker, frames_since_modified, .. } = child.kind {
                        if let Some(dirty_frames) = get_dirty_frames(dirty_tracker, frames_since_modified) {
                            if dirty_frames == 0 {
                                // Hasn't been invalidated for some time
                                static_count += 1;
                            } else if dirty_frames == 64 {
                                // Is constantly being invalidated
                                changing_count += 1;
                            }
                        }
                    }

                    // Only merge if all the child tiles are in agreement. Otherwise, we have some
                    // that are invalidating / static, and it's worthwhile tracking dependencies for
                    // them individually. (Counts are monotonic, so this check inside the
                    // loop can only fire once all four children have been examined.)
                    if static_count == 4 || changing_count == 4 {
                        tile_mod = Some(TileModification::Merge);
                    }
                }
            }
        }

        match tile_mod {
            Some(TileModification::Split) => {
                // To split a node, take the current dependency index buffer for this node, and
                // split it into child index buffers.
                let curr_indices = match self.kind {
                    TileNodeKind::Node { .. } => {
                        unreachable!("bug - only leaves can split");
                    }
                    TileNodeKind::Leaf { ref mut curr_indices, .. } => {
                        curr_indices.take()
                    }
                };

                let mut child_rects = [PictureBox2D::zero(); 4];
                TileNode::get_child_rects(&self.rect, &mut child_rects);

                let mut child_indices = [
                    Vec::new(),
                    Vec::new(),
                    Vec::new(),
                    Vec::new(),
                ];

                // Step through the index buffer, and add primitives to each of the children
                // that they intersect.
                for index in curr_indices {
                    let prim = &curr_prims[index.0 as usize];
                    for (child_rect, indices) in child_rects.iter().zip(child_indices.iter_mut()) {
                        if prim.prim_clip_box.intersects(child_rect) {
                            indices.push(index);
                        }
                    }
                }

                // Create the child nodes and switch from leaf -> node.
                let children = child_indices
                    .iter_mut()
                    .map(|i| TileNode::new_leaf(mem::replace(i, Vec::new())))
                    .collect();

                self.kind = TileNodeKind::Node {
                    children,
                };
            }
            Some(TileModification::Merge) => {
                // Construct a merged index buffer by collecting the dependency index buffers
                // from each child, and merging them into a de-duplicated index buffer.
                let merged_indices = match self.kind {
                    TileNodeKind::Node { ref mut children, .. } => {
                        let mut merged_indices = Vec::new();

                        for child in children.iter() {
                            let child_indices = match child.kind {
                                TileNodeKind::Leaf { ref curr_indices, .. } => {
                                    curr_indices
                                }
                                TileNodeKind::Node { .. } => {
                                    unreachable!("bug: child is not a leaf");
                                }
                            };
                            merged_indices.extend_from_slice(child_indices);
                        }

                        // sort + dedup: the same primitive may intersect several children.
                        merged_indices.sort();
                        merged_indices.dedup();

                        merged_indices
                    }
                    TileNodeKind::Leaf { .. } => {
                        unreachable!("bug - trying to merge a leaf");
                    }
                };

                // Switch from a node to a leaf, with the combined index buffer
                self.kind = TileNodeKind::Leaf {
                    prev_indices: Vec::new(),
                    curr_indices: merged_indices,
                    dirty_tracker: 0,
                    frames_since_modified: 0,
                };
            }
            None => {
                // If this node didn't merge / split, then recurse into children
                // to see if they want to split / merge.
                if let TileNodeKind::Node { ref mut children, .. } = self.kind {
                    for child in children.iter_mut() {
                        child.maybe_merge_or_split(
                            level+1,
                            curr_prims,
                            max_split_levels,
                        );
                    }
                }
            }
        }
    }

    /// Update the dirty state of this node, building the overall dirty rect
    ///
    /// Compares this leaf's prev/curr primitive index buffers; on the first
    /// difference, unions this node's rect into `dirty_rect`, sets bit 0 of
    /// the dirty tracker, and records the first invalidation reason seen.
    fn update_dirty_rects(
        &mut self,
        prev_prims: &[PrimitiveDescriptor],
        curr_prims: &[PrimitiveDescriptor],
        prim_comparer: &mut PrimitiveComparer,
        dirty_rect: &mut PictureBox2D,
        compare_cache: &mut FastHashMap<PrimitiveComparisonKey, PrimitiveCompareResult>,
        invalidation_reason: &mut Option<InvalidationReason>,
        frame_context: &FrameVisibilityContext,
    ) {
        match self.kind {
            TileNodeKind::Node { ref mut children, .. } => {
                for child in children.iter_mut() {
                    child.update_dirty_rects(
                        prev_prims,
                        curr_prims,
                        prim_comparer,
                        dirty_rect,
                        compare_cache,
                        invalidation_reason,
                        frame_context,
                    );
                }
            }
            TileNodeKind::Leaf { ref prev_indices, ref curr_indices, ref mut dirty_tracker, .. } => {
                // If the index buffers are of different length, they must be different
                if prev_indices.len() == curr_indices.len() {
                    let mut prev_i0 = 0;
                    let mut prev_i1 = 0;
                    prim_comparer.reset();

                    // Walk each index buffer, comparing primitives
                    for (prev_index, curr_index) in prev_indices.iter().zip(curr_indices.iter()) {
                        let i0 = prev_index.0 as usize;
                        let i1 = curr_index.0 as usize;

                        // Advance the dependency arrays for each primitive (this handles
                        // prims that may be skipped by these index buffers).
                        for i in prev_i0 .. i0 {
                            prim_comparer.advance_prev(&prev_prims[i]);
                        }
                        for i in prev_i1 .. i1 {
                            prim_comparer.advance_curr(&curr_prims[i]);
                        }

                        // Compare the primitives, caching the result in a hash map
                        // to save comparisons in other tree nodes.
                        let key = PrimitiveComparisonKey {
                            prev_index: *prev_index,
                            curr_index: *curr_index,
                        };

                        // Detailed comparison results are only collected when the
                        // capture/replay features are compiled in AND tile cache
                        // logging is enabled at runtime.
                        #[cfg(any(feature = "capture", feature = "replay"))]
                        let mut compare_detail = PrimitiveCompareResultDetail::Equal;
                        #[cfg(any(feature = "capture", feature = "replay"))]
                        let prim_compare_result_detail =
                            if frame_context.debug_flags.contains(DebugFlags::TILE_CACHE_LOGGING_DBG) {
                                Some(&mut compare_detail)
                            } else {
                                None
                            };

                        #[cfg(not(any(feature = "capture", feature = "replay")))]
                        let compare_detail = PrimitiveCompareResultDetail::Equal;
                        #[cfg(not(any(feature = "capture", feature = "replay")))]
                        let prim_compare_result_detail = None;

                        let prim_compare_result = *compare_cache
                            .entry(key)
                            .or_insert_with(|| {
                                let prev = &prev_prims[i0];
                                let curr = &curr_prims[i1];
                                prim_comparer.compare_prim(prev, curr, prim_compare_result_detail)
                            });

                        // If not the same, mark this node as dirty and update the dirty rect
                        if prim_compare_result != PrimitiveCompareResult::Equal {
                            if invalidation_reason.is_none() {
                                *invalidation_reason = Some(InvalidationReason::Content {
                                    prim_compare_result,
                                    prim_compare_result_detail: Some(compare_detail)
                                });
                            }
                            *dirty_rect = self.rect.union(dirty_rect);
                            *dirty_tracker = *dirty_tracker | 1;
                            break;
                        }

                        prev_i0 = i0;
                        prev_i1 = i1;
                    }
                } else {
                    if invalidation_reason.is_none() {
                        // if and only if tile logging is enabled, do the expensive step of
                        // converting indices back to ItemUids and allocating old and new vectors
                        // to store them in.
                        #[cfg(any(feature = "capture", feature = "replay"))]
                        {
                            if frame_context.debug_flags.contains(DebugFlags::TILE_CACHE_LOGGING_DBG) {
                                let old = prev_indices.iter().map( |i| prev_prims[i.0 as usize].prim_uid ).collect();
                                let new = curr_indices.iter().map( |i| curr_prims[i.0 as usize].prim_uid ).collect();
                                *invalidation_reason = Some(InvalidationReason::PrimCount {
                                                                old: Some(old),
                                                                new: Some(new) });
                            } else {
                                *invalidation_reason = Some(InvalidationReason::PrimCount {
                                                                old: None,
                                                                new: None });
                            }
                        }
                        #[cfg(not(any(feature = "capture", feature = "replay")))]
                        {
                            *invalidation_reason = Some(InvalidationReason::PrimCount {
                                                            old: None,
                                                            new: None });
                        }
                    }
                    *dirty_rect = self.rect.union(dirty_rect);
                    *dirty_tracker = *dirty_tracker | 1;
                }
            }
        }
    }
}

impl CompositeState {
    // A helper function to destroy all native surfaces for a given list of tiles
    pub fn destroy_native_tiles<'a, I: Iterator<Item = &'a mut Box<Tile>>>(
        &mut self,
        tiles_iter: I,
        resource_cache: &mut ResourceCache,
    ) {
        // Any old tiles that remain after the loop above are going to be dropped. For
        // simple composite mode, the texture cache handle will expire and be collected
        // by the texture cache. For native compositor mode, we need to explicitly
        // invoke a callback to the client to destroy that surface.
        if let CompositorKind::Native { .. } = self.compositor_kind {
            for tile in tiles_iter {
                // Only destroy native surfaces that have been allocated. It's
                // possible for display port tiles to be created that never
                // come on screen, and thus never get a native surface allocated.
                if let Some(TileSurface::Texture { descriptor: SurfaceTextureDescriptor::Native { ref mut id, .. }, .. }) = tile.surface {
                    if let Some(id) = id.take() {
                        resource_cache.destroy_compositor_tile(id);
                    }
                }
            }
        }
    }
}

/// Map a picture rect into device space, returning both the clipped and
/// unclipped device rects (clipped against `prim_bounding_rect` in world
/// space). Returns None if any forward mapping fails, the world intersection
/// is empty, or the clipped result would be zero-sized.
pub fn get_raster_rects(
    pic_rect: PictureRect,
    map_to_raster: &SpaceMapper<PicturePixel, RasterPixel>,
    map_to_world: &SpaceMapper<RasterPixel, WorldPixel>,
    prim_bounding_rect: WorldRect,
    device_pixel_scale: DevicePixelScale,
) -> Option<(DeviceRect, DeviceRect)> {
    let unclipped_raster_rect = map_to_raster.map(&pic_rect)?;

    let unclipped = raster_rect_to_device_pixels(
        unclipped_raster_rect,
        device_pixel_scale,
    );

    let unclipped_world_rect = map_to_world.map(&unclipped_raster_rect)?;

    let clipped_world_rect = unclipped_world_rect.intersection(&prim_bounding_rect)?;

    // We don't have to be able to do the back-projection from world into raster.
    // Rendering only cares one way, so if that fails, we fall back to the full rect.
    let clipped_raster_rect = match map_to_world.unmap(&clipped_world_rect) {
        Some(rect) => rect.intersection(&unclipped_raster_rect)?,
        None => return Some((unclipped, unclipped)),
    };

    let clipped = raster_rect_to_device_pixels(
        clipped_raster_rect,
        device_pixel_scale,
    );

    // Ensure that we won't try to allocate a zero-sized clip render task.
    if clipped.is_empty() {
        return None;
    }

    Some((clipped, unclipped))
}
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

//! A picture represents a dynamically rendered image.
//!
//! # Overview
//!
//! A picture consists of:
//!
//! - A number of primitives that are drawn onto the picture.
//! - A composite operation describing how to composite this
//!   picture into its parent.
//! - A configuration describing how to draw the primitives on
//!   this picture (e.g. in screen space or local space).
//!
//! The tree of pictures is generated during scene building.
//!
//! Depending on their composite operations pictures can be rendered into
//! intermediate targets or folded into their parent picture.
//!
//! ## Picture caching
//!
//! Pictures can be cached to reduce the amount of rasterization happening per
//! frame.
//!
//! When picture caching is enabled, the scene is cut into a small number of slices,
//! typically:
//!
//! - content slice
//! - UI slice
//! - background UI slice which is hidden by the other two slices most of the time.
//!
//! Each of these slices is made up of fixed-size large tiles of 2048x512 pixels
//! (or 128x128 for the UI slice).
//!
//! Tiles can be either cached rasterized content into a texture or "clear tiles"
//! that contain only a solid color rectangle rendered directly during the composite
//! pass.
//!
//! ## Invalidation
//!
//! Each tile keeps track of the elements that affect it, which can be:
//!
//! - primitives
//! - clips
//! - image keys
//! - opacity bindings
//! - transforms
//!
//! These dependency lists are built each frame and compared to the previous frame to
//! see if the tile changed.
//!
//! The tile's primitive dependency information is organized in a quadtree, each node
//! storing an index buffer of tile primitive dependencies.
//!
//! The union of the invalidated leaves of each quadtree produces a per-tile dirty rect
//! which defines the scissor rect used when replaying the tile's drawing commands and
//! can be used for partial present.
//!
//! ## Display List shape
//!
//! WR will first look for an iframe item in the root stacking context to apply
//! picture caching to. If that's not found, it will apply to the entire root
//! stacking context of the display list. Apart from that, the format of the
//! display list is not important to picture caching. Each time a new scroll root
//! is encountered, a new picture cache slice will be created. If the display
//! list contains more than some arbitrary number of slices (currently 8), the
//! content will all be squashed into a single slice, in order to save GPU memory
//! and compositing performance.

use api::{MixBlendMode, PipelineId, PremultipliedColorF, FilterPrimitiveKind};
use api::{PropertyBinding, PropertyBindingId, FilterPrimitive, FontRenderMode};
use api::{DebugFlags, RasterSpace, ImageKey, ColorF, PrimitiveFlags};
use api::units::*;
use crate::box_shadow::{BLUR_SAMPLE_SCALE};
use crate::clip::{ClipStore, ClipChainInstance, ClipDataHandle, ClipChainId};
use crate::clip_scroll_tree::{ROOT_SPATIAL_NODE_INDEX, ClipScrollTree, CoordinateSpaceMapping, SpatialNodeIndex, VisibleFace };
use crate::composite::{CompositorKind, CompositeState, NativeSurfaceId};
use crate::debug_colors;
use euclid::{vec3, Point2D, Scale, Size2D, Vector2D, Rect};
use euclid::approxeq::ApproxEq;
use crate::filterdata::SFilterData;
use crate::frame_builder::{FrameVisibilityContext, FrameVisibilityState};
use crate::intern::ItemUid;
use crate::internal_types::{FastHashMap, FastHashSet, PlaneSplitter, Filter, PlaneSplitAnchor, TextureSource};
use crate::frame_builder::{FrameBuildingContext, FrameBuildingState, PictureState, PictureContext};
use crate::gpu_cache::{GpuCache, GpuCacheAddress, GpuCacheHandle};
use crate::gpu_types::UvRectKind;
use plane_split::{Clipper, Polygon, Splitter};
use crate::prim_store::{SpaceMapper, PrimitiveVisibilityMask, PointKey, PrimitiveTemplateKind};
use crate::prim_store::{SpaceSnapper, PictureIndex, PrimitiveInstance, PrimitiveInstanceKind};
use crate::prim_store::{get_raster_rects, PrimitiveScratchBuffer, RectangleKey};
use crate::prim_store::{OpacityBindingStorage, ImageInstanceStorage, OpacityBindingIndex};
use crate::print_tree::{PrintTree, PrintTreePrinter};
use crate::render_backend::DataStores;
use crate::render_task_graph::RenderTaskId;
use crate::render_target::RenderTargetKind;
use crate::render_task::{RenderTask, RenderTaskLocation, BlurTaskCache, ClearMode};
use crate::resource_cache::{ResourceCache, ImageGeneration};
use crate::scene::SceneProperties;
use smallvec::SmallVec;
use std::{mem, u8, marker, u32};
use std::sync::atomic::{AtomicUsize, Ordering};
use crate::texture_cache::TextureCacheHandle;
use crate::util::{TransformedRectKind, MatrixHelpers, MaxRect, scale_factors, VecHelper, RectHelpers};
use crate::filterdata::{FilterDataHandle};

/// Specify whether a surface allows subpixel AA text rendering.
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum SubpixelMode {
    /// This surface allows subpixel AA text
    Allow,
    /// Subpixel AA text cannot be drawn on this surface
    Deny,
}

/// A comparable transform matrix, that compares with epsilon checks.
#[derive(Debug, Clone)]
struct MatrixKey {
    // Row-major 4x4 matrix components (see `to_row_major_array` at the
    // construction site).
    m: [f32; 16],
}

impl PartialEq for MatrixKey {
    /// Component-wise approximate equality; two keys compare equal when every
    /// component differs by no more than EPSILON.
    fn eq(&self, other: &Self) -> bool {
        const EPSILON: f32 = 0.001;

        // TODO(gw): It's possible that we may need to adjust the epsilon
        //           to be tighter on most of the matrix, except the
        //           translation parts?
        for (i, j) in self.m.iter().zip(other.m.iter()) {
            if !i.approx_eq_eps(j, &EPSILON) {
                return false;
            }
        }

        true
    }
}

/// A comparable / hashable version of a coordinate space mapping. Used to determine
/// if a transform dependency for a tile has changed.
#[derive(Debug, PartialEq, Clone)] enum TransformKey { Local, ScaleOffset { scale_x: f32, scale_y: f32, offset_x: f32, offset_y: f32, }, Transform { m: MatrixKey, } } impl<Src, Dst> From<CoordinateSpaceMapping<Src, Dst>> for TransformKey { fn from(transform: CoordinateSpaceMapping<Src, Dst>) -> TransformKey { match transform { CoordinateSpaceMapping::Local => { TransformKey::Local } CoordinateSpaceMapping::ScaleOffset(ref scale_offset) => { TransformKey::ScaleOffset { scale_x: scale_offset.scale.x, scale_y: scale_offset.scale.y, offset_x: scale_offset.offset.x, offset_y: scale_offset.offset.y, } } CoordinateSpaceMapping::Transform(ref m) => { TransformKey::Transform { m: MatrixKey { m: m.to_row_major_array(), }, } } } } } /// Information about a picture that is pushed / popped on the /// PictureUpdateState during picture traversal pass. struct PictureInfo { /// The spatial node for this picture. _spatial_node_index: SpatialNodeIndex, } /// Picture-caching state to keep between scenes. pub struct PictureCacheState { /// The tiles retained by this picture cache. pub tiles: FastHashMap<TileOffset, Tile>, /// State of the spatial nodes from previous frame spatial_nodes: FastHashMap<SpatialNodeIndex, SpatialNodeDependency>, /// State of opacity bindings from previous frame opacity_bindings: FastHashMap<PropertyBindingId, OpacityBindingInfo>, /// The current transform of the picture cache root spatial node root_transform: TransformKey, /// The current tile size in device pixels current_tile_size: DeviceIntSize, /// Various allocations we want to avoid re-doing. allocations: PictureCacheRecycledAllocations, } pub struct PictureCacheRecycledAllocations { old_tiles: FastHashMap<TileOffset, Tile>, old_opacity_bindings: FastHashMap<PropertyBindingId, OpacityBindingInfo>, compare_cache: FastHashMap<PrimitiveComparisonKey, PrimitiveCompareResult>, } /// Stores a list of cached picture tiles that are retained /// between new scenes. 
#[cfg_attr(feature = "capture", derive(Serialize))] pub struct RetainedTiles { /// The tiles retained between display lists. #[cfg_attr(feature = "capture", serde(skip))] //TODO pub caches: FastHashMap<usize, PictureCacheState>, } impl RetainedTiles { pub fn new() -> Self { RetainedTiles { caches: FastHashMap::default(), } } /// Merge items from one retained tiles into another. pub fn merge(&mut self, other: RetainedTiles) { assert!(self.caches.is_empty() || other.caches.is_empty()); if self.caches.is_empty() { self.caches = other.caches; } } } /// Unit for tile coordinates. #[derive(Hash, Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)] pub struct TileCoordinate; // Geometry types for tile coordinates. pub type TileOffset = Point2D<i32, TileCoordinate>; pub type TileSize = Size2D<i32, TileCoordinate>; pub type TileRect = Rect<i32, TileCoordinate>; /// The size in device pixels of a normal cached tile. pub const TILE_SIZE_DEFAULT: DeviceIntSize = DeviceIntSize { width: 1024, height: 512, _unit: marker::PhantomData, }; /// The size in device pixels of a tile for horizontal scroll bars pub const TILE_SIZE_SCROLLBAR_HORIZONTAL: DeviceIntSize = DeviceIntSize { width: 512, height: 16, _unit: marker::PhantomData, }; /// The size in device pixels of a tile for vertical scroll bars pub const TILE_SIZE_SCROLLBAR_VERTICAL: DeviceIntSize = DeviceIntSize { width: 16, height: 512, _unit: marker::PhantomData, }; // Return the list of tile sizes for the renderer to allocate texture arrays for. pub fn tile_cache_sizes() -> &'static [DeviceIntSize] { &[ TILE_SIZE_DEFAULT, TILE_SIZE_SCROLLBAR_HORIZONTAL, TILE_SIZE_SCROLLBAR_VERTICAL, ] } /// The maximum size per axis of a surface, /// in WorldPixel coordinates. const MAX_SURFACE_SIZE: f32 = 4096.0; /// The maximum number of sub-dependencies (e.g. clips, transforms) we can handle /// per-primitive. If a primitive has more than this, it will invalidate every frame. 
const MAX_PRIM_SUB_DEPS: usize = u8::MAX as usize;

/// Used to get unique tile IDs, even when the tile cache is
/// destroyed between display lists / scenes.
static NEXT_TILE_ID: AtomicUsize = AtomicUsize::new(0);

// Clamp an integer to [low, high]. Unlike i32::clamp, returns `high` rather
// than panicking if low > high (callers always pass low <= high here).
fn clamp(value: i32, low: i32, high: i32) -> i32 {
    value.max(low).min(high)
}

// Float variant of clamp above; same low > high note applies.
fn clampf(value: f32, low: f32, high: f32) -> f32 {
    value.max(low).min(high)
}

/// An index into the prims array in a TileDescriptor.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct PrimitiveDependencyIndex(u32);

/// Information about the state of an opacity binding.
#[derive(Debug)]
pub struct OpacityBindingInfo {
    /// The current value retrieved from dynamic scene properties.
    value: f32,
    /// True if it was changed (or is new) since the last frame build.
    changed: bool,
}

/// Information stored in a tile descriptor for an opacity binding.
#[derive(Debug, PartialEq, Clone)]
pub enum OpacityBinding {
    Value(f32),
    Binding(PropertyBindingId),
}

// Collapse a property binding into the comparable OpacityBinding form, keeping
// only the binding id (the default value is resolved elsewhere).
impl From<PropertyBinding<f32>> for OpacityBinding {
    fn from(binding: PropertyBinding<f32>) -> OpacityBinding {
        match binding {
            PropertyBinding::Binding(key, _) => OpacityBinding::Binding(key.id),
            PropertyBinding::Value(value) => OpacityBinding::Value(value),
        }
    }
}

/// Information about the state of a spatial node value
#[derive(Debug)]
pub struct SpatialNodeDependency {
    /// The current value retrieved from the clip-scroll tree.
    value: TransformKey,
    /// True if it was changed (or is new) since the last frame build.
    changed: bool,
}

// Immutable context passed to picture cache tiles during pre_update
struct TilePreUpdateContext {
    /// The local rect of the overall picture cache
    local_rect: PictureRect,

    /// The local clip rect (in picture space) of the entire picture cache
    local_clip_rect: PictureRect,

    /// Maps from picture cache coords -> world space coords.
    pic_to_world_mapper: SpaceMapper<PicturePixel, WorldPixel>,

    /// The fractional position of the picture cache, which may
    /// require invalidation of all tiles.
    fract_offset: PictureVector2D,

    /// The optional background color of the picture cache instance
    background_color: Option<ColorF>,

    /// The visible part of the screen in world coords.
    global_screen_world_rect: WorldRect,
}

// Immutable context passed to picture cache tiles during post_update
struct TilePostUpdateContext<'a> {
    /// The calculated backdrop information for this cache instance.
    backdrop: BackdropInfo,

    /// Information about transform node differences from last frame.
    spatial_nodes: &'a FastHashMap<SpatialNodeIndex, SpatialNodeDependency>,

    /// Information about opacity bindings from the picture cache.
    opacity_bindings: &'a FastHashMap<PropertyBindingId, OpacityBindingInfo>,

    /// Current size in device pixels of tiles for this cache
    current_tile_size: DeviceIntSize,
}

// Mutable state passed to picture cache tiles during post_update
struct TilePostUpdateState<'a> {
    /// Allow access to the texture cache for requesting tiles
    resource_cache: &'a mut ResourceCache,

    /// Current configuration and setup for compositing all the picture cache tiles in renderer.
    composite_state: &'a mut CompositeState,

    /// A cache of comparison results to avoid re-computation during invalidation.
    compare_cache: &'a mut FastHashMap<PrimitiveComparisonKey, PrimitiveCompareResult>,
}

/// Information about the dependencies of a single primitive instance.
struct PrimitiveDependencyInfo {
    /// If true, we should clip the prim rect to the tile boundaries.
    clip_by_tile: bool,

    /// Unique content identifier of the primitive.
    prim_uid: ItemUid,

    /// The picture space origin of this primitive.
    prim_origin: PicturePoint,

    /// The (conservative) clipped area in picture space this primitive occupies.
    prim_clip_rect: PictureRect,

    /// Image keys this primitive depends on.
    images: SmallVec<[ImageDependency; 8]>,

    /// Opacity bindings this primitive depends on.
    opacity_bindings: SmallVec<[OpacityBinding; 4]>,

    /// Clips that this primitive depends on.
    clips: SmallVec<[ItemUid; 8]>,

    /// Spatial nodes references by the clip dependencies of this primitive.
    spatial_nodes: SmallVec<[SpatialNodeIndex; 4]>,
}

impl PrimitiveDependencyInfo {
    /// Construct dependency info for a new primitive.
    /// Dependency lists start empty and are filled in during
    /// update_prim_dependencies; clip_by_tile defaults to false.
    fn new(
        prim_uid: ItemUid,
        prim_origin: PicturePoint,
        prim_clip_rect: PictureRect,
    ) -> Self {
        PrimitiveDependencyInfo {
            prim_uid,
            prim_origin,
            images: SmallVec::new(),
            opacity_bindings: SmallVec::new(),
            clip_by_tile: false,
            prim_clip_rect,
            clips: SmallVec::new(),
            spatial_nodes: SmallVec::new(),
        }
    }
}

/// A stable ID for a given tile, to help debugging. These are also used
/// as unique identfiers for tile surfaces when using a native compositor.
#[derive(Debug, Copy, Clone, PartialEq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct TileId(pub usize);

/// A descriptor for the kind of texture that a picture cache tile will
/// be drawn into.
#[derive(Debug)]
pub enum SurfaceTextureDescriptor {
    /// When using the WR compositor, the tile is drawn into an entry
    /// in the WR texture cache.
    TextureCache {
        handle: TextureCacheHandle
    },
    /// When using an OS compositor, the tile is drawn into a native
    /// surface identified by arbitrary id.
    NativeSurface {
        /// The arbitrary id of this surface.
        id: Option<NativeSurfaceId>,
        /// Size in device pixels of the native surface.
        size: DeviceIntSize,
    },
}

/// This is the same as a `SurfaceTextureDescriptor` but has been resolved
/// into a texture cache handle (if appropriate) that can be used by the
/// batching and compositing code in the renderer.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum ResolvedSurfaceTexture {
    TextureCache {
        /// The texture ID to draw to.
        texture: TextureSource,
        /// Slice index in the texture array to draw to.
        layer: i32,
    },
    NativeSurface {
        /// The arbitrary id of this surface.
        id: NativeSurfaceId,
        /// Size in device pixels of the native surface.
        size: DeviceIntSize,
    }
}

impl SurfaceTextureDescriptor {
    /// Create a resolved surface texture for this descriptor
    pub fn resolve(
        &self,
        resource_cache: &ResourceCache,
    ) -> ResolvedSurfaceTexture {
        match self {
            SurfaceTextureDescriptor::TextureCache { handle } => {
                let cache_item = resource_cache.texture_cache.get(handle);

                ResolvedSurfaceTexture::TextureCache {
                    texture: cache_item.texture_id,
                    layer: cache_item.texture_layer,
                }
            }
            SurfaceTextureDescriptor::NativeSurface { id, size } => {
                ResolvedSurfaceTexture::NativeSurface {
                    // Panics if resolve is called before the native surface
                    // was allocated - that would be a caller sequencing bug.
                    id: id.expect("bug: native surface not allocated"),
                    size: *size,
                }
            }
        }
    }
}

/// The backing surface for this tile.
#[derive(Debug)]
pub enum TileSurface {
    Texture {
        /// Descriptor for the surface that this tile draws into.
        descriptor: SurfaceTextureDescriptor,
        /// Bitfield specifying the dirty region(s) that are relevant to this tile.
        visibility_mask: PrimitiveVisibilityMask,
    },
    Color {
        color: ColorF,
    },
    Clear,
}

impl TileSurface {
    // Short human-readable name of the surface kind, for debug output.
    fn kind(&self) -> &'static str {
        match *self {
            TileSurface::Color { .. } => "Color",
            TileSurface::Texture { .. } => "Texture",
            TileSurface::Clear => "Clear",
        }
    }
}

/// The result of a primitive dependency comparison. Size is a u8
/// since this is a hot path in the code, and keeping the data small
/// is a performance win.
#[derive(Debug, Copy, Clone, PartialEq)]
#[repr(u8)]
enum PrimitiveCompareResult {
    /// Primitives match
    Equal,
    /// Something in the PrimitiveDescriptor was different
    Descriptor,
    /// The clip node content or spatial node changed
    Clip,
    /// The value of the transform changed
    Transform,
    /// An image dependency was dirty
    Image,
    /// The value of an opacity binding changed
    OpacityBinding,
}

/// Debugging information about why a tile was invalidated
#[derive(Debug)]
enum InvalidationReason {
    /// The fractional offset changed
    FractionalOffset,
    /// The background color changed
    BackgroundColor,
    /// The opaque state of the backing native surface changed
    SurfaceOpacityChanged,
    /// There was no backing texture (evicted or never rendered)
    NoTexture,
    /// There was no backing native surface (never rendered, or recreated)
    NoSurface,
    /// The primitive count in the dependency list was different
    PrimCount,
    /// The content of one of the primitives was different
    Content {
        /// What changed in the primitive that was different
        prim_compare_result: PrimitiveCompareResult,
    },
}

/// Information about a cached tile.
pub struct Tile {
    /// The current world rect of this tile.
    pub world_rect: WorldRect,

    /// The current local rect of this tile.
    pub rect: PictureRect,

    /// The local rect of the tile clipped to the overall picture local rect.
    clipped_rect: PictureRect,

    /// Uniquely describes the content of this tile, in a way that can be
    /// (reasonably) efficiently hashed and compared.
    pub current_descriptor: TileDescriptor,

    /// The content descriptor for this tile from the previous frame.
    pub prev_descriptor: TileDescriptor,

    /// Handle to the backing surface for this tile.
    pub surface: Option<TileSurface>,

    /// If true, this tile is marked valid, and the existing texture
    /// cache handle can be used. Tiles are invalidated during the
    /// build_dirty_regions method.
    pub is_valid: bool,

    /// If true, this tile intersects with the currently visible screen
    /// rect, and will be drawn.
    pub is_visible: bool,

    /// The current fractional offset of the cache transform root. If this changes,
    /// all tiles need to be invalidated and redrawn, since snapping differences are
    /// likely to occur.
    fract_offset: PictureVector2D,

    /// The tile id is stable between display lists and / or frames,
    /// if the tile is retained. Useful for debugging tile evictions.
    pub id: TileId,

    /// If true, the tile was determined to be opaque, which means blending
    /// can be disabled when drawing it.
    pub is_opaque: bool,

    /// Root node of the quadtree dirty rect tracker.
    root: TileNode,

    /// The picture space dirty rect for this tile.
    dirty_rect: PictureRect,

    /// The world space dirty rect for this tile.
    /// TODO(gw): We have multiple dirty rects available due to the quadtree above. In future,
    ///           expose these as multiple dirty rects, which will help in some cases.
    pub world_dirty_rect: WorldRect,

    /// The last rendered background color on this tile.
    background_color: Option<ColorF>,

    /// The first reason the tile was invalidated this frame.
    invalidation_reason: Option<InvalidationReason>,
}

impl Tile {
    /// Construct a new, invalid tile. All rects start at zero and the tile is
    /// not valid / not visible until pre_update + post_update have run.
    fn new(
        id: TileId,
    ) -> Self {
        Tile {
            rect: PictureRect::zero(),
            clipped_rect: PictureRect::zero(),
            world_rect: WorldRect::zero(),
            surface: None,
            current_descriptor: TileDescriptor::new(),
            prev_descriptor: TileDescriptor::new(),
            is_valid: false,
            is_visible: false,
            fract_offset: PictureVector2D::zero(),
            id,
            is_opaque: false,
            root: TileNode::new_leaf(Vec::new()),
            dirty_rect: PictureRect::zero(),
            world_dirty_rect: WorldRect::zero(),
            background_color: None,
            invalidation_reason: None,
        }
    }

    /// Print debug information about this tile to a tree printer.
    fn print(&self, pt: &mut dyn PrintTreePrinter) {
        pt.new_level(format!("Tile {:?}", self.id));
        pt.add_item(format!("rect: {}", self.rect));
        pt.add_item(format!("fract_offset: {:?}", self.fract_offset));
        pt.add_item(format!("background_color: {:?}", self.background_color));
        pt.add_item(format!("invalidation_reason: {:?}", self.invalidation_reason));
        self.current_descriptor.print(pt);
        pt.end_level();
    }

    /// Check if the content of the previous and current tile descriptors match
    fn update_dirty_rects(
        &mut self,
        ctx: &TilePostUpdateContext,
        state: &mut TilePostUpdateState,
        invalidation_reason: &mut Option<InvalidationReason>,
    ) -> PictureRect {
        let mut prim_comparer = PrimitiveComparer::new(
            &self.prev_descriptor,
            &self.current_descriptor,
            state.resource_cache,
            ctx.spatial_nodes,
            ctx.opacity_bindings,
        );

        // The quadtree accumulates the union of dirty areas and reports the
        // first invalidation reason (if any) via the out-parameter.
        let mut dirty_rect = PictureRect::zero();
        self.root.update_dirty_rects(
            &self.prev_descriptor.prims,
            &self.current_descriptor.prims,
            &mut prim_comparer,
            &mut dirty_rect,
            state.compare_cache,
            invalidation_reason,
        );

        dirty_rect
    }

    /// Invalidate a tile based on change in content. This
    /// must be called even if the tile is not currently
    /// visible on screen. We might be able to improve this
    /// later by changing how ComparableVec is used.
    fn update_content_validity(
        &mut self,
        ctx: &TilePostUpdateContext,
        state: &mut TilePostUpdateState,
    ) {
        // Check if the contents of the primitives, clips, and
        // other dependencies are the same.
        state.compare_cache.clear();
        let mut invalidation_reason = None;
        let dirty_rect = self.update_dirty_rects(
            ctx,
            state,
            &mut invalidation_reason,
        );
        if !dirty_rect.is_empty() {
            self.invalidate(
                Some(dirty_rect),
                // Invariant: update_dirty_rects always sets a reason whenever
                // it produces a non-empty dirty rect.
                invalidation_reason.expect("bug: no invalidation_reason"),
            );
        }
    }

    /// Invalidate this tile. If `invalidation_rect` is None, the entire
    /// tile is invalidated.
    fn invalidate(
        &mut self,
        invalidation_rect: Option<PictureRect>,
        reason: InvalidationReason,
    ) {
        self.is_valid = false;

        match invalidation_rect {
            Some(rect) => {
                // Accumulate partial invalidations into the existing dirty rect.
                self.dirty_rect = self.dirty_rect.union(&rect);
            }
            None => {
                self.dirty_rect = self.rect;
            }
        }

        // Only the *first* reason per frame is recorded, for debugging.
        if self.invalidation_reason.is_none() {
            self.invalidation_reason = Some(reason);
        }
    }

    /// Called during pre_update of a tile cache instance. Allows the
    /// tile to setup state before primitive dependency calculations.
    fn pre_update(
        &mut self,
        rect: PictureRect,
        ctx: &TilePreUpdateContext,
    ) {
        self.rect = rect;
        self.invalidation_reason = None;

        self.clipped_rect = self.rect
            .intersection(&ctx.local_rect)
            .and_then(|r| r.intersection(&ctx.local_clip_rect))
            .unwrap_or(PictureRect::zero());

        self.world_rect = ctx.pic_to_world_mapper
            .map(&self.rect)
            .expect("bug: map local tile rect");

        // Check if this tile is currently on screen.
        self.is_visible = self.world_rect.intersects(&ctx.global_screen_world_rect);

        // If the tile isn't visible, early exit, skipping the normal set up to
        // validate dependencies. Instead, we will only compare the current tile
        // dependencies the next time it comes into view.
        if !self.is_visible {
            return;
        }

        // Determine if the fractional offset of the transform is different this frame
        // from the currently cached tile set. The 0.001 epsilon matches the
        // tolerance used by PrimitiveDescriptor equality.
        let fract_changed = (self.fract_offset.x - ctx.fract_offset.x).abs() > 0.001 ||
                            (self.fract_offset.y - ctx.fract_offset.y).abs() > 0.001;
        if fract_changed {
            self.invalidate(None, InvalidationReason::FractionalOffset);
            self.fract_offset = ctx.fract_offset;
        }

        if ctx.background_color != self.background_color {
            self.invalidate(None, InvalidationReason::BackgroundColor);
            self.background_color = ctx.background_color;
        }

        // Clear any dependencies so that when we rebuild them we
        // can compare if the tile has the same content. The swap keeps the
        // previous frame's descriptor (and its allocations) for comparison.
        mem::swap(
            &mut self.current_descriptor,
            &mut self.prev_descriptor,
        );
        self.current_descriptor.clear();
        self.root.clear(rect);
    }

    /// Add dependencies for a given primitive to this tile.
    fn add_prim_dependency(
        &mut self,
        info: &PrimitiveDependencyInfo,
    ) {
        // If this tile isn't currently visible, we don't want to update the dependencies
        // for this tile, as an optimization, since it won't be drawn anyway.
        if !self.is_visible {
            return;
        }

        // Include any image keys this tile depends on.
        self.current_descriptor.images.extend_from_slice(&info.images);

        // Include any opacity bindings this primitive depends on.
        self.current_descriptor.opacity_bindings.extend_from_slice(&info.opacity_bindings);

        // Include any clip nodes that this primitive depends on.
        self.current_descriptor.clips.extend_from_slice(&info.clips);

        // Include any transforms that this primitive depends on.
        self.current_descriptor.transforms.extend_from_slice(&info.spatial_nodes);

        // TODO(gw): The origin of background rects produced by APZ changes
        //           in Gecko during scrolling. Consider investigating this so the
        //           hack / workaround below is not required.
        let (prim_origin, prim_clip_rect) = if info.clip_by_tile {
            let tile_p0 = self.rect.origin;
            let tile_p1 = self.rect.bottom_right();

            // Clamp the primitive's clip rect corners to the tile bounds so
            // that per-tile descriptors are insensitive to off-tile motion.
            let clip_p0 = PicturePoint::new(
                clampf(info.prim_clip_rect.origin.x, tile_p0.x, tile_p1.x),
                clampf(info.prim_clip_rect.origin.y, tile_p0.y, tile_p1.y),
            );

            let clip_p1 = PicturePoint::new(
                clampf(info.prim_clip_rect.origin.x + info.prim_clip_rect.size.width, tile_p0.x, tile_p1.x),
                clampf(info.prim_clip_rect.origin.y + info.prim_clip_rect.size.height, tile_p0.y, tile_p1.y),
            );

            (
                PicturePoint::new(
                    clampf(info.prim_origin.x, tile_p0.x, tile_p1.x),
                    clampf(info.prim_origin.y, tile_p0.y, tile_p1.y),
                ),
                PictureRect::new(
                    clip_p0,
                    PictureSize::new(
                        clip_p1.x - clip_p0.x,
                        clip_p1.y - clip_p0.y,
                    ),
                ),
            )
        } else {
            (info.prim_origin, info.prim_clip_rect)
        };

        // Update the tile descriptor, used for tile comparison during scene swaps.
        let prim_index = PrimitiveDependencyIndex(self.current_descriptor.prims.len() as u32);

        // We know that the casts below will never overflow because the array lengths are
        // truncated to MAX_PRIM_SUB_DEPS during update_prim_dependencies.
        debug_assert!(info.spatial_nodes.len() <= MAX_PRIM_SUB_DEPS);
        debug_assert!(info.clips.len() <= MAX_PRIM_SUB_DEPS);
        debug_assert!(info.images.len() <= MAX_PRIM_SUB_DEPS);
        debug_assert!(info.opacity_bindings.len() <= MAX_PRIM_SUB_DEPS);

        self.current_descriptor.prims.push(PrimitiveDescriptor {
            prim_uid: info.prim_uid,
            origin: prim_origin.into(),
            prim_clip_rect: prim_clip_rect.into(),
            transform_dep_count: info.spatial_nodes.len() as u8,
            clip_dep_count: info.clips.len() as u8,
            image_dep_count: info.images.len() as u8,
            opacity_binding_dep_count: info.opacity_bindings.len() as u8,
        });

        // Add this primitive to the dirty rect quadtree.
        self.root.add_prim(prim_index, &info.prim_clip_rect);
    }

    /// Called during tile cache instance post_update. Allows invalidation and dirty
    /// rect calculation after primitive dependencies have been updated.
    /// Returns true if the tile should be drawn / kept alive this frame.
    fn post_update(
        &mut self,
        ctx: &TilePostUpdateContext,
        state: &mut TilePostUpdateState,
    ) -> bool {
        // If tile is not visible, just early out from here - we don't update dependencies
        // so don't want to invalidate, merge, split etc. The tile won't need to be drawn
        // (and thus updated / invalidated) until it is on screen again.
        if !self.is_visible {
            return false;
        }

        // Invalidate the tile based on the content changing.
        self.update_content_validity(ctx, state);

        // If there are no primitives there is no need to draw or cache it.
        if self.current_descriptor.prims.is_empty() {
            return false;
        }

        // Check if this tile can be considered opaque. Opacity state must be updated only
        // after all early out checks have been performed. Otherwise, we might miss updating
        // the native surface next time this tile becomes visible.
        let tile_is_opaque = ctx.backdrop.rect.contains_rect(&self.clipped_rect);
        let opacity_changed = tile_is_opaque != self.is_opaque;
        self.is_opaque = tile_is_opaque;

        // Check if the selected composite mode supports dirty rect updates. For Draw composite
        // mode, we can always update the content with smaller dirty rects. For native composite
        // mode, we can only use dirty rects if the compositor supports partial surface updates.
        let (supports_dirty_rects, supports_simple_prims) = match state.composite_state.compositor_kind {
            CompositorKind::Draw { .. } => {
                (true, true)
            }
            CompositorKind::Native { max_update_rects, .. } => {
                (max_update_rects > 0, false)
            }
        };

        // TODO(gw): Consider using smaller tiles and/or tile splits for
        //           native compositors that don't support dirty rects.
        if supports_dirty_rects {
            // Only allow splitting for normal content sized tiles
            if ctx.current_tile_size == TILE_SIZE_DEFAULT {
                let max_split_level = 3;

                // Consider splitting / merging dirty regions
                self.root.maybe_merge_or_split(
                    0,
                    &self.current_descriptor.prims,
                    max_split_level,
                );
            }
        }

        // The dirty rect will be set correctly by now. If the underlying platform
        // doesn't support partial updates, and this tile isn't valid, force the dirty
        // rect to be the size of the entire tile.
        if !self.is_valid && !supports_dirty_rects {
            self.dirty_rect = self.rect;
        }

        // Ensure that the dirty rect doesn't extend outside the local tile rect.
        self.dirty_rect = self.dirty_rect
            .intersection(&self.rect)
            .unwrap_or(PictureRect::zero());

        // See if this tile is a simple color, in which case we can just draw
        // it as a rect, and avoid allocating a texture surface and drawing it.
        // TODO(gw): Initial native compositor interface doesn't support simple
        //           color tiles. We can definitely support this in DC, so this
        //           should be added as a follow up.
        let is_simple_prim =
            ctx.backdrop.kind.can_be_promoted_to_compositor_surface() &&
            self.current_descriptor.prims.len() == 1 &&
            self.is_opaque &&
            supports_simple_prims;

        // Set up the backing surface for this tile.
        let surface = if is_simple_prim {
            // If we determine the tile can be represented by a color, set the
            // surface unconditionally (this will drop any previously used
            // texture cache backing surface).
            match ctx.backdrop.kind {
                BackdropKind::Color { color } => {
                    TileSurface::Color {
                        color,
                    }
                }
                BackdropKind::Clear => {
                    TileSurface::Clear
                }
                BackdropKind::Image => {
                    // This should be prevented by the is_simple_prim check above.
                    unreachable!();
                }
            }
        } else {
            // If this tile will be backed by a surface, we want to retain
            // the texture handle from the previous frame, if possible. If
            // the tile was previously a color, or not set, then just set
            // up a new texture cache handle.
            match self.surface.take() {
                Some(TileSurface::Texture { mut descriptor, visibility_mask }) => {
                    // If opacity changed, and this is a native OS compositor surface,
                    // it needs to be recreated.
                    // TODO(gw): This is a limitation of the DirectComposite APIs. It might
                    //           make sense on other platforms to be able to change this as
                    //           a property on a surface, if we ever see pages where this
                    //           is changing frequently.
                    if opacity_changed {
                        if let SurfaceTextureDescriptor::NativeSurface { ref mut id, .. } = descriptor {
                            // Reset the dirty rect and tile validity in this case, to
                            // force the new tile to be completely redrawn.
                            self.invalidate(None, InvalidationReason::SurfaceOpacityChanged);

                            // If this tile has a currently allocated native surface, destroy it. It
                            // will be re-allocated next time it's determined to be visible.
                            if let Some(id) = id.take() {
                                state.resource_cache.destroy_compositor_surface(id);
                            }
                        }
                    }

                    // Reuse the existing descriptor and vis mask
                    TileSurface::Texture {
                        descriptor,
                        visibility_mask,
                    }
                }
                Some(TileSurface::Color { .. }) | Some(TileSurface::Clear) | None => {
                    // This is the case where we are constructing a tile surface that
                    // involves drawing to a texture. Create the correct surface
                    // descriptor depending on the compositing mode that will read
                    // the output.
                    let descriptor = match state.composite_state.compositor_kind {
                        CompositorKind::Draw { .. } => {
                            // For a texture cache entry, create an invalid handle that
                            // will be allocated when update_picture_cache is called.
                            SurfaceTextureDescriptor::TextureCache {
                                handle: TextureCacheHandle::invalid(),
                            }
                        }
                        CompositorKind::Native { .. } => {
                            // Create a native surface surface descriptor, but don't allocate
                            // a surface yet. The surface is allocated *after* occlusion
                            // culling occurs, so that only visible tiles allocate GPU memory.
                            SurfaceTextureDescriptor::NativeSurface {
                                id: None,
                                size: ctx.current_tile_size,
                            }
                        }
                    };

                    TileSurface::Texture {
                        descriptor,
                        visibility_mask: PrimitiveVisibilityMask::empty(),
                    }
                }
            }
        };

        // Store the current surface backing info for use during batching.
        self.surface = Some(surface);

        true
    }
}

/// Defines a key that uniquely identifies a primitive instance.
#[derive(Debug, Clone)]
pub struct PrimitiveDescriptor {
    /// Uniquely identifies the content of the primitive template.
    prim_uid: ItemUid,

    /// The origin of this primitive, as stored by add_prim_dependency.
    /// NOTE(review): the value comes from `prim_origin: PicturePoint`, so this
    /// appears to be picture space, not world space - confirm against callers.
    origin: PointKey,

    /// The clip rect for this primitive. Included here in
    /// dependencies since there is no entry in the clip chain
    /// dependencies for the local clip rect.
    prim_clip_rect: RectangleKey,

    /// The number of extra dependencies that this primitive has.
    transform_dep_count: u8,
    image_dep_count: u8,
    opacity_binding_dep_count: u8,
    clip_dep_count: u8,
}

impl PartialEq for PrimitiveDescriptor {
    fn eq(&self, other: &Self) -> bool {
        // Positions are compared with a small tolerance so that sub-epsilon
        // floating point drift between frames does not invalidate tiles.
        const EPSILON: f32 = 0.001;

        if self.prim_uid != other.prim_uid {
            return false;
        }

        if !self.origin.x.approx_eq_eps(&other.origin.x, &EPSILON) {
            return false;
        }
        if !self.origin.y.approx_eq_eps(&other.origin.y, &EPSILON) {
            return false;
        }

        if !self.prim_clip_rect.x.approx_eq_eps(&other.prim_clip_rect.x, &EPSILON) {
            return false;
        }
        if !self.prim_clip_rect.y.approx_eq_eps(&other.prim_clip_rect.y, &EPSILON) {
            return false;
        }
        if !self.prim_clip_rect.w.approx_eq_eps(&other.prim_clip_rect.w, &EPSILON) {
            return false;
        }
        if !self.prim_clip_rect.h.approx_eq_eps(&other.prim_clip_rect.h, &EPSILON) {
            return false;
        }

        true
    }
}

/// A small helper to compare two arrays of primitive dependencies.
struct CompareHelper<'a, T> {
    // Read cursors into the current / previous dependency arrays. Each
    // primitive's deps occupy a contiguous slice, advanced via advance_*.
    offset_curr: usize,
    offset_prev: usize,
    curr_items: &'a [T],
    prev_items: &'a [T],
}

impl<'a, T> CompareHelper<'a, T> where T: PartialEq {
    /// Construct a new compare helper for a current / previous set of dependency information.
    fn new(
        prev_items: &'a [T],
        curr_items: &'a [T],
    ) -> Self {
        CompareHelper {
            offset_curr: 0,
            offset_prev: 0,
            curr_items,
            prev_items,
        }
    }

    /// Reset the current position in the dependency array to the start
    fn reset(&mut self) {
        self.offset_prev = 0;
        self.offset_curr = 0;
    }

    /// Test if two sections of the dependency arrays are the same, by checking both
    /// item equality, and a user closure to see if the content of the item changed.
    fn is_same<F>(
        &self,
        prev_count: u8,
        curr_count: u8,
        f: F,
    ) -> bool where F: Fn(&T) -> bool {
        // If the number of items is different, trivial reject.
        if prev_count != curr_count {
            return false;
        }
        // If both counts are 0, then no need to check these dependencies.
        if curr_count == 0 {
            return true;
        }
        // If both counts are u8::MAX, this is a sentinel that we can't compare these
        // deps, so just trivial reject.
        if curr_count as usize == MAX_PRIM_SUB_DEPS {
            return false;
        }

        let end_prev = self.offset_prev + prev_count as usize;
        let end_curr = self.offset_curr + curr_count as usize;

        // Counts are equal at this point, so zip covers both slices fully.
        let curr_items = &self.curr_items[self.offset_curr .. end_curr];
        let prev_items = &self.prev_items[self.offset_prev .. end_prev];

        for (curr, prev) in curr_items.iter().zip(prev_items.iter()) {
            if prev != curr {
                return false;
            }

            // The closure reports whether the item's *content* is dirty even
            // when the items themselves compare equal (e.g. image generation).
            if f(curr) {
                return false;
            }
        }

        true
    }

    // Advance the prev dependency array by a given amount
    fn advance_prev(&mut self, count: u8) {
        self.offset_prev += count as usize;
    }

    // Advance the current dependency array by a given amount
    fn advance_curr(&mut self, count: u8) {
        self.offset_curr += count as usize;
    }
}

/// Uniquely describes the content of this tile, in a way that can be
/// (reasonably) efficiently hashed and compared.
pub struct TileDescriptor {
    /// List of primitive instance unique identifiers. The uid is guaranteed
    /// to uniquely describe the content of the primitive template, while
    /// the other parameters describe the clip chain and instance params.
    pub prims: Vec<PrimitiveDescriptor>,

    /// List of clip node descriptors.
    clips: Vec<ItemUid>,

    /// List of image keys that this tile depends on.
    images: Vec<ImageDependency>,

    /// The set of opacity bindings that this tile depends on.
    // TODO(gw): Ugh, get rid of all opacity binding support!
    opacity_bindings: Vec<OpacityBinding>,

    /// List of the effects of transforms that we care about
    /// tracking for this tile.
    transforms: Vec<SpatialNodeIndex>,
}

impl TileDescriptor {
    fn new() -> Self {
        TileDescriptor {
            prims: Vec::new(),
            clips: Vec::new(),
            opacity_bindings: Vec::new(),
            images: Vec::new(),
            transforms: Vec::new(),
        }
    }

    /// Print debug information about this tile descriptor to a tree printer.
fn print(&self, pt: &mut dyn PrintTreePrinter) { pt.new_level("current_descriptor".to_string()); pt.new_level("prims".to_string()); for prim in &self.prims { pt.new_level(format!("prim uid={}", prim.prim_uid.get_uid())); pt.add_item(format!("origin: {},{}", prim.origin.x, prim.origin.y)); pt.add_item(format!("clip: origin={},{} size={}x{}", prim.prim_clip_rect.x, prim.prim_clip_rect.y, prim.prim_clip_rect.w, prim.prim_clip_rect.h, )); pt.add_item(format!("deps: t={} i={} o={} c={}", prim.transform_dep_count, prim.image_dep_count, prim.opacity_binding_dep_count, prim.clip_dep_count, )); pt.end_level(); } pt.end_level(); if !self.clips.is_empty() { pt.new_level("clips".to_string()); for clip in &self.clips { pt.new_level(format!("clip uid={}", clip.get_uid())); pt.end_level(); } pt.end_level(); } if !self.images.is_empty() { pt.new_level("images".to_string()); for info in &self.images { pt.new_level(format!("key={:?}", info.key)); pt.new_level(format!("generation={:?}", info.generation)); pt.end_level(); } pt.end_level(); } if !self.opacity_bindings.is_empty() { pt.new_level("opacity_bindings".to_string()); for opacity_binding in &self.opacity_bindings { pt.new_level(format!("binding={:?}", opacity_binding)); pt.end_level(); } pt.end_level(); } if !self.transforms.is_empty() { pt.new_level("transforms".to_string()); for transform in &self.transforms { pt.new_level(format!("spatial_node={:?}", transform)); pt.end_level(); } pt.end_level(); } pt.end_level(); } /// Clear the dependency information for a tile, when the dependencies /// are being rebuilt. fn clear(&mut self) { self.prims.clear(); self.clips.clear(); self.opacity_bindings.clear(); self.images.clear(); self.transforms.clear(); } } /// Stores both the world and devices rects for a single dirty rect. #[derive(Debug, Clone)] pub struct DirtyRegionRect { /// World rect of this dirty region pub world_rect: WorldRect, /// Bitfield for picture render tasks that draw this dirty region. 
    pub visibility_mask: PrimitiveVisibilityMask,
}

/// Represents the dirty region of a tile cache picture.
#[derive(Debug, Clone)]
pub struct DirtyRegion {
    /// The individual dirty rects of this region.
    pub dirty_rects: Vec<DirtyRegionRect>,

    /// The overall dirty rect, a combination of dirty_rects
    pub combined: WorldRect,
}

impl DirtyRegion {
    /// Construct a new dirty region tracker.
    pub fn new(
    ) -> Self {
        DirtyRegion {
            // MAX_DIRTY_REGIONS bounds how many regions can be tracked per
            // frame, so reserve exactly that many up front.
            dirty_rects: Vec::with_capacity(PrimitiveVisibilityMask::MAX_DIRTY_REGIONS),
            combined: WorldRect::zero(),
        }
    }

    /// Reset the dirty regions back to empty
    pub fn clear(&mut self) {
        self.dirty_rects.clear();
        self.combined = WorldRect::zero();
    }

    /// Push a dirty rect into this region
    pub fn push(
        &mut self,
        rect: WorldRect,
        visibility_mask: PrimitiveVisibilityMask,
    ) {
        // Include this in the overall dirty rect
        self.combined = self.combined.union(&rect);

        // Store the individual dirty rect.
        self.dirty_rects.push(DirtyRegionRect {
            world_rect: rect,
            visibility_mask,
        });
    }

    /// Include another rect into an existing dirty region.
    /// Panics if `region_index` is out of range of `dirty_rects`.
    pub fn include_rect(
        &mut self,
        region_index: usize,
        rect: WorldRect,
    ) {
        self.combined = self.combined.union(&rect);

        let region = &mut self.dirty_rects[region_index];
        region.world_rect = region.world_rect.union(&rect);
    }

    // TODO(gw): This returns a heap allocated object. Perhaps we can simplify this
    //           logic? Although - it's only used very rarely so it may not be an issue.
    pub fn inflate(
        &self,
        inflate_amount: f32,
    ) -> DirtyRegion {
        let mut dirty_rects = Vec::with_capacity(self.dirty_rects.len());
        let mut combined = WorldRect::zero();

        for rect in &self.dirty_rects {
            // Grow each rect symmetrically on both axes and rebuild the
            // combined union from the inflated rects.
            let world_rect = rect.world_rect.inflate(inflate_amount, inflate_amount);
            combined = combined.union(&world_rect);

            dirty_rects.push(DirtyRegionRect {
                world_rect,
                visibility_mask: rect.visibility_mask,
            });
        }

        DirtyRegion {
            dirty_rects,
            combined,
        }
    }

    /// Creates a record of this dirty region for exporting to test infrastructure.
    pub fn record(&self) -> RecordedDirtyRegion {
        let mut rects: Vec<WorldRect> =
            self.dirty_rects.iter().map(|r| r.world_rect.clone()).collect();
        // Sort by (y, x) origin so recorded output is deterministic regardless
        // of the order rects were pushed in.
        rects.sort_unstable_by_key(|r| (r.origin.y as usize, r.origin.x as usize));
        RecordedDirtyRegion { rects }
    }
}

/// A recorded copy of the dirty region for exporting to test infrastructure.
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct RecordedDirtyRegion {
    pub rects: Vec<WorldRect>,
}

impl ::std::fmt::Display for RecordedDirtyRegion {
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        for r in self.rects.iter() {
            let (x, y, w, h) = (r.origin.x, r.origin.y, r.size.width, r.size.height);
            write!(f, "[({},{}):{}x{}]", x, y, w, h)?;
        }
        Ok(())
    }
}

// Debug defers to Display so test logs show the compact rect list.
impl ::std::fmt::Debug for RecordedDirtyRegion {
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        ::std::fmt::Display::fmt(self, f)
    }
}

/// The kind of backdrop that covers a picture cache slice.
#[derive(Debug, Copy, Clone)]
enum BackdropKind {
    Color {
        color: ColorF,
    },
    Clear,
    Image,
}

impl BackdropKind {
    /// Returns true if the compositor can directly draw this backdrop.
    fn can_be_promoted_to_compositor_surface(&self) -> bool {
        match self {
            BackdropKind::Color { .. } | BackdropKind::Clear => true,
            BackdropKind::Image => false,
        }
    }
}

/// Stores information about the calculated opaque backdrop of this slice.
#[derive(Debug, Copy, Clone)]
struct BackdropInfo {
    /// The picture space rectangle that is known to be opaque. This is used
    /// to determine where subpixel AA can be used, and where alpha blending
    /// can be disabled.
    rect: PictureRect,
    /// Kind of the backdrop
    kind: BackdropKind,
}

impl BackdropInfo {
    // A zero-area, opaque-black backdrop; the starting state before any
    // backdrop candidates are found for the slice.
    fn empty() -> Self {
        BackdropInfo {
            rect: PictureRect::zero(),
            kind: BackdropKind::Color {
                color: ColorF::BLACK,
            },
        }
    }
}

/// Represents a cache of tiles that make up a picture primitives.
pub struct TileCacheInstance {
    /// Index of the tile cache / slice for this frame builder.
It's determined /// by the setup_picture_caching method during flattening, which splits the /// picture tree into multiple slices. It's used as a simple input to the tile /// keys. It does mean we invalidate tiles if a new layer gets inserted / removed /// between display lists - this seems very unlikely to occur on most pages, but /// can be revisited if we ever notice that. pub slice: usize, /// The currently selected tile size to use for this cache pub current_tile_size: DeviceIntSize, /// The positioning node for this tile cache. pub spatial_node_index: SpatialNodeIndex, /// Hash of tiles present in this picture. pub tiles: FastHashMap<TileOffset, Tile>, /// Switch back and forth between old and new tiles hashmaps to avoid re-allocating. old_tiles: FastHashMap<TileOffset, Tile>, /// A helper struct to map local rects into surface coords. map_local_to_surface: SpaceMapper<LayoutPixel, PicturePixel>, /// A helper struct to map child picture rects into picture cache surface coords. map_child_pic_to_surface: SpaceMapper<PicturePixel, PicturePixel>, /// List of opacity bindings, with some extra information /// about whether they changed since last frame. opacity_bindings: FastHashMap<PropertyBindingId, OpacityBindingInfo>, /// Switch back and forth between old and new bindings hashmaps to avoid re-allocating. old_opacity_bindings: FastHashMap<PropertyBindingId, OpacityBindingInfo>, /// List of spatial nodes, with some extra information /// about whether they changed since last frame. spatial_nodes: FastHashMap<SpatialNodeIndex, SpatialNodeDependency>, /// Switch back and forth between old and new spatial nodes hashmaps to avoid re-allocating. old_spatial_nodes: FastHashMap<SpatialNodeIndex, SpatialNodeDependency>, /// A set of spatial nodes that primitives / clips depend on found /// during dependency creation. This is used to avoid trying to /// calculate invalid relative transforms when building the spatial /// nodes hash above. 
used_spatial_nodes: FastHashSet<SpatialNodeIndex>, /// The current dirty region tracker for this picture. pub dirty_region: DirtyRegion, /// Current size of tiles in picture units. tile_size: PictureSize, /// Tile coords of the currently allocated grid. tile_rect: TileRect, /// Pre-calculated versions of the tile_rect above, used to speed up the /// calculations in get_tile_coords_for_rect. tile_bounds_p0: TileOffset, tile_bounds_p1: TileOffset, /// Local rect (unclipped) of the picture this cache covers. pub local_rect: PictureRect, /// The local clip rect, from the shared clips of this picture. local_clip_rect: PictureRect, /// A list of tiles that are valid and visible, which should be drawn to the main scene. pub tiles_to_draw: Vec<TileOffset>, /// The surface index that this tile cache will be drawn into. surface_index: SurfaceIndex, /// The background color from the renderer. If this is set opaque, we know it's /// fine to clear the tiles to this and allow subpixel text on the first slice. pub background_color: Option<ColorF>, /// Information about the calculated backdrop content of this cache. backdrop: BackdropInfo, /// The allowed subpixel mode for this surface, which depends on the detected /// opacity of the background. pub subpixel_mode: SubpixelMode, /// A list of clip handles that exist on every (top-level) primitive in this picture. /// It's often the case that these are root / fixed position clips. By handling them /// here, we can avoid applying them to the items, which reduces work, but more importantly /// reduces invalidations. pub shared_clips: Vec<ClipDataHandle>, /// The clip chain that represents the shared_clips above. Used to build the local /// clip rect for this tile cache. shared_clip_chain: ClipChainId, /// The current transform of the picture cache root spatial node root_transform: TransformKey, /// The number of frames until this cache next evaluates what tile size to use. 
    /// If a picture rect size is regularly changing just around a size threshold,
    /// we don't want to constantly invalidate and reallocate different tile size
    /// configuration each frame.
    frames_until_size_eval: usize,
    /// The current fractional offset of the cached picture
    fract_offset: PictureVector2D,
    /// keep around the hash map used as compare_cache to avoid reallocating it each
    /// frame.
    compare_cache: FastHashMap<PrimitiveComparisonKey, PrimitiveCompareResult>,
}

impl TileCacheInstance {
    /// Construct a new tile cache instance for the given slice, with all
    /// transient state (tiles, bindings, dirty regions) initially empty.
    pub fn new(
        slice: usize,
        spatial_node_index: SpatialNodeIndex,
        background_color: Option<ColorF>,
        shared_clips: Vec<ClipDataHandle>,
        shared_clip_chain: ClipChainId,
    ) -> Self {
        TileCacheInstance {
            slice,
            spatial_node_index,
            tiles: FastHashMap::default(),
            old_tiles: FastHashMap::default(),
            // Both space mappers are re-targeted during pre_update; start
            // them at the root spatial node with an empty reference rect.
            map_local_to_surface: SpaceMapper::new(
                ROOT_SPATIAL_NODE_INDEX,
                PictureRect::zero(),
            ),
            map_child_pic_to_surface: SpaceMapper::new(
                ROOT_SPATIAL_NODE_INDEX,
                PictureRect::zero(),
            ),
            opacity_bindings: FastHashMap::default(),
            old_opacity_bindings: FastHashMap::default(),
            spatial_nodes: FastHashMap::default(),
            old_spatial_nodes: FastHashMap::default(),
            used_spatial_nodes: FastHashSet::default(),
            dirty_region: DirtyRegion::new(),
            tile_size: PictureSize::zero(),
            tile_rect: TileRect::zero(),
            tile_bounds_p0: TileOffset::zero(),
            tile_bounds_p1: TileOffset::zero(),
            local_rect: PictureRect::zero(),
            local_clip_rect: PictureRect::zero(),
            tiles_to_draw: Vec::new(),
            surface_index: SurfaceIndex(0),
            background_color,
            backdrop: BackdropInfo::empty(),
            subpixel_mode: SubpixelMode::Allow,
            root_transform: TransformKey::Local,
            shared_clips,
            shared_clip_chain,
            current_tile_size: DeviceIntSize::zero(),
            // Zero forces a tile size evaluation on the first pre_update.
            frames_until_size_eval: 0,
            fract_offset: PictureVector2D::zero(),
            compare_cache: FastHashMap::default(),
        }
    }

    /// Returns true if this tile cache is considered opaque.
    pub fn is_opaque(&self) -> bool {
        // If known opaque due to background clear color and being the first slice.
// The background_color will only be Some(..) if this is the first slice. match self.background_color { Some(color) => color.a >= 1.0, None => false } } /// Get the tile coordinates for a given rectangle. fn get_tile_coords_for_rect( &self, rect: &PictureRect, ) -> (TileOffset, TileOffset) { // Get the tile coordinates in the picture space. let mut p0 = TileOffset::new( (rect.origin.x / self.tile_size.width).floor() as i32, (rect.origin.y / self.tile_size.height).floor() as i32, ); let mut p1 = TileOffset::new( ((rect.origin.x + rect.size.width) / self.tile_size.width).ceil() as i32, ((rect.origin.y + rect.size.height) / self.tile_size.height).ceil() as i32, ); // Clamp the tile coordinates here to avoid looping over irrelevant tiles later on. p0.x = clamp(p0.x, self.tile_bounds_p0.x, self.tile_bounds_p1.x); p0.y = clamp(p0.y, self.tile_bounds_p0.y, self.tile_bounds_p1.y); p1.x = clamp(p1.x, self.tile_bounds_p0.x, self.tile_bounds_p1.x); p1.y = clamp(p1.y, self.tile_bounds_p0.y, self.tile_bounds_p1.y); (p0, p1) } /// Update transforms, opacity bindings and tile rects. pub fn pre_update( &mut self, pic_rect: PictureRect, surface_index: SurfaceIndex, frame_context: &FrameVisibilityContext, frame_state: &mut FrameVisibilityState, ) -> WorldRect { self.surface_index = surface_index; self.local_rect = pic_rect; self.local_clip_rect = PictureRect::max_rect(); // Reset the opaque rect + subpixel mode, as they are calculated // during the prim dependency checks. 
self.backdrop = BackdropInfo::empty(); self.subpixel_mode = SubpixelMode::Allow; self.map_local_to_surface = SpaceMapper::new( self.spatial_node_index, PictureRect::from_untyped(&pic_rect.to_untyped()), ); self.map_child_pic_to_surface = SpaceMapper::new( self.spatial_node_index, PictureRect::from_untyped(&pic_rect.to_untyped()), ); let pic_to_world_mapper = SpaceMapper::new_with_target( ROOT_SPATIAL_NODE_INDEX, self.spatial_node_index, frame_context.global_screen_world_rect, frame_context.clip_scroll_tree, ); // If there is a valid set of shared clips, build a clip chain instance for this, // which will provide a local clip rect. This is useful for establishing things // like whether the backdrop rect supplied by Gecko can be considered opaque. if self.shared_clip_chain != ClipChainId::NONE { let mut shared_clips = Vec::new(); let mut current_clip_chain_id = self.shared_clip_chain; while current_clip_chain_id != ClipChainId::NONE { shared_clips.push(current_clip_chain_id); let clip_chain_node = &frame_state.clip_store.clip_chain_nodes[current_clip_chain_id.0 as usize]; current_clip_chain_id = clip_chain_node.parent_clip_chain_id; } frame_state.clip_store.set_active_clips( LayoutRect::max_rect(), self.spatial_node_index, &shared_clips, frame_context.clip_scroll_tree, &mut frame_state.data_stores.clip, ); let clip_chain_instance = frame_state.clip_store.build_clip_chain_instance( LayoutRect::from_untyped(&pic_rect.to_untyped()), &self.map_local_to_surface, &pic_to_world_mapper, frame_context.clip_scroll_tree, frame_state.gpu_cache, frame_state.resource_cache, frame_context.global_device_pixel_scale, &frame_context.global_screen_world_rect, &mut frame_state.data_stores.clip, true, false, ); // Ensure that if the entire picture cache is clipped out, the local // clip rect is zero. This makes sure we don't register any occluders // that are actually off-screen. 
self.local_clip_rect = clip_chain_instance.map_or(PictureRect::zero(), |clip_chain_instance| { clip_chain_instance.pic_clip_rect }); } // If there are pending retained state, retrieve it. if let Some(prev_state) = frame_state.retained_tiles.caches.remove(&self.slice) { self.tiles.extend(prev_state.tiles); self.root_transform = prev_state.root_transform; self.spatial_nodes = prev_state.spatial_nodes; self.opacity_bindings = prev_state.opacity_bindings; self.current_tile_size = prev_state.current_tile_size; fn recycle_map<K: std::cmp::Eq + std::hash::Hash, V>( dest: &mut FastHashMap<K, V>, src: FastHashMap<K, V>, ) { if dest.capacity() < src.capacity() { *dest = src; } } recycle_map(&mut self.old_tiles, prev_state.allocations.old_tiles); recycle_map(&mut self.old_opacity_bindings, prev_state.allocations.old_opacity_bindings); recycle_map(&mut self.compare_cache, prev_state.allocations.compare_cache); } // Only evaluate what tile size to use fairly infrequently, so that we don't end // up constantly invalidating and reallocating tiles if the picture rect size is // changing near a threshold value. if self.frames_until_size_eval == 0 { const TILE_SIZE_TINY: f32 = 32.0; // Work out what size tile is appropriate for this picture cache. let desired_tile_size; // There's no need to check the other dimension. If we encounter a picture // that is small on one dimension, it's a reasonable choice to use a scrollbar // sized tile configuration regardless of the other dimension. if pic_rect.size.width <= TILE_SIZE_TINY { desired_tile_size = TILE_SIZE_SCROLLBAR_VERTICAL; } else if pic_rect.size.height <= TILE_SIZE_TINY { desired_tile_size = TILE_SIZE_SCROLLBAR_HORIZONTAL; } else { desired_tile_size = TILE_SIZE_DEFAULT; } // If the desired tile size has changed, then invalidate and drop any // existing tiles. if desired_tile_size != self.current_tile_size { // Destroy any native surfaces on the tiles that will be dropped due // to resizing. 
frame_state.composite_state.destroy_native_surfaces( self.tiles.values(), frame_state.resource_cache, ); self.tiles.clear(); self.current_tile_size = desired_tile_size; } // Reset counter until next evaluating the desired tile size. This is an // arbitrary value. self.frames_until_size_eval = 120; } // Map an arbitrary point in picture space to world space, to work out // what the fractional translation is that's applied by this scroll root. // TODO(gw): I'm not 100% sure this is right. At least, in future, we should // make a specific API for this, and/or enforce that the picture // cache transform only includes scale and/or translation (we // already ensure it doesn't have perspective). let world_origin = pic_to_world_mapper .map(&PictureRect::new(PicturePoint::zero(), PictureSize::new(1.0, 1.0))) .expect("bug: unable to map origin to world space") .origin; // Get the desired integer device coordinate let device_origin = world_origin * frame_context.global_device_pixel_scale; let desired_device_origin = device_origin.round(); // Unmap from device space to world space rect let ref_world_rect = WorldRect::new( desired_device_origin / frame_context.global_device_pixel_scale, WorldSize::new(1.0, 1.0), ); // Unmap from world space to picture space let ref_point = pic_to_world_mapper .unmap(&ref_world_rect) .expect("bug: unable to unmap ref world rect") .origin; // Extract the fractional offset required in picture space to align in device space self.fract_offset = PictureVector2D::new( ref_point.x.fract(), ref_point.y.fract(), ); // Do a hacky diff of opacity binding values from the last frame. This is // used later on during tile invalidation tests. 
let current_properties = frame_context.scene_properties.float_properties(); mem::swap(&mut self.opacity_bindings, &mut self.old_opacity_bindings); self.opacity_bindings.clear(); for (id, value) in current_properties { let changed = match self.old_opacity_bindings.get(id) { Some(old_property) => !old_property.value.approx_eq(value), None => true, }; self.opacity_bindings.insert(*id, OpacityBindingInfo { value: *value, changed, }); } let world_tile_size = WorldSize::new( self.current_tile_size.width as f32 / frame_context.global_device_pixel_scale.0, self.current_tile_size.height as f32 / frame_context.global_device_pixel_scale.0, ); // We know that this is an exact rectangle, since we (for now) only support tile // caches where the scroll root is in the root coordinate system. let local_tile_rect = pic_to_world_mapper .unmap(&WorldRect::new(WorldPoint::zero(), world_tile_size)) .expect("bug: unable to get local tile rect"); self.tile_size = local_tile_rect.size; let screen_rect_in_pic_space = pic_to_world_mapper .unmap(&frame_context.global_screen_world_rect) .expect("unable to unmap screen rect"); // Inflate the needed rect a bit, so that we retain tiles that we have drawn // but have just recently gone off-screen. This means that we avoid re-drawing // tiles if the user is scrolling up and down small amounts, at the cost of // a bit of extra texture memory. 
let desired_rect_in_pic_space = screen_rect_in_pic_space .inflate(0.0, 3.0 * self.tile_size.height); let needed_rect_in_pic_space = desired_rect_in_pic_space .intersection(&pic_rect) .unwrap_or(PictureRect::zero()); let p0 = needed_rect_in_pic_space.origin; let p1 = needed_rect_in_pic_space.bottom_right(); let x0 = (p0.x / local_tile_rect.size.width).floor() as i32; let x1 = (p1.x / local_tile_rect.size.width).ceil() as i32; let y0 = (p0.y / local_tile_rect.size.height).floor() as i32; let y1 = (p1.y / local_tile_rect.size.height).ceil() as i32; let x_tiles = x1 - x0; let y_tiles = y1 - y0; self.tile_rect = TileRect::new( TileOffset::new(x0, y0), TileSize::new(x_tiles, y_tiles), ); // This is duplicated information from tile_rect, but cached here to avoid // redundant calculations during get_tile_coords_for_rect self.tile_bounds_p0 = TileOffset::new(x0, y0); self.tile_bounds_p1 = TileOffset::new(x1, y1); let mut world_culling_rect = WorldRect::zero(); mem::swap(&mut self.tiles, &mut self.old_tiles); let ctx = TilePreUpdateContext { local_rect: self.local_rect, local_clip_rect: self.local_clip_rect, pic_to_world_mapper, fract_offset: self.fract_offset, background_color: self.background_color, global_screen_world_rect: frame_context.global_screen_world_rect, }; self.tiles.clear(); for y in y0 .. y1 { for x in x0 .. x1 { let key = TileOffset::new(x, y); let mut tile = self.old_tiles .remove(&key) .unwrap_or_else(|| { let next_id = TileId(NEXT_TILE_ID.fetch_add(1, Ordering::Relaxed)); Tile::new(next_id) }); // Ensure each tile is offset by the appropriate amount from the // origin, such that the content origin will be a whole number and // the snapping will be consistent. let rect = PictureRect::new( PicturePoint::new( x as f32 * self.tile_size.width + self.fract_offset.x, y as f32 * self.tile_size.height + self.fract_offset.y, ), self.tile_size, ); tile.pre_update( rect, &ctx, ); // Only include the tiles that are currently in view into the world culling // rect. 
This is a very important optimization for a couple of reasons: // (1) Primitives that intersect with tiles in the grid that are not currently // visible can be skipped from primitive preparation, clip chain building // and tile dependency updates. // (2) When we need to allocate an off-screen surface for a child picture (for // example a CSS filter) we clip the size of the GPU surface to the world // culling rect below (to ensure we draw enough of it to be sampled by any // tiles that reference it). Making the world culling rect only affected // by visible tiles (rather than the entire virtual tile display port) can // result in allocating _much_ smaller GPU surfaces for cases where the // true off-screen surface size is very large. if tile.is_visible { world_culling_rect = world_culling_rect.union(&tile.world_rect); } self.tiles.insert(key, tile); } } // Any old tiles that remain after the loop above are going to be dropped. For // simple composite mode, the texture cache handle will expire and be collected // by the texture cache. For native compositor mode, we need to explicitly // invoke a callback to the client to destroy that surface. frame_state.composite_state.destroy_native_surfaces( self.old_tiles.values(), frame_state.resource_cache, ); world_culling_rect } /// Update the dependencies for each tile for a given primitive instance. 
pub fn update_prim_dependencies( &mut self, prim_instance: &PrimitiveInstance, prim_spatial_node_index: SpatialNodeIndex, prim_clip_chain: Option<&ClipChainInstance>, local_prim_rect: LayoutRect, clip_scroll_tree: &ClipScrollTree, data_stores: &DataStores, clip_store: &ClipStore, pictures: &[PicturePrimitive], resource_cache: &ResourceCache, opacity_binding_store: &OpacityBindingStorage, image_instances: &ImageInstanceStorage, surface_index: SurfaceIndex, surface_spatial_node_index: SpatialNodeIndex, ) -> bool { // If the primitive is completely clipped out by the clip chain, there // is no need to add it to any primitive dependencies. let prim_clip_chain = match prim_clip_chain { Some(prim_clip_chain) => prim_clip_chain, None => return false, }; self.map_local_to_surface.set_target_spatial_node( prim_spatial_node_index, clip_scroll_tree, ); // Map the primitive local rect into picture space. let prim_rect = match self.map_local_to_surface.map(&local_prim_rect) { Some(rect) => rect, None => return false, }; // If the rect is invalid, no need to create dependencies. if prim_rect.size.is_empty_or_negative() { return false; } // If the primitive is directly drawn onto this picture cache surface, then // the pic_clip_rect is in the same space. If not, we need to map it from // the surface space into the picture cache space. let on_picture_surface = surface_index == self.surface_index; let pic_clip_rect = if on_picture_surface { prim_clip_chain.pic_clip_rect } else { self.map_child_pic_to_surface.set_target_spatial_node( surface_spatial_node_index, clip_scroll_tree, ); self.map_child_pic_to_surface .map(&prim_clip_chain.pic_clip_rect) .expect("bug: unable to map clip rect to picture cache space") }; // Get the tile coordinates in the picture space. let (p0, p1) = self.get_tile_coords_for_rect(&pic_clip_rect); // If the primitive is outside the tiling rects, it's known to not // be visible. 
if p0.x == p1.x || p0.y == p1.y { return false; } // Build the list of resources that this primitive has dependencies on. let mut prim_info = PrimitiveDependencyInfo::new( prim_instance.uid(), prim_rect.origin, pic_clip_rect, ); // Include the prim spatial node, if differs relative to cache root. if prim_spatial_node_index != self.spatial_node_index { prim_info.spatial_nodes.push(prim_spatial_node_index); } // If there was a clip chain, add any clip dependencies to the list for this tile. let clip_instances = &clip_store .clip_node_instances[prim_clip_chain.clips_range.to_range()]; for clip_instance in clip_instances { prim_info.clips.push(clip_instance.handle.uid()); // If the clip has the same spatial node, the relative transform // will always be the same, so there's no need to depend on it. let clip_node = &data_stores.clip[clip_instance.handle]; if clip_node.item.spatial_node_index != self.spatial_node_index { if !prim_info.spatial_nodes.contains(&clip_node.item.spatial_node_index) { prim_info.spatial_nodes.push(clip_node.item.spatial_node_index); } } } // Certain primitives may select themselves to be a backdrop candidate, which is // then applied below. let mut backdrop_candidate = None; // For pictures, we don't (yet) know the valid clip rect, so we can't correctly // use it to calculate the local bounding rect for the tiles. If we include them // then we may calculate a bounding rect that is too large, since it won't include // the clip bounds of the picture. Excluding them from the bounding rect here // fixes any correctness issues (the clips themselves are considered when we // consider the bounds of the primitives that are *children* of the picture), // however it does potentially result in some un-necessary invalidations of a // tile (in cases where the picture local rect affects the tile, but the clip // rect eventually means it doesn't affect that tile). 
// TODO(gw): Get picture clips earlier (during the initial picture traversal // pass) so that we can calculate these correctly. match prim_instance.kind { PrimitiveInstanceKind::Picture { pic_index,.. } => { // Pictures can depend on animated opacity bindings. let pic = &pictures[pic_index.0]; if let Some(PictureCompositeMode::Filter(Filter::Opacity(binding, _))) = pic.requested_composite_mode { prim_info.opacity_bindings.push(binding.into()); } } PrimitiveInstanceKind::Rectangle { data_handle, opacity_binding_index, .. } => { if opacity_binding_index == OpacityBindingIndex::INVALID { // Rectangles can only form a backdrop candidate if they are known opaque. // TODO(gw): We could resolve the opacity binding here, but the common // case for background rects is that they don't have animated opacity. let color = match data_stores.prim[data_handle].kind { PrimitiveTemplateKind::Rectangle { color, .. } => color, _ => unreachable!(), }; if color.a >= 1.0 { backdrop_candidate = Some(BackdropKind::Color { color }); } } else { let opacity_binding = &opacity_binding_store[opacity_binding_index]; for binding in &opacity_binding.bindings { prim_info.opacity_bindings.push(OpacityBinding::from(*binding)); } } prim_info.clip_by_tile = true; } PrimitiveInstanceKind::Image { data_handle, image_instance_index, .. 
} => { let image_data = &data_stores.image[data_handle].kind; let image_instance = &image_instances[image_instance_index]; let opacity_binding_index = image_instance.opacity_binding_index; if opacity_binding_index == OpacityBindingIndex::INVALID { if let Some(image_properties) = resource_cache.get_image_properties(image_data.key) { // If this image is opaque, it can be considered as a possible opaque backdrop if image_properties.descriptor.is_opaque() { backdrop_candidate = Some(BackdropKind::Image); } } } else { let opacity_binding = &opacity_binding_store[opacity_binding_index]; for binding in &opacity_binding.bindings { prim_info.opacity_bindings.push(OpacityBinding::from(*binding)); } } prim_info.images.push(ImageDependency { key: image_data.key, generation: resource_cache.get_image_generation(image_data.key), }); } PrimitiveInstanceKind::YuvImage { data_handle, .. } => { let yuv_image_data = &data_stores.yuv_image[data_handle].kind; prim_info.images.extend( yuv_image_data.yuv_key.iter().map(|key| { ImageDependency { key: *key, generation: resource_cache.get_image_generation(*key), } }) ); } PrimitiveInstanceKind::ImageBorder { data_handle, .. } => { let border_data = &data_stores.image_border[data_handle].kind; prim_info.images.push(ImageDependency { key: border_data.request.key, generation: resource_cache.get_image_generation(border_data.request.key), }); } PrimitiveInstanceKind::PushClipChain | PrimitiveInstanceKind::PopClipChain => { // Early exit to ensure this doesn't get added as a dependency on the tile. return false; } PrimitiveInstanceKind::TextRun { data_handle, .. } => { // Only do these checks if we haven't already disabled subpx // text rendering for this slice. if self.subpixel_mode == SubpixelMode::Allow && !self.is_opaque() { let run_data = &data_stores.text_run[data_handle]; // Only care about text runs that have requested subpixel rendering. 
// This is conservative - it may still end up that a subpx requested // text run doesn't get subpx for other reasons (e.g. glyph size). let subpx_requested = match run_data.font.render_mode { FontRenderMode::Subpixel => true, FontRenderMode::Alpha | FontRenderMode::Mono => false, }; // If a text run is on a child surface, the subpx mode will be // correctly determined as we recurse through pictures in take_context. if on_picture_surface && subpx_requested { if !self.backdrop.rect.contains_rect(&pic_clip_rect) { self.subpixel_mode = SubpixelMode::Deny; } } } } PrimitiveInstanceKind::Clear { .. } => { backdrop_candidate = Some(BackdropKind::Clear); } PrimitiveInstanceKind::LineDecoration { .. } | PrimitiveInstanceKind::NormalBorder { .. } | PrimitiveInstanceKind::LinearGradient { .. } | PrimitiveInstanceKind::RadialGradient { .. } | PrimitiveInstanceKind::Backdrop { .. } => { // These don't contribute dependencies } }; // If this primitive considers itself a backdrop candidate, apply further // checks to see if it matches all conditions to be a backdrop. if let Some(backdrop_candidate) = backdrop_candidate { let is_suitable_backdrop = match backdrop_candidate { BackdropKind::Clear => { // Clear prims are special - they always end up in their own slice, // and always set the backdrop. In future, we hope to completely // remove clear prims, since they don't integrate with the compositing // system cleanly. true } BackdropKind::Image | BackdropKind::Color { .. } => { // Check a number of conditions to see if we can consider this // primitive as an opaque backdrop rect. Several of these are conservative // checks and could be relaxed in future. However, these checks // are quick and capture the common cases of background rects and images. // Specifically, we currently require: // - The primitive is on the main picture cache surface. // - Same coord system as picture cache (ensures rects are axis-aligned). // - No clip masks exist. 
let same_coord_system = { let prim_spatial_node = &clip_scroll_tree .spatial_nodes[prim_spatial_node_index.0 as usize]; let surface_spatial_node = &clip_scroll_tree .spatial_nodes[self.spatial_node_index.0 as usize]; prim_spatial_node.coordinate_system_id == surface_spatial_node.coordinate_system_id }; same_coord_system && on_picture_surface } }; if is_suitable_backdrop { if !prim_clip_chain.needs_mask && pic_clip_rect.contains_rect(&self.backdrop.rect) { self.backdrop = BackdropInfo { rect: pic_clip_rect, kind: backdrop_candidate, } } } } // Record any new spatial nodes in the used list. self.used_spatial_nodes.extend(&prim_info.spatial_nodes); // Truncate the lengths of dependency arrays to the max size we can handle. // Any arrays this size or longer will invalidate every frame. prim_info.clips.truncate(MAX_PRIM_SUB_DEPS); prim_info.opacity_bindings.truncate(MAX_PRIM_SUB_DEPS); prim_info.spatial_nodes.truncate(MAX_PRIM_SUB_DEPS); prim_info.images.truncate(MAX_PRIM_SUB_DEPS); // Normalize the tile coordinates before adding to tile dependencies. // For each affected tile, mark any of the primitive dependencies. for y in p0.y .. p1.y { for x in p0.x .. p1.x { // TODO(gw): Convert to 2d array temporarily to avoid hash lookups per-tile? let key = TileOffset::new(x, y); let tile = self.tiles.get_mut(&key).expect("bug: no tile"); tile.add_prim_dependency(&prim_info); } } true } /// Print debug information about this picture cache to a tree printer. fn print(&self) { // TODO(gw): This initial implementation is very basic - just printing // the picture cache state to stdout. In future, we can // make this dump each frame to a file, and produce a report // stating which frames had invalidations. This will allow // diff'ing the invalidation states in a visual tool. 
let mut pt = PrintTree::new("Picture Cache"); pt.new_level(format!("Slice {}", self.slice)); pt.add_item(format!("fract_offset: {:?}", self.fract_offset)); pt.add_item(format!("background_color: {:?}", self.background_color)); for y in self.tile_bounds_p0.y .. self.tile_bounds_p1.y { for x in self.tile_bounds_p0.x .. self.tile_bounds_p1.x { let key = TileOffset::new(x, y); let tile = &self.tiles[&key]; tile.print(&mut pt); } } pt.end_level(); } /// Apply any updates after prim dependency updates. This applies /// any late tile invalidations, and sets up the dirty rect and /// set of tile blits. pub fn post_update( &mut self, frame_context: &FrameVisibilityContext, frame_state: &mut FrameVisibilityState, ) { self.tiles_to_draw.clear(); self.dirty_region.clear(); // Register the opaque region of this tile cache as an occluder, which // is used later in the frame to occlude other tiles. if self.backdrop.rect.is_well_formed_and_nonempty() { let backdrop_rect = self.backdrop.rect .intersection(&self.local_rect) .and_then(|r| { r.intersection(&self.local_clip_rect) }); if let Some(backdrop_rect) = backdrop_rect { let map_pic_to_world = SpaceMapper::new_with_target( ROOT_SPATIAL_NODE_INDEX, self.spatial_node_index, frame_context.global_screen_world_rect, frame_context.clip_scroll_tree, ); let world_backdrop_rect = map_pic_to_world .map(&backdrop_rect) .expect("bug: unable to map backdrop to world space"); frame_state.composite_state.register_occluder( self.slice, world_backdrop_rect, ); } } // Detect if the picture cache was scrolled or scaled. In this case, // the device space dirty rects aren't applicable (until we properly // integrate with OS compositors that can handle scrolling slices). 
let root_transform = frame_context .clip_scroll_tree .get_relative_transform( self.spatial_node_index, ROOT_SPATIAL_NODE_INDEX, ) .into(); let root_transform_changed = root_transform != self.root_transform; if root_transform_changed { self.root_transform = root_transform; frame_state.composite_state.dirty_rects_are_valid = false; } // Diff the state of the spatial nodes between last frame build and now. mem::swap(&mut self.spatial_nodes, &mut self.old_spatial_nodes); // TODO(gw): Maybe remove the used_spatial_nodes set and just mutate / create these // diffs inside add_prim_dependency? self.spatial_nodes.clear(); for spatial_node_index in self.used_spatial_nodes.drain() { // Get the current relative transform. let mut value = get_transform_key( spatial_node_index, self.spatial_node_index, frame_context.clip_scroll_tree, ); // Check if the transform has changed from last frame let mut changed = true; if let Some(old_info) = self.old_spatial_nodes.remove(&spatial_node_index) { if old_info.value == value { // Since the transform key equality check applies epsilon, if we // consider the value to be the same, store that old value to avoid // missing very slow drifts in the value over time. // TODO(gw): We should change ComparableVec to use a trait for comparison // rather than PartialEq. value = old_info.value; changed = false; } } self.spatial_nodes.insert(spatial_node_index, SpatialNodeDependency { changed, value, }); } let ctx = TilePostUpdateContext { backdrop: self.backdrop, spatial_nodes: &self.spatial_nodes, opacity_bindings: &self.opacity_bindings, current_tile_size: self.current_tile_size, }; let mut state = TilePostUpdateState { resource_cache: frame_state.resource_cache, composite_state: frame_state.composite_state, compare_cache: &mut self.compare_cache, }; // Step through each tile and invalidate if the dependencies have changed. 
        // Invalidate each tile against its dependencies, and collect the
        // keys of tiles that should be drawn this frame.
        for (key, tile) in self.tiles.iter_mut() {
            if tile.post_update(&ctx, &mut state) {
                self.tiles_to_draw.push(*key);
            }
        }

        // When under test, record a copy of the dirty region to support
        // invalidation testing in wrench.
        if frame_context.config.testing {
            frame_state.scratch.recorded_dirty_regions.push(self.dirty_region.record());
        }
    }
}

/// Maintains a stack of picture and surface information, that
/// is used during the initial picture traversal.
pub struct PictureUpdateState<'a> {
    /// The flat list of surfaces, owned by the caller of update_all.
    surfaces: &'a mut Vec<SurfaceInfo>,
    /// Stack of surface indices; the last entry is the current surface.
    surface_stack: Vec<SurfaceIndex>,
    /// Stack of per-picture info for the in-progress traversal.
    picture_stack: Vec<PictureInfo>,
    /// Starts true; when false after the traversal, the assign_raster_roots
    /// fallback pass runs to fix up unassigned raster roots.
    are_raster_roots_assigned: bool,
    /// Composite state for the frame being built.
    composite_state: &'a CompositeState,
}

impl<'a> PictureUpdateState<'a> {
    /// Walk the picture tree rooted at `pic_index`, updating surface
    /// configuration and raster roots, then run the raster-root fallback
    /// pass if any picture could not establish its requested root.
    pub fn update_all(
        surfaces: &'a mut Vec<SurfaceInfo>,
        pic_index: PictureIndex,
        picture_primitives: &mut [PicturePrimitive],
        frame_context: &FrameBuildingContext,
        gpu_cache: &mut GpuCache,
        clip_store: &ClipStore,
        data_stores: &mut DataStores,
        composite_state: &CompositeState,
    ) {
        profile_marker!("UpdatePictures");

        let mut state = PictureUpdateState {
            surfaces,
            surface_stack: vec![SurfaceIndex(0)],
            picture_stack: Vec::new(),
            are_raster_roots_assigned: true,
            composite_state,
        };

        state.update(
            pic_index,
            picture_primitives,
            frame_context,
            gpu_cache,
            clip_store,
            data_stores,
        );

        if !state.are_raster_roots_assigned {
            state.assign_raster_roots(
                pic_index,
                picture_primitives,
                ROOT_SPATIAL_NODE_INDEX,
            );
        }
    }

    /// Return the current surface
    fn current_surface(&self) -> &SurfaceInfo {
        &self.surfaces[self.surface_stack.last().unwrap().0]
    }

    /// Return the current surface (mutable)
    fn current_surface_mut(&mut self) -> &mut SurfaceInfo {
        &mut self.surfaces[self.surface_stack.last().unwrap().0]
    }

    /// Push a new surface onto the update stack.
fn push_surface( &mut self, surface: SurfaceInfo, ) -> SurfaceIndex { let surface_index = SurfaceIndex(self.surfaces.len()); self.surfaces.push(surface); self.surface_stack.push(surface_index); surface_index } /// Pop a surface on the way up the picture traversal fn pop_surface(&mut self) -> SurfaceIndex{ self.surface_stack.pop().unwrap() } /// Push information about a picture on the update stack fn push_picture( &mut self, info: PictureInfo, ) { self.picture_stack.push(info); } /// Pop the picture info off, on the way up the picture traversal fn pop_picture( &mut self, ) -> PictureInfo { self.picture_stack.pop().unwrap() } /// Update a picture, determining surface configuration, /// rasterization roots, and (in future) whether there /// are cached surfaces that can be used by this picture. fn update( &mut self, pic_index: PictureIndex, picture_primitives: &mut [PicturePrimitive], frame_context: &FrameBuildingContext, gpu_cache: &mut GpuCache, clip_store: &ClipStore, data_stores: &mut DataStores, ) { if let Some(prim_list) = picture_primitives[pic_index.0].pre_update( self, frame_context, ) { for cluster in &prim_list.clusters { if cluster.flags.contains(ClusterFlags::IS_PICTURE) { for prim_instance in &cluster.prim_instances { let child_pic_index = match prim_instance.kind { PrimitiveInstanceKind::Picture { pic_index, .. } => pic_index, _ => unreachable!(), }; self.update( child_pic_index, picture_primitives, frame_context, gpu_cache, clip_store, data_stores, ); } } } picture_primitives[pic_index.0].post_update( prim_list, self, frame_context, data_stores, ); } } /// Process the picture tree again in a depth-first order, /// and adjust the raster roots of the pictures that want to establish /// their own roots but are not able to due to the size constraints. 
fn assign_raster_roots( &mut self, pic_index: PictureIndex, picture_primitives: &[PicturePrimitive], fallback_raster_spatial_node: SpatialNodeIndex, ) { let picture = &picture_primitives[pic_index.0]; if !picture.is_visible() { return } let new_fallback = match picture.raster_config { Some(ref config) => { let surface = &mut self.surfaces[config.surface_index.0]; if !config.establishes_raster_root { surface.raster_spatial_node_index = fallback_raster_spatial_node; } surface.raster_spatial_node_index } None => fallback_raster_spatial_node, }; for cluster in &picture.prim_list.clusters { if cluster.flags.contains(ClusterFlags::IS_PICTURE) { for instance in &cluster.prim_instances { let child_pic_index = match instance.kind { PrimitiveInstanceKind::Picture { pic_index, .. } => pic_index, _ => unreachable!(), }; self.assign_raster_roots( child_pic_index, picture_primitives, new_fallback, ); } } } } } #[derive(Debug, Copy, Clone, PartialEq)] #[cfg_attr(feature = "capture", derive(Serialize))] pub struct SurfaceIndex(pub usize); pub const ROOT_SURFACE_INDEX: SurfaceIndex = SurfaceIndex(0); #[derive(Debug, Copy, Clone)] pub struct SurfaceRenderTasks { /// The root of the render task chain for this surface. This /// is attached to parent tasks, and also the surface that /// gets added during batching. pub root: RenderTaskId, /// The port of the render task change for this surface. This /// is where child tasks for this surface get attached to. pub port: RenderTaskId, } /// Information about an offscreen surface. For now, /// it contains information about the size and coordinate /// system of the surface. In the future, it will contain /// information about the contents of the surface, which /// will allow surfaces to be cached / retained between /// frames and display lists. #[derive(Debug)] pub struct SurfaceInfo { /// A local rect defining the size of this surface, in the /// coordinate system of the surface itself. 
    pub rect: PictureRect,

    /// Helper structs for mapping local rects in different
    /// coordinate systems into the surface coordinates.
    pub map_local_to_surface: SpaceMapper<LayoutPixel, PicturePixel>,

    /// Defines the positioning node for the surface itself,
    /// and the rasterization root for this surface.
    pub raster_spatial_node_index: SpatialNodeIndex,
    pub surface_spatial_node_index: SpatialNodeIndex,

    /// This is set when the render task is created.
    pub render_tasks: Option<SurfaceRenderTasks>,

    /// How much the local surface rect should be inflated (for blur radii).
    pub inflation_factor: f32,

    /// The device pixel ratio specific to this surface.
    pub device_pixel_scale: DevicePixelScale,
}

impl SurfaceInfo {
    /// Construct a new surface. Builds the local→surface space mapper by
    /// unmapping the world bounds into picture space; `rect` starts at zero
    /// and is grown during the picture traversal. `render_tasks` is filled
    /// in later, when the surface's render task is created.
    pub fn new(
        surface_spatial_node_index: SpatialNodeIndex,
        raster_spatial_node_index: SpatialNodeIndex,
        inflation_factor: f32,
        world_rect: WorldRect,
        clip_scroll_tree: &ClipScrollTree,
        device_pixel_scale: DevicePixelScale,
    ) -> Self {
        let map_surface_to_world = SpaceMapper::new_with_target(
            ROOT_SPATIAL_NODE_INDEX,
            surface_spatial_node_index,
            world_rect,
            clip_scroll_tree,
        );

        // If the world bounds can't be unmapped into surface space
        // (e.g. a degenerate transform), fall back to an unbounded rect.
        let pic_bounds = map_surface_to_world
            .unmap(&map_surface_to_world.bounds)
            .unwrap_or_else(PictureRect::max_rect);

        let map_local_to_surface = SpaceMapper::new(
            surface_spatial_node_index,
            pic_bounds,
        );

        SurfaceInfo {
            rect: PictureRect::zero(),
            map_local_to_surface,
            render_tasks: None,
            raster_spatial_node_index,
            surface_spatial_node_index,
            inflation_factor,
            device_pixel_scale,
        }
    }
}

#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct RasterConfig {
    /// How this picture should be composited into
    /// the parent surface.
    pub composite_mode: PictureCompositeMode,
    /// Index to the surface descriptor for this
    /// picture.
    pub surface_index: SurfaceIndex,
    /// Whether this picture establishes a rasterization root.
    pub establishes_raster_root: bool,
}

bitflags! {
    /// A set of flags describing why a picture may need a backing surface.
    #[cfg_attr(feature = "capture", derive(Serialize))]
    pub struct BlitReason: u32 {
        /// Mix-blend-mode on a child that requires isolation.
        const ISOLATE = 1;
        /// Clip node that _might_ require a surface.
        const CLIP = 2;
        /// Preserve-3D requires a surface for plane-splitting.
        const PRESERVE3D = 4;
        /// A backdrop that is reused which requires a surface.
        const BACKDROP = 8;
    }
}

/// Specifies how this Picture should be composited
/// onto the target it belongs to.
#[allow(dead_code)]
#[derive(Debug, Clone)]
#[cfg_attr(feature = "capture", derive(Serialize))]
pub enum PictureCompositeMode {
    /// Apply CSS mix-blend-mode effect.
    MixBlend(MixBlendMode),
    /// Apply a CSS filter (except component transfer).
    Filter(Filter),
    /// Apply a component transfer filter.
    ComponentTransferFilter(FilterDataHandle),
    /// Draw to intermediate surface, copy straight across. This
    /// is used for CSS isolation, and plane splitting.
    Blit(BlitReason),
    /// Used to cache a picture as a series of tiles.
    TileCache {
    },
    /// Apply an SVG filter
    SvgFilter(Vec<FilterPrimitive>, Vec<SFilterData>),
}

impl PictureCompositeMode {
    /// Grow `picture_rect` to account for pixels the composite effect can
    /// produce outside the picture's own bounds (blur spill, drop shadow
    /// offsets, SVG filter primitive outputs). Modes with no spatial
    /// spread return the rect unchanged.
    pub fn inflate_picture_rect(&self, picture_rect: PictureRect, inflation_factor: f32) -> PictureRect {
        let mut result_rect = picture_rect;
        match self {
            PictureCompositeMode::Filter(filter) => match filter {
                Filter::Blur(_) => {
                    result_rect = picture_rect.inflate(inflation_factor, inflation_factor);
                },
                Filter::DropShadows(shadows) => {
                    // Inflate by the largest shadow's blur range; the offset
                    // itself is not applied here, only the blur spread.
                    let mut max_inflation: f32 = 0.0;
                    for shadow in shadows {
                        let inflation_factor = shadow.blur_radius.round() * BLUR_SAMPLE_SCALE;
                        max_inflation = max_inflation.max(inflation_factor);
                    }
                    result_rect = picture_rect.inflate(max_inflation, max_inflation);
                },
                _ => {}
            }
            PictureCompositeMode::SvgFilter(primitives, _) => {
                // Walk the filter graph in order, computing each primitive's
                // output rect from the rects of its already-processed inputs.
                // `to_index` resolves a primitive input to an earlier entry in
                // `output_rects`; unresolved inputs use the source picture rect.
                let mut output_rects = Vec::with_capacity(primitives.len());
                for (cur_index, primitive) in primitives.iter().enumerate() {
                    let output_rect = match primitive.kind {
                        FilterPrimitiveKind::Blur(ref primitive) => {
                            let input = primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect);
                            let inflation_factor = primitive.radius.round() * BLUR_SAMPLE_SCALE;
                            input.inflate(inflation_factor, inflation_factor)
                        }
                        FilterPrimitiveKind::DropShadow(ref primitive) => {
                            let inflation_factor = primitive.shadow.blur_radius.round() * BLUR_SAMPLE_SCALE;
                            let input = primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect);
                            let shadow_rect = input.inflate(inflation_factor, inflation_factor);
                            // Output covers the input plus the offset, blurred shadow.
                            input.union(&shadow_rect.translate(primitive.shadow.offset * Scale::new(1.0)))
                        }
                        FilterPrimitiveKind::Blend(ref primitive) => {
                            primitive.input1.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect)
                                .union(&primitive.input2.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect))
                        }
                        FilterPrimitiveKind::Composite(ref primitive) => {
                            primitive.input1.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect)
                                .union(&primitive.input2.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect))
                        }
                        // Pass-through primitives: output rect == input rect.
                        FilterPrimitiveKind::Identity(ref primitive) =>
                            primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect),
                        FilterPrimitiveKind::Opacity(ref primitive) =>
                            primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect),
                        FilterPrimitiveKind::ColorMatrix(ref primitive) =>
                            primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect),
                        FilterPrimitiveKind::ComponentTransfer(ref primitive) =>
                            primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect),
                        FilterPrimitiveKind::Offset(ref primitive) => {
                            let input_rect = primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect);
                            input_rect.translate(primitive.offset * Scale::new(1.0))
                        },
                        FilterPrimitiveKind::Flood(..) => picture_rect,
                    };
                    output_rects.push(output_rect);
                    // The final result covers every intermediate output.
                    result_rect = result_rect.union(&output_rect);
                }
            }
            _ => {},
        }
        result_rect
    }
}

/// Enum value describing the place of a picture in a 3D context.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
pub enum Picture3DContext<C> {
    /// The picture is not a part of 3D context sub-hierarchy.
    Out,
    /// The picture is a part of 3D context.
    In {
        /// Additional data per child for the case of this a root of 3D hierarchy.
        root_data: Option<Vec<C>>,
        /// The spatial node index of an "ancestor" element, i.e. one
        /// that establishes the transformed element’s containing block.
        ///
        /// See CSS spec draft for more details:
        /// https://drafts.csswg.org/css-transforms-2/#accumulated-3d-transformation-matrix-computation
        ancestor_index: SpatialNodeIndex,
    },
}

/// Information about a preserve-3D hierarchy child that has been plane-split
/// and ordered according to the view direction.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct OrderedPictureChild {
    pub anchor: PlaneSplitAnchor,
    pub spatial_node_index: SpatialNodeIndex,
    pub gpu_address: GpuCacheAddress,
}

bitflags! {
    /// A set of flags describing why a picture may need a backing surface.
    #[cfg_attr(feature = "capture", derive(Serialize))]
    pub struct ClusterFlags: u32 {
        /// This cluster is a picture
        const IS_PICTURE = 1;
        /// Whether this cluster is visible when the position node is a backface.
        const IS_BACKFACE_VISIBLE = 2;
        /// This flag is set during the first pass picture traversal, depending on whether
        /// the cluster is visible or not. It's read during the second pass when primitives
        /// consult their owning clusters to see if the primitive itself is visible.
        const IS_VISIBLE = 4;
        /// Is a backdrop-filter cluster that requires special handling during post_update.
        const IS_BACKDROP_FILTER = 8;
        /// Force creation of a picture caching slice before this cluster.
        const CREATE_PICTURE_CACHE_PRE = 16;
        /// Force creation of a picture caching slice after this cluster.
        const CREATE_PICTURE_CACHE_POST = 32;
        /// If set, this cluster represents a scroll bar container.
        const SCROLLBAR_CONTAINER = 64;
        /// If set, this cluster contains clear rectangle primitives.
        const IS_CLEAR_PRIMITIVE = 128;
    }
}

/// Descriptor for a cluster of primitives. For now, this is quite basic but will be
/// extended to handle more spatial clustering of primitives.
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct PrimitiveCluster {
    /// The positioning node for this cluster.
    pub spatial_node_index: SpatialNodeIndex,
    /// The bounding rect of the cluster, in the local space of the spatial node.
    /// This is used to quickly determine the overall bounding rect for a picture
    /// during the first picture traversal, which is needed for local scale
    /// determination, and render task size calculations.
    bounding_rect: LayoutRect,
    /// The list of primitive instances in this cluster.
    pub prim_instances: Vec<PrimitiveInstance>,
    /// Various flags / state for this cluster.
    pub flags: ClusterFlags,
    /// An optional scroll root to use if this cluster establishes a picture cache slice.
    pub cache_scroll_root: Option<SpatialNodeIndex>,
}

/// Where to insert a prim instance in a primitive list.
#[derive(Debug, Copy, Clone)]
enum PrimitiveListPosition {
    Begin,
    End,
}

impl PrimitiveCluster {
    /// Construct a new primitive cluster for a given positioning node.
    fn new(
        spatial_node_index: SpatialNodeIndex,
        flags: ClusterFlags,
    ) -> Self {
        PrimitiveCluster {
            bounding_rect: LayoutRect::zero(),
            spatial_node_index,
            flags,
            prim_instances: Vec::new(),
            cache_scroll_root: None,
        }
    }

    /// Return true if this cluster is compatible with the given params
    pub fn is_compatible(
        &self,
        spatial_node_index: SpatialNodeIndex,
        flags: ClusterFlags,
    ) -> bool {
        self.flags == flags && self.spatial_node_index == spatial_node_index
    }

    /// Add a primitive instance to this cluster, at the start or end
    fn push(
        &mut self,
        prim_instance: PrimitiveInstance,
        prim_size: LayoutSize,
    ) {
        let prim_rect = LayoutRect::new(
            prim_instance.prim_origin,
            prim_size,
        );
        // Grow the cluster bounds by the clipped prim rect only — a prim
        // entirely outside its local clip contributes a zero rect.
        let culling_rect = prim_instance.local_clip_rect
            .intersection(&prim_rect)
            .unwrap_or_else(LayoutRect::zero);

        self.bounding_rect = self.bounding_rect.union(&culling_rect);
        self.prim_instances.push(prim_instance);
    }
}

/// A list of primitive instances that are added to a picture
/// This ensures we can keep a list of primitives that
/// are pictures, for a fast initial traversal of the picture
/// tree without walking the instance list.
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct PrimitiveList {
    /// List of primitives grouped into clusters.
    pub clusters: Vec<PrimitiveCluster>,
}

impl PrimitiveList {
    /// Construct an empty primitive list. This is
    /// just used during the take_context / restore_context
    /// borrow check dance, which will be removed as the
    /// picture traversal pass is completed.
    pub fn empty() -> Self {
        PrimitiveList {
            clusters: Vec::new(),
        }
    }

    /// Add a primitive instance to this list, at the start or end
    fn push(
        &mut self,
        prim_instance: PrimitiveInstance,
        prim_size: LayoutSize,
        spatial_node_index: SpatialNodeIndex,
        prim_flags: PrimitiveFlags,
        insert_position: PrimitiveListPosition,
    ) {
        // Derive the cluster flags this instance requires; a prim only joins
        // an existing cluster whose flags match exactly (see is_compatible).
        let mut flags = ClusterFlags::empty();

        // Pictures are always put into a new cluster, to make it faster to
        // iterate all pictures in a given primitive list.
        match prim_instance.kind {
            PrimitiveInstanceKind::Picture { .. } => {
                flags.insert(ClusterFlags::IS_PICTURE);
            }
            PrimitiveInstanceKind::Backdrop { .. } => {
                flags.insert(ClusterFlags::IS_BACKDROP_FILTER);
            }
            PrimitiveInstanceKind::Clear { .. } => {
                flags.insert(ClusterFlags::IS_CLEAR_PRIMITIVE);
            }
            _ => {}
        }

        if prim_flags.contains(PrimitiveFlags::IS_BACKFACE_VISIBLE) {
            flags.insert(ClusterFlags::IS_BACKFACE_VISIBLE);
        }
        if prim_flags.contains(PrimitiveFlags::IS_SCROLLBAR_CONTAINER) {
            flags.insert(ClusterFlags::SCROLLBAR_CONTAINER);
        }

        // Insert the primitive into the first or last cluster as required
        match insert_position {
            PrimitiveListPosition::Begin => {
                // Inserting at the front always creates a fresh cluster;
                // no attempt is made to merge with the existing first cluster.
                let mut cluster = PrimitiveCluster::new(
                    spatial_node_index,
                    flags,
                );
                cluster.push(prim_instance, prim_size);
                self.clusters.insert(0, cluster);
            }
            PrimitiveListPosition::End => {
                // Reuse the last cluster when the spatial node and flags
                // match; otherwise start a new one.
                if let Some(cluster) = self.clusters.last_mut() {
                    if cluster.is_compatible(spatial_node_index, flags) {
                        cluster.push(prim_instance, prim_size);
                        return;
                    }
                }

                let mut cluster = PrimitiveCluster::new(
                    spatial_node_index,
                    flags,
                );
                cluster.push(prim_instance, prim_size);
                self.clusters.push(cluster);
            }
        }
    }

    /// Add a primitive instance to the start of the list
    pub fn add_prim_to_start(
        &mut self,
        prim_instance: PrimitiveInstance,
        prim_size: LayoutSize,
        spatial_node_index: SpatialNodeIndex,
        flags: PrimitiveFlags,
    ) {
        self.push(
            prim_instance,
            prim_size,
            spatial_node_index,
            flags,
            PrimitiveListPosition::Begin,
        )
    }

    /// Add a primitive instance to the end of the list
    pub fn add_prim(
        &mut self,
        prim_instance: PrimitiveInstance,
        prim_size: LayoutSize,
        spatial_node_index: SpatialNodeIndex,
        flags: PrimitiveFlags,
    ) {
        self.push(
            prim_instance,
            prim_size,
            spatial_node_index,
            flags,
            PrimitiveListPosition::End,
        )
    }

    /// Returns true if there are no clusters (and thus primitives)
    pub fn is_empty(&self) -> bool {
        self.clusters.is_empty()
    }

    /// Add an existing cluster to this prim list
    pub fn add_cluster(&mut self, cluster: PrimitiveCluster) {
        self.clusters.push(cluster);
    }

    /// Merge another primitive list into this one
    pub fn extend(&mut self, prim_list: PrimitiveList) {
        self.clusters.extend(prim_list.clusters);
    }
}

/// Defines configuration options for a given picture primitive.
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct PictureOptions {
    /// If true, WR should inflate the bounding rect of primitives when
    /// using a filter effect that requires inflation.
    pub inflate_if_required: bool,
}

impl Default for PictureOptions {
    fn default() -> Self {
        PictureOptions {
            inflate_if_required: true,
        }
    }
}

#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct PicturePrimitive {
    /// List of primitives, and associated info for this picture.
    pub prim_list: PrimitiveList,

    #[cfg_attr(feature = "capture", serde(skip))]
    pub state: Option<PictureState>,

    /// If true, apply the local clip rect to primitive drawn
    /// in this picture.
    pub apply_local_clip_rect: bool,
    /// If false and transform ends up showing the back of the picture,
    /// it will be considered invisible.
    pub is_backface_visible: bool,

    // If a mix-blend-mode, contains the render task for
    // the readback of the framebuffer that we use to sample
    // from in the mix-blend-mode shader.
    // For drop-shadow filter, this will store the original
    // picture task which would be rendered on screen after
    // blur pass.
    pub secondary_render_task_id: Option<RenderTaskId>,
    /// How this picture should be composited.
    /// If None, don't composite - just draw directly on parent surface.
    pub requested_composite_mode: Option<PictureCompositeMode>,
    /// Requested rasterization space for this picture. It is
    /// a performance hint only.
    pub requested_raster_space: RasterSpace,

    pub raster_config: Option<RasterConfig>,
    pub context_3d: Picture3DContext<OrderedPictureChild>,

    // If requested as a frame output (for rendering
    // pages to a texture), this is the pipeline this
    // picture is the root of.
    pub frame_output_pipeline_id: Option<PipelineId>,
    // Optional cache handles for storing extra data
    // in the GPU cache, depending on the type of
    // picture.
    pub extra_gpu_data_handles: SmallVec<[GpuCacheHandle; 1]>,

    /// The spatial node index of this picture when it is
    /// composited into the parent picture.
    pub spatial_node_index: SpatialNodeIndex,

    /// The conservative local rect of this picture. It is
    /// built dynamically during the first picture traversal.
    /// It is composed of already snapped primitives.
    pub estimated_local_rect: LayoutRect,

    /// The local rect of this picture. It is built
    /// dynamically during the frame visibility update. It
    /// differs from the estimated_local_rect because it
    /// will not contain culled primitives, takes into
    /// account surface inflation and the whole clip chain.
    /// It is frequently the same, but may be quite
    /// different depending on how much was culled.
    pub precise_local_rect: LayoutRect,

    /// If false, this picture needs to (re)build segments
    /// if it supports segment rendering. This can occur
    /// if the local rect of the picture changes due to
    /// transform animation and/or scrolling.
    pub segments_are_valid: bool,

    /// If Some(..) the tile cache that is associated with this picture.
    #[cfg_attr(feature = "capture", serde(skip))] //TODO
    pub tile_cache: Option<Box<TileCacheInstance>>,

    /// The config options for this picture.
    pub options: PictureOptions,
}

impl PicturePrimitive {
    /// Recursively dump this picture and its child pictures to the
    /// given tree printer, for debugging.
    pub fn print<T: PrintTreePrinter>(
        &self,
        pictures: &[Self],
        self_index: PictureIndex,
        pt: &mut T,
    ) {
        pt.new_level(format!("{:?}", self_index));
        pt.add_item(format!("cluster_count: {:?}", self.prim_list.clusters.len()));
        pt.add_item(format!("estimated_local_rect: {:?}", self.estimated_local_rect));
        pt.add_item(format!("precise_local_rect: {:?}", self.precise_local_rect));
        pt.add_item(format!("spatial_node_index: {:?}", self.spatial_node_index));
        pt.add_item(format!("raster_config: {:?}", self.raster_config));
        pt.add_item(format!("requested_composite_mode: {:?}", self.requested_composite_mode));

        for cluster in &self.prim_list.clusters {
            if cluster.flags.contains(ClusterFlags::IS_PICTURE) {
                for instance in &cluster.prim_instances {
                    let index = match instance.kind {
                        PrimitiveInstanceKind::Picture { pic_index, .. } => pic_index,
                        _ => unreachable!(),
                    };
                    pictures[index.0].print(pictures, index, pt);
                }
            }
        }

        pt.end_level();
    }

    /// Returns true if this picture supports segmented rendering.
    pub fn can_use_segments(&self) -> bool {
        match self.raster_config {
            // TODO(gw): Support brush segment rendering for filter and mix-blend
            //           shaders. It's possible this already works, but I'm just
            //           applying this optimization to Blit mode for now.
            Some(RasterConfig {
                composite_mode: PictureCompositeMode::MixBlend(..),
                ..
            }) |
            Some(RasterConfig {
                composite_mode: PictureCompositeMode::Filter(..),
                ..
            }) |
            Some(RasterConfig {
                composite_mode: PictureCompositeMode::ComponentTransferFilter(..),
                ..
            }) |
            Some(RasterConfig {
                composite_mode: PictureCompositeMode::TileCache { .. },
                ..
            }) |
            Some(RasterConfig {
                composite_mode: PictureCompositeMode::SvgFilter(..),
                ..
            }) |
            None => {
                false
            }
            Some(RasterConfig { composite_mode: PictureCompositeMode::Blit(reason), ..}) => {
                // NOTE: exact equality, not contains() — segments are only
                // used when CLIP is the *sole* blit reason for the surface.
                reason == BlitReason::CLIP
            }
        }
    }

    /// Resolve animated properties (currently only opacity filter bindings)
    /// into concrete values, returning whether the picture remains visible.
    fn resolve_scene_properties(&mut self, properties: &SceneProperties) -> bool {
        match self.requested_composite_mode {
            Some(PictureCompositeMode::Filter(ref mut filter)) => {
                match *filter {
                    Filter::Opacity(ref binding, ref mut value) => {
                        *value = properties.resolve_float(binding);
                    }
                    _ => {}
                }

                filter.is_visible()
            }
            _ => true,
        }
    }

    /// Read-only visibility check: a picture is invisible only when its
    /// requested filter (e.g. opacity 0) makes it so.
    pub fn is_visible(&self) -> bool {
        match self.requested_composite_mode {
            Some(PictureCompositeMode::Filter(ref filter)) => {
                filter.is_visible()
            }
            _ => true,
        }
    }

    /// Destroy an existing picture. This is called just before
    /// a frame builder is replaced with a newly built scene. It
    /// gives a picture a chance to retain any cached tiles that
    /// may be useful during the next scene build.
    pub fn destroy(
        &mut self,
        retained_tiles: &mut RetainedTiles,
    ) {
        if let Some(tile_cache) = self.tile_cache.take() {
            // Only caches that actually produced tiles are worth retaining;
            // an empty cache would carry no reusable state.
            if !tile_cache.tiles.is_empty() {
                retained_tiles.caches.insert(
                    tile_cache.slice,
                    PictureCacheState {
                        tiles: tile_cache.tiles,
                        spatial_nodes: tile_cache.spatial_nodes,
                        opacity_bindings: tile_cache.opacity_bindings,
                        root_transform: tile_cache.root_transform,
                        current_tile_size: tile_cache.current_tile_size,
                        // Hand back heap allocations so the next scene build
                        // can recycle them instead of reallocating.
                        allocations: PictureCacheRecycledAllocations {
                            old_tiles: tile_cache.old_tiles,
                            old_opacity_bindings: tile_cache.old_opacity_bindings,
                            compare_cache: tile_cache.compare_cache,
                        },
                    },
                );
            }
        }
    }

    // TODO(gw): We have the PictureOptions struct available. We
    //           should move some of the parameter list in this
    //           method to be part of the PictureOptions, and
    //           avoid adding new parameters here.
    /// Construct a new picture primitive. Rect fields start at zero and
    /// `raster_config` at None; both are filled in during the picture
    /// traversal / frame build passes.
    pub fn new_image(
        requested_composite_mode: Option<PictureCompositeMode>,
        context_3d: Picture3DContext<OrderedPictureChild>,
        frame_output_pipeline_id: Option<PipelineId>,
        apply_local_clip_rect: bool,
        flags: PrimitiveFlags,
        requested_raster_space: RasterSpace,
        prim_list: PrimitiveList,
        spatial_node_index: SpatialNodeIndex,
        tile_cache: Option<Box<TileCacheInstance>>,
        options: PictureOptions,
    ) -> Self {
        PicturePrimitive {
            prim_list,
            state: None,
            secondary_render_task_id: None,
            requested_composite_mode,
            raster_config: None,
            context_3d,
            frame_output_pipeline_id,
            extra_gpu_data_handles: SmallVec::new(),
            apply_local_clip_rect,
            is_backface_visible: flags.contains(PrimitiveFlags::IS_BACKFACE_VISIBLE),
            requested_raster_space,
            spatial_node_index,
            estimated_local_rect: LayoutRect::zero(),
            precise_local_rect: LayoutRect::zero(),
            tile_cache,
            options,
            segments_are_valid: false,
        }
    }

    /// Gets the raster space to use when rendering the picture.
    /// Usually this would be the requested raster space. However, if the
    /// picture's spatial node or one of its ancestors is being pinch zoomed
    /// then we round it. This prevents us rasterizing glyphs for every minor
    /// change in zoom level, as that would be too expensive.
    pub fn get_raster_space(&self, clip_scroll_tree: &ClipScrollTree) -> RasterSpace {
        let spatial_node = &clip_scroll_tree.spatial_nodes[self.spatial_node_index.0 as usize];
        if spatial_node.is_ancestor_or_self_zooming {
            let scale_factors = clip_scroll_tree
                .get_relative_transform(self.spatial_node_index, ROOT_SPATIAL_NODE_INDEX)
                .scale_factors();

            // Round the scale up to the nearest power of 2, but don't exceed 8.
let scale = scale_factors.0.max(scale_factors.1).min(8.0); let rounded_up = 1 << scale.log2().ceil() as u32; RasterSpace::Local(rounded_up as f32) } else { self.requested_raster_space } } pub fn take_context( &mut self, pic_index: PictureIndex, clipped_prim_bounding_rect: WorldRect, surface_spatial_node_index: SpatialNodeIndex, raster_spatial_node_index: SpatialNodeIndex, parent_surface_index: SurfaceIndex, parent_subpixel_mode: SubpixelMode, frame_state: &mut FrameBuildingState, frame_context: &FrameBuildingContext, scratch: &mut PrimitiveScratchBuffer, ) -> Option<(PictureContext, PictureState, PrimitiveList)> { if !self.is_visible() { return None; } // Extract the raster and surface spatial nodes from the raster // config, if this picture establishes a surface. Otherwise just // pass in the spatial node indices from the parent context. let (raster_spatial_node_index, surface_spatial_node_index, surface_index, inflation_factor) = match self.raster_config { Some(ref raster_config) => { let surface = &frame_state.surfaces[raster_config.surface_index.0]; ( surface.raster_spatial_node_index, self.spatial_node_index, raster_config.surface_index, surface.inflation_factor, ) } None => { ( raster_spatial_node_index, surface_spatial_node_index, parent_surface_index, 0.0, ) } }; let map_pic_to_world = SpaceMapper::new_with_target( ROOT_SPATIAL_NODE_INDEX, surface_spatial_node_index, frame_context.global_screen_world_rect, frame_context.clip_scroll_tree, ); let pic_bounds = map_pic_to_world.unmap(&map_pic_to_world.bounds) .unwrap_or_else(PictureRect::max_rect); let map_local_to_pic = SpaceMapper::new( surface_spatial_node_index, pic_bounds, ); let (map_raster_to_world, map_pic_to_raster) = create_raster_mappers( surface_spatial_node_index, raster_spatial_node_index, frame_context.global_screen_world_rect, frame_context.clip_scroll_tree, ); let plane_splitter = match self.context_3d { Picture3DContext::Out => { None } Picture3DContext::In { root_data: Some(_), .. 
} => { Some(PlaneSplitter::new()) } Picture3DContext::In { root_data: None, .. } => { None } }; match self.raster_config { Some(ref raster_config) => { let pic_rect = PictureRect::from_untyped(&self.precise_local_rect.to_untyped()); let device_pixel_scale = frame_state .surfaces[raster_config.surface_index.0] .device_pixel_scale; let (clipped, unclipped) = match get_raster_rects( pic_rect, &map_pic_to_raster, &map_raster_to_world, clipped_prim_bounding_rect, device_pixel_scale, ) { Some(info) => info, None => { return None } }; let transform = map_pic_to_raster.get_transform(); let dep_info = match raster_config.composite_mode { PictureCompositeMode::Filter(Filter::Blur(blur_radius)) => { let blur_std_deviation = blur_radius * device_pixel_scale.0; let scale_factors = scale_factors(&transform); let blur_std_deviation = DeviceSize::new( blur_std_deviation * scale_factors.0, blur_std_deviation * scale_factors.1 ); let mut device_rect = if self.options.inflate_if_required { let inflation_factor = frame_state.surfaces[raster_config.surface_index.0].inflation_factor; let inflation_factor = (inflation_factor * device_pixel_scale.0).ceil(); // The clipped field is the part of the picture that is visible // on screen. The unclipped field is the screen-space rect of // the complete picture, if no screen / clip-chain was applied // (this includes the extra space for blur region). To ensure // that we draw a large enough part of the picture to get correct // blur results, inflate that clipped area by the blur range, and // then intersect with the total screen rect, to minimize the // allocation size. // We cast clipped to f32 instead of casting unclipped to i32 // because unclipped can overflow an i32. 
let device_rect = clipped.to_f32() .inflate(inflation_factor, inflation_factor) .intersection(&unclipped) .unwrap(); match device_rect.try_cast::<i32>() { Some(rect) => rect, None => { return None } } } else { clipped }; let original_size = device_rect.size; // Adjust the size to avoid introducing sampling errors during the down-scaling passes. // what would be even better is to rasterize the picture at the down-scaled size // directly. device_rect.size = RenderTask::adjusted_blur_source_size( device_rect.size, blur_std_deviation, ); let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &device_rect, device_pixel_scale, true, ); let picture_task = RenderTask::new_picture( RenderTaskLocation::Dynamic(None, device_rect.size), unclipped.size, pic_index, device_rect.origin, uv_rect_kind, surface_spatial_node_index, device_pixel_scale, PrimitiveVisibilityMask::all(), None, ); let picture_task_id = frame_state.render_tasks.add(picture_task); let blur_render_task_id = RenderTask::new_blur( blur_std_deviation, picture_task_id, frame_state.render_tasks, RenderTargetKind::Color, ClearMode::Transparent, None, original_size, ); Some((blur_render_task_id, picture_task_id)) } PictureCompositeMode::Filter(Filter::DropShadows(ref shadows)) => { let mut max_std_deviation = 0.0; for shadow in shadows { // TODO(nical) presumably we should compute the clipped rect for each shadow // and compute the union of them to determine what we need to rasterize and blur? max_std_deviation = f32::max(max_std_deviation, shadow.blur_radius * device_pixel_scale.0); } max_std_deviation = max_std_deviation.round(); let max_blur_range = (max_std_deviation * BLUR_SAMPLE_SCALE).ceil(); // We cast clipped to f32 instead of casting unclipped to i32 // because unclipped can overflow an i32. 
let device_rect = clipped.to_f32() .inflate(max_blur_range, max_blur_range) .intersection(&unclipped) .unwrap(); let mut device_rect = match device_rect.try_cast::<i32>() { Some(rect) => rect, None => { return None } }; device_rect.size = RenderTask::adjusted_blur_source_size( device_rect.size, DeviceSize::new(max_std_deviation, max_std_deviation), ); let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &device_rect, device_pixel_scale, true, ); let mut picture_task = RenderTask::new_picture( RenderTaskLocation::Dynamic(None, device_rect.size), unclipped.size, pic_index, device_rect.origin, uv_rect_kind, surface_spatial_node_index, device_pixel_scale, PrimitiveVisibilityMask::all(), None, ); picture_task.mark_for_saving(); let picture_task_id = frame_state.render_tasks.add(picture_task); self.secondary_render_task_id = Some(picture_task_id); let mut blur_tasks = BlurTaskCache::default(); self.extra_gpu_data_handles.resize(shadows.len(), GpuCacheHandle::new()); let mut blur_render_task_id = picture_task_id; for shadow in shadows { let std_dev = f32::round(shadow.blur_radius * device_pixel_scale.0); blur_render_task_id = RenderTask::new_blur( DeviceSize::new(std_dev, std_dev), picture_task_id, frame_state.render_tasks, RenderTargetKind::Color, ClearMode::Transparent, Some(&mut blur_tasks), device_rect.size, ); } // TODO(nical) the second one should to be the blur's task id but we have several blurs now Some((blur_render_task_id, picture_task_id)) } PictureCompositeMode::MixBlend(..) 
if !frame_context.fb_config.gpu_supports_advanced_blend => { let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &clipped, device_pixel_scale, true, ); let picture_task = RenderTask::new_picture( RenderTaskLocation::Dynamic(None, clipped.size), unclipped.size, pic_index, clipped.origin, uv_rect_kind, surface_spatial_node_index, device_pixel_scale, PrimitiveVisibilityMask::all(), None, ); let readback_task_id = frame_state.render_tasks.add( RenderTask::new_readback(clipped) ); frame_state.render_tasks.add_dependency( frame_state.surfaces[parent_surface_index.0].render_tasks.unwrap().port, readback_task_id, ); self.secondary_render_task_id = Some(readback_task_id); let render_task_id = frame_state.render_tasks.add(picture_task); Some((render_task_id, render_task_id)) } PictureCompositeMode::Filter(..) => { let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &clipped, device_pixel_scale, true, ); let picture_task = RenderTask::new_picture( RenderTaskLocation::Dynamic(None, clipped.size), unclipped.size, pic_index, clipped.origin, uv_rect_kind, surface_spatial_node_index, device_pixel_scale, PrimitiveVisibilityMask::all(), None, ); let render_task_id = frame_state.render_tasks.add(picture_task); Some((render_task_id, render_task_id)) } PictureCompositeMode::ComponentTransferFilter(..) => { let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &clipped, device_pixel_scale, true, ); let picture_task = RenderTask::new_picture( RenderTaskLocation::Dynamic(None, clipped.size), unclipped.size, pic_index, clipped.origin, uv_rect_kind, surface_spatial_node_index, device_pixel_scale, PrimitiveVisibilityMask::all(), None, ); let render_task_id = frame_state.render_tasks.add(picture_task); Some((render_task_id, render_task_id)) } PictureCompositeMode::TileCache { .. } => { let tile_cache = self.tile_cache.as_mut().unwrap(); let mut first = true; // Get the overall world space rect of the picture cache. 
Used to clip // the tile rects below for occlusion testing to the relevant area. let local_clip_rect = tile_cache.local_rect .intersection(&tile_cache.local_clip_rect) .unwrap_or(PictureRect::zero()); let world_clip_rect = map_pic_to_world .map(&local_clip_rect) .expect("bug: unable to map clip rect"); for key in &tile_cache.tiles_to_draw { let tile = tile_cache.tiles.get_mut(key).expect("bug: no tile found!"); // Get the world space rect that this tile will actually occupy on screem let tile_draw_rect = match world_clip_rect.intersection(&tile.world_rect) { Some(rect) => rect, None => { tile.is_visible = false; continue; } }; // If that draw rect is occluded by some set of tiles in front of it, // then mark it as not visible and skip drawing. When it's not occluded // it will fail this test, and get rasterized by the render task setup // code below. if frame_state.composite_state.is_tile_occluded(tile_cache.slice, tile_draw_rect) { // If this tile has an allocated native surface, free it, since it's completely // occluded. We will need to re-allocate this surface if it becomes visible, // but that's likely to be rare (e.g. when there is no content display list // for a frame or two during a tab switch). let surface = tile.surface.as_mut().expect("no tile surface set!"); if let TileSurface::Texture { descriptor: SurfaceTextureDescriptor::NativeSurface { id, .. }, .. 
} = surface { if let Some(id) = id.take() { frame_state.resource_cache.destroy_compositor_surface(id); } } tile.is_visible = false; continue; } if frame_context.debug_flags.contains(DebugFlags::PICTURE_CACHING_DBG) { tile.root.draw_debug_rects( &map_pic_to_world, tile.is_opaque, scratch, frame_context.global_device_pixel_scale, ); let label_offset = DeviceVector2D::new(20.0, 30.0); let tile_device_rect = tile.world_rect * frame_context.global_device_pixel_scale; if tile_device_rect.size.height >= label_offset.y { let surface = tile.surface.as_ref().expect("no tile surface set!"); scratch.push_debug_string( tile_device_rect.origin + label_offset, debug_colors::RED, format!("{:?}: s={} is_opaque={} surface={}", tile.id, tile_cache.slice, tile.is_opaque, surface.kind(), ), ); } } if let TileSurface::Texture { descriptor, .. } = tile.surface.as_mut().unwrap() { match descriptor { SurfaceTextureDescriptor::TextureCache { ref handle, .. } => { // Invalidate if the backing texture was evicted. if frame_state.resource_cache.texture_cache.is_allocated(handle) { // Request the backing texture so it won't get evicted this frame. // We specifically want to mark the tile texture as used, even // if it's detected not visible below and skipped. This is because // we maintain the set of tiles we care about based on visibility // during pre_update. If a tile still exists after that, we are // assuming that it's either visible or we want to retain it for // a while in case it gets scrolled back onto screen soon. // TODO(gw): Consider switching to manual eviction policy? frame_state.resource_cache.texture_cache.request(handle, frame_state.gpu_cache); } else { // If the texture was evicted on a previous frame, we need to assume // that the entire tile rect is dirty. tile.invalidate(None, InvalidationReason::NoTexture); } } SurfaceTextureDescriptor::NativeSurface { id, .. 
} => { if id.is_none() { // There is no current surface allocation, so ensure the entire tile is invalidated tile.invalidate(None, InvalidationReason::NoSurface); } } } } // Update the world dirty rect tile.world_dirty_rect = map_pic_to_world.map(&tile.dirty_rect).expect("bug"); if tile.is_valid { continue; } // Ensure that this texture is allocated. if let TileSurface::Texture { ref mut descriptor, ref mut visibility_mask } = tile.surface.as_mut().unwrap() { match descriptor { SurfaceTextureDescriptor::TextureCache { ref mut handle } => { if !frame_state.resource_cache.texture_cache.is_allocated(handle) { frame_state.resource_cache.texture_cache.update_picture_cache( tile_cache.current_tile_size, handle, frame_state.gpu_cache, ); } } SurfaceTextureDescriptor::NativeSurface { id, size } => { if id.is_none() { *id = Some(frame_state.resource_cache.create_compositor_surface( *size, tile.is_opaque, )); } } } *visibility_mask = PrimitiveVisibilityMask::empty(); let dirty_region_index = tile_cache.dirty_region.dirty_rects.len(); // If we run out of dirty regions, then force the last dirty region to // be a union of any remaining regions. This is an inefficiency, in that // we'll add items to batches later on that are redundant / outside this // tile, but it's really rare except in pathological cases (even on a // 4k screen, the typical dirty region count is < 16). 
if dirty_region_index < PrimitiveVisibilityMask::MAX_DIRTY_REGIONS { visibility_mask.set_visible(dirty_region_index); tile_cache.dirty_region.push( tile.world_dirty_rect, *visibility_mask, ); } else { visibility_mask.set_visible(PrimitiveVisibilityMask::MAX_DIRTY_REGIONS - 1); tile_cache.dirty_region.include_rect( PrimitiveVisibilityMask::MAX_DIRTY_REGIONS - 1, tile.world_dirty_rect, ); } let content_origin_f = tile.world_rect.origin * device_pixel_scale; let content_origin = content_origin_f.round(); debug_assert!((content_origin_f.x - content_origin.x).abs() < 0.01); debug_assert!((content_origin_f.y - content_origin.y).abs() < 0.01); // Get a task-local scissor rect for the dirty region of this // picture cache task. let scissor_rect = tile.world_dirty_rect.translate( -tile.world_rect.origin.to_vector() ); // The world rect is guaranteed to be device pixel aligned, by the tile // sizing code in tile::pre_update. However, there might be some // small floating point accuracy issues (these were observed on ARM // CPUs). Round the rect here before casting to integer device pixels // to ensure the scissor rect is correct. let scissor_rect = (scissor_rect * device_pixel_scale).round(); let surface = descriptor.resolve(frame_state.resource_cache); let task = RenderTask::new_picture( RenderTaskLocation::PictureCache { size: tile_cache.current_tile_size, surface, }, tile_cache.current_tile_size.to_f32(), pic_index, content_origin.to_i32(), UvRectKind::Rect, surface_spatial_node_index, device_pixel_scale, *visibility_mask, Some(scissor_rect.to_i32()), ); let render_task_id = frame_state.render_tasks.add(task); frame_state.render_tasks.add_dependency( frame_state.surfaces[parent_surface_index.0].render_tasks.unwrap().port, render_task_id, ); if first { // TODO(gw): Maybe we can restructure this code to avoid the // first hack here. Or at least explain it with a follow up // bug. 
frame_state.surfaces[raster_config.surface_index.0].render_tasks = Some(SurfaceRenderTasks { root: render_task_id, port: render_task_id, }); first = false; } } // Now that the tile is valid, reset the dirty rect. tile.dirty_rect = PictureRect::zero(); tile.is_valid = true; } // If invalidation debugging is enabled, dump the picture cache state to a tree printer. if frame_context.debug_flags.contains(DebugFlags::INVALIDATION_DBG) { tile_cache.print(); } None } PictureCompositeMode::MixBlend(..) | PictureCompositeMode::Blit(_) => { // The SplitComposite shader used for 3d contexts doesn't snap // to pixels, so we shouldn't snap our uv coordinates either. let supports_snapping = match self.context_3d { Picture3DContext::In{ .. } => false, _ => true, }; let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &clipped, device_pixel_scale, supports_snapping, ); let picture_task = RenderTask::new_picture( RenderTaskLocation::Dynamic(None, clipped.size), unclipped.size, pic_index, clipped.origin, uv_rect_kind, surface_spatial_node_index, device_pixel_scale, PrimitiveVisibilityMask::all(), None, ); let render_task_id = frame_state.render_tasks.add(picture_task); Some((render_task_id, render_task_id)) } PictureCompositeMode::SvgFilter(ref primitives, ref filter_datas) => { let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &clipped, device_pixel_scale, true, ); let picture_task = RenderTask::new_picture( RenderTaskLocation::Dynamic(None, clipped.size), unclipped.size, pic_index, clipped.origin, uv_rect_kind, surface_spatial_node_index, device_pixel_scale, PrimitiveVisibilityMask::all(), None, ); let picture_task_id = frame_state.render_tasks.add(picture_task); let filter_task_id = RenderTask::new_svg_filter( primitives, filter_datas, &mut frame_state.render_tasks, clipped.size, uv_rect_kind, picture_task_id, device_pixel_scale, ); Some((filter_task_id, picture_task_id)) } }; if let Some((root, port)) = dep_info { 
frame_state.surfaces[raster_config.surface_index.0].render_tasks = Some(SurfaceRenderTasks { root, port, }); frame_state.render_tasks.add_dependency( frame_state.surfaces[parent_surface_index.0].render_tasks.unwrap().port, root, ); } } None => {} }; let state = PictureState { //TODO: check for MAX_CACHE_SIZE here? map_local_to_pic, map_pic_to_world, map_pic_to_raster, map_raster_to_world, plane_splitter, }; let mut dirty_region_count = 0; // If this is a picture cache, push the dirty region to ensure any // child primitives are culled and clipped to the dirty rect(s). if let Some(RasterConfig { composite_mode: PictureCompositeMode::TileCache { .. }, .. }) = self.raster_config { let dirty_region = self.tile_cache.as_ref().unwrap().dirty_region.clone(); frame_state.push_dirty_region(dirty_region); dirty_region_count += 1; } if inflation_factor > 0.0 { let inflated_region = frame_state.current_dirty_region().inflate(inflation_factor); frame_state.push_dirty_region(inflated_region); dirty_region_count += 1; } // Disallow subpixel AA if an intermediate surface is needed. // TODO(lsalzman): allow overriding parent if intermediate surface is opaque let (is_passthrough, subpixel_mode) = match self.raster_config { Some(RasterConfig { ref composite_mode, .. }) => { let subpixel_mode = match composite_mode { PictureCompositeMode::TileCache { .. } => { self.tile_cache.as_ref().unwrap().subpixel_mode } PictureCompositeMode::Blit(..) | PictureCompositeMode::ComponentTransferFilter(..) | PictureCompositeMode::Filter(..) | PictureCompositeMode::MixBlend(..) | PictureCompositeMode::SvgFilter(..) => { // TODO(gw): We can take advantage of the same logic that // exists in the opaque rect detection for tile // caches, to allow subpixel text on other surfaces // that can be detected as opaque. 
                            // Intermediate surfaces can't sample subpixel AA glyphs
                            // correctly, so deny subpixel rendering here.
                            SubpixelMode::Deny
                        }
                    };

                    (false, subpixel_mode)
                }
                None => {
                    // No composite surface: this picture is a passthrough, so
                    // subpixel AA remains allowed for child primitives.
                    (true, SubpixelMode::Allow)
                }
            };

            // Still disable subpixel AA if parent forbids it
            let subpixel_mode = match (parent_subpixel_mode, subpixel_mode) {
                (SubpixelMode::Allow, SubpixelMode::Allow) => SubpixelMode::Allow,
                _ => SubpixelMode::Deny,
            };

            // Build the context handed to the caller while traversing this
            // picture's children.
            let context = PictureContext {
                pic_index,
                apply_local_clip_rect: self.apply_local_clip_rect,
                is_passthrough,
                raster_spatial_node_index,
                surface_spatial_node_index,
                surface_index,
                dirty_region_count,
                subpixel_mode,
            };

            // Temporarily move the primitive list out of self; it is handed
            // back via restore_context once traversal completes.
            let prim_list = mem::replace(&mut self.prim_list, PrimitiveList::empty());

            Some((context, state, prim_list))
        }

        /// Restore the primitive list and picture state that were handed out
        /// by `take_context`, and pop any dirty regions this picture pushed.
        pub fn restore_context(
            &mut self,
            prim_list: PrimitiveList,
            context: PictureContext,
            state: PictureState,
            frame_state: &mut FrameBuildingState,
        ) {
            // Pop any dirty regions this picture set
            for _ in 0 .. context.dirty_region_count {
                frame_state.pop_dirty_region();
            }

            self.prim_list = prim_list;
            self.state = Some(state);
        }

        /// Take the per-frame picture state. Panics if `restore_context` has
        /// not stored a state for this picture (a logic bug in frame building).
        pub fn take_state(&mut self) -> PictureState {
            self.state.take().expect("bug: no state present!")
        }

        /// Add a primitive instance to the plane splitter. The function would generate
        /// an appropriate polygon, clip it against the frustum, and register with the
        /// given plane splitter.
        pub fn add_split_plane(
            splitter: &mut PlaneSplitter,
            clip_scroll_tree: &ClipScrollTree,
            prim_spatial_node_index: SpatialNodeIndex,
            original_local_rect: LayoutRect,
            combined_local_clip_rect: &LayoutRect,
            world_rect: WorldRect,
            plane_split_anchor: PlaneSplitAnchor,
        ) -> bool {
            let transform = clip_scroll_tree
                .get_world_transform(prim_spatial_node_index);
            let matrix = transform.clone().into_transform().cast();
            // Apply the local clip rect here, before splitting. This is
            // because the local clip rect can't be applied in the vertex
            // shader for split composites, since we are drawing polygons
            // rather than rectangles. The interpolation still works correctly
            // since we determine the UVs by doing a bilerp with a factor
            // from the original local rect.
            // Clip the primitive rect to the combined local clip; a primitive
            // fully clipped out contributes no polygon at all.
            let local_rect = match original_local_rect
                .intersection(combined_local_clip_rect)
            {
                Some(rect) => rect.cast(),
                None => return false,
            };
            let world_rect = world_rect.cast();

            match transform {
                CoordinateSpaceMapping::Local => {
                    // Identity mapping: the local rect can be used directly.
                    let polygon = Polygon::from_rect(
                        local_rect * Scale::new(1.0),
                        plane_split_anchor,
                    );
                    splitter.add(polygon);
                }
                CoordinateSpaceMapping::ScaleOffset(scale_offset) if scale_offset.scale == Vector2D::new(1.0, 1.0) => {
                    // Pure offset (unit scale): transform the rect with the
                    // known inverse rather than running the general clipper.
                    let inv_matrix = scale_offset.inverse().to_transform().cast();
                    let polygon = Polygon::from_transformed_rect_with_inverse(
                        local_rect,
                        &matrix,
                        &inv_matrix,
                        plane_split_anchor,
                    ).unwrap();
                    splitter.add(polygon);
                }
                CoordinateSpaceMapping::ScaleOffset(_) |
                CoordinateSpaceMapping::Transform(_) => {
                    // General transform: clip the transformed polygon against
                    // the world rect, then feed each resulting piece to the
                    // splitter. A failed clip silently contributes nothing.
                    let mut clipper = Clipper::new();
                    let results = clipper.clip_transformed(
                        Polygon::from_rect(
                            local_rect,
                            plane_split_anchor,
                        ),
                        &matrix,
                        Some(world_rect),
                    );
                    if let Ok(results) = results {
                        for poly in results {
                            splitter.add(poly);
                        }
                    }
                }
            }

            true
        }

        /// Resolve the accumulated split planes for a 3D rendering context,
        /// producing a back-to-front ordered child list with GPU addresses.
        /// Panics if this picture is not the root of a preserve-3d context.
        pub fn resolve_split_planes(
            &mut self,
            splitter: &mut PlaneSplitter,
            gpu_cache: &mut GpuCache,
            clip_scroll_tree: &ClipScrollTree,
        ) {
            let ordered = match self.context_3d {
                Picture3DContext::In { root_data: Some(ref mut list), .. } => list,
                _ => panic!("Expected to find 3D context root"),
            };
            ordered.clear();

            // Process the accumulated split planes and order them for rendering.
            // Z axis is directed at the screen, `sort` is ascending, and we need back-to-front order.
for poly in splitter.sort(vec3(0.0, 0.0, 1.0)) { let cluster = &self.prim_list.clusters[poly.anchor.cluster_index]; let spatial_node_index = cluster.spatial_node_index; let transform = match clip_scroll_tree .get_world_transform(spatial_node_index) .inverse() { Some(transform) => transform.into_transform(), // logging this would be a bit too verbose None => continue, }; let local_points = [ transform.transform_point3d(poly.points[0].cast()).unwrap(), transform.transform_point3d(poly.points[1].cast()).unwrap(), transform.transform_point3d(poly.points[2].cast()).unwrap(), transform.transform_point3d(poly.points[3].cast()).unwrap(), ]; let gpu_blocks = [ [local_points[0].x, local_points[0].y, local_points[1].x, local_points[1].y].into(), [local_points[2].x, local_points[2].y, local_points[3].x, local_points[3].y].into(), ]; let gpu_handle = gpu_cache.push_per_frame_blocks(&gpu_blocks); let gpu_address = gpu_cache.get_address(&gpu_handle); ordered.push(OrderedPictureChild { anchor: poly.anchor, spatial_node_index, gpu_address, }); } } /// Called during initial picture traversal, before we know the /// bounding rect of children. It is possible to determine the /// surface / raster config now though. fn pre_update( &mut self, state: &mut PictureUpdateState, frame_context: &FrameBuildingContext, ) -> Option<PrimitiveList> { // Reset raster config in case we early out below. self.raster_config = None; // Resolve animation properties, and early out if the filter // properties make this picture invisible. if !self.resolve_scene_properties(frame_context.scene_properties) { return None; } // For out-of-preserve-3d pictures, the backface visibility is determined by // the local transform only. // Note: we aren't taking the transform relativce to the parent picture, // since picture tree can be more dense than the corresponding spatial tree. 
if !self.is_backface_visible { if let Picture3DContext::Out = self.context_3d { match frame_context.clip_scroll_tree.get_local_visible_face(self.spatial_node_index) { VisibleFace::Front => {} VisibleFace::Back => return None, } } } // Push information about this pic on stack for children to read. state.push_picture(PictureInfo { _spatial_node_index: self.spatial_node_index, }); // See if this picture actually needs a surface for compositing. let actual_composite_mode = match self.requested_composite_mode { Some(PictureCompositeMode::Filter(ref filter)) if filter.is_noop() => None, Some(PictureCompositeMode::TileCache { .. }) => { // Only allow picture caching composite mode if global picture caching setting // is enabled this frame. if state.composite_state.picture_caching_is_enabled { Some(PictureCompositeMode::TileCache { }) } else { None } }, ref mode => mode.clone(), }; if let Some(composite_mode) = actual_composite_mode { // Retrieve the positioning node information for the parent surface. let parent_raster_node_index = state.current_surface().raster_spatial_node_index; let surface_spatial_node_index = self.spatial_node_index; // This inflation factor is to be applied to all primitives within the surface. let inflation_factor = match composite_mode { PictureCompositeMode::Filter(Filter::Blur(blur_radius)) => { // Only inflate if the caller hasn't already inflated // the bounding rects for this filter. if self.options.inflate_if_required { // The amount of extra space needed for primitives inside // this picture to ensure the visibility check is correct. 
BLUR_SAMPLE_SCALE * blur_radius } else { 0.0 } } PictureCompositeMode::SvgFilter(ref primitives, _) if self.options.inflate_if_required => { let mut max = 0.0; for primitive in primitives { if let FilterPrimitiveKind::Blur(ref blur) = primitive.kind { max = f32::max(max, blur.radius * BLUR_SAMPLE_SCALE); } } max } _ => { 0.0 } }; // Filters must be applied before transforms, to do this, we can mark this picture as establishing a raster root. let has_svg_filter = if let PictureCompositeMode::SvgFilter(..) = composite_mode { true } else { false }; // Check if there is perspective or if an SVG filter is applied, and thus whether a new // rasterization root should be established. let establishes_raster_root = has_svg_filter || frame_context.clip_scroll_tree .get_relative_transform(surface_spatial_node_index, parent_raster_node_index) .is_perspective(); let surface = SurfaceInfo::new( surface_spatial_node_index, if establishes_raster_root { surface_spatial_node_index } else { parent_raster_node_index }, inflation_factor, frame_context.global_screen_world_rect, &frame_context.clip_scroll_tree, frame_context.global_device_pixel_scale, ); self.raster_config = Some(RasterConfig { composite_mode, establishes_raster_root, surface_index: state.push_surface(surface), }); } Some(mem::replace(&mut self.prim_list, PrimitiveList::empty())) } /// Called after updating child pictures during the initial /// picture traversal. fn post_update( &mut self, prim_list: PrimitiveList, state: &mut PictureUpdateState, frame_context: &FrameBuildingContext, data_stores: &mut DataStores, ) { // Restore the pictures list used during recursion. self.prim_list = prim_list; // Pop the state information about this picture. state.pop_picture(); for cluster in &mut self.prim_list.clusters { cluster.flags.remove(ClusterFlags::IS_VISIBLE); // Skip the cluster if backface culled. 
if !cluster.flags.contains(ClusterFlags::IS_BACKFACE_VISIBLE) { // For in-preserve-3d primitives and pictures, the backface visibility is // evaluated relative to the containing block. if let Picture3DContext::In { ancestor_index, .. } = self.context_3d { match frame_context.clip_scroll_tree .get_relative_transform(cluster.spatial_node_index, ancestor_index) .visible_face() { VisibleFace::Back => continue, VisibleFace::Front => (), } } } // No point including this cluster if it can't be transformed let spatial_node = &frame_context .clip_scroll_tree .spatial_nodes[cluster.spatial_node_index.0 as usize]; if !spatial_node.invertible { continue; } // Update any primitives/cluster bounding rects that can only be done // with information available during frame building. if cluster.flags.contains(ClusterFlags::IS_BACKDROP_FILTER) { let backdrop_to_world_mapper = SpaceMapper::new_with_target( ROOT_SPATIAL_NODE_INDEX, cluster.spatial_node_index, LayoutRect::max_rect(), frame_context.clip_scroll_tree, ); for prim_instance in &mut cluster.prim_instances { match prim_instance.kind { PrimitiveInstanceKind::Backdrop { data_handle, .. } => { // The actual size and clip rect of this primitive are determined by computing the bounding // box of the projected rect of the backdrop-filter element onto the backdrop. let prim_data = &mut data_stores.backdrop[data_handle]; let spatial_node_index = prim_data.kind.spatial_node_index; // We cannot use the relative transform between the backdrop and the element because // that doesn't take into account any projection transforms that both spatial nodes are children of. // Instead, we first project from the element to the world space and get a flattened 2D bounding rect // in the screen space, we then map this rect from the world space to the backdrop space to get the // proper bounding box where the backdrop-filter needs to be processed. 
let prim_to_world_mapper = SpaceMapper::new_with_target( ROOT_SPATIAL_NODE_INDEX, spatial_node_index, LayoutRect::max_rect(), frame_context.clip_scroll_tree, ); // First map to the screen and get a flattened rect let prim_rect = prim_to_world_mapper.map(&prim_data.kind.border_rect).unwrap_or_else(LayoutRect::zero); // Backwards project the flattened rect onto the backdrop let prim_rect = backdrop_to_world_mapper.unmap(&prim_rect).unwrap_or_else(LayoutRect::zero); // TODO(aosmond): Is this safe? Updating the primitive size during // frame building is usually problematic since scene building will cache // the primitive information in the GPU already. prim_instance.prim_origin = prim_rect.origin; prim_data.common.prim_size = prim_rect.size; prim_instance.local_clip_rect = prim_rect; // Update the cluster bounding rect now that we have the backdrop rect. cluster.bounding_rect = cluster.bounding_rect.union(&prim_rect); } _ => { panic!("BUG: unexpected deferred primitive kind for cluster updates"); } } } } // Map the cluster bounding rect into the space of the surface, and // include it in the surface bounding rect. let surface = state.current_surface_mut(); surface.map_local_to_surface.set_target_spatial_node( cluster.spatial_node_index, frame_context.clip_scroll_tree, ); // Mark the cluster visible, since it passed the invertible and // backface checks. In future, this will include spatial clustering // which will allow the frame building code to skip most of the // current per-primitive culling code. cluster.flags.insert(ClusterFlags::IS_VISIBLE); if let Some(cluster_rect) = surface.map_local_to_surface.map(&cluster.bounding_rect) { surface.rect = surface.rect.union(&cluster_rect); } } // If this picture establishes a surface, then map the surface bounding // rect into the parent surface coordinate space, and propagate that up // to the parent. 
if let Some(ref mut raster_config) = self.raster_config { let surface = state.current_surface_mut(); // Inflate the local bounding rect if required by the filter effect. // This inflaction factor is to be applied to the surface itself. if self.options.inflate_if_required { surface.rect = raster_config.composite_mode.inflate_picture_rect(surface.rect, surface.inflation_factor); // The picture's local rect is calculated as the union of the // snapped primitive rects, which should result in a snapped // local rect, unless it was inflated. This is also done during // update visibility when calculating the picture's precise // local rect. let snap_surface_to_raster = SpaceSnapper::new_with_target( surface.raster_spatial_node_index, self.spatial_node_index, surface.device_pixel_scale, frame_context.clip_scroll_tree, ); surface.rect = snap_surface_to_raster.snap_rect(&surface.rect); } let mut surface_rect = surface.rect * Scale::new(1.0); // Pop this surface from the stack let surface_index = state.pop_surface(); debug_assert_eq!(surface_index, raster_config.surface_index); // Check if any of the surfaces can't be rasterized in local space but want to. if raster_config.establishes_raster_root { if surface_rect.size.width > MAX_SURFACE_SIZE || surface_rect.size.height > MAX_SURFACE_SIZE { raster_config.establishes_raster_root = false; state.are_raster_roots_assigned = false; } } // Set the estimated and precise local rects. The precise local rect // may be changed again during frame visibility. self.estimated_local_rect = surface_rect; self.precise_local_rect = surface_rect; // Drop shadows draw both a content and shadow rect, so need to expand the local // rect of any surfaces to be composited in parent surfaces correctly. 
match raster_config.composite_mode { PictureCompositeMode::Filter(Filter::DropShadows(ref shadows)) => { for shadow in shadows { let shadow_rect = self.estimated_local_rect.translate(shadow.offset); surface_rect = surface_rect.union(&shadow_rect); } } _ => {} } // Propagate up to parent surface, now that we know this surface's static rect let parent_surface = state.current_surface_mut(); parent_surface.map_local_to_surface.set_target_spatial_node( self.spatial_node_index, frame_context.clip_scroll_tree, ); if let Some(parent_surface_rect) = parent_surface .map_local_to_surface .map(&surface_rect) { parent_surface.rect = parent_surface.rect.union(&parent_surface_rect); } } } pub fn prepare_for_render( &mut self, frame_context: &FrameBuildingContext, frame_state: &mut FrameBuildingState, data_stores: &mut DataStores, ) -> bool { let mut pic_state_for_children = self.take_state(); if let Some(ref mut splitter) = pic_state_for_children.plane_splitter { self.resolve_split_planes( splitter, &mut frame_state.gpu_cache, &frame_context.clip_scroll_tree, ); } let raster_config = match self.raster_config { Some(ref mut raster_config) => raster_config, None => { return true } }; // TODO(gw): Almost all of the Picture types below use extra_gpu_cache_data // to store the same type of data. The exception is the filter // with a ColorMatrix, which stores the color matrix here. It's // probably worth tidying this code up to be a bit more consistent. // Perhaps store the color matrix after the common data, even though // it's not used by that shader. match raster_config.composite_mode { PictureCompositeMode::TileCache { .. 
} => {} PictureCompositeMode::Filter(Filter::Blur(..)) => {} PictureCompositeMode::Filter(Filter::DropShadows(ref shadows)) => { self.extra_gpu_data_handles.resize(shadows.len(), GpuCacheHandle::new()); for (shadow, extra_handle) in shadows.iter().zip(self.extra_gpu_data_handles.iter_mut()) { if let Some(mut request) = frame_state.gpu_cache.request(extra_handle) { // Basic brush primitive header is (see end of prepare_prim_for_render_inner in prim_store.rs) // [brush specific data] // [segment_rect, segment data] let shadow_rect = self.precise_local_rect.translate(shadow.offset); // ImageBrush colors request.push(shadow.color.premultiplied()); request.push(PremultipliedColorF::WHITE); request.push([ self.precise_local_rect.size.width, self.precise_local_rect.size.height, 0.0, 0.0, ]); // segment rect / extra data request.push(shadow_rect); request.push([0.0, 0.0, 0.0, 0.0]); } } } PictureCompositeMode::MixBlend(..) if !frame_context.fb_config.gpu_supports_advanced_blend => {} PictureCompositeMode::Filter(ref filter) => { match *filter { Filter::ColorMatrix(ref m) => { if self.extra_gpu_data_handles.is_empty() { self.extra_gpu_data_handles.push(GpuCacheHandle::new()); } if let Some(mut request) = frame_state.gpu_cache.request(&mut self.extra_gpu_data_handles[0]) { for i in 0..5 { request.push([m[i*4], m[i*4+1], m[i*4+2], m[i*4+3]]); } } } Filter::Flood(ref color) => { if self.extra_gpu_data_handles.is_empty() { self.extra_gpu_data_handles.push(GpuCacheHandle::new()); } if let Some(mut request) = frame_state.gpu_cache.request(&mut self.extra_gpu_data_handles[0]) { request.push(color.to_array()); } } _ => {} } } PictureCompositeMode::ComponentTransferFilter(handle) => { let filter_data = &mut data_stores.filter_data[handle]; filter_data.update(frame_state); } PictureCompositeMode::MixBlend(..) | PictureCompositeMode::Blit(_) | PictureCompositeMode::SvgFilter(..) => {} } true } } // Calculate a single homogeneous screen-space UV for a picture. 
fn calculate_screen_uv( local_pos: &PicturePoint, transform: &PictureToRasterTransform, rendered_rect: &DeviceRect, device_pixel_scale: DevicePixelScale, supports_snapping: bool, ) -> DeviceHomogeneousVector { let raster_pos = transform.transform_point2d_homogeneous(*local_pos); let mut device_vec = DeviceHomogeneousVector::new( raster_pos.x * device_pixel_scale.0, raster_pos.y * device_pixel_scale.0, 0.0, raster_pos.w, ); // Apply snapping for axis-aligned scroll nodes, as per prim_shared.glsl. if transform.transform_kind() == TransformedRectKind::AxisAligned && supports_snapping { device_vec = DeviceHomogeneousVector::new( (device_vec.x / device_vec.w + 0.5).floor(), (device_vec.y / device_vec.w + 0.5).floor(), 0.0, 1.0, ); } DeviceHomogeneousVector::new( (device_vec.x - rendered_rect.origin.x * device_vec.w) / rendered_rect.size.width, (device_vec.y - rendered_rect.origin.y * device_vec.w) / rendered_rect.size.height, 0.0, device_vec.w, ) } // Calculate a UV rect within an image based on the screen space // vertex positions of a picture. 
fn calculate_uv_rect_kind( pic_rect: &PictureRect, transform: &PictureToRasterTransform, rendered_rect: &DeviceIntRect, device_pixel_scale: DevicePixelScale, supports_snapping: bool, ) -> UvRectKind { let rendered_rect = rendered_rect.to_f32(); let top_left = calculate_screen_uv( &pic_rect.origin, transform, &rendered_rect, device_pixel_scale, supports_snapping, ); let top_right = calculate_screen_uv( &pic_rect.top_right(), transform, &rendered_rect, device_pixel_scale, supports_snapping, ); let bottom_left = calculate_screen_uv( &pic_rect.bottom_left(), transform, &rendered_rect, device_pixel_scale, supports_snapping, ); let bottom_right = calculate_screen_uv( &pic_rect.bottom_right(), transform, &rendered_rect, device_pixel_scale, supports_snapping, ); UvRectKind::Quad { top_left, top_right, bottom_left, bottom_right, } } fn create_raster_mappers( surface_spatial_node_index: SpatialNodeIndex, raster_spatial_node_index: SpatialNodeIndex, world_rect: WorldRect, clip_scroll_tree: &ClipScrollTree, ) -> (SpaceMapper<RasterPixel, WorldPixel>, SpaceMapper<PicturePixel, RasterPixel>) { let map_raster_to_world = SpaceMapper::new_with_target( ROOT_SPATIAL_NODE_INDEX, raster_spatial_node_index, world_rect, clip_scroll_tree, ); let raster_bounds = map_raster_to_world.unmap(&world_rect) .unwrap_or_else(RasterRect::max_rect); let map_pic_to_raster = SpaceMapper::new_with_target( raster_spatial_node_index, surface_spatial_node_index, raster_bounds, clip_scroll_tree, ); (map_raster_to_world, map_pic_to_raster) } fn get_transform_key( spatial_node_index: SpatialNodeIndex, cache_spatial_node_index: SpatialNodeIndex, clip_scroll_tree: &ClipScrollTree, ) -> TransformKey { // Note: this is the only place where we don't know beforehand if the tile-affecting // spatial node is below or above the current picture. 
let transform = if cache_spatial_node_index >= spatial_node_index { clip_scroll_tree .get_relative_transform( cache_spatial_node_index, spatial_node_index, ) } else { clip_scroll_tree .get_relative_transform( spatial_node_index, cache_spatial_node_index, ) }; transform.into() } /// A key for storing primitive comparison results during tile dependency tests. #[derive(Debug, Copy, Clone, Eq, Hash, PartialEq)] struct PrimitiveComparisonKey { prev_index: PrimitiveDependencyIndex, curr_index: PrimitiveDependencyIndex, } /// Information stored an image dependency #[derive(Debug, Copy, Clone, PartialEq)] struct ImageDependency { key: ImageKey, generation: ImageGeneration, } /// A helper struct to compare a primitive and all its sub-dependencies. struct PrimitiveComparer<'a> { clip_comparer: CompareHelper<'a, ItemUid>, transform_comparer: CompareHelper<'a, SpatialNodeIndex>, image_comparer: CompareHelper<'a, ImageDependency>, opacity_comparer: CompareHelper<'a, OpacityBinding>, resource_cache: &'a ResourceCache, spatial_nodes: &'a FastHashMap<SpatialNodeIndex, SpatialNodeDependency>, opacity_bindings: &'a FastHashMap<PropertyBindingId, OpacityBindingInfo>, } impl<'a> PrimitiveComparer<'a> { fn new( prev: &'a TileDescriptor, curr: &'a TileDescriptor, resource_cache: &'a ResourceCache, spatial_nodes: &'a FastHashMap<SpatialNodeIndex, SpatialNodeDependency>, opacity_bindings: &'a FastHashMap<PropertyBindingId, OpacityBindingInfo>, ) -> Self { let clip_comparer = CompareHelper::new( &prev.clips, &curr.clips, ); let transform_comparer = CompareHelper::new( &prev.transforms, &curr.transforms, ); let image_comparer = CompareHelper::new( &prev.images, &curr.images, ); let opacity_comparer = CompareHelper::new( &prev.opacity_bindings, &curr.opacity_bindings, ); PrimitiveComparer { clip_comparer, transform_comparer, image_comparer, opacity_comparer, resource_cache, spatial_nodes, opacity_bindings, } } fn reset(&mut self) { self.clip_comparer.reset(); self.transform_comparer.reset(); 
self.image_comparer.reset(); self.opacity_comparer.reset(); } fn advance_prev(&mut self, prim: &PrimitiveDescriptor) { self.clip_comparer.advance_prev(prim.clip_dep_count); self.transform_comparer.advance_prev(prim.transform_dep_count); self.image_comparer.advance_prev(prim.image_dep_count); self.opacity_comparer.advance_prev(prim.opacity_binding_dep_count); } fn advance_curr(&mut self, prim: &PrimitiveDescriptor) { self.clip_comparer.advance_curr(prim.clip_dep_count); self.transform_comparer.advance_curr(prim.transform_dep_count); self.image_comparer.advance_curr(prim.image_dep_count); self.opacity_comparer.advance_curr(prim.opacity_binding_dep_count); } /// Check if two primitive descriptors are the same. fn compare_prim( &mut self, prev: &PrimitiveDescriptor, curr: &PrimitiveDescriptor, ) -> PrimitiveCompareResult { let resource_cache = self.resource_cache; let spatial_nodes = self.spatial_nodes; let opacity_bindings = self.opacity_bindings; // Check equality of the PrimitiveDescriptor if prev != curr { return PrimitiveCompareResult::Descriptor; } // Check if any of the clips this prim has are different. if !self.clip_comparer.is_same( prev.clip_dep_count, curr.clip_dep_count, |_| { false } ) { return PrimitiveCompareResult::Clip; } // Check if any of the transforms this prim has are different. if !self.transform_comparer.is_same( prev.transform_dep_count, curr.transform_dep_count, |curr| { spatial_nodes[curr].changed } ) { return PrimitiveCompareResult::Transform; } // Check if any of the images this prim has are different. if !self.image_comparer.is_same( prev.image_dep_count, curr.image_dep_count, |curr| { resource_cache.get_image_generation(curr.key) != curr.generation } ) { return PrimitiveCompareResult::Image; } // Check if any of the opacity bindings this prim has are different. 
if !self.opacity_comparer.is_same( prev.opacity_binding_dep_count, curr.opacity_binding_dep_count, |curr| { if let OpacityBinding::Binding(id) = curr { if opacity_bindings .get(id) .map_or(true, |info| info.changed) { return true; } } false } ) { return PrimitiveCompareResult::OpacityBinding; } PrimitiveCompareResult::Equal } } /// Details for a node in a quadtree that tracks dirty rects for a tile. enum TileNodeKind { Leaf { /// The index buffer of primitives that affected this tile previous frame prev_indices: Vec<PrimitiveDependencyIndex>, /// The index buffer of primitives that affect this tile on this frame curr_indices: Vec<PrimitiveDependencyIndex>, /// A bitset of which of the last 64 frames have been dirty for this leaf. dirty_tracker: u64, /// The number of frames since this node split or merged. frames_since_modified: usize, }, Node { /// The four children of this node children: Vec<TileNode>, }, } /// The kind of modification that a tile wants to do #[derive(Copy, Clone, PartialEq, Debug)] enum TileModification { Split, Merge, } /// A node in the dirty rect tracking quadtree. struct TileNode { /// Leaf or internal node kind: TileNodeKind, /// Rect of this node in the same space as the tile cache picture rect: PictureRect, } impl TileNode { /// Construct a new leaf node, with the given primitive dependency index buffer fn new_leaf(curr_indices: Vec<PrimitiveDependencyIndex>) -> Self { TileNode { kind: TileNodeKind::Leaf { prev_indices: Vec::new(), curr_indices, dirty_tracker: 0, frames_since_modified: 0, }, rect: PictureRect::zero(), } } /// Draw debug information about this tile node fn draw_debug_rects( &self, pic_to_world_mapper: &SpaceMapper<PicturePixel, WorldPixel>, is_opaque: bool, scratch: &mut PrimitiveScratchBuffer, global_device_pixel_scale: DevicePixelScale, ) { match self.kind { TileNodeKind::Leaf { dirty_tracker, .. 
} => { let color = if (dirty_tracker & 1) != 0 { debug_colors::RED } else if is_opaque { debug_colors::GREEN } else { debug_colors::YELLOW }; let world_rect = pic_to_world_mapper.map(&self.rect).unwrap(); let device_rect = world_rect * global_device_pixel_scale; let outer_color = color.scale_alpha(0.3); let inner_color = outer_color.scale_alpha(0.5); scratch.push_debug_rect( device_rect.inflate(-3.0, -3.0), outer_color, inner_color ); } TileNodeKind::Node { ref children, .. } => { for child in children.iter() { child.draw_debug_rects( pic_to_world_mapper, is_opaque, scratch, global_device_pixel_scale, ); } } } } /// Calculate the four child rects for a given node fn get_child_rects( rect: &PictureRect, ) -> Vec<PictureRect> { let p0 = rect.origin; let half_size = PictureSize::new(rect.size.width * 0.5, rect.size.height * 0.5); [ PictureRect::new( PicturePoint::new(p0.x, p0.y), half_size, ), PictureRect::new( PicturePoint::new(p0.x + half_size.width, p0.y), half_size, ), PictureRect::new( PicturePoint::new(p0.x, p0.y + half_size.height), half_size, ), PictureRect::new( PicturePoint::new(p0.x + half_size.width, p0.y + half_size.height), half_size, ), ].to_vec() } /// Called during pre_update, to clear the current dependencies fn clear( &mut self, rect: PictureRect, ) { self.rect = rect; match self.kind { TileNodeKind::Leaf { ref mut prev_indices, ref mut curr_indices, ref mut dirty_tracker, ref mut frames_since_modified } => { // Swap current dependencies to be the previous frame mem::swap(prev_indices, curr_indices); curr_indices.clear(); // Note that another frame has passed in the dirty bit trackers *dirty_tracker = *dirty_tracker << 1; *frames_since_modified += 1; } TileNodeKind::Node { ref mut children, .. 
} => { let child_rects = TileNode::get_child_rects(&rect); assert_eq!(child_rects.len(), children.len()); for (child, rect) in children.iter_mut().zip(child_rects.iter()) { child.clear(*rect); } } } } /// Add a primitive dependency to this node fn add_prim( &mut self, index: PrimitiveDependencyIndex, prim_rect: &PictureRect, ) { match self.kind { TileNodeKind::Leaf { ref mut curr_indices, .. } => { curr_indices.push(index); } TileNodeKind::Node { ref mut children, .. } => { for child in children.iter_mut() { if child.rect.intersects(prim_rect) { child.add_prim(index, prim_rect); } } } } } /// Apply a merge or split operation to this tile, if desired fn maybe_merge_or_split( &mut self, level: i32, curr_prims: &[PrimitiveDescriptor], max_split_levels: i32, ) { // Determine if this tile wants to split or merge let mut tile_mod = None; fn get_dirty_frames( dirty_tracker: u64, frames_since_modified: usize, ) -> Option<u32> { // Only consider splitting or merging at least 64 frames since we last changed if frames_since_modified > 64 { // Each bit in the tracker is a frame that was recently invalidated Some(dirty_tracker.count_ones()) } else { None } } match self.kind { TileNodeKind::Leaf { dirty_tracker, frames_since_modified, .. } => { // Only consider splitting if the tree isn't too deep. if level < max_split_levels { if let Some(dirty_frames) = get_dirty_frames(dirty_tracker, frames_since_modified) { // If the tile has invalidated > 50% of the recent number of frames, split. if dirty_frames > 32 { tile_mod = Some(TileModification::Split); } } } } TileNodeKind::Node { ref children, .. } => { // There's two conditions that cause a node to merge its children: // (1) If _all_ the child nodes are constantly invalidating, then we are wasting // CPU time tracking dependencies for each child, so merge them. // (2) If _none_ of the child nodes are recently invalid, then the page content // has probably changed, and we no longer need to track fine grained dependencies here. 
let mut static_count = 0; let mut changing_count = 0; for child in children { // Only consider merging nodes at the edge of the tree. if let TileNodeKind::Leaf { dirty_tracker, frames_since_modified, .. } = child.kind { if let Some(dirty_frames) = get_dirty_frames(dirty_tracker, frames_since_modified) { if dirty_frames == 0 { // Hasn't been invalidated for some time static_count += 1; } else if dirty_frames == 64 { // Is constantly being invalidated changing_count += 1; } } } // Only merge if all the child tiles are in agreement. Otherwise, we have some // that are invalidating / static, and it's worthwhile tracking dependencies for // them individually. if static_count == 4 || changing_count == 4 { tile_mod = Some(TileModification::Merge); } } } } match tile_mod { Some(TileModification::Split) => { // To split a node, take the current dependency index buffer for this node, and // split it into child index buffers. let curr_indices = match self.kind { TileNodeKind::Node { .. } => { unreachable!("bug - only leaves can split"); } TileNodeKind::Leaf { ref mut curr_indices, .. } => { curr_indices.take() } }; // TODO(gw): We know that these are fixed arrays, we could do better with // allocations here! let child_rects = TileNode::get_child_rects(&self.rect); let child_rects: Vec<RectangleKey> = child_rects.iter().map(|r| (*r).into()).collect(); let mut child_indices = vec![Vec::new(); child_rects.len()]; // Step through the index buffer, and add primitives to each of the children // that they intersect. for index in curr_indices { let prim = &curr_prims[index.0 as usize]; for (child_rect, indices) in child_rects.iter().zip(child_indices.iter_mut()) { if prim.prim_clip_rect.intersects(child_rect) { indices.push(index); } } } // Create the child nodes and switch from leaf -> node. 
let children = child_indices.into_iter().map(|i| TileNode::new_leaf(i)).collect(); self.kind = TileNodeKind::Node { children: children, }; } Some(TileModification::Merge) => { // Construct a merged index buffer by collecting the dependency index buffers // from each child, and merging them into a de-duplicated index buffer. let merged_indices = match self.kind { TileNodeKind::Node { ref mut children, .. } => { let mut merged_indices = Vec::new(); for child in children.iter() { let child_indices = match child.kind { TileNodeKind::Leaf { ref curr_indices, .. } => { curr_indices } TileNodeKind::Node { .. } => { unreachable!("bug: child is not a leaf"); } }; merged_indices.extend_from_slice(child_indices); } merged_indices.sort(); merged_indices.dedup(); merged_indices } TileNodeKind::Leaf { .. } => { unreachable!("bug - trying to merge a leaf"); } }; // Switch from a node to a leaf, with the combined index buffer self.kind = TileNodeKind::Leaf { prev_indices: Vec::new(), curr_indices: merged_indices, dirty_tracker: 0, frames_since_modified: 0, }; } None => { // If this node didn't merge / split, then recurse into children // to see if they want to split / merge. if let TileNodeKind::Node { ref mut children, .. } = self.kind { for child in children.iter_mut() { child.maybe_merge_or_split( level+1, curr_prims, max_split_levels, ); } } } } } /// Update the dirty state of this node, building the overall dirty rect fn update_dirty_rects( &mut self, prev_prims: &[PrimitiveDescriptor], curr_prims: &[PrimitiveDescriptor], prim_comparer: &mut PrimitiveComparer, dirty_rect: &mut PictureRect, compare_cache: &mut FastHashMap<PrimitiveComparisonKey, PrimitiveCompareResult>, invalidation_reason: &mut Option<InvalidationReason>, ) { match self.kind { TileNodeKind::Node { ref mut children, .. 
} => { for child in children.iter_mut() { child.update_dirty_rects( prev_prims, curr_prims, prim_comparer, dirty_rect, compare_cache, invalidation_reason, ); } } TileNodeKind::Leaf { ref prev_indices, ref curr_indices, ref mut dirty_tracker, .. } => { // If the index buffers are of different length, they must be different if prev_indices.len() == curr_indices.len() { let mut prev_i0 = 0; let mut prev_i1 = 0; prim_comparer.reset(); // Walk each index buffer, comparing primitives for (prev_index, curr_index) in prev_indices.iter().zip(curr_indices.iter()) { let i0 = prev_index.0 as usize; let i1 = curr_index.0 as usize; // Advance the dependency arrays for each primitive (this handles // prims that may be skipped by these index buffers). for i in prev_i0 .. i0 { prim_comparer.advance_prev(&prev_prims[i]); } for i in prev_i1 .. i1 { prim_comparer.advance_curr(&curr_prims[i]); } // Compare the primitives, caching the result in a hash map // to save comparisons in other tree nodes. let key = PrimitiveComparisonKey { prev_index: *prev_index, curr_index: *curr_index, }; let prim_compare_result = *compare_cache .entry(key) .or_insert_with(|| { let prev = &prev_prims[i0]; let curr = &curr_prims[i1]; prim_comparer.compare_prim(prev, curr) }); // If not the same, mark this node as dirty and update the dirty rect if prim_compare_result != PrimitiveCompareResult::Equal { if invalidation_reason.is_none() { *invalidation_reason = Some(InvalidationReason::Content { prim_compare_result, }); } *dirty_rect = self.rect.union(dirty_rect); *dirty_tracker = *dirty_tracker | 1; break; } prev_i0 = i0; prev_i1 = i1; } } else { if invalidation_reason.is_none() { *invalidation_reason = Some(InvalidationReason::PrimCount); } *dirty_rect = self.rect.union(dirty_rect); *dirty_tracker = *dirty_tracker | 1; } } } } } impl CompositeState { // A helper function to destroy all native surfaces for a given list of tiles pub fn destroy_native_surfaces<'a, I: Iterator<Item = &'a Tile>>( &mut self, 
tiles_iter: I, resource_cache: &mut ResourceCache, ) { // Any old tiles that remain after the loop above are going to be dropped. For // simple composite mode, the texture cache handle will expire and be collected // by the texture cache. For native compositor mode, we need to explicitly // invoke a callback to the client to destroy that surface. if let CompositorKind::Native { .. } = self.compositor_kind { for tile in tiles_iter { // Only destroy native surfaces that have been allocated. It's // possible for display port tiles to be created that never // come on screen, and thus never get a native surface allocated. if let Some(TileSurface::Texture { descriptor: SurfaceTextureDescriptor::NativeSurface { id, .. }, .. }) = tile.surface { if let Some(id) = id { resource_cache.destroy_compositor_surface(id); } } } } } } Bug 1602458 - Avoid unnecessary vector allocations when splitting the tile cache. r=gw Differential Revision: https://phabricator.services.mozilla.com/D56417 [wrupdater] From https://hg.mozilla.org/mozilla-central/rev/1f03147b38545bffc24dcccd819114922860b30e /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! A picture represents a dynamically rendered image. //! //! # Overview //! //! Pictures consists of: //! //! - A number of primitives that are drawn onto the picture. //! - A composite operation describing how to composite this //! picture into its parent. //! - A configuration describing how to draw the primitives on //! this picture (e.g. in screen space or local space). //! //! The tree of pictures are generated during scene building. //! //! Depending on their composite operations pictures can be rendered into //! intermediate targets or folded into their parent picture. //! //! ## Picture caching //! //! Pictures can be cached to reduce the amount of rasterization happening per //! frame. 
//! //! When picture caching is enabled, the scene is cut into a small number of slices, //! typically: //! //! - content slice //! - UI slice //! - background UI slice which is hidden by the other two slices most of the time. //! //! Each of these slice is made up of fixed-size large tiles of 2048x512 pixels //! (or 128x128 for the UI slice). //! //! Tiles can be either cached rasterized content into a texture or "clear tiles" //! that contain only a solid color rectangle rendered directly during the composite //! pass. //! //! ## Invalidation //! //! Each tile keeps track of the elements that affect it, which can be: //! //! - primitives //! - clips //! - image keys //! - opacity bindings //! - transforms //! //! These dependency lists are built each frame and compared to the previous frame to //! see if the tile changed. //! //! The tile's primitive dependency information is organized in a quadtree, each node //! storing an index buffer of tile primitive dependencies. //! //! The union of the invalidated leaves of each quadtree produces a per-tile dirty rect //! which defines the scissor rect used when replaying the tile's drawing commands and //! can be used for partial present. //! //! ## Display List shape //! //! WR will first look for an iframe item in the root stacking context to apply //! picture caching to. If that's not found, it will apply to the entire root //! stacking context of the display list. Apart from that, the format of the //! display list is not important to picture caching. Each time a new scroll root //! is encountered, a new picture cache slice will be created. If the display //! list contains more than some arbitrary number of slices (currently 8), the //! content will all be squashed into a single slice, in order to save GPU memory //! and compositing performance. 
use api::{MixBlendMode, PipelineId, PremultipliedColorF, FilterPrimitiveKind}; use api::{PropertyBinding, PropertyBindingId, FilterPrimitive, FontRenderMode}; use api::{DebugFlags, RasterSpace, ImageKey, ColorF, PrimitiveFlags}; use api::units::*; use crate::box_shadow::{BLUR_SAMPLE_SCALE}; use crate::clip::{ClipStore, ClipChainInstance, ClipDataHandle, ClipChainId}; use crate::clip_scroll_tree::{ROOT_SPATIAL_NODE_INDEX, ClipScrollTree, CoordinateSpaceMapping, SpatialNodeIndex, VisibleFace }; use crate::composite::{CompositorKind, CompositeState, NativeSurfaceId}; use crate::debug_colors; use euclid::{vec3, Point2D, Scale, Size2D, Vector2D, Rect}; use euclid::approxeq::ApproxEq; use crate::filterdata::SFilterData; use crate::frame_builder::{FrameVisibilityContext, FrameVisibilityState}; use crate::intern::ItemUid; use crate::internal_types::{FastHashMap, FastHashSet, PlaneSplitter, Filter, PlaneSplitAnchor, TextureSource}; use crate::frame_builder::{FrameBuildingContext, FrameBuildingState, PictureState, PictureContext}; use crate::gpu_cache::{GpuCache, GpuCacheAddress, GpuCacheHandle}; use crate::gpu_types::UvRectKind; use plane_split::{Clipper, Polygon, Splitter}; use crate::prim_store::{SpaceMapper, PrimitiveVisibilityMask, PointKey, PrimitiveTemplateKind}; use crate::prim_store::{SpaceSnapper, PictureIndex, PrimitiveInstance, PrimitiveInstanceKind}; use crate::prim_store::{get_raster_rects, PrimitiveScratchBuffer, RectangleKey}; use crate::prim_store::{OpacityBindingStorage, ImageInstanceStorage, OpacityBindingIndex}; use crate::print_tree::{PrintTree, PrintTreePrinter}; use crate::render_backend::DataStores; use crate::render_task_graph::RenderTaskId; use crate::render_target::RenderTargetKind; use crate::render_task::{RenderTask, RenderTaskLocation, BlurTaskCache, ClearMode}; use crate::resource_cache::{ResourceCache, ImageGeneration}; use crate::scene::SceneProperties; use smallvec::SmallVec; use std::{mem, u8, marker, u32}; use 
std::sync::atomic::{AtomicUsize, Ordering};
use crate::texture_cache::TextureCacheHandle;
use crate::util::{TransformedRectKind, MatrixHelpers, MaxRect, scale_factors, VecHelper, RectHelpers};
use crate::filterdata::{FilterDataHandle};

/// Specify whether a surface allows subpixel AA text rendering.
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum SubpixelMode {
    /// This surface allows subpixel AA text
    Allow,
    /// Subpixel AA text cannot be drawn on this surface
    Deny,
}

/// A comparable transform matrix, that compares with epsilon checks.
/// Used so that tiny floating point differences between frames do not
/// register as a changed transform dependency.
#[derive(Debug, Clone)]
struct MatrixKey {
    m: [f32; 16],
}

impl PartialEq for MatrixKey {
    fn eq(&self, other: &Self) -> bool {
        const EPSILON: f32 = 0.001;

        // TODO(gw): It's possible that we may need to adjust the epsilon
        //           to be tighter on most of the matrix, except the
        //           translation parts?
        for (i, j) in self.m.iter().zip(other.m.iter()) {
            if !i.approx_eq_eps(j, &EPSILON) {
                return false;
            }
        }

        true
    }
}

/// A comparable / hashable version of a coordinate space mapping. Used to determine
/// if a transform dependency for a tile has changed.
#[derive(Debug, PartialEq, Clone)]
enum TransformKey {
    /// Identity mapping (same coordinate space).
    Local,
    /// An axis-aligned scale + offset mapping.
    ScaleOffset {
        scale_x: f32,
        scale_y: f32,
        offset_x: f32,
        offset_y: f32,
    },
    /// A full matrix transform, compared with epsilon via `MatrixKey`.
    Transform {
        m: MatrixKey,
    }
}

impl<Src, Dst> From<CoordinateSpaceMapping<Src, Dst>> for TransformKey {
    fn from(transform: CoordinateSpaceMapping<Src, Dst>) -> TransformKey {
        match transform {
            CoordinateSpaceMapping::Local => {
                TransformKey::Local
            }
            CoordinateSpaceMapping::ScaleOffset(ref scale_offset) => {
                TransformKey::ScaleOffset {
                    scale_x: scale_offset.scale.x,
                    scale_y: scale_offset.scale.y,
                    offset_x: scale_offset.offset.x,
                    offset_y: scale_offset.offset.y,
                }
            }
            CoordinateSpaceMapping::Transform(ref m) => {
                TransformKey::Transform {
                    m: MatrixKey {
                        m: m.to_row_major_array(),
                    },
                }
            }
        }
    }
}

/// Information about a picture that is pushed / popped on the
/// PictureUpdateState during picture traversal pass.
struct PictureInfo {
    /// The spatial node for this picture.
    _spatial_node_index: SpatialNodeIndex,
}

/// Picture-caching state to keep between scenes.
pub struct PictureCacheState {
    /// The tiles retained by this picture cache.
    pub tiles: FastHashMap<TileOffset, Tile>,
    /// State of the spatial nodes from previous frame
    spatial_nodes: FastHashMap<SpatialNodeIndex, SpatialNodeDependency>,
    /// State of opacity bindings from previous frame
    opacity_bindings: FastHashMap<PropertyBindingId, OpacityBindingInfo>,
    /// The current transform of the picture cache root spatial node
    root_transform: TransformKey,
    /// The current tile size in device pixels
    current_tile_size: DeviceIntSize,
    /// Various allocations we want to avoid re-doing.
    allocations: PictureCacheRecycledAllocations,
}

/// Collections whose backing allocations are recycled across scenes to
/// avoid re-allocating them each time a new scene is built.
pub struct PictureCacheRecycledAllocations {
    // Tiles from the previous scene, kept so their allocations can be reused.
    old_tiles: FastHashMap<TileOffset, Tile>,
    // Opacity binding map from the previous scene.
    old_opacity_bindings: FastHashMap<PropertyBindingId, OpacityBindingInfo>,
    // Cache of primitive comparison results, reused during invalidation.
    compare_cache: FastHashMap<PrimitiveComparisonKey, PrimitiveCompareResult>,
}

/// Stores a list of cached picture tiles that are retained
/// between new scenes.
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct RetainedTiles {
    /// The tiles retained between display lists.
    #[cfg_attr(feature = "capture", serde(skip))] //TODO
    pub caches: FastHashMap<usize, PictureCacheState>,
}

impl RetainedTiles {
    /// Construct an empty set of retained tiles.
    pub fn new() -> Self {
        RetainedTiles {
            caches: FastHashMap::default(),
        }
    }

    /// Merge items from one retained tiles into another. At most one of the
    /// two sides may be non-empty; `self` keeps whichever set has content.
    pub fn merge(&mut self, other: RetainedTiles) {
        assert!(self.caches.is_empty() || other.caches.is_empty());
        if self.caches.is_empty() {
            self.caches = other.caches;
        }
    }
}

/// Unit for tile coordinates.
#[derive(Hash, Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct TileCoordinate;

// Geometry types for tile coordinates.
pub type TileOffset = Point2D<i32, TileCoordinate>;
pub type TileSize = Size2D<i32, TileCoordinate>;
pub type TileRect = Rect<i32, TileCoordinate>;

/// The size in device pixels of a normal cached tile.
pub const TILE_SIZE_DEFAULT: DeviceIntSize = DeviceIntSize {
    width: 1024,
    height: 512,
    _unit: marker::PhantomData,
};

/// The size in device pixels of a tile for horizontal scroll bars
pub const TILE_SIZE_SCROLLBAR_HORIZONTAL: DeviceIntSize = DeviceIntSize {
    width: 512,
    height: 16,
    _unit: marker::PhantomData,
};

/// The size in device pixels of a tile for vertical scroll bars
pub const TILE_SIZE_SCROLLBAR_VERTICAL: DeviceIntSize = DeviceIntSize {
    width: 16,
    height: 512,
    _unit: marker::PhantomData,
};

/// Return the list of tile sizes for the renderer to allocate texture arrays for.
pub fn tile_cache_sizes() -> &'static [DeviceIntSize] {
    &[
        TILE_SIZE_DEFAULT,
        TILE_SIZE_SCROLLBAR_HORIZONTAL,
        TILE_SIZE_SCROLLBAR_VERTICAL,
    ]
}

/// The maximum size per axis of a surface,
/// in WorldPixel coordinates.
const MAX_SURFACE_SIZE: f32 = 4096.0;

/// The maximum number of sub-dependencies (e.g. clips, transforms) we can handle
/// per-primitive. If a primitive has more than this, it will invalidate every frame.
const MAX_PRIM_SUB_DEPS: usize = u8::MAX as usize;

/// Used to get unique tile IDs, even when the tile cache is
/// destroyed between display lists / scenes.
static NEXT_TILE_ID: AtomicUsize = AtomicUsize::new(0);

/// Clamp `value` to the inclusive range [low, high].
/// NOTE(review): assumes `low <= high` — callers must uphold this.
fn clamp(value: i32, low: i32, high: i32) -> i32 {
    value.max(low).min(high)
}

/// Clamp `value` to the inclusive range [low, high] (f32 variant of `clamp`).
fn clampf(value: f32, low: f32, high: f32) -> f32 {
    value.max(low).min(high)
}

/// An index into the prims array in a TileDescriptor.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct PrimitiveDependencyIndex(u32);

/// Information about the state of an opacity binding.
#[derive(Debug)]
pub struct OpacityBindingInfo {
    /// The current value retrieved from dynamic scene properties.
    value: f32,
    /// True if it was changed (or is new) since the last frame build.
    changed: bool,
}

/// Information stored in a tile descriptor for an opacity binding.
#[derive(Debug, PartialEq, Clone)] pub enum OpacityBinding { Value(f32), Binding(PropertyBindingId), } impl From<PropertyBinding<f32>> for OpacityBinding { fn from(binding: PropertyBinding<f32>) -> OpacityBinding { match binding { PropertyBinding::Binding(key, _) => OpacityBinding::Binding(key.id), PropertyBinding::Value(value) => OpacityBinding::Value(value), } } } /// Information about the state of a spatial node value #[derive(Debug)] pub struct SpatialNodeDependency { /// The current value retrieved from the clip-scroll tree. value: TransformKey, /// True if it was changed (or is new) since the last frame build. changed: bool, } // Immutable context passed to picture cache tiles during pre_update struct TilePreUpdateContext { /// The local rect of the overall picture cache local_rect: PictureRect, /// The local clip rect (in picture space) of the entire picture cache local_clip_rect: PictureRect, /// Maps from picture cache coords -> world space coords. pic_to_world_mapper: SpaceMapper<PicturePixel, WorldPixel>, /// The fractional position of the picture cache, which may /// require invalidation of all tiles. fract_offset: PictureVector2D, /// The optional background color of the picture cache instance background_color: Option<ColorF>, /// The visible part of the screen in world coords. global_screen_world_rect: WorldRect, } // Immutable context passed to picture cache tiles during post_update struct TilePostUpdateContext<'a> { /// The calculated backdrop information for this cache instance. backdrop: BackdropInfo, /// Information about transform node differences from last frame. spatial_nodes: &'a FastHashMap<SpatialNodeIndex, SpatialNodeDependency>, /// Information about opacity bindings from the picture cache. 
opacity_bindings: &'a FastHashMap<PropertyBindingId, OpacityBindingInfo>, /// Current size in device pixels of tiles for this cache current_tile_size: DeviceIntSize, } // Mutable state passed to picture cache tiles during post_update struct TilePostUpdateState<'a> { /// Allow access to the texture cache for requesting tiles resource_cache: &'a mut ResourceCache, /// Current configuration and setup for compositing all the picture cache tiles in renderer. composite_state: &'a mut CompositeState, /// A cache of comparison results to avoid re-computation during invalidation. compare_cache: &'a mut FastHashMap<PrimitiveComparisonKey, PrimitiveCompareResult>, } /// Information about the dependencies of a single primitive instance. struct PrimitiveDependencyInfo { /// If true, we should clip the prim rect to the tile boundaries. clip_by_tile: bool, /// Unique content identifier of the primitive. prim_uid: ItemUid, /// The picture space origin of this primitive. prim_origin: PicturePoint, /// The (conservative) clipped area in picture space this primitive occupies. prim_clip_rect: PictureRect, /// Image keys this primitive depends on. images: SmallVec<[ImageDependency; 8]>, /// Opacity bindings this primitive depends on. opacity_bindings: SmallVec<[OpacityBinding; 4]>, /// Clips that this primitive depends on. clips: SmallVec<[ItemUid; 8]>, /// Spatial nodes references by the clip dependencies of this primitive. spatial_nodes: SmallVec<[SpatialNodeIndex; 4]>, } impl PrimitiveDependencyInfo { /// Construct dependency info for a new primitive. fn new( prim_uid: ItemUid, prim_origin: PicturePoint, prim_clip_rect: PictureRect, ) -> Self { PrimitiveDependencyInfo { prim_uid, prim_origin, images: SmallVec::new(), opacity_bindings: SmallVec::new(), clip_by_tile: false, prim_clip_rect, clips: SmallVec::new(), spatial_nodes: SmallVec::new(), } } } /// A stable ID for a given tile, to help debugging. 
/// These are also used
/// as unique identifiers for tile surfaces when using a native compositor.
#[derive(Debug, Copy, Clone, PartialEq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct TileId(pub usize);

/// A descriptor for the kind of texture that a picture cache tile will
/// be drawn into.
#[derive(Debug)]
pub enum SurfaceTextureDescriptor {
    /// When using the WR compositor, the tile is drawn into an entry
    /// in the WR texture cache.
    TextureCache {
        handle: TextureCacheHandle,
    },
    /// When using an OS compositor, the tile is drawn into a native
    /// surface identified by arbitrary id.
    NativeSurface {
        /// The arbitrary id of this surface.
        /// None until the surface is actually allocated.
        id: Option<NativeSurfaceId>,
        /// Size in device pixels of the native surface.
        size: DeviceIntSize,
    },
}

/// This is the same as a `SurfaceTextureDescriptor` but has been resolved
/// into a texture cache handle (if appropriate) that can be used by the
/// batching and compositing code in the renderer.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum ResolvedSurfaceTexture {
    TextureCache {
        /// The texture ID to draw to.
        texture: TextureSource,
        /// Slice index in the texture array to draw to.
        layer: i32,
    },
    NativeSurface {
        /// The arbitrary id of this surface.
        id: NativeSurfaceId,
        /// Size in device pixels of the native surface.
        size: DeviceIntSize,
    }
}

impl SurfaceTextureDescriptor {
    /// Create a resolved surface texture for this descriptor
    pub fn resolve(
        &self,
        resource_cache: &ResourceCache,
    ) -> ResolvedSurfaceTexture {
        match self {
            SurfaceTextureDescriptor::TextureCache { handle } => {
                // Look up the current texture cache entry for this handle.
                let cache_item = resource_cache.texture_cache.get(handle);

                ResolvedSurfaceTexture::TextureCache {
                    texture: cache_item.texture_id,
                    layer: cache_item.texture_layer,
                }
            }
            SurfaceTextureDescriptor::NativeSurface { id, size } => {
                ResolvedSurfaceTexture::NativeSurface {
                    // Callers must only resolve after the surface is allocated.
                    id: id.expect("bug: native surface not allocated"),
                    size: *size,
                }
            }
        }
    }
}

/// The backing surface for this tile.
#[derive(Debug)]
pub enum TileSurface {
    Texture {
        /// Descriptor for the surface that this tile draws into.
        descriptor: SurfaceTextureDescriptor,
        /// Bitfield specifying the dirty region(s) that are relevant to this tile.
        visibility_mask: PrimitiveVisibilityMask,
    },
    Color {
        color: ColorF,
    },
    Clear,
}

impl TileSurface {
    /// Short human-readable name for the surface kind, for debug output.
    fn kind(&self) -> &'static str {
        match *self {
            TileSurface::Color { .. } => "Color",
            TileSurface::Texture { .. } => "Texture",
            TileSurface::Clear => "Clear",
        }
    }
}

/// The result of a primitive dependency comparison. Size is a u8
/// since this is a hot path in the code, and keeping the data small
/// is a performance win.
#[derive(Debug, Copy, Clone, PartialEq)]
#[repr(u8)]
enum PrimitiveCompareResult {
    /// Primitives match
    Equal,
    /// Something in the PrimitiveDescriptor was different
    Descriptor,
    /// The clip node content or spatial node changed
    Clip,
    /// The value of the transform changed
    Transform,
    /// An image dependency was dirty
    Image,
    /// The value of an opacity binding changed
    OpacityBinding,
}

/// Debugging information about why a tile was invalidated
#[derive(Debug)]
enum InvalidationReason {
    /// The fractional offset changed
    FractionalOffset,
    /// The background color changed
    BackgroundColor,
    /// The opaque state of the backing native surface changed
    SurfaceOpacityChanged,
    /// There was no backing texture (evicted or never rendered)
    NoTexture,
    /// There was no backing native surface (never rendered, or recreated)
    NoSurface,
    /// The primitive count in the dependency list was different
    PrimCount,
    /// The content of one of the primitives was different
    Content {
        /// What changed in the primitive that was different
        prim_compare_result: PrimitiveCompareResult,
    },
}

/// Information about a cached tile.
pub struct Tile {
    /// The current world rect of this tile.
    pub world_rect: WorldRect,
    /// The current local rect of this tile.
    pub rect: PictureRect,
    /// The local rect of the tile clipped to the overall picture local rect.
    clipped_rect: PictureRect,
    /// Uniquely describes the content of this tile, in a way that can be
    /// (reasonably) efficiently hashed and compared.
    pub current_descriptor: TileDescriptor,
    /// The content descriptor for this tile from the previous frame.
    pub prev_descriptor: TileDescriptor,
    /// Handle to the backing surface for this tile.
    pub surface: Option<TileSurface>,
    /// If true, this tile is marked valid, and the existing texture
    /// cache handle can be used. Tiles are invalidated during the
    /// build_dirty_regions method.
    pub is_valid: bool,
    /// If true, this tile intersects with the currently visible screen
    /// rect, and will be drawn.
    pub is_visible: bool,
    /// The current fractional offset of the cache transform root. If this changes,
    /// all tiles need to be invalidated and redrawn, since snapping differences are
    /// likely to occur.
    fract_offset: PictureVector2D,
    /// The tile id is stable between display lists and / or frames,
    /// if the tile is retained. Useful for debugging tile evictions.
    pub id: TileId,
    /// If true, the tile was determined to be opaque, which means blending
    /// can be disabled when drawing it.
    pub is_opaque: bool,
    /// Root node of the quadtree dirty rect tracker.
    root: TileNode,
    /// The picture space dirty rect for this tile.
    dirty_rect: PictureRect,
    /// The world space dirty rect for this tile.
    /// TODO(gw): We have multiple dirty rects available due to the quadtree above. In future,
    ///           expose these as multiple dirty rects, which will help in some cases.
    pub world_dirty_rect: WorldRect,
    /// The last rendered background color on this tile.
    background_color: Option<ColorF>,
    /// The first reason the tile was invalidated this frame.
    invalidation_reason: Option<InvalidationReason>,
}

impl Tile {
    /// Construct a new, invalid tile.
    /// All rects start zeroed; they are filled in during `pre_update`.
    fn new(
        id: TileId,
    ) -> Self {
        Tile {
            rect: PictureRect::zero(),
            clipped_rect: PictureRect::zero(),
            world_rect: WorldRect::zero(),
            surface: None,
            current_descriptor: TileDescriptor::new(),
            prev_descriptor: TileDescriptor::new(),
            is_valid: false,
            is_visible: false,
            fract_offset: PictureVector2D::zero(),
            id,
            is_opaque: false,
            root: TileNode::new_leaf(Vec::new()),
            dirty_rect: PictureRect::zero(),
            world_dirty_rect: WorldRect::zero(),
            background_color: None,
            invalidation_reason: None,
        }
    }

    /// Print debug information about this tile to a tree printer.
    fn print(&self, pt: &mut dyn PrintTreePrinter) {
        pt.new_level(format!("Tile {:?}", self.id));
        pt.add_item(format!("rect: {}", self.rect));
        pt.add_item(format!("fract_offset: {:?}", self.fract_offset));
        pt.add_item(format!("background_color: {:?}", self.background_color));
        pt.add_item(format!("invalidation_reason: {:?}", self.invalidation_reason));
        self.current_descriptor.print(pt);
        pt.end_level();
    }

    /// Check if the content of the previous and current tile descriptors match
    /// Returns the picture-space rect of content that changed (zero if nothing
    /// changed); `invalidation_reason` receives the first detected reason.
    fn update_dirty_rects(
        &mut self,
        ctx: &TilePostUpdateContext,
        state: &mut TilePostUpdateState,
        invalidation_reason: &mut Option<InvalidationReason>,
    ) -> PictureRect {
        // The comparer lazily evaluates per-primitive dependency changes
        // (transforms, images, opacity bindings) as the quadtree walks prims.
        let mut prim_comparer = PrimitiveComparer::new(
            &self.prev_descriptor,
            &self.current_descriptor,
            state.resource_cache,
            ctx.spatial_nodes,
            ctx.opacity_bindings,
        );

        let mut dirty_rect = PictureRect::zero();
        self.root.update_dirty_rects(
            &self.prev_descriptor.prims,
            &self.current_descriptor.prims,
            &mut prim_comparer,
            &mut dirty_rect,
            state.compare_cache,
            invalidation_reason,
        );

        dirty_rect
    }

    /// Invalidate a tile based on change in content. This
    /// must be called even if the tile is not currently
    /// visible on screen. We might be able to improve this
    /// later by changing how ComparableVec is used.
    fn update_content_validity(
        &mut self,
        ctx: &TilePostUpdateContext,
        state: &mut TilePostUpdateState,
    ) {
        // Check if the contents of the primitives, clips, and
        // other dependencies are the same.
        // The compare cache is shared across tiles, but results are only
        // valid within a single frame build, so clear it per tile update.
        state.compare_cache.clear();
        let mut invalidation_reason = None;
        let dirty_rect = self.update_dirty_rects(
            ctx,
            state,
            &mut invalidation_reason,
        );
        if !dirty_rect.is_empty() {
            // A non-empty dirty rect implies a reason was recorded.
            self.invalidate(
                Some(dirty_rect),
                invalidation_reason.expect("bug: no invalidation_reason"),
            );
        }
    }

    /// Invalidate this tile. If `invalidation_rect` is None, the entire
    /// tile is invalidated.
    fn invalidate(
        &mut self,
        invalidation_rect: Option<PictureRect>,
        reason: InvalidationReason,
    ) {
        self.is_valid = false;

        match invalidation_rect {
            Some(rect) => {
                // Accumulate partial invalidations into the dirty rect.
                self.dirty_rect = self.dirty_rect.union(&rect);
            }
            None => {
                // Full invalidation: dirty the whole tile.
                self.dirty_rect = self.rect;
            }
        }

        // Only the *first* invalidation reason per frame is recorded.
        if self.invalidation_reason.is_none() {
            self.invalidation_reason = Some(reason);
        }
    }

    /// Called during pre_update of a tile cache instance. Allows the
    /// tile to setup state before primitive dependency calculations.
    fn pre_update(
        &mut self,
        rect: PictureRect,
        ctx: &TilePreUpdateContext,
    ) {
        self.rect = rect;
        self.invalidation_reason  = None;

        self.clipped_rect = self.rect
            .intersection(&ctx.local_rect)
            .and_then(|r| r.intersection(&ctx.local_clip_rect))
            .unwrap_or(PictureRect::zero());

        self.world_rect = ctx.pic_to_world_mapper
            .map(&self.rect)
            .expect("bug: map local tile rect");

        // Check if this tile is currently on screen.
        self.is_visible = self.world_rect.intersects(&ctx.global_screen_world_rect);

        // If the tile isn't visible, early exit, skipping the normal set up to
        // validate dependencies. Instead, we will only compare the current tile
        // dependencies the next time it comes into view.
        if !self.is_visible {
            return;
        }

        // Determine if the fractional offset of the transform is different this frame
        // from the currently cached tile set.
        // 0.001 device-pixel tolerance avoids invalidating on float noise.
        let fract_changed = (self.fract_offset.x - ctx.fract_offset.x).abs() > 0.001 ||
                            (self.fract_offset.y - ctx.fract_offset.y).abs() > 0.001;
        if fract_changed {
            self.invalidate(None, InvalidationReason::FractionalOffset);
            self.fract_offset = ctx.fract_offset;
        }

        if ctx.background_color != self.background_color {
            self.invalidate(None, InvalidationReason::BackgroundColor);
            self.background_color = ctx.background_color;
        }

        // Clear any dependencies so that when we rebuild them we
        // can compare if the tile has the same content.
        mem::swap(
            &mut self.current_descriptor,
            &mut self.prev_descriptor,
        );
        self.current_descriptor.clear();
        self.root.clear(rect);
    }

    /// Add dependencies for a given primitive to this tile.
    fn add_prim_dependency(
        &mut self,
        info: &PrimitiveDependencyInfo,
    ) {
        // If this tile isn't currently visible, we don't want to update the dependencies
        // for this tile, as an optimization, since it won't be drawn anyway.
        if !self.is_visible {
            return;
        }

        // Include any image keys this tile depends on.
        self.current_descriptor.images.extend_from_slice(&info.images);

        // Include any opacity bindings this primitive depends on.
        self.current_descriptor.opacity_bindings.extend_from_slice(&info.opacity_bindings);

        // Include any clip nodes that this primitive depends on.
        self.current_descriptor.clips.extend_from_slice(&info.clips);

        // Include any transforms that this primitive depends on.
        self.current_descriptor.transforms.extend_from_slice(&info.spatial_nodes);

        // TODO(gw): The origin of background rects produced by APZ changes
        //           in Gecko during scrolling. Consider investigating this so the
        //           hack / workaround below is not required.
        let (prim_origin, prim_clip_rect) = if info.clip_by_tile {
            // Clamp the prim origin and clip rect to this tile's bounds so a
            // moving background rect doesn't look like changed content.
            let tile_p0 = self.rect.origin;
            let tile_p1 = self.rect.bottom_right();

            let clip_p0 = PicturePoint::new(
                clampf(info.prim_clip_rect.origin.x, tile_p0.x, tile_p1.x),
                clampf(info.prim_clip_rect.origin.y, tile_p0.y, tile_p1.y),
            );

            let clip_p1 = PicturePoint::new(
                clampf(info.prim_clip_rect.origin.x + info.prim_clip_rect.size.width, tile_p0.x, tile_p1.x),
                clampf(info.prim_clip_rect.origin.y + info.prim_clip_rect.size.height, tile_p0.y, tile_p1.y),
            );

            (
                PicturePoint::new(
                    clampf(info.prim_origin.x, tile_p0.x, tile_p1.x),
                    clampf(info.prim_origin.y, tile_p0.y, tile_p1.y),
                ),
                PictureRect::new(
                    clip_p0,
                    PictureSize::new(
                        clip_p1.x - clip_p0.x,
                        clip_p1.y - clip_p0.y,
                    ),
                ),
            )
        } else {
            (info.prim_origin, info.prim_clip_rect)
        };

        // Update the tile descriptor, used for tile comparison during scene swaps.
        let prim_index = PrimitiveDependencyIndex(self.current_descriptor.prims.len() as u32);

        // We know that the casts below will never overflow because the array lengths are
        // truncated to MAX_PRIM_SUB_DEPS during update_prim_dependencies.
        debug_assert!(info.spatial_nodes.len() <= MAX_PRIM_SUB_DEPS);
        debug_assert!(info.clips.len() <= MAX_PRIM_SUB_DEPS);
        debug_assert!(info.images.len() <= MAX_PRIM_SUB_DEPS);
        debug_assert!(info.opacity_bindings.len() <= MAX_PRIM_SUB_DEPS);

        self.current_descriptor.prims.push(PrimitiveDescriptor {
            prim_uid: info.prim_uid,
            origin: prim_origin.into(),
            prim_clip_rect: prim_clip_rect.into(),
            transform_dep_count: info.spatial_nodes.len() as u8,
            clip_dep_count: info.clips.len() as u8,
            image_dep_count: info.images.len() as u8,
            opacity_binding_dep_count: info.opacity_bindings.len() as u8,
        });

        // Add this primitive to the dirty rect quadtree.
        self.root.add_prim(prim_index, &info.prim_clip_rect);
    }

    /// Called during tile cache instance post_update. Allows invalidation and dirty
    /// rect calculation after primitive dependencies have been updated.
    /// Returns true if the tile should be drawn / retained.
    fn post_update(
        &mut self,
        ctx: &TilePostUpdateContext,
        state: &mut TilePostUpdateState,
    ) -> bool {
        // If tile is not visible, just early out from here - we don't update dependencies
        // so don't want to invalidate, merge, split etc. The tile won't need to be drawn
        // (and thus updated / invalidated) until it is on screen again.
        if !self.is_visible {
            return false;
        }

        // Invalidate the tile based on the content changing.
        self.update_content_validity(ctx, state);

        // If there are no primitives there is no need to draw or cache it.
        if self.current_descriptor.prims.is_empty() {
            return false;
        }

        // Check if this tile can be considered opaque. Opacity state must be updated only
        // after all early out checks have been performed. Otherwise, we might miss updating
        // the native surface next time this tile becomes visible.
        let tile_is_opaque = ctx.backdrop.rect.contains_rect(&self.clipped_rect);
        let opacity_changed = tile_is_opaque != self.is_opaque;
        self.is_opaque = tile_is_opaque;

        // Check if the selected composite mode supports dirty rect updates. For Draw composite
        // mode, we can always update the content with smaller dirty rects. For native composite
        // mode, we can only use dirty rects if the compositor supports partial surface updates.
        let (supports_dirty_rects, supports_simple_prims) = match state.composite_state.compositor_kind {
            CompositorKind::Draw { .. } => {
                (true, true)
            }
            CompositorKind::Native { max_update_rects, .. } => {
                (max_update_rects > 0, false)
            }
        };

        // TODO(gw): Consider using smaller tiles and/or tile splits for
        //           native compositors that don't support dirty rects.
        if supports_dirty_rects {
            // Only allow splitting for normal content sized tiles
            if ctx.current_tile_size == TILE_SIZE_DEFAULT {
                let max_split_level = 3;

                // Consider splitting / merging dirty regions
                self.root.maybe_merge_or_split(
                    0,
                    &self.current_descriptor.prims,
                    max_split_level,
                );
            }
        }

        // The dirty rect will be set correctly by now. If the underlying platform
        // doesn't support partial updates, and this tile isn't valid, force the dirty
        // rect to be the size of the entire tile.
        if !self.is_valid && !supports_dirty_rects {
            self.dirty_rect = self.rect;
        }

        // Ensure that the dirty rect doesn't extend outside the local tile rect.
        self.dirty_rect = self.dirty_rect
            .intersection(&self.rect)
            .unwrap_or(PictureRect::zero());

        // See if this tile is a simple color, in which case we can just draw
        // it as a rect, and avoid allocating a texture surface and drawing it.
        // TODO(gw): Initial native compositor interface doesn't support simple
        //           color tiles. We can definitely support this in DC, so this
        //           should be added as a follow up.
        let is_simple_prim =
            ctx.backdrop.kind.can_be_promoted_to_compositor_surface() &&
            self.current_descriptor.prims.len() == 1 &&
            self.is_opaque &&
            supports_simple_prims;

        // Set up the backing surface for this tile.
        let surface = if is_simple_prim {
            // If we determine the tile can be represented by a color, set the
            // surface unconditionally (this will drop any previously used
            // texture cache backing surface).
            match ctx.backdrop.kind {
                BackdropKind::Color { color } => {
                    TileSurface::Color {
                        color,
                    }
                }
                BackdropKind::Clear => {
                    TileSurface::Clear
                }
                BackdropKind::Image => {
                    // This should be prevented by the is_simple_prim check above.
                    unreachable!();
                }
            }
        } else {
            // If this tile will be backed by a surface, we want to retain
            // the texture handle from the previous frame, if possible. If
            // the tile was previously a color, or not set, then just set
            // up a new texture cache handle.
            match self.surface.take() {
                Some(TileSurface::Texture { mut descriptor, visibility_mask }) => {
                    // If opacity changed, and this is a native OS compositor surface,
                    // it needs to be recreated.
                    // TODO(gw): This is a limitation of the DirectComposite APIs. It might
                    //           make sense on other platforms to be able to change this as
                    //           a property on a surface, if we ever see pages where this
                    //           is changing frequently.
                    if opacity_changed {
                        if let SurfaceTextureDescriptor::NativeSurface { ref mut id, .. } = descriptor {
                            // Reset the dirty rect and tile validity in this case, to
                            // force the new tile to be completely redrawn.
                            self.invalidate(None, InvalidationReason::SurfaceOpacityChanged);

                            // If this tile has a currently allocated native surface, destroy it. It
                            // will be re-allocated next time it's determined to be visible.
                            if let Some(id) = id.take() {
                                state.resource_cache.destroy_compositor_surface(id);
                            }
                        }
                    }

                    // Reuse the existing descriptor and vis mask
                    TileSurface::Texture {
                        descriptor,
                        visibility_mask,
                    }
                }
                Some(TileSurface::Color { .. }) | Some(TileSurface::Clear) | None => {
                    // This is the case where we are constructing a tile surface that
                    // involves drawing to a texture. Create the correct surface
                    // descriptor depending on the compositing mode that will read
                    // the output.
                    let descriptor = match state.composite_state.compositor_kind {
                        CompositorKind::Draw { .. } => {
                            // For a texture cache entry, create an invalid handle that
                            // will be allocated when update_picture_cache is called.
                            SurfaceTextureDescriptor::TextureCache {
                                handle: TextureCacheHandle::invalid(),
                            }
                        }
                        CompositorKind::Native { .. } => {
                            // Create a native surface surface descriptor, but don't allocate
                            // a surface yet. The surface is allocated *after* occlusion
                            // culling occurs, so that only visible tiles allocate GPU memory.
                            SurfaceTextureDescriptor::NativeSurface {
                                id: None,
                                size: ctx.current_tile_size,
                            }
                        }
                    };

                    TileSurface::Texture {
                        descriptor,
                        visibility_mask: PrimitiveVisibilityMask::empty(),
                    }
                }
            }
        };

        // Store the current surface backing info for use during batching.
        self.surface = Some(surface);

        true
    }
}

/// Defines a key that uniquely identifies a primitive instance.
#[derive(Debug, Clone)]
pub struct PrimitiveDescriptor {
    /// Uniquely identifies the content of the primitive template.
    prim_uid: ItemUid,
    /// The origin in world space of this primitive.
    /// NOTE(review): the values stored here come from picture-space prim
    /// origins in add_prim_dependency — confirm the "world space" wording.
    origin: PointKey,
    /// The clip rect for this primitive. Included here in
    /// dependencies since there is no entry in the clip chain
    /// dependencies for the local clip rect.
    prim_clip_rect: RectangleKey,
    /// The number of extra dependencies that this primitive has.
    transform_dep_count: u8,
    image_dep_count: u8,
    opacity_binding_dep_count: u8,
    clip_dep_count: u8,
}

impl PartialEq for PrimitiveDescriptor {
    /// Approximate equality: the uid and dep counts must match exactly,
    /// while origin / clip rect coordinates are compared with an epsilon
    /// to avoid invalidating tiles on tiny float differences.
    fn eq(&self, other: &Self) -> bool {
        const EPSILON: f32 = 0.001;

        if self.prim_uid != other.prim_uid {
            return false;
        }

        if !self.origin.x.approx_eq_eps(&other.origin.x, &EPSILON) {
            return false;
        }
        if !self.origin.y.approx_eq_eps(&other.origin.y, &EPSILON) {
            return false;
        }

        if !self.prim_clip_rect.x.approx_eq_eps(&other.prim_clip_rect.x, &EPSILON) {
            return false;
        }
        if !self.prim_clip_rect.y.approx_eq_eps(&other.prim_clip_rect.y, &EPSILON) {
            return false;
        }
        if !self.prim_clip_rect.w.approx_eq_eps(&other.prim_clip_rect.w, &EPSILON) {
            return false;
        }
        if !self.prim_clip_rect.h.approx_eq_eps(&other.prim_clip_rect.h, &EPSILON) {
            return false;
        }

        true
    }
}

/// A small helper to compare two arrays of primitive dependencies.
struct CompareHelper<'a, T> {
    // Cursor into curr_items for the primitive currently being compared.
    offset_curr: usize,
    // Cursor into prev_items for the primitive currently being compared.
    offset_prev: usize,
    curr_items: &'a [T],
    prev_items: &'a [T],
}

impl<'a, T> CompareHelper<'a, T> where T: PartialEq {
    /// Construct a new compare helper for a current / previous set of dependency information.
    fn new(
        prev_items: &'a [T],
        curr_items: &'a [T],
    ) -> Self {
        CompareHelper {
            offset_curr: 0,
            offset_prev: 0,
            curr_items,
            prev_items,
        }
    }

    /// Reset the current position in the dependency array to the start
    fn reset(&mut self) {
        self.offset_prev = 0;
        self.offset_curr = 0;
    }

    /// Test if two sections of the dependency arrays are the same, by checking both
    /// item equality, and a user closure to see if the content of the item changed.
    /// `f` returns true if an item's backing content is dirty.
    fn is_same<F>(
        &self,
        prev_count: u8,
        curr_count: u8,
        f: F,
    ) -> bool where F: Fn(&T) -> bool {
        // If the number of items is different, trivial reject.
        if prev_count != curr_count {
            return false;
        }
        // If both counts are 0, then no need to check these dependencies.
        if curr_count == 0 {
            return true;
        }
        // If both counts are u8::MAX, this is a sentinel that we can't compare these
        // deps, so just trivial reject.
        if curr_count as usize == MAX_PRIM_SUB_DEPS {
            return false;
        }

        let end_prev = self.offset_prev + prev_count as usize;
        let end_curr = self.offset_curr + curr_count as usize;

        let curr_items = &self.curr_items[self.offset_curr .. end_curr];
        let prev_items = &self.prev_items[self.offset_prev .. end_prev];

        for (curr, prev) in curr_items.iter().zip(prev_items.iter()) {
            if prev != curr {
                return false;
            }
            if f(curr) {
                return false;
            }
        }

        true
    }

    // Advance the prev dependency array by a given amount
    fn advance_prev(&mut self, count: u8) {
        self.offset_prev += count as usize;
    }

    // Advance the current dependency array by a given amount
    fn advance_curr(&mut self, count: u8) {
        self.offset_curr += count as usize;
    }
}

/// Uniquely describes the content of this tile, in a way that can be
/// (reasonably) efficiently hashed and compared.
pub struct TileDescriptor {
    /// List of primitive instance unique identifiers. The uid is guaranteed
    /// to uniquely describe the content of the primitive template, while
    /// the other parameters describe the clip chain and instance params.
    pub prims: Vec<PrimitiveDescriptor>,

    /// List of clip node descriptors.
    clips: Vec<ItemUid>,

    /// List of image keys that this tile depends on.
    images: Vec<ImageDependency>,

    /// The set of opacity bindings that this tile depends on.
    // TODO(gw): Ugh, get rid of all opacity binding support!
    opacity_bindings: Vec<OpacityBinding>,

    /// List of the effects of transforms that we care about
    /// tracking for this tile.
    transforms: Vec<SpatialNodeIndex>,
}

impl TileDescriptor {
    /// Construct an empty descriptor; dependency lists are filled in
    /// during add_prim_dependency.
    fn new() -> Self {
        TileDescriptor {
            prims: Vec::new(),
            clips: Vec::new(),
            opacity_bindings: Vec::new(),
            images: Vec::new(),
            transforms: Vec::new(),
        }
    }

    /// Print debug information about this tile descriptor to a tree printer.
fn print(&self, pt: &mut dyn PrintTreePrinter) { pt.new_level("current_descriptor".to_string()); pt.new_level("prims".to_string()); for prim in &self.prims { pt.new_level(format!("prim uid={}", prim.prim_uid.get_uid())); pt.add_item(format!("origin: {},{}", prim.origin.x, prim.origin.y)); pt.add_item(format!("clip: origin={},{} size={}x{}", prim.prim_clip_rect.x, prim.prim_clip_rect.y, prim.prim_clip_rect.w, prim.prim_clip_rect.h, )); pt.add_item(format!("deps: t={} i={} o={} c={}", prim.transform_dep_count, prim.image_dep_count, prim.opacity_binding_dep_count, prim.clip_dep_count, )); pt.end_level(); } pt.end_level(); if !self.clips.is_empty() { pt.new_level("clips".to_string()); for clip in &self.clips { pt.new_level(format!("clip uid={}", clip.get_uid())); pt.end_level(); } pt.end_level(); } if !self.images.is_empty() { pt.new_level("images".to_string()); for info in &self.images { pt.new_level(format!("key={:?}", info.key)); pt.new_level(format!("generation={:?}", info.generation)); pt.end_level(); } pt.end_level(); } if !self.opacity_bindings.is_empty() { pt.new_level("opacity_bindings".to_string()); for opacity_binding in &self.opacity_bindings { pt.new_level(format!("binding={:?}", opacity_binding)); pt.end_level(); } pt.end_level(); } if !self.transforms.is_empty() { pt.new_level("transforms".to_string()); for transform in &self.transforms { pt.new_level(format!("spatial_node={:?}", transform)); pt.end_level(); } pt.end_level(); } pt.end_level(); } /// Clear the dependency information for a tile, when the dependencies /// are being rebuilt. fn clear(&mut self) { self.prims.clear(); self.clips.clear(); self.opacity_bindings.clear(); self.images.clear(); self.transforms.clear(); } } /// Stores both the world and devices rects for a single dirty rect. #[derive(Debug, Clone)] pub struct DirtyRegionRect { /// World rect of this dirty region pub world_rect: WorldRect, /// Bitfield for picture render tasks that draw this dirty region. 
    pub visibility_mask: PrimitiveVisibilityMask,
}

/// Represents the dirty region of a tile cache picture.
#[derive(Debug, Clone)]
pub struct DirtyRegion {
    /// The individual dirty rects of this region.
    pub dirty_rects: Vec<DirtyRegionRect>,

    /// The overall dirty rect, a combination of dirty_rects
    pub combined: WorldRect,
}

impl DirtyRegion {
    /// Construct a new dirty region tracker.
    pub fn new(
    ) -> Self {
        DirtyRegion {
            // Pre-size for the maximum number of dirty regions tracked.
            dirty_rects: Vec::with_capacity(PrimitiveVisibilityMask::MAX_DIRTY_REGIONS),
            combined: WorldRect::zero(),
        }
    }

    /// Reset the dirty regions back to empty
    pub fn clear(&mut self) {
        self.dirty_rects.clear();
        self.combined = WorldRect::zero();
    }

    /// Push a dirty rect into this region
    pub fn push(
        &mut self,
        rect: WorldRect,
        visibility_mask: PrimitiveVisibilityMask,
    ) {
        // Include this in the overall dirty rect
        self.combined = self.combined.union(&rect);

        // Store the individual dirty rect.
        self.dirty_rects.push(DirtyRegionRect {
            world_rect: rect,
            visibility_mask,
        });
    }

    /// Include another rect into an existing dirty region.
    /// Panics if `region_index` is out of bounds.
    pub fn include_rect(
        &mut self,
        region_index: usize,
        rect: WorldRect,
    ) {
        self.combined = self.combined.union(&rect);

        let region = &mut self.dirty_rects[region_index];
        region.world_rect = region.world_rect.union(&rect);
    }

    // TODO(gw): This returns a heap allocated object. Perhaps we can simplify this
    //           logic? Although - it's only used very rarely so it may not be an issue.
    /// Return a copy of this region with every rect grown by `inflate_amount`
    /// on all sides; visibility masks are preserved.
    pub fn inflate(
        &self,
        inflate_amount: f32,
    ) -> DirtyRegion {
        let mut dirty_rects = Vec::with_capacity(self.dirty_rects.len());
        let mut combined = WorldRect::zero();

        for rect in &self.dirty_rects {
            let world_rect = rect.world_rect.inflate(inflate_amount, inflate_amount);
            combined = combined.union(&world_rect);

            dirty_rects.push(DirtyRegionRect {
                world_rect,
                visibility_mask: rect.visibility_mask,
            });
        }

        DirtyRegion {
            dirty_rects,
            combined,
        }
    }

    /// Creates a record of this dirty region for exporting to test infrastructure.
pub fn record(&self) -> RecordedDirtyRegion { let mut rects: Vec<WorldRect> = self.dirty_rects.iter().map(|r| r.world_rect.clone()).collect(); rects.sort_unstable_by_key(|r| (r.origin.y as usize, r.origin.x as usize)); RecordedDirtyRegion { rects } } } /// A recorded copy of the dirty region for exporting to test infrastructure. #[cfg_attr(feature = "capture", derive(Serialize))] pub struct RecordedDirtyRegion { pub rects: Vec<WorldRect>, } impl ::std::fmt::Display for RecordedDirtyRegion { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { for r in self.rects.iter() { let (x, y, w, h) = (r.origin.x, r.origin.y, r.size.width, r.size.height); write!(f, "[({},{}):{}x{}]", x, y, w, h)?; } Ok(()) } } impl ::std::fmt::Debug for RecordedDirtyRegion { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { ::std::fmt::Display::fmt(self, f) } } #[derive(Debug, Copy, Clone)] enum BackdropKind { Color { color: ColorF, }, Clear, Image, } impl BackdropKind { /// Returns true if the compositor can directly draw this backdrop. fn can_be_promoted_to_compositor_surface(&self) -> bool { match self { BackdropKind::Color { .. } | BackdropKind::Clear => true, BackdropKind::Image => false, } } } /// Stores information about the calculated opaque backdrop of this slice. #[derive(Debug, Copy, Clone)] struct BackdropInfo { /// The picture space rectangle that is known to be opaque. This is used /// to determine where subpixel AA can be used, and where alpha blending /// can be disabled. rect: PictureRect, /// Kind of the backdrop kind: BackdropKind, } impl BackdropInfo { fn empty() -> Self { BackdropInfo { rect: PictureRect::zero(), kind: BackdropKind::Color { color: ColorF::BLACK, }, } } } /// Represents a cache of tiles that make up a picture primitives. pub struct TileCacheInstance { /// Index of the tile cache / slice for this frame builder. 
It's determined /// by the setup_picture_caching method during flattening, which splits the /// picture tree into multiple slices. It's used as a simple input to the tile /// keys. It does mean we invalidate tiles if a new layer gets inserted / removed /// between display lists - this seems very unlikely to occur on most pages, but /// can be revisited if we ever notice that. pub slice: usize, /// The currently selected tile size to use for this cache pub current_tile_size: DeviceIntSize, /// The positioning node for this tile cache. pub spatial_node_index: SpatialNodeIndex, /// Hash of tiles present in this picture. pub tiles: FastHashMap<TileOffset, Tile>, /// Switch back and forth between old and new tiles hashmaps to avoid re-allocating. old_tiles: FastHashMap<TileOffset, Tile>, /// A helper struct to map local rects into surface coords. map_local_to_surface: SpaceMapper<LayoutPixel, PicturePixel>, /// A helper struct to map child picture rects into picture cache surface coords. map_child_pic_to_surface: SpaceMapper<PicturePixel, PicturePixel>, /// List of opacity bindings, with some extra information /// about whether they changed since last frame. opacity_bindings: FastHashMap<PropertyBindingId, OpacityBindingInfo>, /// Switch back and forth between old and new bindings hashmaps to avoid re-allocating. old_opacity_bindings: FastHashMap<PropertyBindingId, OpacityBindingInfo>, /// List of spatial nodes, with some extra information /// about whether they changed since last frame. spatial_nodes: FastHashMap<SpatialNodeIndex, SpatialNodeDependency>, /// Switch back and forth between old and new spatial nodes hashmaps to avoid re-allocating. old_spatial_nodes: FastHashMap<SpatialNodeIndex, SpatialNodeDependency>, /// A set of spatial nodes that primitives / clips depend on found /// during dependency creation. This is used to avoid trying to /// calculate invalid relative transforms when building the spatial /// nodes hash above. 
used_spatial_nodes: FastHashSet<SpatialNodeIndex>, /// The current dirty region tracker for this picture. pub dirty_region: DirtyRegion, /// Current size of tiles in picture units. tile_size: PictureSize, /// Tile coords of the currently allocated grid. tile_rect: TileRect, /// Pre-calculated versions of the tile_rect above, used to speed up the /// calculations in get_tile_coords_for_rect. tile_bounds_p0: TileOffset, tile_bounds_p1: TileOffset, /// Local rect (unclipped) of the picture this cache covers. pub local_rect: PictureRect, /// The local clip rect, from the shared clips of this picture. local_clip_rect: PictureRect, /// A list of tiles that are valid and visible, which should be drawn to the main scene. pub tiles_to_draw: Vec<TileOffset>, /// The surface index that this tile cache will be drawn into. surface_index: SurfaceIndex, /// The background color from the renderer. If this is set opaque, we know it's /// fine to clear the tiles to this and allow subpixel text on the first slice. pub background_color: Option<ColorF>, /// Information about the calculated backdrop content of this cache. backdrop: BackdropInfo, /// The allowed subpixel mode for this surface, which depends on the detected /// opacity of the background. pub subpixel_mode: SubpixelMode, /// A list of clip handles that exist on every (top-level) primitive in this picture. /// It's often the case that these are root / fixed position clips. By handling them /// here, we can avoid applying them to the items, which reduces work, but more importantly /// reduces invalidations. pub shared_clips: Vec<ClipDataHandle>, /// The clip chain that represents the shared_clips above. Used to build the local /// clip rect for this tile cache. shared_clip_chain: ClipChainId, /// The current transform of the picture cache root spatial node root_transform: TransformKey, /// The number of frames until this cache next evaluates what tile size to use. 
/// If a picture rect size is regularly changing just around a size threshold,
/// we don't want to constantly invalidate and reallocate different tile size
/// configuration each frame.
frames_until_size_eval: usize,
/// The current fractional offset of the cached picture
fract_offset: PictureVector2D,
/// keep around the hash map used as compare_cache to avoid reallocating it each
/// frame.
compare_cache: FastHashMap<PrimitiveComparisonKey, PrimitiveCompareResult>,
}

impl TileCacheInstance {
    /// Construct an empty tile cache instance for the given slice. All
    /// per-frame state (tiles, bindings, rects) starts zeroed / empty and is
    /// populated by pre_update / update_prim_dependencies / post_update.
    pub fn new(
        slice: usize,
        spatial_node_index: SpatialNodeIndex,
        background_color: Option<ColorF>,
        shared_clips: Vec<ClipDataHandle>,
        shared_clip_chain: ClipChainId,
    ) -> Self {
        TileCacheInstance {
            slice,
            spatial_node_index,
            tiles: FastHashMap::default(),
            old_tiles: FastHashMap::default(),
            map_local_to_surface: SpaceMapper::new(
                ROOT_SPATIAL_NODE_INDEX,
                PictureRect::zero(),
            ),
            map_child_pic_to_surface: SpaceMapper::new(
                ROOT_SPATIAL_NODE_INDEX,
                PictureRect::zero(),
            ),
            opacity_bindings: FastHashMap::default(),
            old_opacity_bindings: FastHashMap::default(),
            spatial_nodes: FastHashMap::default(),
            old_spatial_nodes: FastHashMap::default(),
            used_spatial_nodes: FastHashSet::default(),
            dirty_region: DirtyRegion::new(),
            tile_size: PictureSize::zero(),
            tile_rect: TileRect::zero(),
            tile_bounds_p0: TileOffset::zero(),
            tile_bounds_p1: TileOffset::zero(),
            local_rect: PictureRect::zero(),
            local_clip_rect: PictureRect::zero(),
            tiles_to_draw: Vec::new(),
            surface_index: SurfaceIndex(0),
            background_color,
            backdrop: BackdropInfo::empty(),
            subpixel_mode: SubpixelMode::Allow,
            root_transform: TransformKey::Local,
            shared_clips,
            shared_clip_chain,
            // current_tile_size starts at zero so the first pre_update's tile
            // size evaluation always detects a change and selects a real size.
            current_tile_size: DeviceIntSize::zero(),
            frames_until_size_eval: 0,
            fract_offset: PictureVector2D::zero(),
            compare_cache: FastHashMap::default(),
        }
    }

    /// Returns true if this tile cache is considered opaque.
    pub fn is_opaque(&self) -> bool {
        // If known opaque due to background clear color and being the first slice.
        // The background_color will only be Some(..) if this is the first slice.
        match self.background_color {
            Some(color) => color.a >= 1.0,
            None => false
        }
    }

    /// Get the tile coordinates for a given rectangle.
    fn get_tile_coords_for_rect(
        &self,
        rect: &PictureRect,
    ) -> (TileOffset, TileOffset) {
        // Get the tile coordinates in the picture space.
        let mut p0 = TileOffset::new(
            (rect.origin.x / self.tile_size.width).floor() as i32,
            (rect.origin.y / self.tile_size.height).floor() as i32,
        );

        let mut p1 = TileOffset::new(
            ((rect.origin.x + rect.size.width) / self.tile_size.width).ceil() as i32,
            ((rect.origin.y + rect.size.height) / self.tile_size.height).ceil() as i32,
        );

        // Clamp the tile coordinates here to avoid looping over irrelevant tiles later on.
        p0.x = clamp(p0.x, self.tile_bounds_p0.x, self.tile_bounds_p1.x);
        p0.y = clamp(p0.y, self.tile_bounds_p0.y, self.tile_bounds_p1.y);
        p1.x = clamp(p1.x, self.tile_bounds_p0.x, self.tile_bounds_p1.x);
        p1.y = clamp(p1.y, self.tile_bounds_p0.y, self.tile_bounds_p1.y);

        (p0, p1)
    }

    /// Update transforms, opacity bindings and tile rects.
    pub fn pre_update(
        &mut self,
        pic_rect: PictureRect,
        surface_index: SurfaceIndex,
        frame_context: &FrameVisibilityContext,
        frame_state: &mut FrameVisibilityState,
    ) -> WorldRect {
        self.surface_index = surface_index;
        self.local_rect = pic_rect;
        self.local_clip_rect = PictureRect::max_rect();

        // Reset the opaque rect + subpixel mode, as they are calculated
        // during the prim dependency checks.
self.backdrop = BackdropInfo::empty();
self.subpixel_mode = SubpixelMode::Allow;

self.map_local_to_surface = SpaceMapper::new(
    self.spatial_node_index,
    PictureRect::from_untyped(&pic_rect.to_untyped()),
);
self.map_child_pic_to_surface = SpaceMapper::new(
    self.spatial_node_index,
    PictureRect::from_untyped(&pic_rect.to_untyped()),
);

let pic_to_world_mapper = SpaceMapper::new_with_target(
    ROOT_SPATIAL_NODE_INDEX,
    self.spatial_node_index,
    frame_context.global_screen_world_rect,
    frame_context.clip_scroll_tree,
);

// If there is a valid set of shared clips, build a clip chain instance for this,
// which will provide a local clip rect. This is useful for establishing things
// like whether the backdrop rect supplied by Gecko can be considered opaque.
if self.shared_clip_chain != ClipChainId::NONE {
    // Walk the chain links from this cache's chain id up to the root,
    // collecting each node id so they can be activated together.
    let mut shared_clips = Vec::new();
    let mut current_clip_chain_id = self.shared_clip_chain;
    while current_clip_chain_id != ClipChainId::NONE {
        shared_clips.push(current_clip_chain_id);
        let clip_chain_node = &frame_state.clip_store.clip_chain_nodes[current_clip_chain_id.0 as usize];
        current_clip_chain_id = clip_chain_node.parent_clip_chain_id;
    }

    frame_state.clip_store.set_active_clips(
        LayoutRect::max_rect(),
        self.spatial_node_index,
        &shared_clips,
        frame_context.clip_scroll_tree,
        &mut frame_state.data_stores.clip,
    );

    let clip_chain_instance = frame_state.clip_store.build_clip_chain_instance(
        LayoutRect::from_untyped(&pic_rect.to_untyped()),
        &self.map_local_to_surface,
        &pic_to_world_mapper,
        frame_context.clip_scroll_tree,
        frame_state.gpu_cache,
        frame_state.resource_cache,
        frame_context.global_device_pixel_scale,
        &frame_context.global_screen_world_rect,
        &mut frame_state.data_stores.clip,
        true,
        false,
    );

    // Ensure that if the entire picture cache is clipped out, the local
    // clip rect is zero. This makes sure we don't register any occluders
    // that are actually off-screen.
    self.local_clip_rect = clip_chain_instance.map_or(PictureRect::zero(), |clip_chain_instance| {
        clip_chain_instance.pic_clip_rect
    });
}

// If there is pending retained state, retrieve it.
if let Some(prev_state) = frame_state.retained_tiles.caches.remove(&self.slice) {
    self.tiles.extend(prev_state.tiles);
    self.root_transform = prev_state.root_transform;
    self.spatial_nodes = prev_state.spatial_nodes;
    self.opacity_bindings = prev_state.opacity_bindings;
    self.current_tile_size = prev_state.current_tile_size;

    // Only take the retained allocation when it is larger than the one we
    // already have, so hashmap capacity is reused rather than re-allocated.
    fn recycle_map<K: std::cmp::Eq + std::hash::Hash, V>(
        dest: &mut FastHashMap<K, V>,
        src: FastHashMap<K, V>,
    ) {
        if dest.capacity() < src.capacity() {
            *dest = src;
        }
    }
    recycle_map(&mut self.old_tiles, prev_state.allocations.old_tiles);
    recycle_map(&mut self.old_opacity_bindings, prev_state.allocations.old_opacity_bindings);
    recycle_map(&mut self.compare_cache, prev_state.allocations.compare_cache);
}

// Only evaluate what tile size to use fairly infrequently, so that we don't end
// up constantly invalidating and reallocating tiles if the picture rect size is
// changing near a threshold value.
if self.frames_until_size_eval == 0 {
    const TILE_SIZE_TINY: f32 = 32.0;

    // Work out what size tile is appropriate for this picture cache.
    let desired_tile_size;

    // There's no need to check the other dimension. If we encounter a picture
    // that is small on one dimension, it's a reasonable choice to use a scrollbar
    // sized tile configuration regardless of the other dimension.
    if pic_rect.size.width <= TILE_SIZE_TINY {
        desired_tile_size = TILE_SIZE_SCROLLBAR_VERTICAL;
    } else if pic_rect.size.height <= TILE_SIZE_TINY {
        desired_tile_size = TILE_SIZE_SCROLLBAR_HORIZONTAL;
    } else {
        desired_tile_size = TILE_SIZE_DEFAULT;
    }

    // If the desired tile size has changed, then invalidate and drop any
    // existing tiles.
    if desired_tile_size != self.current_tile_size {
        // Destroy any native surfaces on the tiles that will be dropped due
        // to resizing.
        frame_state.composite_state.destroy_native_surfaces(
            self.tiles.values(),
            frame_state.resource_cache,
        );
        self.tiles.clear();
        self.current_tile_size = desired_tile_size;
    }

    // Reset counter until next evaluating the desired tile size. This is an
    // arbitrary value.
    self.frames_until_size_eval = 120;
}

// Map an arbitrary point in picture space to world space, to work out
// what the fractional translation is that's applied by this scroll root.
// TODO(gw): I'm not 100% sure this is right. At least, in future, we should
//           make a specific API for this, and/or enforce that the picture
//           cache transform only includes scale and/or translation (we
//           already ensure it doesn't have perspective).
let world_origin = pic_to_world_mapper
    .map(&PictureRect::new(PicturePoint::zero(), PictureSize::new(1.0, 1.0)))
    .expect("bug: unable to map origin to world space")
    .origin;

// Get the desired integer device coordinate
let device_origin = world_origin * frame_context.global_device_pixel_scale;
let desired_device_origin = device_origin.round();

// Unmap from device space to world space rect
let ref_world_rect = WorldRect::new(
    desired_device_origin / frame_context.global_device_pixel_scale,
    WorldSize::new(1.0, 1.0),
);

// Unmap from world space to picture space
let ref_point = pic_to_world_mapper
    .unmap(&ref_world_rect)
    .expect("bug: unable to unmap ref world rect")
    .origin;

// Extract the fractional offset required in picture space to align in device space
self.fract_offset = PictureVector2D::new(
    ref_point.x.fract(),
    ref_point.y.fract(),
);

// Do a hacky diff of opacity binding values from the last frame. This is
// used later on during tile invalidation tests.
let current_properties = frame_context.scene_properties.float_properties();
mem::swap(&mut self.opacity_bindings, &mut self.old_opacity_bindings);

self.opacity_bindings.clear();
for (id, value) in current_properties {
    let changed = match self.old_opacity_bindings.get(id) {
        Some(old_property) => !old_property.value.approx_eq(value),
        None => true,
    };
    self.opacity_bindings.insert(*id, OpacityBindingInfo {
        value: *value,
        changed,
    });
}

let world_tile_size = WorldSize::new(
    self.current_tile_size.width as f32 / frame_context.global_device_pixel_scale.0,
    self.current_tile_size.height as f32 / frame_context.global_device_pixel_scale.0,
);

// We know that this is an exact rectangle, since we (for now) only support tile
// caches where the scroll root is in the root coordinate system.
let local_tile_rect = pic_to_world_mapper
    .unmap(&WorldRect::new(WorldPoint::zero(), world_tile_size))
    .expect("bug: unable to get local tile rect");

self.tile_size = local_tile_rect.size;

let screen_rect_in_pic_space = pic_to_world_mapper
    .unmap(&frame_context.global_screen_world_rect)
    .expect("unable to unmap screen rect");

// Inflate the needed rect a bit, so that we retain tiles that we have drawn
// but have just recently gone off-screen. This means that we avoid re-drawing
// tiles if the user is scrolling up and down small amounts, at the cost of
// a bit of extra texture memory.
let desired_rect_in_pic_space = screen_rect_in_pic_space
    .inflate(0.0, 3.0 * self.tile_size.height);

let needed_rect_in_pic_space = desired_rect_in_pic_space
    .intersection(&pic_rect)
    .unwrap_or(PictureRect::zero());

let p0 = needed_rect_in_pic_space.origin;
let p1 = needed_rect_in_pic_space.bottom_right();

let x0 = (p0.x / local_tile_rect.size.width).floor() as i32;
let x1 = (p1.x / local_tile_rect.size.width).ceil() as i32;

let y0 = (p0.y / local_tile_rect.size.height).floor() as i32;
let y1 = (p1.y / local_tile_rect.size.height).ceil() as i32;

let x_tiles = x1 - x0;
let y_tiles = y1 - y0;

self.tile_rect = TileRect::new(
    TileOffset::new(x0, y0),
    TileSize::new(x_tiles, y_tiles),
);
// This is duplicated information from tile_rect, but cached here to avoid
// redundant calculations during get_tile_coords_for_rect
self.tile_bounds_p0 = TileOffset::new(x0, y0);
self.tile_bounds_p1 = TileOffset::new(x1, y1);

let mut world_culling_rect = WorldRect::zero();

mem::swap(&mut self.tiles, &mut self.old_tiles);

let ctx = TilePreUpdateContext {
    local_rect: self.local_rect,
    local_clip_rect: self.local_clip_rect,
    pic_to_world_mapper,
    fract_offset: self.fract_offset,
    background_color: self.background_color,
    global_screen_world_rect: frame_context.global_screen_world_rect,
};

self.tiles.clear();
for y in y0 .. y1 {
    for x in x0 .. x1 {
        let key = TileOffset::new(x, y);

        // Reuse the tile from last frame if it existed at this offset,
        // otherwise allocate a fresh tile with a new globally-unique id.
        let mut tile = self.old_tiles
            .remove(&key)
            .unwrap_or_else(|| {
                let next_id = TileId(NEXT_TILE_ID.fetch_add(1, Ordering::Relaxed));
                Tile::new(next_id)
            });

        // Ensure each tile is offset by the appropriate amount from the
        // origin, such that the content origin will be a whole number and
        // the snapping will be consistent.
        let rect = PictureRect::new(
            PicturePoint::new(
                x as f32 * self.tile_size.width + self.fract_offset.x,
                y as f32 * self.tile_size.height + self.fract_offset.y,
            ),
            self.tile_size,
        );

        tile.pre_update(
            rect,
            &ctx,
        );

        // Only include the tiles that are currently in view into the world culling
        // rect. This is a very important optimization for a couple of reasons:
        // (1) Primitives that intersect with tiles in the grid that are not currently
        //     visible can be skipped from primitive preparation, clip chain building
        //     and tile dependency updates.
        // (2) When we need to allocate an off-screen surface for a child picture (for
        //     example a CSS filter) we clip the size of the GPU surface to the world
        //     culling rect below (to ensure we draw enough of it to be sampled by any
        //     tiles that reference it). Making the world culling rect only affected
        //     by visible tiles (rather than the entire virtual tile display port) can
        //     result in allocating _much_ smaller GPU surfaces for cases where the
        //     true off-screen surface size is very large.
        if tile.is_visible {
            world_culling_rect = world_culling_rect.union(&tile.world_rect);
        }

        self.tiles.insert(key, tile);
    }
}

// Any old tiles that remain after the loop above are going to be dropped. For
// simple composite mode, the texture cache handle will expire and be collected
// by the texture cache. For native compositor mode, we need to explicitly
// invoke a callback to the client to destroy that surface.
frame_state.composite_state.destroy_native_surfaces(
    self.old_tiles.values(),
    frame_state.resource_cache,
);

world_culling_rect
}

/// Update the dependencies for each tile for a given primitive instance.
/// Returns true if the primitive was registered as a dependency of one or
/// more tiles (i.e. it may be visible), false if it can be skipped entirely.
pub fn update_prim_dependencies(
    &mut self,
    prim_instance: &PrimitiveInstance,
    prim_spatial_node_index: SpatialNodeIndex,
    prim_clip_chain: Option<&ClipChainInstance>,
    local_prim_rect: LayoutRect,
    clip_scroll_tree: &ClipScrollTree,
    data_stores: &DataStores,
    clip_store: &ClipStore,
    pictures: &[PicturePrimitive],
    resource_cache: &ResourceCache,
    opacity_binding_store: &OpacityBindingStorage,
    image_instances: &ImageInstanceStorage,
    surface_index: SurfaceIndex,
    surface_spatial_node_index: SpatialNodeIndex,
) -> bool {
    // If the primitive is completely clipped out by the clip chain, there
    // is no need to add it to any primitive dependencies.
    let prim_clip_chain = match prim_clip_chain {
        Some(prim_clip_chain) => prim_clip_chain,
        None => return false,
    };

    self.map_local_to_surface.set_target_spatial_node(
        prim_spatial_node_index,
        clip_scroll_tree,
    );

    // Map the primitive local rect into picture space.
    let prim_rect = match self.map_local_to_surface.map(&local_prim_rect) {
        Some(rect) => rect,
        None => return false,
    };

    // If the rect is invalid, no need to create dependencies.
    if prim_rect.size.is_empty_or_negative() {
        return false;
    }

    // If the primitive is directly drawn onto this picture cache surface, then
    // the pic_clip_rect is in the same space. If not, we need to map it from
    // the surface space into the picture cache space.
    let on_picture_surface = surface_index == self.surface_index;
    let pic_clip_rect = if on_picture_surface {
        prim_clip_chain.pic_clip_rect
    } else {
        self.map_child_pic_to_surface.set_target_spatial_node(
            surface_spatial_node_index,
            clip_scroll_tree,
        );

        self.map_child_pic_to_surface
            .map(&prim_clip_chain.pic_clip_rect)
            .expect("bug: unable to map clip rect to picture cache space")
    };

    // Get the tile coordinates in the picture space.
    let (p0, p1) = self.get_tile_coords_for_rect(&pic_clip_rect);

    // If the primitive is outside the tiling rects, it's known to not
    // be visible.
    if p0.x == p1.x || p0.y == p1.y {
        return false;
    }

    // Build the list of resources that this primitive has dependencies on.
    let mut prim_info = PrimitiveDependencyInfo::new(
        prim_instance.uid(),
        prim_rect.origin,
        pic_clip_rect,
    );

    // Include the prim spatial node, if differs relative to cache root.
    if prim_spatial_node_index != self.spatial_node_index {
        prim_info.spatial_nodes.push(prim_spatial_node_index);
    }

    // If there was a clip chain, add any clip dependencies to the list for this tile.
    let clip_instances = &clip_store
        .clip_node_instances[prim_clip_chain.clips_range.to_range()];
    for clip_instance in clip_instances {
        prim_info.clips.push(clip_instance.handle.uid());

        // If the clip has the same spatial node, the relative transform
        // will always be the same, so there's no need to depend on it.
        let clip_node = &data_stores.clip[clip_instance.handle];
        if clip_node.item.spatial_node_index != self.spatial_node_index {
            if !prim_info.spatial_nodes.contains(&clip_node.item.spatial_node_index) {
                prim_info.spatial_nodes.push(clip_node.item.spatial_node_index);
            }
        }
    }

    // Certain primitives may select themselves to be a backdrop candidate, which is
    // then applied below.
    let mut backdrop_candidate = None;

    // For pictures, we don't (yet) know the valid clip rect, so we can't correctly
    // use it to calculate the local bounding rect for the tiles. If we include them
    // then we may calculate a bounding rect that is too large, since it won't include
    // the clip bounds of the picture. Excluding them from the bounding rect here
    // fixes any correctness issues (the clips themselves are considered when we
    // consider the bounds of the primitives that are *children* of the picture),
    // however it does potentially result in some un-necessary invalidations of a
    // tile (in cases where the picture local rect affects the tile, but the clip
    // rect eventually means it doesn't affect that tile).
    // TODO(gw): Get picture clips earlier (during the initial picture traversal
    //           pass) so that we can calculate these correctly.
    match prim_instance.kind {
        PrimitiveInstanceKind::Picture { pic_index,.. } => {
            // Pictures can depend on animated opacity bindings.
            let pic = &pictures[pic_index.0];
            if let Some(PictureCompositeMode::Filter(Filter::Opacity(binding, _))) = pic.requested_composite_mode {
                prim_info.opacity_bindings.push(binding.into());
            }
        }
        PrimitiveInstanceKind::Rectangle { data_handle, opacity_binding_index, .. } => {
            if opacity_binding_index == OpacityBindingIndex::INVALID {
                // Rectangles can only form a backdrop candidate if they are known opaque.
                // TODO(gw): We could resolve the opacity binding here, but the common
                //           case for background rects is that they don't have animated opacity.
                let color = match data_stores.prim[data_handle].kind {
                    PrimitiveTemplateKind::Rectangle { color, .. } => color,
                    _ => unreachable!(),
                };
                if color.a >= 1.0 {
                    backdrop_candidate = Some(BackdropKind::Color { color });
                }
            } else {
                let opacity_binding = &opacity_binding_store[opacity_binding_index];
                for binding in &opacity_binding.bindings {
                    prim_info.opacity_bindings.push(OpacityBinding::from(*binding));
                }
            }

            prim_info.clip_by_tile = true;
        }
        PrimitiveInstanceKind::Image { data_handle, image_instance_index, .. } => {
            let image_data = &data_stores.image[data_handle].kind;
            let image_instance = &image_instances[image_instance_index];
            let opacity_binding_index = image_instance.opacity_binding_index;

            if opacity_binding_index == OpacityBindingIndex::INVALID {
                if let Some(image_properties) = resource_cache.get_image_properties(image_data.key) {
                    // If this image is opaque, it can be considered as a possible opaque backdrop
                    if image_properties.descriptor.is_opaque() {
                        backdrop_candidate = Some(BackdropKind::Image);
                    }
                }
            } else {
                let opacity_binding = &opacity_binding_store[opacity_binding_index];
                for binding in &opacity_binding.bindings {
                    prim_info.opacity_bindings.push(OpacityBinding::from(*binding));
                }
            }

            prim_info.images.push(ImageDependency {
                key: image_data.key,
                generation: resource_cache.get_image_generation(image_data.key),
            });
        }
        PrimitiveInstanceKind::YuvImage { data_handle, .. } => {
            let yuv_image_data = &data_stores.yuv_image[data_handle].kind;
            prim_info.images.extend(
                yuv_image_data.yuv_key.iter().map(|key| {
                    ImageDependency {
                        key: *key,
                        generation: resource_cache.get_image_generation(*key),
                    }
                })
            );
        }
        PrimitiveInstanceKind::ImageBorder { data_handle, .. } => {
            let border_data = &data_stores.image_border[data_handle].kind;
            prim_info.images.push(ImageDependency {
                key: border_data.request.key,
                generation: resource_cache.get_image_generation(border_data.request.key),
            });
        }
        PrimitiveInstanceKind::PushClipChain |
        PrimitiveInstanceKind::PopClipChain => {
            // Early exit to ensure this doesn't get added as a dependency on the tile.
            return false;
        }
        PrimitiveInstanceKind::TextRun { data_handle, .. } => {
            // Only do these checks if we haven't already disabled subpx
            // text rendering for this slice.
            if self.subpixel_mode == SubpixelMode::Allow && !self.is_opaque() {
                let run_data = &data_stores.text_run[data_handle];

                // Only care about text runs that have requested subpixel rendering.
                // This is conservative - it may still end up that a subpx requested
                // text run doesn't get subpx for other reasons (e.g. glyph size).
                let subpx_requested = match run_data.font.render_mode {
                    FontRenderMode::Subpixel => true,
                    FontRenderMode::Alpha | FontRenderMode::Mono => false,
                };

                // If a text run is on a child surface, the subpx mode will be
                // correctly determined as we recurse through pictures in take_context.
                if on_picture_surface && subpx_requested {
                    if !self.backdrop.rect.contains_rect(&pic_clip_rect) {
                        self.subpixel_mode = SubpixelMode::Deny;
                    }
                }
            }
        }
        PrimitiveInstanceKind::Clear { .. } => {
            backdrop_candidate = Some(BackdropKind::Clear);
        }
        PrimitiveInstanceKind::LineDecoration { .. } |
        PrimitiveInstanceKind::NormalBorder { .. } |
        PrimitiveInstanceKind::LinearGradient { .. } |
        PrimitiveInstanceKind::RadialGradient { .. } |
        PrimitiveInstanceKind::Backdrop { .. } => {
            // These don't contribute dependencies
        }
    };

    // If this primitive considers itself a backdrop candidate, apply further
    // checks to see if it matches all conditions to be a backdrop.
    if let Some(backdrop_candidate) = backdrop_candidate {
        let is_suitable_backdrop = match backdrop_candidate {
            BackdropKind::Clear => {
                // Clear prims are special - they always end up in their own slice,
                // and always set the backdrop. In future, we hope to completely
                // remove clear prims, since they don't integrate with the compositing
                // system cleanly.
                true
            }
            BackdropKind::Image |
            BackdropKind::Color { .. } => {
                // Check a number of conditions to see if we can consider this
                // primitive as an opaque backdrop rect. Several of these are conservative
                // checks and could be relaxed in future. However, these checks
                // are quick and capture the common cases of background rects and images.
                // Specifically, we currently require:
                //  - The primitive is on the main picture cache surface.
                //  - Same coord system as picture cache (ensures rects are axis-aligned).
                //  - No clip masks exist.
                let same_coord_system = {
                    let prim_spatial_node = &clip_scroll_tree
                        .spatial_nodes[prim_spatial_node_index.0 as usize];
                    let surface_spatial_node = &clip_scroll_tree
                        .spatial_nodes[self.spatial_node_index.0 as usize];

                    prim_spatial_node.coordinate_system_id == surface_spatial_node.coordinate_system_id
                };

                same_coord_system && on_picture_surface
            }
        };

        if is_suitable_backdrop {
            // Only replace the current backdrop if this candidate's clip rect
            // fully contains it (i.e. it covers at least as much area).
            if !prim_clip_chain.needs_mask && pic_clip_rect.contains_rect(&self.backdrop.rect) {
                self.backdrop = BackdropInfo {
                    rect: pic_clip_rect,
                    kind: backdrop_candidate,
                }
            }
        }
    }

    // Record any new spatial nodes in the used list.
    self.used_spatial_nodes.extend(&prim_info.spatial_nodes);

    // Truncate the lengths of dependency arrays to the max size we can handle.
    // Any arrays this size or longer will invalidate every frame.
    prim_info.clips.truncate(MAX_PRIM_SUB_DEPS);
    prim_info.opacity_bindings.truncate(MAX_PRIM_SUB_DEPS);
    prim_info.spatial_nodes.truncate(MAX_PRIM_SUB_DEPS);
    prim_info.images.truncate(MAX_PRIM_SUB_DEPS);

    // Normalize the tile coordinates before adding to tile dependencies.
    // For each affected tile, mark any of the primitive dependencies.
    for y in p0.y .. p1.y {
        for x in p0.x .. p1.x {
            // TODO(gw): Convert to 2d array temporarily to avoid hash lookups per-tile?
            let key = TileOffset::new(x, y);
            let tile = self.tiles.get_mut(&key).expect("bug: no tile");

            tile.add_prim_dependency(&prim_info);
        }
    }

    true
}

/// Print debug information about this picture cache to a tree printer.
fn print(&self) {
    // TODO(gw): This initial implementation is very basic - just printing
    //           the picture cache state to stdout. In future, we can
    //           make this dump each frame to a file, and produce a report
    //           stating which frames had invalidations. This will allow
    //           diff'ing the invalidation states in a visual tool.
let mut pt = PrintTree::new("Picture Cache");

pt.new_level(format!("Slice {}", self.slice));

pt.add_item(format!("fract_offset: {:?}", self.fract_offset));
pt.add_item(format!("background_color: {:?}", self.background_color));

for y in self.tile_bounds_p0.y .. self.tile_bounds_p1.y {
    for x in self.tile_bounds_p0.x .. self.tile_bounds_p1.x {
        let key = TileOffset::new(x, y);
        let tile = &self.tiles[&key];
        tile.print(&mut pt);
    }
}

pt.end_level();
}

/// Apply any updates after prim dependency updates. This applies
/// any late tile invalidations, and sets up the dirty rect and
/// set of tile blits.
pub fn post_update(
    &mut self,
    frame_context: &FrameVisibilityContext,
    frame_state: &mut FrameVisibilityState,
) {
    self.tiles_to_draw.clear();
    self.dirty_region.clear();

    // Register the opaque region of this tile cache as an occluder, which
    // is used later in the frame to occlude other tiles.
    if self.backdrop.rect.is_well_formed_and_nonempty() {
        // Clip the backdrop to both the picture local rect and the shared
        // clip rect, so the occluder never extends beyond visible content.
        let backdrop_rect = self.backdrop.rect
            .intersection(&self.local_rect)
            .and_then(|r| {
                r.intersection(&self.local_clip_rect)
            });

        if let Some(backdrop_rect) = backdrop_rect {
            let map_pic_to_world = SpaceMapper::new_with_target(
                ROOT_SPATIAL_NODE_INDEX,
                self.spatial_node_index,
                frame_context.global_screen_world_rect,
                frame_context.clip_scroll_tree,
            );

            let world_backdrop_rect = map_pic_to_world
                .map(&backdrop_rect)
                .expect("bug: unable to map backdrop to world space");

            frame_state.composite_state.register_occluder(
                self.slice,
                world_backdrop_rect,
            );
        }
    }

    // Detect if the picture cache was scrolled or scaled. In this case,
    // the device space dirty rects aren't applicable (until we properly
    // integrate with OS compositors that can handle scrolling slices).
    let root_transform = frame_context
        .clip_scroll_tree
        .get_relative_transform(
            self.spatial_node_index,
            ROOT_SPATIAL_NODE_INDEX,
        )
        .into();
    let root_transform_changed = root_transform != self.root_transform;
    if root_transform_changed {
        self.root_transform = root_transform;
        frame_state.composite_state.dirty_rects_are_valid = false;
    }

    // Diff the state of the spatial nodes between last frame build and now.
    mem::swap(&mut self.spatial_nodes, &mut self.old_spatial_nodes);

    // TODO(gw): Maybe remove the used_spatial_nodes set and just mutate / create these
    //           diffs inside add_prim_dependency?
    self.spatial_nodes.clear();
    for spatial_node_index in self.used_spatial_nodes.drain() {
        // Get the current relative transform.
        let mut value = get_transform_key(
            spatial_node_index,
            self.spatial_node_index,
            frame_context.clip_scroll_tree,
        );

        // Check if the transform has changed from last frame
        let mut changed = true;
        if let Some(old_info) = self.old_spatial_nodes.remove(&spatial_node_index) {
            if old_info.value == value {
                // Since the transform key equality check applies epsilon, if we
                // consider the value to be the same, store that old value to avoid
                // missing very slow drifts in the value over time.
                // TODO(gw): We should change ComparableVec to use a trait for comparison
                //           rather than PartialEq.
                value = old_info.value;
                changed = false;
            }
        }

        self.spatial_nodes.insert(spatial_node_index, SpatialNodeDependency {
            changed,
            value,
        });
    }

    let ctx = TilePostUpdateContext {
        backdrop: self.backdrop,
        spatial_nodes: &self.spatial_nodes,
        opacity_bindings: &self.opacity_bindings,
        current_tile_size: self.current_tile_size,
    };

    let mut state = TilePostUpdateState {
        resource_cache: frame_state.resource_cache,
        composite_state: frame_state.composite_state,
        compare_cache: &mut self.compare_cache,
    };

    // Step through each tile and invalidate if the dependencies have changed.
for (key, tile) in self.tiles.iter_mut() {
    // Tiles that report true from post_update are valid + visible and get
    // queued for drawing into the main scene.
    if tile.post_update(&ctx, &mut state) {
        self.tiles_to_draw.push(*key);
    }
}

// When under test, record a copy of the dirty region to support
// invalidation testing in wrench.
if frame_context.config.testing {
    frame_state.scratch.recorded_dirty_regions.push(self.dirty_region.record());
}
}
}

/// Maintains a stack of picture and surface information, that
/// is used during the initial picture traversal.
pub struct PictureUpdateState<'a> {
    surfaces: &'a mut Vec<SurfaceInfo>,
    surface_stack: Vec<SurfaceIndex>,
    picture_stack: Vec<PictureInfo>,
    are_raster_roots_assigned: bool,
    composite_state: &'a CompositeState,
}

impl<'a> PictureUpdateState<'a> {
    /// Walk the picture tree from `pic_index`, updating surface configuration
    /// and raster roots. Runs a second fix-up pass (assign_raster_roots) if
    /// any picture wanted a raster root but couldn't establish one.
    pub fn update_all(
        surfaces: &'a mut Vec<SurfaceInfo>,
        pic_index: PictureIndex,
        picture_primitives: &mut [PicturePrimitive],
        frame_context: &FrameBuildingContext,
        gpu_cache: &mut GpuCache,
        clip_store: &ClipStore,
        data_stores: &mut DataStores,
        composite_state: &CompositeState,
    ) {
        profile_marker!("UpdatePictures");

        let mut state = PictureUpdateState {
            surfaces,
            surface_stack: vec![SurfaceIndex(0)],
            picture_stack: Vec::new(),
            are_raster_roots_assigned: true,
            composite_state,
        };

        state.update(
            pic_index,
            picture_primitives,
            frame_context,
            gpu_cache,
            clip_store,
            data_stores,
        );

        if !state.are_raster_roots_assigned {
            state.assign_raster_roots(
                pic_index,
                picture_primitives,
                ROOT_SPATIAL_NODE_INDEX,
            );
        }
    }

    /// Return the current surface
    fn current_surface(&self) -> &SurfaceInfo {
        &self.surfaces[self.surface_stack.last().unwrap().0]
    }

    /// Return the current surface (mutable)
    fn current_surface_mut(&mut self) -> &mut SurfaceInfo {
        &mut self.surfaces[self.surface_stack.last().unwrap().0]
    }

    /// Push a new surface onto the update stack.
    fn push_surface(
        &mut self,
        surface: SurfaceInfo,
    ) -> SurfaceIndex {
        let surface_index = SurfaceIndex(self.surfaces.len());
        self.surfaces.push(surface);
        self.surface_stack.push(surface_index);
        surface_index
    }

    /// Pop a surface on the way up the picture traversal
    fn pop_surface(&mut self) -> SurfaceIndex {
        self.surface_stack.pop().unwrap()
    }

    /// Push information about a picture on the update stack
    fn push_picture(
        &mut self,
        info: PictureInfo,
    ) {
        self.picture_stack.push(info);
    }

    /// Pop the picture info off, on the way up the picture traversal
    fn pop_picture(
        &mut self,
    ) -> PictureInfo {
        self.picture_stack.pop().unwrap()
    }

    /// Update a picture, determining surface configuration,
    /// rasterization roots, and (in future) whether there
    /// are cached surfaces that can be used by this picture.
    fn update(
        &mut self,
        pic_index: PictureIndex,
        picture_primitives: &mut [PicturePrimitive],
        frame_context: &FrameBuildingContext,
        gpu_cache: &mut GpuCache,
        clip_store: &ClipStore,
        data_stores: &mut DataStores,
    ) {
        // pre_update returns Some(..) only if the picture is visible and
        // should be traversed; it hands back the prim list for recursion.
        if let Some(prim_list) = picture_primitives[pic_index.0].pre_update(
            self,
            frame_context,
        ) {
            for cluster in &prim_list.clusters {
                if cluster.flags.contains(ClusterFlags::IS_PICTURE) {
                    for prim_instance in &cluster.prim_instances {
                        let child_pic_index = match prim_instance.kind {
                            PrimitiveInstanceKind::Picture { pic_index, .. } => pic_index,
                            _ => unreachable!(),
                        };

                        self.update(
                            child_pic_index,
                            picture_primitives,
                            frame_context,
                            gpu_cache,
                            clip_store,
                            data_stores,
                        );
                    }
                }
            }

            picture_primitives[pic_index.0].post_update(
                prim_list,
                self,
                frame_context,
                data_stores,
            );
        }
    }

    /// Process the picture tree again in a depth-first order,
    /// and adjust the raster roots of the pictures that want to establish
    /// their own roots but are not able to due to the size constraints.
    fn assign_raster_roots(
        &mut self,
        pic_index: PictureIndex,
        picture_primitives: &[PicturePrimitive],
        fallback_raster_spatial_node: SpatialNodeIndex,
    ) {
        let picture = &picture_primitives[pic_index.0];
        if !picture.is_visible() {
            return
        }

        // Determine the raster root that children of this picture inherit:
        // a surface that does not establish its own raster root gets the
        // fallback node assigned instead.
        let new_fallback = match picture.raster_config {
            Some(ref config) => {
                let surface = &mut self.surfaces[config.surface_index.0];
                if !config.establishes_raster_root {
                    surface.raster_spatial_node_index = fallback_raster_spatial_node;
                }
                surface.raster_spatial_node_index
            }
            None => fallback_raster_spatial_node,
        };

        // Recurse into child pictures, propagating the (possibly updated)
        // fallback raster spatial node.
        for cluster in &picture.prim_list.clusters {
            if cluster.flags.contains(ClusterFlags::IS_PICTURE) {
                for instance in &cluster.prim_instances {
                    let child_pic_index = match instance.kind {
                        PrimitiveInstanceKind::Picture { pic_index, .. } => pic_index,
                        _ => unreachable!(),
                    };
                    self.assign_raster_roots(
                        child_pic_index,
                        picture_primitives,
                        new_fallback,
                    );
                }
            }
        }
    }
}

#[derive(Debug, Copy, Clone, PartialEq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct SurfaceIndex(pub usize);

pub const ROOT_SURFACE_INDEX: SurfaceIndex = SurfaceIndex(0);

#[derive(Debug, Copy, Clone)]
pub struct SurfaceRenderTasks {
    /// The root of the render task chain for this surface. This
    /// is attached to parent tasks, and also the surface that
    /// gets added during batching.
    pub root: RenderTaskId,
    /// The port of the render task chain for this surface. This
    /// is where child tasks for this surface get attached to.
    pub port: RenderTaskId,
}

/// Information about an offscreen surface. For now,
/// it contains information about the size and coordinate
/// system of the surface. In the future, it will contain
/// information about the contents of the surface, which
/// will allow surfaces to be cached / retained between
/// frames and display lists.
#[derive(Debug)]
pub struct SurfaceInfo {
    /// A local rect defining the size of this surface, in the
    /// coordinate system of the surface itself.
    pub rect: PictureRect,
    /// Helper structs for mapping local rects in different
    /// coordinate systems into the surface coordinates.
    pub map_local_to_surface: SpaceMapper<LayoutPixel, PicturePixel>,
    /// Defines the positioning node for the surface itself,
    /// and the rasterization root for this surface.
    pub raster_spatial_node_index: SpatialNodeIndex,
    pub surface_spatial_node_index: SpatialNodeIndex,
    /// This is set when the render task is created.
    pub render_tasks: Option<SurfaceRenderTasks>,
    /// How much the local surface rect should be inflated (for blur radii).
    pub inflation_factor: f32,
    /// The device pixel ratio specific to this surface.
    pub device_pixel_scale: DevicePixelScale,
}

impl SurfaceInfo {
    /// Construct a new surface, deriving its picture-space bounds from
    /// the supplied world rect. The surface's own rect starts as zero and
    /// is accumulated during the picture traversal.
    pub fn new(
        surface_spatial_node_index: SpatialNodeIndex,
        raster_spatial_node_index: SpatialNodeIndex,
        inflation_factor: f32,
        world_rect: WorldRect,
        clip_scroll_tree: &ClipScrollTree,
        device_pixel_scale: DevicePixelScale,
    ) -> Self {
        let map_surface_to_world = SpaceMapper::new_with_target(
            ROOT_SPATIAL_NODE_INDEX,
            surface_spatial_node_index,
            world_rect,
            clip_scroll_tree,
        );

        // Unmap the world bounds into picture space; if unmapping fails,
        // fall back to the maximal rect.
        let pic_bounds = map_surface_to_world
            .unmap(&map_surface_to_world.bounds)
            .unwrap_or_else(PictureRect::max_rect);

        let map_local_to_surface = SpaceMapper::new(
            surface_spatial_node_index,
            pic_bounds,
        );

        SurfaceInfo {
            rect: PictureRect::zero(),
            map_local_to_surface,
            render_tasks: None,
            raster_spatial_node_index,
            surface_spatial_node_index,
            inflation_factor,
            device_pixel_scale,
        }
    }
}

#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct RasterConfig {
    /// How this picture should be composited into
    /// the parent surface.
    pub composite_mode: PictureCompositeMode,
    /// Index to the surface descriptor for this
    /// picture.
    pub surface_index: SurfaceIndex,
    /// Whether this picture establishes a rasterization root.
    pub establishes_raster_root: bool,
}

bitflags! {
    /// A set of flags describing why a picture may need a backing surface.
    #[cfg_attr(feature = "capture", derive(Serialize))]
    pub struct BlitReason: u32 {
        /// Mix-blend-mode on a child that requires isolation.
        const ISOLATE = 1;
        /// Clip node that _might_ require a surface.
        const CLIP = 2;
        /// Preserve-3D requires a surface for plane-splitting.
        const PRESERVE3D = 4;
        /// A backdrop that is reused which requires a surface.
        const BACKDROP = 8;
    }
}

/// Specifies how this Picture should be composited
/// onto the target it belongs to.
#[allow(dead_code)]
#[derive(Debug, Clone)]
#[cfg_attr(feature = "capture", derive(Serialize))]
pub enum PictureCompositeMode {
    /// Apply CSS mix-blend-mode effect.
    MixBlend(MixBlendMode),
    /// Apply a CSS filter (except component transfer).
    Filter(Filter),
    /// Apply a component transfer filter.
    ComponentTransferFilter(FilterDataHandle),
    /// Draw to intermediate surface, copy straight across. This
    /// is used for CSS isolation, and plane splitting.
    Blit(BlitReason),
    /// Used to cache a picture as a series of tiles.
    TileCache {
    },
    /// Apply an SVG filter
    SvgFilter(Vec<FilterPrimitive>, Vec<SFilterData>),
}

impl PictureCompositeMode {
    /// Inflate `picture_rect` to account for filter effects that sample
    /// outside the rect (blurs, drop shadows, and SVG filter chains).
    /// Modes with no such effect return the rect unchanged.
    pub fn inflate_picture_rect(&self, picture_rect: PictureRect, inflation_factor: f32) -> PictureRect {
        let mut result_rect = picture_rect;
        match self {
            PictureCompositeMode::Filter(filter) => match filter {
                Filter::Blur(_) => {
                    result_rect = picture_rect.inflate(inflation_factor, inflation_factor);
                },
                Filter::DropShadows(shadows) => {
                    // Inflate by the largest shadow's blur range.
                    let mut max_inflation: f32 = 0.0;
                    for shadow in shadows {
                        let inflation_factor = shadow.blur_radius.round() * BLUR_SAMPLE_SCALE;
                        max_inflation = max_inflation.max(inflation_factor);
                    }
                    result_rect = picture_rect.inflate(max_inflation, max_inflation);
                },
                _ => {}
            }
            PictureCompositeMode::SvgFilter(primitives, _) => {
                // Walk the filter primitive chain, computing each primitive's
                // output rect from the output of its input primitive(s); the
                // result is the union of all output rects.
                let mut output_rects = Vec::with_capacity(primitives.len());
                for (cur_index, primitive) in primitives.iter().enumerate() {
                    let output_rect = match primitive.kind {
                        FilterPrimitiveKind::Blur(ref primitive) => {
                            let input =
                                primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect);
                            let inflation_factor = primitive.radius.round() * BLUR_SAMPLE_SCALE;
                            input.inflate(inflation_factor, inflation_factor)
                        }
                        FilterPrimitiveKind::DropShadow(ref primitive) => {
                            let inflation_factor = primitive.shadow.blur_radius.round() * BLUR_SAMPLE_SCALE;
                            let input = primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect);
                            let shadow_rect = input.inflate(inflation_factor, inflation_factor);
                            // The output covers both the input and the offset, blurred shadow.
                            input.union(&shadow_rect.translate(primitive.shadow.offset * Scale::new(1.0)))
                        }
                        // Two-input primitives: the output covers both inputs.
                        FilterPrimitiveKind::Blend(ref primitive) => {
                            primitive.input1.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect)
                                .union(&primitive.input2.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect))
                        }
                        FilterPrimitiveKind::Composite(ref primitive) => {
                            primitive.input1.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect)
                                .union(&primitive.input2.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect))
                        }
                        // Pass-through primitives: output rect equals the input's.
                        FilterPrimitiveKind::Identity(ref primitive) =>
                            primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect),
                        FilterPrimitiveKind::Opacity(ref primitive) =>
                            primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect),
                        FilterPrimitiveKind::ColorMatrix(ref primitive) =>
                            primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect),
                        FilterPrimitiveKind::ComponentTransfer(ref primitive) =>
                            primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect),
                        FilterPrimitiveKind::Offset(ref primitive) => {
                            let input_rect = primitive.input.to_index(cur_index).map(|index| output_rects[index]).unwrap_or(picture_rect);
                            input_rect.translate(primitive.offset * Scale::new(1.0))
                        },
                        FilterPrimitiveKind::Flood(..)
                            => picture_rect,
                    };
                    output_rects.push(output_rect);
                    result_rect = result_rect.union(&output_rect);
                }
            }
            _ => {},
        }
        result_rect
    }
}

/// Enum value describing the place of a picture in a 3D context.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
pub enum Picture3DContext<C> {
    /// The picture is not a part of 3D context sub-hierarchy.
    Out,
    /// The picture is a part of 3D context.
    In {
        /// Additional data per child, present when this picture is the root of the 3D hierarchy.
        root_data: Option<Vec<C>>,
        /// The spatial node index of an "ancestor" element, i.e. one
        /// that establishes the transformed element’s containing block.
        ///
        /// See CSS spec draft for more details:
        /// https://drafts.csswg.org/css-transforms-2/#accumulated-3d-transformation-matrix-computation
        ancestor_index: SpatialNodeIndex,
    },
}

/// Information about a preserve-3D hierarchy child that has been plane-split
/// and ordered according to the view direction.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct OrderedPictureChild {
    pub anchor: PlaneSplitAnchor,
    pub spatial_node_index: SpatialNodeIndex,
    pub gpu_address: GpuCacheAddress,
}

bitflags! {
    /// A set of flags describing the properties of a primitive cluster.
    #[cfg_attr(feature = "capture", derive(Serialize))]
    pub struct ClusterFlags: u32 {
        /// This cluster is a picture
        const IS_PICTURE = 1;
        /// Whether this cluster is visible when the position node is a backface.
        const IS_BACKFACE_VISIBLE = 2;
        /// This flag is set during the first pass picture traversal, depending on whether
        /// the cluster is visible or not. It's read during the second pass when primitives
        /// consult their owning clusters to see if the primitive itself is visible.
        const IS_VISIBLE = 4;
        /// Is a backdrop-filter cluster that requires special handling during post_update.
        const IS_BACKDROP_FILTER = 8;
        /// Force creation of a picture caching slice before this cluster.
        const CREATE_PICTURE_CACHE_PRE = 16;
        /// Force creation of a picture caching slice after this cluster.
        const CREATE_PICTURE_CACHE_POST = 32;
        /// If set, this cluster represents a scroll bar container.
        const SCROLLBAR_CONTAINER = 64;
        /// If set, this cluster contains clear rectangle primitives.
        const IS_CLEAR_PRIMITIVE = 128;
    }
}

/// Descriptor for a cluster of primitives. For now, this is quite basic but will be
/// extended to handle more spatial clustering of primitives.
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct PrimitiveCluster {
    /// The positioning node for this cluster.
    pub spatial_node_index: SpatialNodeIndex,
    /// The bounding rect of the cluster, in the local space of the spatial node.
    /// This is used to quickly determine the overall bounding rect for a picture
    /// during the first picture traversal, which is needed for local scale
    /// determination, and render task size calculations.
    bounding_rect: LayoutRect,
    /// The list of primitive instances in this cluster.
    pub prim_instances: Vec<PrimitiveInstance>,
    /// Various flags / state for this cluster.
    pub flags: ClusterFlags,
    /// An optional scroll root to use if this cluster establishes a picture cache slice.
    pub cache_scroll_root: Option<SpatialNodeIndex>,
}

/// Where to insert a prim instance in a primitive list.
#[derive(Debug, Copy, Clone)]
enum PrimitiveListPosition {
    Begin,
    End,
}

impl PrimitiveCluster {
    /// Construct a new primitive cluster for a given positioning node.
    fn new(
        spatial_node_index: SpatialNodeIndex,
        flags: ClusterFlags,
    ) -> Self {
        PrimitiveCluster {
            bounding_rect: LayoutRect::zero(),
            spatial_node_index,
            flags,
            prim_instances: Vec::new(),
            cache_scroll_root: None,
        }
    }

    /// Return true if this cluster is compatible with the given params
    pub fn is_compatible(
        &self,
        spatial_node_index: SpatialNodeIndex,
        flags: ClusterFlags,
    ) -> bool {
        self.flags == flags && self.spatial_node_index == spatial_node_index
    }

    /// Add a primitive instance to this cluster, at the start or end
    fn push(
        &mut self,
        prim_instance: PrimitiveInstance,
        prim_size: LayoutSize,
    ) {
        let prim_rect = LayoutRect::new(
            prim_instance.prim_origin,
            prim_size,
        );
        // Clip the prim rect by its local clip; an empty intersection
        // contributes a zero rect to the cluster bounds.
        let culling_rect = prim_instance.local_clip_rect
            .intersection(&prim_rect)
            .unwrap_or_else(LayoutRect::zero);

        self.bounding_rect = self.bounding_rect.union(&culling_rect);
        self.prim_instances.push(prim_instance);
    }
}

/// A list of primitive instances that are added to a picture.
/// This ensures we can keep a list of primitives that
/// are pictures, for a fast initial traversal of the picture
/// tree without walking the instance list.
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct PrimitiveList {
    /// List of primitives grouped into clusters.
    pub clusters: Vec<PrimitiveCluster>,
}

impl PrimitiveList {
    /// Construct an empty primitive list. This is
    /// just used during the take_context / restore_context
    /// borrow check dance, which will be removed as the
    /// picture traversal pass is completed.
    pub fn empty() -> Self {
        PrimitiveList {
            clusters: Vec::new(),
        }
    }

    /// Add a primitive instance to this list, at the start or end
    fn push(
        &mut self,
        prim_instance: PrimitiveInstance,
        prim_size: LayoutSize,
        spatial_node_index: SpatialNodeIndex,
        prim_flags: PrimitiveFlags,
        insert_position: PrimitiveListPosition,
    ) {
        let mut flags = ClusterFlags::empty();

        // Pictures are always put into a new cluster, to make it faster to
        // iterate all pictures in a given primitive list.
        // Derive cluster flags from the primitive kind...
        match prim_instance.kind {
            PrimitiveInstanceKind::Picture { .. } => {
                flags.insert(ClusterFlags::IS_PICTURE);
            }
            PrimitiveInstanceKind::Backdrop { .. } => {
                flags.insert(ClusterFlags::IS_BACKDROP_FILTER);
            }
            PrimitiveInstanceKind::Clear { .. } => {
                flags.insert(ClusterFlags::IS_CLEAR_PRIMITIVE);
            }
            _ => {}
        }

        // ...and from the primitive flags.
        if prim_flags.contains(PrimitiveFlags::IS_BACKFACE_VISIBLE) {
            flags.insert(ClusterFlags::IS_BACKFACE_VISIBLE);
        }
        if prim_flags.contains(PrimitiveFlags::IS_SCROLLBAR_CONTAINER) {
            flags.insert(ClusterFlags::SCROLLBAR_CONTAINER);
        }

        // Insert the primitive into the first or last cluster as required
        match insert_position {
            PrimitiveListPosition::Begin => {
                // Inserting at the start always creates a fresh cluster.
                let mut cluster = PrimitiveCluster::new(
                    spatial_node_index,
                    flags,
                );
                cluster.push(prim_instance, prim_size);
                self.clusters.insert(0, cluster);
            }
            PrimitiveListPosition::End => {
                // Reuse the last cluster if it has the same spatial node
                // and flags; otherwise start a new one.
                if let Some(cluster) = self.clusters.last_mut() {
                    if cluster.is_compatible(spatial_node_index, flags) {
                        cluster.push(prim_instance, prim_size);
                        return;
                    }
                }

                let mut cluster = PrimitiveCluster::new(
                    spatial_node_index,
                    flags,
                );
                cluster.push(prim_instance, prim_size);
                self.clusters.push(cluster);
            }
        }
    }

    /// Add a primitive instance to the start of the list
    pub fn add_prim_to_start(
        &mut self,
        prim_instance: PrimitiveInstance,
        prim_size: LayoutSize,
        spatial_node_index: SpatialNodeIndex,
        flags: PrimitiveFlags,
    ) {
        self.push(
            prim_instance,
            prim_size,
            spatial_node_index,
            flags,
            PrimitiveListPosition::Begin,
        )
    }

    /// Add a primitive instance to the end of the list
    pub fn add_prim(
        &mut self,
        prim_instance: PrimitiveInstance,
        prim_size: LayoutSize,
        spatial_node_index: SpatialNodeIndex,
        flags: PrimitiveFlags,
    ) {
        self.push(
            prim_instance,
            prim_size,
            spatial_node_index,
            flags,
            PrimitiveListPosition::End,
        )
    }

    /// Returns true if there are no clusters (and thus primitives)
    pub fn is_empty(&self) -> bool {
        self.clusters.is_empty()
    }

    /// Add an existing cluster to this prim list
    pub fn add_cluster(&mut self, cluster: PrimitiveCluster) {
        self.clusters.push(cluster);
    }

    /// Merge another primitive list into this one
    pub fn extend(&mut self, prim_list: PrimitiveList) {
        self.clusters.extend(prim_list.clusters);
    }
}

/// Defines configuration options for a given picture primitive.
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct PictureOptions {
    /// If true, WR should inflate the bounding rect of primitives when
    /// using a filter effect that requires inflation.
    pub inflate_if_required: bool,
}

impl Default for PictureOptions {
    fn default() -> Self {
        PictureOptions {
            inflate_if_required: true,
        }
    }
}

#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct PicturePrimitive {
    /// List of primitives, and associated info for this picture.
    pub prim_list: PrimitiveList,

    #[cfg_attr(feature = "capture", serde(skip))]
    pub state: Option<PictureState>,

    /// If true, apply the local clip rect to primitive drawn
    /// in this picture.
    pub apply_local_clip_rect: bool,
    /// If false and transform ends up showing the back of the picture,
    /// it will be considered invisible.
    pub is_backface_visible: bool,

    // If a mix-blend-mode, contains the render task for
    // the readback of the framebuffer that we use to sample
    // from in the mix-blend-mode shader.
    // For drop-shadow filter, this will store the original
    // picture task which would be rendered on screen after
    // blur pass.
    pub secondary_render_task_id: Option<RenderTaskId>,
    /// How this picture should be composited.
    /// If None, don't composite - just draw directly on parent surface.
    pub requested_composite_mode: Option<PictureCompositeMode>,
    /// Requested rasterization space for this picture. It is
    /// a performance hint only.
    pub requested_raster_space: RasterSpace,

    pub raster_config: Option<RasterConfig>,
    pub context_3d: Picture3DContext<OrderedPictureChild>,

    // If requested as a frame output (for rendering
    // pages to a texture), this is the pipeline this
    // picture is the root of.
    pub frame_output_pipeline_id: Option<PipelineId>,

    // Optional cache handles for storing extra data
    // in the GPU cache, depending on the type of
    // picture.
    pub extra_gpu_data_handles: SmallVec<[GpuCacheHandle; 1]>,

    /// The spatial node index of this picture when it is
    /// composited into the parent picture.
    pub spatial_node_index: SpatialNodeIndex,

    /// The conservative local rect of this picture. It is
    /// built dynamically during the first picture traversal.
    /// It is composed of already snapped primitives.
    pub estimated_local_rect: LayoutRect,

    /// The local rect of this picture. It is built
    /// dynamically during the frame visibility update. It
    /// differs from the estimated_local_rect because it
    /// will not contain culled primitives, takes into
    /// account surface inflation and the whole clip chain.
    /// It is frequently the same, but may be quite
    /// different depending on how much was culled.
    pub precise_local_rect: LayoutRect,

    /// If false, this picture needs to (re)build segments
    /// if it supports segment rendering. This can occur
    /// if the local rect of the picture changes due to
    /// transform animation and/or scrolling.
    pub segments_are_valid: bool,

    /// If Some(..) the tile cache that is associated with this picture.
    #[cfg_attr(feature = "capture", serde(skip))] //TODO
    pub tile_cache: Option<Box<TileCacheInstance>>,

    /// The config options for this picture.
    pub options: PictureOptions,
}

impl PicturePrimitive {
    /// Dump this picture's state, and that of its child pictures
    /// (recursively), to the given tree printer for debugging.
    pub fn print<T: PrintTreePrinter>(
        &self,
        pictures: &[Self],
        self_index: PictureIndex,
        pt: &mut T,
    ) {
        pt.new_level(format!("{:?}", self_index));
        pt.add_item(format!("cluster_count: {:?}", self.prim_list.clusters.len()));
        pt.add_item(format!("estimated_local_rect: {:?}", self.estimated_local_rect));
        pt.add_item(format!("precise_local_rect: {:?}", self.precise_local_rect));
        pt.add_item(format!("spatial_node_index: {:?}", self.spatial_node_index));
        pt.add_item(format!("raster_config: {:?}", self.raster_config));
        pt.add_item(format!("requested_composite_mode: {:?}", self.requested_composite_mode));

        // Recurse into any child pictures.
        for cluster in &self.prim_list.clusters {
            if cluster.flags.contains(ClusterFlags::IS_PICTURE) {
                for instance in &cluster.prim_instances {
                    let index = match instance.kind {
                        PrimitiveInstanceKind::Picture { pic_index, .. } => pic_index,
                        _ => unreachable!(),
                    };
                    pictures[index.0].print(pictures, index, pt);
                }
            }
        }

        pt.end_level();
    }

    /// Returns true if this picture supports segmented rendering.
    pub fn can_use_segments(&self) -> bool {
        match self.raster_config {
            // TODO(gw): Support brush segment rendering for filter and mix-blend
            //           shaders. It's possible this already works, but I'm just
            //           applying this optimization to Blit mode for now.
            Some(RasterConfig { composite_mode: PictureCompositeMode::MixBlend(..), .. }) |
            Some(RasterConfig { composite_mode: PictureCompositeMode::Filter(..), .. }) |
            Some(RasterConfig { composite_mode: PictureCompositeMode::ComponentTransferFilter(..), .. }) |
            Some(RasterConfig { composite_mode: PictureCompositeMode::TileCache { .. }, .. }) |
            Some(RasterConfig { composite_mode: PictureCompositeMode::SvgFilter(..), ..
            }) |
            None => {
                false
            }
            // Only pure Blit surfaces created for a clip can use segments.
            Some(RasterConfig { composite_mode: PictureCompositeMode::Blit(reason), ..}) => {
                reason == BlitReason::CLIP
            }
        }
    }

    /// Resolve animated scene properties (currently only the opacity
    /// filter binding), and return whether the picture is visible with
    /// the resolved values.
    fn resolve_scene_properties(&mut self, properties: &SceneProperties) -> bool {
        match self.requested_composite_mode {
            Some(PictureCompositeMode::Filter(ref mut filter)) => {
                match *filter {
                    Filter::Opacity(ref binding, ref mut value) => {
                        *value = properties.resolve_float(binding);
                    }
                    _ => {}
                }

                filter.is_visible()
            }
            _ => true,
        }
    }

    /// Returns false only when a requested filter makes the picture
    /// invisible (e.g. zero opacity); all other pictures are visible.
    pub fn is_visible(&self) -> bool {
        match self.requested_composite_mode {
            Some(PictureCompositeMode::Filter(ref filter)) => {
                filter.is_visible()
            }
            _ => true,
        }
    }

    /// Destroy an existing picture. This is called just before
    /// a frame builder is replaced with a newly built scene. It
    /// gives a picture a chance to retain any cached tiles that
    /// may be useful during the next scene build.
    pub fn destroy(
        &mut self,
        retained_tiles: &mut RetainedTiles,
    ) {
        if let Some(tile_cache) = self.tile_cache.take() {
            if !tile_cache.tiles.is_empty() {
                // Hand the tiles (and recyclable allocations) over to the
                // retained state, keyed by the cache's slice.
                retained_tiles.caches.insert(
                    tile_cache.slice,
                    PictureCacheState {
                        tiles: tile_cache.tiles,
                        spatial_nodes: tile_cache.spatial_nodes,
                        opacity_bindings: tile_cache.opacity_bindings,
                        root_transform: tile_cache.root_transform,
                        current_tile_size: tile_cache.current_tile_size,
                        allocations: PictureCacheRecycledAllocations {
                            old_tiles: tile_cache.old_tiles,
                            old_opacity_bindings: tile_cache.old_opacity_bindings,
                            compare_cache: tile_cache.compare_cache,
                        },
                    },
                );
            }
        }
    }

    // TODO(gw): We have the PictureOptions struct available. We
    //           should move some of the parameter list in this
    //           method to be part of the PictureOptions, and
    //           avoid adding new parameters here.
    /// Construct a new picture primitive. Rects start as zero and the
    /// raster config as None; both are filled in during the picture
    /// traversal passes.
    pub fn new_image(
        requested_composite_mode: Option<PictureCompositeMode>,
        context_3d: Picture3DContext<OrderedPictureChild>,
        frame_output_pipeline_id: Option<PipelineId>,
        apply_local_clip_rect: bool,
        flags: PrimitiveFlags,
        requested_raster_space: RasterSpace,
        prim_list: PrimitiveList,
        spatial_node_index: SpatialNodeIndex,
        tile_cache: Option<Box<TileCacheInstance>>,
        options: PictureOptions,
    ) -> Self {
        PicturePrimitive {
            prim_list,
            state: None,
            secondary_render_task_id: None,
            requested_composite_mode,
            raster_config: None,
            context_3d,
            frame_output_pipeline_id,
            extra_gpu_data_handles: SmallVec::new(),
            apply_local_clip_rect,
            is_backface_visible: flags.contains(PrimitiveFlags::IS_BACKFACE_VISIBLE),
            requested_raster_space,
            spatial_node_index,
            estimated_local_rect: LayoutRect::zero(),
            precise_local_rect: LayoutRect::zero(),
            tile_cache,
            options,
            segments_are_valid: false,
        }
    }

    /// Gets the raster space to use when rendering the picture.
    /// Usually this would be the requested raster space. However, if the
    /// picture's spatial node or one of its ancestors is being pinch zoomed
    /// then we round it. This prevents us rasterizing glyphs for every minor
    /// change in zoom level, as that would be too expensive.
    pub fn get_raster_space(&self, clip_scroll_tree: &ClipScrollTree) -> RasterSpace {
        let spatial_node = &clip_scroll_tree.spatial_nodes[self.spatial_node_index.0 as usize];
        if spatial_node.is_ancestor_or_self_zooming {
            let scale_factors = clip_scroll_tree
                .get_relative_transform(self.spatial_node_index, ROOT_SPATIAL_NODE_INDEX)
                .scale_factors();

            // Round the scale up to the nearest power of 2, but don't exceed 8.
            // Take the larger of the two axis scales, clamp to 8, then
            // round up to the next power of two via log2/ceil.
            let scale = scale_factors.0.max(scale_factors.1).min(8.0);
            let rounded_up = 1 << scale.log2().ceil() as u32;

            RasterSpace::Local(rounded_up as f32)
        } else {
            self.requested_raster_space
        }
    }

    pub fn take_context(
        &mut self,
        pic_index: PictureIndex,
        clipped_prim_bounding_rect: WorldRect,
        surface_spatial_node_index: SpatialNodeIndex,
        raster_spatial_node_index: SpatialNodeIndex,
        parent_surface_index: SurfaceIndex,
        parent_subpixel_mode: SubpixelMode,
        frame_state: &mut FrameBuildingState,
        frame_context: &FrameBuildingContext,
        scratch: &mut PrimitiveScratchBuffer,
    ) -> Option<(PictureContext, PictureState, PrimitiveList)> {
        if !self.is_visible() {
            return None;
        }

        // Extract the raster and surface spatial nodes from the raster
        // config, if this picture establishes a surface. Otherwise just
        // pass in the spatial node indices from the parent context.
        let (raster_spatial_node_index, surface_spatial_node_index, surface_index, inflation_factor) = match self.raster_config {
            Some(ref raster_config) => {
                let surface = &frame_state.surfaces[raster_config.surface_index.0];

                (
                    surface.raster_spatial_node_index,
                    self.spatial_node_index,
                    raster_config.surface_index,
                    surface.inflation_factor,
                )
            }
            None => {
                (
                    raster_spatial_node_index,
                    surface_spatial_node_index,
                    parent_surface_index,
                    0.0,
                )
            }
        };

        let map_pic_to_world = SpaceMapper::new_with_target(
            ROOT_SPATIAL_NODE_INDEX,
            surface_spatial_node_index,
            frame_context.global_screen_world_rect,
            frame_context.clip_scroll_tree,
        );

        // Unmap the world bounds into picture space, falling back to the
        // maximal rect if the mapping can't be inverted.
        let pic_bounds = map_pic_to_world.unmap(&map_pic_to_world.bounds)
            .unwrap_or_else(PictureRect::max_rect);

        let map_local_to_pic = SpaceMapper::new(
            surface_spatial_node_index,
            pic_bounds,
        );

        let (map_raster_to_world, map_pic_to_raster) = create_raster_mappers(
            surface_spatial_node_index,
            raster_spatial_node_index,
            frame_context.global_screen_world_rect,
            frame_context.clip_scroll_tree,
        );

        // A plane splitter is only needed at the root of a 3D context.
        let plane_splitter = match self.context_3d {
            Picture3DContext::Out => {
                None
            }
            Picture3DContext::In { root_data: Some(_), ..
} => { Some(PlaneSplitter::new()) } Picture3DContext::In { root_data: None, .. } => { None } }; match self.raster_config { Some(ref raster_config) => { let pic_rect = PictureRect::from_untyped(&self.precise_local_rect.to_untyped()); let device_pixel_scale = frame_state .surfaces[raster_config.surface_index.0] .device_pixel_scale; let (clipped, unclipped) = match get_raster_rects( pic_rect, &map_pic_to_raster, &map_raster_to_world, clipped_prim_bounding_rect, device_pixel_scale, ) { Some(info) => info, None => { return None } }; let transform = map_pic_to_raster.get_transform(); let dep_info = match raster_config.composite_mode { PictureCompositeMode::Filter(Filter::Blur(blur_radius)) => { let blur_std_deviation = blur_radius * device_pixel_scale.0; let scale_factors = scale_factors(&transform); let blur_std_deviation = DeviceSize::new( blur_std_deviation * scale_factors.0, blur_std_deviation * scale_factors.1 ); let mut device_rect = if self.options.inflate_if_required { let inflation_factor = frame_state.surfaces[raster_config.surface_index.0].inflation_factor; let inflation_factor = (inflation_factor * device_pixel_scale.0).ceil(); // The clipped field is the part of the picture that is visible // on screen. The unclipped field is the screen-space rect of // the complete picture, if no screen / clip-chain was applied // (this includes the extra space for blur region). To ensure // that we draw a large enough part of the picture to get correct // blur results, inflate that clipped area by the blur range, and // then intersect with the total screen rect, to minimize the // allocation size. // We cast clipped to f32 instead of casting unclipped to i32 // because unclipped can overflow an i32. 
let device_rect = clipped.to_f32() .inflate(inflation_factor, inflation_factor) .intersection(&unclipped) .unwrap(); match device_rect.try_cast::<i32>() { Some(rect) => rect, None => { return None } } } else { clipped }; let original_size = device_rect.size; // Adjust the size to avoid introducing sampling errors during the down-scaling passes. // what would be even better is to rasterize the picture at the down-scaled size // directly. device_rect.size = RenderTask::adjusted_blur_source_size( device_rect.size, blur_std_deviation, ); let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &device_rect, device_pixel_scale, true, ); let picture_task = RenderTask::new_picture( RenderTaskLocation::Dynamic(None, device_rect.size), unclipped.size, pic_index, device_rect.origin, uv_rect_kind, surface_spatial_node_index, device_pixel_scale, PrimitiveVisibilityMask::all(), None, ); let picture_task_id = frame_state.render_tasks.add(picture_task); let blur_render_task_id = RenderTask::new_blur( blur_std_deviation, picture_task_id, frame_state.render_tasks, RenderTargetKind::Color, ClearMode::Transparent, None, original_size, ); Some((blur_render_task_id, picture_task_id)) } PictureCompositeMode::Filter(Filter::DropShadows(ref shadows)) => { let mut max_std_deviation = 0.0; for shadow in shadows { // TODO(nical) presumably we should compute the clipped rect for each shadow // and compute the union of them to determine what we need to rasterize and blur? max_std_deviation = f32::max(max_std_deviation, shadow.blur_radius * device_pixel_scale.0); } max_std_deviation = max_std_deviation.round(); let max_blur_range = (max_std_deviation * BLUR_SAMPLE_SCALE).ceil(); // We cast clipped to f32 instead of casting unclipped to i32 // because unclipped can overflow an i32. 
let device_rect = clipped.to_f32() .inflate(max_blur_range, max_blur_range) .intersection(&unclipped) .unwrap(); let mut device_rect = match device_rect.try_cast::<i32>() { Some(rect) => rect, None => { return None } }; device_rect.size = RenderTask::adjusted_blur_source_size( device_rect.size, DeviceSize::new(max_std_deviation, max_std_deviation), ); let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &device_rect, device_pixel_scale, true, ); let mut picture_task = RenderTask::new_picture( RenderTaskLocation::Dynamic(None, device_rect.size), unclipped.size, pic_index, device_rect.origin, uv_rect_kind, surface_spatial_node_index, device_pixel_scale, PrimitiveVisibilityMask::all(), None, ); picture_task.mark_for_saving(); let picture_task_id = frame_state.render_tasks.add(picture_task); self.secondary_render_task_id = Some(picture_task_id); let mut blur_tasks = BlurTaskCache::default(); self.extra_gpu_data_handles.resize(shadows.len(), GpuCacheHandle::new()); let mut blur_render_task_id = picture_task_id; for shadow in shadows { let std_dev = f32::round(shadow.blur_radius * device_pixel_scale.0); blur_render_task_id = RenderTask::new_blur( DeviceSize::new(std_dev, std_dev), picture_task_id, frame_state.render_tasks, RenderTargetKind::Color, ClearMode::Transparent, Some(&mut blur_tasks), device_rect.size, ); } // TODO(nical) the second one should to be the blur's task id but we have several blurs now Some((blur_render_task_id, picture_task_id)) } PictureCompositeMode::MixBlend(..) 
if !frame_context.fb_config.gpu_supports_advanced_blend => { let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &clipped, device_pixel_scale, true, ); let picture_task = RenderTask::new_picture( RenderTaskLocation::Dynamic(None, clipped.size), unclipped.size, pic_index, clipped.origin, uv_rect_kind, surface_spatial_node_index, device_pixel_scale, PrimitiveVisibilityMask::all(), None, ); let readback_task_id = frame_state.render_tasks.add( RenderTask::new_readback(clipped) ); frame_state.render_tasks.add_dependency( frame_state.surfaces[parent_surface_index.0].render_tasks.unwrap().port, readback_task_id, ); self.secondary_render_task_id = Some(readback_task_id); let render_task_id = frame_state.render_tasks.add(picture_task); Some((render_task_id, render_task_id)) } PictureCompositeMode::Filter(..) => { let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &clipped, device_pixel_scale, true, ); let picture_task = RenderTask::new_picture( RenderTaskLocation::Dynamic(None, clipped.size), unclipped.size, pic_index, clipped.origin, uv_rect_kind, surface_spatial_node_index, device_pixel_scale, PrimitiveVisibilityMask::all(), None, ); let render_task_id = frame_state.render_tasks.add(picture_task); Some((render_task_id, render_task_id)) } PictureCompositeMode::ComponentTransferFilter(..) => { let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &clipped, device_pixel_scale, true, ); let picture_task = RenderTask::new_picture( RenderTaskLocation::Dynamic(None, clipped.size), unclipped.size, pic_index, clipped.origin, uv_rect_kind, surface_spatial_node_index, device_pixel_scale, PrimitiveVisibilityMask::all(), None, ); let render_task_id = frame_state.render_tasks.add(picture_task); Some((render_task_id, render_task_id)) } PictureCompositeMode::TileCache { .. } => { let tile_cache = self.tile_cache.as_mut().unwrap(); let mut first = true; // Get the overall world space rect of the picture cache. 
Used to clip // the tile rects below for occlusion testing to the relevant area. let local_clip_rect = tile_cache.local_rect .intersection(&tile_cache.local_clip_rect) .unwrap_or(PictureRect::zero()); let world_clip_rect = map_pic_to_world .map(&local_clip_rect) .expect("bug: unable to map clip rect"); for key in &tile_cache.tiles_to_draw { let tile = tile_cache.tiles.get_mut(key).expect("bug: no tile found!"); // Get the world space rect that this tile will actually occupy on screem let tile_draw_rect = match world_clip_rect.intersection(&tile.world_rect) { Some(rect) => rect, None => { tile.is_visible = false; continue; } }; // If that draw rect is occluded by some set of tiles in front of it, // then mark it as not visible and skip drawing. When it's not occluded // it will fail this test, and get rasterized by the render task setup // code below. if frame_state.composite_state.is_tile_occluded(tile_cache.slice, tile_draw_rect) { // If this tile has an allocated native surface, free it, since it's completely // occluded. We will need to re-allocate this surface if it becomes visible, // but that's likely to be rare (e.g. when there is no content display list // for a frame or two during a tab switch). let surface = tile.surface.as_mut().expect("no tile surface set!"); if let TileSurface::Texture { descriptor: SurfaceTextureDescriptor::NativeSurface { id, .. }, .. 
} = surface { if let Some(id) = id.take() { frame_state.resource_cache.destroy_compositor_surface(id); } } tile.is_visible = false; continue; } if frame_context.debug_flags.contains(DebugFlags::PICTURE_CACHING_DBG) { tile.root.draw_debug_rects( &map_pic_to_world, tile.is_opaque, scratch, frame_context.global_device_pixel_scale, ); let label_offset = DeviceVector2D::new(20.0, 30.0); let tile_device_rect = tile.world_rect * frame_context.global_device_pixel_scale; if tile_device_rect.size.height >= label_offset.y { let surface = tile.surface.as_ref().expect("no tile surface set!"); scratch.push_debug_string( tile_device_rect.origin + label_offset, debug_colors::RED, format!("{:?}: s={} is_opaque={} surface={}", tile.id, tile_cache.slice, tile.is_opaque, surface.kind(), ), ); } } if let TileSurface::Texture { descriptor, .. } = tile.surface.as_mut().unwrap() { match descriptor { SurfaceTextureDescriptor::TextureCache { ref handle, .. } => { // Invalidate if the backing texture was evicted. if frame_state.resource_cache.texture_cache.is_allocated(handle) { // Request the backing texture so it won't get evicted this frame. // We specifically want to mark the tile texture as used, even // if it's detected not visible below and skipped. This is because // we maintain the set of tiles we care about based on visibility // during pre_update. If a tile still exists after that, we are // assuming that it's either visible or we want to retain it for // a while in case it gets scrolled back onto screen soon. // TODO(gw): Consider switching to manual eviction policy? frame_state.resource_cache.texture_cache.request(handle, frame_state.gpu_cache); } else { // If the texture was evicted on a previous frame, we need to assume // that the entire tile rect is dirty. tile.invalidate(None, InvalidationReason::NoTexture); } } SurfaceTextureDescriptor::NativeSurface { id, .. 
} => { if id.is_none() { // There is no current surface allocation, so ensure the entire tile is invalidated tile.invalidate(None, InvalidationReason::NoSurface); } } } } // Update the world dirty rect tile.world_dirty_rect = map_pic_to_world.map(&tile.dirty_rect).expect("bug"); if tile.is_valid { continue; } // Ensure that this texture is allocated. if let TileSurface::Texture { ref mut descriptor, ref mut visibility_mask } = tile.surface.as_mut().unwrap() { match descriptor { SurfaceTextureDescriptor::TextureCache { ref mut handle } => { if !frame_state.resource_cache.texture_cache.is_allocated(handle) { frame_state.resource_cache.texture_cache.update_picture_cache( tile_cache.current_tile_size, handle, frame_state.gpu_cache, ); } } SurfaceTextureDescriptor::NativeSurface { id, size } => { if id.is_none() { *id = Some(frame_state.resource_cache.create_compositor_surface( *size, tile.is_opaque, )); } } } *visibility_mask = PrimitiveVisibilityMask::empty(); let dirty_region_index = tile_cache.dirty_region.dirty_rects.len(); // If we run out of dirty regions, then force the last dirty region to // be a union of any remaining regions. This is an inefficiency, in that // we'll add items to batches later on that are redundant / outside this // tile, but it's really rare except in pathological cases (even on a // 4k screen, the typical dirty region count is < 16). 
if dirty_region_index < PrimitiveVisibilityMask::MAX_DIRTY_REGIONS { visibility_mask.set_visible(dirty_region_index); tile_cache.dirty_region.push( tile.world_dirty_rect, *visibility_mask, ); } else { visibility_mask.set_visible(PrimitiveVisibilityMask::MAX_DIRTY_REGIONS - 1); tile_cache.dirty_region.include_rect( PrimitiveVisibilityMask::MAX_DIRTY_REGIONS - 1, tile.world_dirty_rect, ); } let content_origin_f = tile.world_rect.origin * device_pixel_scale; let content_origin = content_origin_f.round(); debug_assert!((content_origin_f.x - content_origin.x).abs() < 0.01); debug_assert!((content_origin_f.y - content_origin.y).abs() < 0.01); // Get a task-local scissor rect for the dirty region of this // picture cache task. let scissor_rect = tile.world_dirty_rect.translate( -tile.world_rect.origin.to_vector() ); // The world rect is guaranteed to be device pixel aligned, by the tile // sizing code in tile::pre_update. However, there might be some // small floating point accuracy issues (these were observed on ARM // CPUs). Round the rect here before casting to integer device pixels // to ensure the scissor rect is correct. let scissor_rect = (scissor_rect * device_pixel_scale).round(); let surface = descriptor.resolve(frame_state.resource_cache); let task = RenderTask::new_picture( RenderTaskLocation::PictureCache { size: tile_cache.current_tile_size, surface, }, tile_cache.current_tile_size.to_f32(), pic_index, content_origin.to_i32(), UvRectKind::Rect, surface_spatial_node_index, device_pixel_scale, *visibility_mask, Some(scissor_rect.to_i32()), ); let render_task_id = frame_state.render_tasks.add(task); frame_state.render_tasks.add_dependency( frame_state.surfaces[parent_surface_index.0].render_tasks.unwrap().port, render_task_id, ); if first { // TODO(gw): Maybe we can restructure this code to avoid the // first hack here. Or at least explain it with a follow up // bug. 
frame_state.surfaces[raster_config.surface_index.0].render_tasks = Some(SurfaceRenderTasks { root: render_task_id, port: render_task_id, }); first = false; } } // Now that the tile is valid, reset the dirty rect. tile.dirty_rect = PictureRect::zero(); tile.is_valid = true; } // If invalidation debugging is enabled, dump the picture cache state to a tree printer. if frame_context.debug_flags.contains(DebugFlags::INVALIDATION_DBG) { tile_cache.print(); } None } PictureCompositeMode::MixBlend(..) | PictureCompositeMode::Blit(_) => { // The SplitComposite shader used for 3d contexts doesn't snap // to pixels, so we shouldn't snap our uv coordinates either. let supports_snapping = match self.context_3d { Picture3DContext::In{ .. } => false, _ => true, }; let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &clipped, device_pixel_scale, supports_snapping, ); let picture_task = RenderTask::new_picture( RenderTaskLocation::Dynamic(None, clipped.size), unclipped.size, pic_index, clipped.origin, uv_rect_kind, surface_spatial_node_index, device_pixel_scale, PrimitiveVisibilityMask::all(), None, ); let render_task_id = frame_state.render_tasks.add(picture_task); Some((render_task_id, render_task_id)) } PictureCompositeMode::SvgFilter(ref primitives, ref filter_datas) => { let uv_rect_kind = calculate_uv_rect_kind( &pic_rect, &transform, &clipped, device_pixel_scale, true, ); let picture_task = RenderTask::new_picture( RenderTaskLocation::Dynamic(None, clipped.size), unclipped.size, pic_index, clipped.origin, uv_rect_kind, surface_spatial_node_index, device_pixel_scale, PrimitiveVisibilityMask::all(), None, ); let picture_task_id = frame_state.render_tasks.add(picture_task); let filter_task_id = RenderTask::new_svg_filter( primitives, filter_datas, &mut frame_state.render_tasks, clipped.size, uv_rect_kind, picture_task_id, device_pixel_scale, ); Some((filter_task_id, picture_task_id)) } }; if let Some((root, port)) = dep_info { 
frame_state.surfaces[raster_config.surface_index.0].render_tasks = Some(SurfaceRenderTasks { root, port, }); frame_state.render_tasks.add_dependency( frame_state.surfaces[parent_surface_index.0].render_tasks.unwrap().port, root, ); } } None => {} }; let state = PictureState { //TODO: check for MAX_CACHE_SIZE here? map_local_to_pic, map_pic_to_world, map_pic_to_raster, map_raster_to_world, plane_splitter, }; let mut dirty_region_count = 0; // If this is a picture cache, push the dirty region to ensure any // child primitives are culled and clipped to the dirty rect(s). if let Some(RasterConfig { composite_mode: PictureCompositeMode::TileCache { .. }, .. }) = self.raster_config { let dirty_region = self.tile_cache.as_ref().unwrap().dirty_region.clone(); frame_state.push_dirty_region(dirty_region); dirty_region_count += 1; } if inflation_factor > 0.0 { let inflated_region = frame_state.current_dirty_region().inflate(inflation_factor); frame_state.push_dirty_region(inflated_region); dirty_region_count += 1; } // Disallow subpixel AA if an intermediate surface is needed. // TODO(lsalzman): allow overriding parent if intermediate surface is opaque let (is_passthrough, subpixel_mode) = match self.raster_config { Some(RasterConfig { ref composite_mode, .. }) => { let subpixel_mode = match composite_mode { PictureCompositeMode::TileCache { .. } => { self.tile_cache.as_ref().unwrap().subpixel_mode } PictureCompositeMode::Blit(..) | PictureCompositeMode::ComponentTransferFilter(..) | PictureCompositeMode::Filter(..) | PictureCompositeMode::MixBlend(..) | PictureCompositeMode::SvgFilter(..) => { // TODO(gw): We can take advantage of the same logic that // exists in the opaque rect detection for tile // caches, to allow subpixel text on other surfaces // that can be detected as opaque. 
SubpixelMode::Deny } }; (false, subpixel_mode) } None => { (true, SubpixelMode::Allow) } }; // Still disable subpixel AA if parent forbids it let subpixel_mode = match (parent_subpixel_mode, subpixel_mode) { (SubpixelMode::Allow, SubpixelMode::Allow) => SubpixelMode::Allow, _ => SubpixelMode::Deny, }; let context = PictureContext { pic_index, apply_local_clip_rect: self.apply_local_clip_rect, is_passthrough, raster_spatial_node_index, surface_spatial_node_index, surface_index, dirty_region_count, subpixel_mode, }; let prim_list = mem::replace(&mut self.prim_list, PrimitiveList::empty()); Some((context, state, prim_list)) } pub fn restore_context( &mut self, prim_list: PrimitiveList, context: PictureContext, state: PictureState, frame_state: &mut FrameBuildingState, ) { // Pop any dirty regions this picture set for _ in 0 .. context.dirty_region_count { frame_state.pop_dirty_region(); } self.prim_list = prim_list; self.state = Some(state); } pub fn take_state(&mut self) -> PictureState { self.state.take().expect("bug: no state present!") } /// Add a primitive instance to the plane splitter. The function would generate /// an appropriate polygon, clip it against the frustum, and register with the /// given plane splitter. pub fn add_split_plane( splitter: &mut PlaneSplitter, clip_scroll_tree: &ClipScrollTree, prim_spatial_node_index: SpatialNodeIndex, original_local_rect: LayoutRect, combined_local_clip_rect: &LayoutRect, world_rect: WorldRect, plane_split_anchor: PlaneSplitAnchor, ) -> bool { let transform = clip_scroll_tree .get_world_transform(prim_spatial_node_index); let matrix = transform.clone().into_transform().cast(); // Apply the local clip rect here, before splitting. This is // because the local clip rect can't be applied in the vertex // shader for split composites, since we are drawing polygons // rather that rectangles. The interpolation still works correctly // since we determine the UVs by doing a bilerp with a factor // from the original local rect. 
let local_rect = match original_local_rect .intersection(combined_local_clip_rect) { Some(rect) => rect.cast(), None => return false, }; let world_rect = world_rect.cast(); match transform { CoordinateSpaceMapping::Local => { let polygon = Polygon::from_rect( local_rect * Scale::new(1.0), plane_split_anchor, ); splitter.add(polygon); } CoordinateSpaceMapping::ScaleOffset(scale_offset) if scale_offset.scale == Vector2D::new(1.0, 1.0) => { let inv_matrix = scale_offset.inverse().to_transform().cast(); let polygon = Polygon::from_transformed_rect_with_inverse( local_rect, &matrix, &inv_matrix, plane_split_anchor, ).unwrap(); splitter.add(polygon); } CoordinateSpaceMapping::ScaleOffset(_) | CoordinateSpaceMapping::Transform(_) => { let mut clipper = Clipper::new(); let results = clipper.clip_transformed( Polygon::from_rect( local_rect, plane_split_anchor, ), &matrix, Some(world_rect), ); if let Ok(results) = results { for poly in results { splitter.add(poly); } } } } true } pub fn resolve_split_planes( &mut self, splitter: &mut PlaneSplitter, gpu_cache: &mut GpuCache, clip_scroll_tree: &ClipScrollTree, ) { let ordered = match self.context_3d { Picture3DContext::In { root_data: Some(ref mut list), .. } => list, _ => panic!("Expected to find 3D context root"), }; ordered.clear(); // Process the accumulated split planes and order them for rendering. // Z axis is directed at the screen, `sort` is ascending, and we need back-to-front order. 
for poly in splitter.sort(vec3(0.0, 0.0, 1.0)) { let cluster = &self.prim_list.clusters[poly.anchor.cluster_index]; let spatial_node_index = cluster.spatial_node_index; let transform = match clip_scroll_tree .get_world_transform(spatial_node_index) .inverse() { Some(transform) => transform.into_transform(), // logging this would be a bit too verbose None => continue, }; let local_points = [ transform.transform_point3d(poly.points[0].cast()).unwrap(), transform.transform_point3d(poly.points[1].cast()).unwrap(), transform.transform_point3d(poly.points[2].cast()).unwrap(), transform.transform_point3d(poly.points[3].cast()).unwrap(), ]; let gpu_blocks = [ [local_points[0].x, local_points[0].y, local_points[1].x, local_points[1].y].into(), [local_points[2].x, local_points[2].y, local_points[3].x, local_points[3].y].into(), ]; let gpu_handle = gpu_cache.push_per_frame_blocks(&gpu_blocks); let gpu_address = gpu_cache.get_address(&gpu_handle); ordered.push(OrderedPictureChild { anchor: poly.anchor, spatial_node_index, gpu_address, }); } } /// Called during initial picture traversal, before we know the /// bounding rect of children. It is possible to determine the /// surface / raster config now though. fn pre_update( &mut self, state: &mut PictureUpdateState, frame_context: &FrameBuildingContext, ) -> Option<PrimitiveList> { // Reset raster config in case we early out below. self.raster_config = None; // Resolve animation properties, and early out if the filter // properties make this picture invisible. if !self.resolve_scene_properties(frame_context.scene_properties) { return None; } // For out-of-preserve-3d pictures, the backface visibility is determined by // the local transform only. // Note: we aren't taking the transform relativce to the parent picture, // since picture tree can be more dense than the corresponding spatial tree. 
if !self.is_backface_visible { if let Picture3DContext::Out = self.context_3d { match frame_context.clip_scroll_tree.get_local_visible_face(self.spatial_node_index) { VisibleFace::Front => {} VisibleFace::Back => return None, } } } // Push information about this pic on stack for children to read. state.push_picture(PictureInfo { _spatial_node_index: self.spatial_node_index, }); // See if this picture actually needs a surface for compositing. let actual_composite_mode = match self.requested_composite_mode { Some(PictureCompositeMode::Filter(ref filter)) if filter.is_noop() => None, Some(PictureCompositeMode::TileCache { .. }) => { // Only allow picture caching composite mode if global picture caching setting // is enabled this frame. if state.composite_state.picture_caching_is_enabled { Some(PictureCompositeMode::TileCache { }) } else { None } }, ref mode => mode.clone(), }; if let Some(composite_mode) = actual_composite_mode { // Retrieve the positioning node information for the parent surface. let parent_raster_node_index = state.current_surface().raster_spatial_node_index; let surface_spatial_node_index = self.spatial_node_index; // This inflation factor is to be applied to all primitives within the surface. let inflation_factor = match composite_mode { PictureCompositeMode::Filter(Filter::Blur(blur_radius)) => { // Only inflate if the caller hasn't already inflated // the bounding rects for this filter. if self.options.inflate_if_required { // The amount of extra space needed for primitives inside // this picture to ensure the visibility check is correct. 
BLUR_SAMPLE_SCALE * blur_radius } else { 0.0 } } PictureCompositeMode::SvgFilter(ref primitives, _) if self.options.inflate_if_required => { let mut max = 0.0; for primitive in primitives { if let FilterPrimitiveKind::Blur(ref blur) = primitive.kind { max = f32::max(max, blur.radius * BLUR_SAMPLE_SCALE); } } max } _ => { 0.0 } }; // Filters must be applied before transforms, to do this, we can mark this picture as establishing a raster root. let has_svg_filter = if let PictureCompositeMode::SvgFilter(..) = composite_mode { true } else { false }; // Check if there is perspective or if an SVG filter is applied, and thus whether a new // rasterization root should be established. let establishes_raster_root = has_svg_filter || frame_context.clip_scroll_tree .get_relative_transform(surface_spatial_node_index, parent_raster_node_index) .is_perspective(); let surface = SurfaceInfo::new( surface_spatial_node_index, if establishes_raster_root { surface_spatial_node_index } else { parent_raster_node_index }, inflation_factor, frame_context.global_screen_world_rect, &frame_context.clip_scroll_tree, frame_context.global_device_pixel_scale, ); self.raster_config = Some(RasterConfig { composite_mode, establishes_raster_root, surface_index: state.push_surface(surface), }); } Some(mem::replace(&mut self.prim_list, PrimitiveList::empty())) } /// Called after updating child pictures during the initial /// picture traversal. fn post_update( &mut self, prim_list: PrimitiveList, state: &mut PictureUpdateState, frame_context: &FrameBuildingContext, data_stores: &mut DataStores, ) { // Restore the pictures list used during recursion. self.prim_list = prim_list; // Pop the state information about this picture. state.pop_picture(); for cluster in &mut self.prim_list.clusters { cluster.flags.remove(ClusterFlags::IS_VISIBLE); // Skip the cluster if backface culled. 
if !cluster.flags.contains(ClusterFlags::IS_BACKFACE_VISIBLE) { // For in-preserve-3d primitives and pictures, the backface visibility is // evaluated relative to the containing block. if let Picture3DContext::In { ancestor_index, .. } = self.context_3d { match frame_context.clip_scroll_tree .get_relative_transform(cluster.spatial_node_index, ancestor_index) .visible_face() { VisibleFace::Back => continue, VisibleFace::Front => (), } } } // No point including this cluster if it can't be transformed let spatial_node = &frame_context .clip_scroll_tree .spatial_nodes[cluster.spatial_node_index.0 as usize]; if !spatial_node.invertible { continue; } // Update any primitives/cluster bounding rects that can only be done // with information available during frame building. if cluster.flags.contains(ClusterFlags::IS_BACKDROP_FILTER) { let backdrop_to_world_mapper = SpaceMapper::new_with_target( ROOT_SPATIAL_NODE_INDEX, cluster.spatial_node_index, LayoutRect::max_rect(), frame_context.clip_scroll_tree, ); for prim_instance in &mut cluster.prim_instances { match prim_instance.kind { PrimitiveInstanceKind::Backdrop { data_handle, .. } => { // The actual size and clip rect of this primitive are determined by computing the bounding // box of the projected rect of the backdrop-filter element onto the backdrop. let prim_data = &mut data_stores.backdrop[data_handle]; let spatial_node_index = prim_data.kind.spatial_node_index; // We cannot use the relative transform between the backdrop and the element because // that doesn't take into account any projection transforms that both spatial nodes are children of. // Instead, we first project from the element to the world space and get a flattened 2D bounding rect // in the screen space, we then map this rect from the world space to the backdrop space to get the // proper bounding box where the backdrop-filter needs to be processed. 
let prim_to_world_mapper = SpaceMapper::new_with_target( ROOT_SPATIAL_NODE_INDEX, spatial_node_index, LayoutRect::max_rect(), frame_context.clip_scroll_tree, ); // First map to the screen and get a flattened rect let prim_rect = prim_to_world_mapper.map(&prim_data.kind.border_rect).unwrap_or_else(LayoutRect::zero); // Backwards project the flattened rect onto the backdrop let prim_rect = backdrop_to_world_mapper.unmap(&prim_rect).unwrap_or_else(LayoutRect::zero); // TODO(aosmond): Is this safe? Updating the primitive size during // frame building is usually problematic since scene building will cache // the primitive information in the GPU already. prim_instance.prim_origin = prim_rect.origin; prim_data.common.prim_size = prim_rect.size; prim_instance.local_clip_rect = prim_rect; // Update the cluster bounding rect now that we have the backdrop rect. cluster.bounding_rect = cluster.bounding_rect.union(&prim_rect); } _ => { panic!("BUG: unexpected deferred primitive kind for cluster updates"); } } } } // Map the cluster bounding rect into the space of the surface, and // include it in the surface bounding rect. let surface = state.current_surface_mut(); surface.map_local_to_surface.set_target_spatial_node( cluster.spatial_node_index, frame_context.clip_scroll_tree, ); // Mark the cluster visible, since it passed the invertible and // backface checks. In future, this will include spatial clustering // which will allow the frame building code to skip most of the // current per-primitive culling code. cluster.flags.insert(ClusterFlags::IS_VISIBLE); if let Some(cluster_rect) = surface.map_local_to_surface.map(&cluster.bounding_rect) { surface.rect = surface.rect.union(&cluster_rect); } } // If this picture establishes a surface, then map the surface bounding // rect into the parent surface coordinate space, and propagate that up // to the parent. 
if let Some(ref mut raster_config) = self.raster_config { let surface = state.current_surface_mut(); // Inflate the local bounding rect if required by the filter effect. // This inflaction factor is to be applied to the surface itself. if self.options.inflate_if_required { surface.rect = raster_config.composite_mode.inflate_picture_rect(surface.rect, surface.inflation_factor); // The picture's local rect is calculated as the union of the // snapped primitive rects, which should result in a snapped // local rect, unless it was inflated. This is also done during // update visibility when calculating the picture's precise // local rect. let snap_surface_to_raster = SpaceSnapper::new_with_target( surface.raster_spatial_node_index, self.spatial_node_index, surface.device_pixel_scale, frame_context.clip_scroll_tree, ); surface.rect = snap_surface_to_raster.snap_rect(&surface.rect); } let mut surface_rect = surface.rect * Scale::new(1.0); // Pop this surface from the stack let surface_index = state.pop_surface(); debug_assert_eq!(surface_index, raster_config.surface_index); // Check if any of the surfaces can't be rasterized in local space but want to. if raster_config.establishes_raster_root { if surface_rect.size.width > MAX_SURFACE_SIZE || surface_rect.size.height > MAX_SURFACE_SIZE { raster_config.establishes_raster_root = false; state.are_raster_roots_assigned = false; } } // Set the estimated and precise local rects. The precise local rect // may be changed again during frame visibility. self.estimated_local_rect = surface_rect; self.precise_local_rect = surface_rect; // Drop shadows draw both a content and shadow rect, so need to expand the local // rect of any surfaces to be composited in parent surfaces correctly. 
            // Expand the surface rect we report upward by each drop-shadow
            // offset, so parent surfaces allocate space for the shadows too.
            match raster_config.composite_mode {
                PictureCompositeMode::Filter(Filter::DropShadows(ref shadows)) => {
                    for shadow in shadows {
                        let shadow_rect = self.estimated_local_rect.translate(shadow.offset);
                        surface_rect = surface_rect.union(&shadow_rect);
                    }
                }
                _ => {}
            }

            // Propagate up to parent surface, now that we know this surface's static rect
            let parent_surface = state.current_surface_mut();
            parent_surface.map_local_to_surface.set_target_spatial_node(
                self.spatial_node_index,
                frame_context.clip_scroll_tree,
            );
            // NOTE(review): if the map fails (non-invertible transform,
            // presumably) the parent rect is simply left unchanged.
            if let Some(parent_surface_rect) = parent_surface
                .map_local_to_surface
                .map(&surface_rect)
            {
                parent_surface.rect = parent_surface.rect.union(&parent_surface_rect);
            }
        }
    }

    /// Prepare this picture for rendering. Resolves any accumulated 3d
    /// split planes, then uploads composite-mode-specific data (drop-shadow
    /// colors/rects, color matrices, flood colors, component-transfer filter
    /// data) to the GPU cache. Returns `true` on every current path.
    pub fn prepare_for_render(
        &mut self,
        frame_context: &FrameBuildingContext,
        frame_state: &mut FrameBuildingState,
        data_stores: &mut DataStores,
    ) -> bool {
        let mut pic_state_for_children = self.take_state();

        // Order the 3d-context children back-to-front before any GPU uploads.
        if let Some(ref mut splitter) = pic_state_for_children.plane_splitter {
            self.resolve_split_planes(
                splitter,
                &mut frame_state.gpu_cache,
                &frame_context.clip_scroll_tree,
            );
        }

        // Pictures without a composited surface have nothing to upload.
        let raster_config = match self.raster_config {
            Some(ref mut raster_config) => raster_config,
            None => {
                return true
            }
        };

        // TODO(gw): Almost all of the Picture types below use extra_gpu_cache_data
        //           to store the same type of data. The exception is the filter
        //           with a ColorMatrix, which stores the color matrix here. It's
        //           probably worth tidying this code up to be a bit more consistent.
        //           Perhaps store the color matrix after the common data, even though
        //           it's not used by that shader.

        match raster_config.composite_mode {
            PictureCompositeMode::TileCache { .. } => {}
            PictureCompositeMode::Filter(Filter::Blur(..)) => {}
            PictureCompositeMode::Filter(Filter::DropShadows(ref shadows)) => {
                // One GPU cache handle per shadow; resize keeps existing handles.
                self.extra_gpu_data_handles.resize(shadows.len(), GpuCacheHandle::new());
                for (shadow, extra_handle) in shadows.iter().zip(self.extra_gpu_data_handles.iter_mut()) {
                    if let Some(mut request) = frame_state.gpu_cache.request(extra_handle) {
                        // Basic brush primitive header is (see end of prepare_prim_for_render_inner in prim_store.rs)
                        //  [brush specific data]
                        //  [segment_rect, segment data]
                        let shadow_rect = self.precise_local_rect.translate(shadow.offset);

                        // ImageBrush colors
                        request.push(shadow.color.premultiplied());
                        request.push(PremultipliedColorF::WHITE);
                        request.push([
                            self.precise_local_rect.size.width,
                            self.precise_local_rect.size.height,
                            0.0,
                            0.0,
                        ]);

                        // segment rect / extra data
                        request.push(shadow_rect);
                        request.push([0.0, 0.0, 0.0, 0.0]);
                    }
                }
            }
            // Readback-based mix-blend path needs no extra GPU data here.
            PictureCompositeMode::MixBlend(..) if !frame_context.fb_config.gpu_supports_advanced_blend => {}
            PictureCompositeMode::Filter(ref filter) => {
                match *filter {
                    Filter::ColorMatrix(ref m) => {
                        if self.extra_gpu_data_handles.is_empty() {
                            self.extra_gpu_data_handles.push(GpuCacheHandle::new());
                        }
                        if let Some(mut request) = frame_state.gpu_cache.request(&mut self.extra_gpu_data_handles[0]) {
                            // Upload the 5x4 color matrix as five vec4 rows.
                            for i in 0..5 {
                                request.push([m[i*4], m[i*4+1], m[i*4+2], m[i*4+3]]);
                            }
                        }
                    }
                    Filter::Flood(ref color) => {
                        if self.extra_gpu_data_handles.is_empty() {
                            self.extra_gpu_data_handles.push(GpuCacheHandle::new());
                        }
                        if let Some(mut request) = frame_state.gpu_cache.request(&mut self.extra_gpu_data_handles[0]) {
                            request.push(color.to_array());
                        }
                    }
                    _ => {}
                }
            }
            PictureCompositeMode::ComponentTransferFilter(handle) => {
                let filter_data = &mut data_stores.filter_data[handle];
                filter_data.update(frame_state);
            }
            PictureCompositeMode::MixBlend(..) |
            PictureCompositeMode::Blit(_) |
            PictureCompositeMode::SvgFilter(..) => {}
        }

        true
    }
}

// Calculate a single homogeneous screen-space UV for a picture.
fn calculate_screen_uv( local_pos: &PicturePoint, transform: &PictureToRasterTransform, rendered_rect: &DeviceRect, device_pixel_scale: DevicePixelScale, supports_snapping: bool, ) -> DeviceHomogeneousVector { let raster_pos = transform.transform_point2d_homogeneous(*local_pos); let mut device_vec = DeviceHomogeneousVector::new( raster_pos.x * device_pixel_scale.0, raster_pos.y * device_pixel_scale.0, 0.0, raster_pos.w, ); // Apply snapping for axis-aligned scroll nodes, as per prim_shared.glsl. if transform.transform_kind() == TransformedRectKind::AxisAligned && supports_snapping { device_vec = DeviceHomogeneousVector::new( (device_vec.x / device_vec.w + 0.5).floor(), (device_vec.y / device_vec.w + 0.5).floor(), 0.0, 1.0, ); } DeviceHomogeneousVector::new( (device_vec.x - rendered_rect.origin.x * device_vec.w) / rendered_rect.size.width, (device_vec.y - rendered_rect.origin.y * device_vec.w) / rendered_rect.size.height, 0.0, device_vec.w, ) } // Calculate a UV rect within an image based on the screen space // vertex positions of a picture. 
// Compute the homogeneous UVs for the four corners of a picture rect,
// relative to the device rect the picture was rendered into.
fn calculate_uv_rect_kind(
    pic_rect: &PictureRect,
    transform: &PictureToRasterTransform,
    rendered_rect: &DeviceIntRect,
    device_pixel_scale: DevicePixelScale,
    supports_snapping: bool,
) -> UvRectKind {
    let rendered_rect = rendered_rect.to_f32();

    let top_left = calculate_screen_uv(
        &pic_rect.origin,
        transform,
        &rendered_rect,
        device_pixel_scale,
        supports_snapping,
    );

    let top_right = calculate_screen_uv(
        &pic_rect.top_right(),
        transform,
        &rendered_rect,
        device_pixel_scale,
        supports_snapping,
    );

    let bottom_left = calculate_screen_uv(
        &pic_rect.bottom_left(),
        transform,
        &rendered_rect,
        device_pixel_scale,
        supports_snapping,
    );

    let bottom_right = calculate_screen_uv(
        &pic_rect.bottom_right(),
        transform,
        &rendered_rect,
        device_pixel_scale,
        supports_snapping,
    );

    UvRectKind::Quad {
        top_left,
        top_right,
        bottom_left,
        bottom_right,
    }
}

// Build the pair of space mappers used when rasterizing a picture:
// raster space -> world space, and picture space -> raster space.
fn create_raster_mappers(
    surface_spatial_node_index: SpatialNodeIndex,
    raster_spatial_node_index: SpatialNodeIndex,
    world_rect: WorldRect,
    clip_scroll_tree: &ClipScrollTree,
) -> (SpaceMapper<RasterPixel, WorldPixel>, SpaceMapper<PicturePixel, RasterPixel>) {
    let map_raster_to_world = SpaceMapper::new_with_target(
        ROOT_SPATIAL_NODE_INDEX,
        raster_spatial_node_index,
        world_rect,
        clip_scroll_tree,
    );

    // Unmap the world rect to get raster-space bounds; if unmapping fails
    // (e.g. a degenerate transform) fall back to the maximal rect.
    let raster_bounds = map_raster_to_world.unmap(&world_rect)
        .unwrap_or_else(RasterRect::max_rect);

    let map_pic_to_raster = SpaceMapper::new_with_target(
        raster_spatial_node_index,
        surface_spatial_node_index,
        raster_bounds,
        clip_scroll_tree,
    );

    (map_raster_to_world, map_pic_to_raster)
}

// Build a relative-transform key between a spatial node and the tile cache's
// spatial node; ordered so the query always goes from descendant to ancestor.
fn get_transform_key(
    spatial_node_index: SpatialNodeIndex,
    cache_spatial_node_index: SpatialNodeIndex,
    clip_scroll_tree: &ClipScrollTree,
) -> TransformKey {
    // Note: this is the only place where we don't know beforehand if the tile-affecting
    // spatial node is below or above the current picture.
    let transform = if cache_spatial_node_index >= spatial_node_index {
        clip_scroll_tree
            .get_relative_transform(
                cache_spatial_node_index,
                spatial_node_index,
            )
    } else {
        clip_scroll_tree
            .get_relative_transform(
                spatial_node_index,
                cache_spatial_node_index,
            )
    };

    transform.into()
}

/// A key for storing primitive comparison results during tile dependency tests.
#[derive(Debug, Copy, Clone, Eq, Hash, PartialEq)]
struct PrimitiveComparisonKey {
    prev_index: PrimitiveDependencyIndex,
    curr_index: PrimitiveDependencyIndex,
}

/// Information stored for an image dependency (key plus the generation
/// observed when the dependency was recorded).
#[derive(Debug, Copy, Clone, PartialEq)]
struct ImageDependency {
    key: ImageKey,
    generation: ImageGeneration,
}

/// A helper struct to compare a primitive and all its sub-dependencies.
struct PrimitiveComparer<'a> {
    clip_comparer: CompareHelper<'a, ItemUid>,
    transform_comparer: CompareHelper<'a, SpatialNodeIndex>,
    image_comparer: CompareHelper<'a, ImageDependency>,
    opacity_comparer: CompareHelper<'a, OpacityBinding>,
    resource_cache: &'a ResourceCache,
    spatial_nodes: &'a FastHashMap<SpatialNodeIndex, SpatialNodeDependency>,
    opacity_bindings: &'a FastHashMap<PropertyBindingId, OpacityBindingInfo>,
}

impl<'a> PrimitiveComparer<'a> {
    /// Build comparers over the dependency lists of the previous and
    /// current tile descriptors.
    fn new(
        prev: &'a TileDescriptor,
        curr: &'a TileDescriptor,
        resource_cache: &'a ResourceCache,
        spatial_nodes: &'a FastHashMap<SpatialNodeIndex, SpatialNodeDependency>,
        opacity_bindings: &'a FastHashMap<PropertyBindingId, OpacityBindingInfo>,
    ) -> Self {
        let clip_comparer = CompareHelper::new(
            &prev.clips,
            &curr.clips,
        );

        let transform_comparer = CompareHelper::new(
            &prev.transforms,
            &curr.transforms,
        );

        let image_comparer = CompareHelper::new(
            &prev.images,
            &curr.images,
        );

        let opacity_comparer = CompareHelper::new(
            &prev.opacity_bindings,
            &curr.opacity_bindings,
        );

        PrimitiveComparer {
            clip_comparer,
            transform_comparer,
            image_comparer,
            opacity_comparer,
            resource_cache,
            spatial_nodes,
            opacity_bindings,
        }
    }

    /// Rewind all sub-comparers to the start of their dependency lists.
    fn reset(&mut self) {
        self.clip_comparer.reset();
        self.transform_comparer.reset();
        self.image_comparer.reset();
        self.opacity_comparer.reset();
    }

    /// Skip over the dependencies of one previous-frame primitive.
    fn advance_prev(&mut self, prim: &PrimitiveDescriptor) {
        self.clip_comparer.advance_prev(prim.clip_dep_count);
        self.transform_comparer.advance_prev(prim.transform_dep_count);
        self.image_comparer.advance_prev(prim.image_dep_count);
        self.opacity_comparer.advance_prev(prim.opacity_binding_dep_count);
    }

    /// Skip over the dependencies of one current-frame primitive.
    fn advance_curr(&mut self, prim: &PrimitiveDescriptor) {
        self.clip_comparer.advance_curr(prim.clip_dep_count);
        self.transform_comparer.advance_curr(prim.transform_dep_count);
        self.image_comparer.advance_curr(prim.image_dep_count);
        self.opacity_comparer.advance_curr(prim.opacity_binding_dep_count);
    }

    /// Check if two primitive descriptors are the same. Returns the first
    /// category (descriptor, clip, transform, image, opacity) that differs,
    /// or `Equal` if none do.
    fn compare_prim(
        &mut self,
        prev: &PrimitiveDescriptor,
        curr: &PrimitiveDescriptor,
    ) -> PrimitiveCompareResult {
        let resource_cache = self.resource_cache;
        let spatial_nodes = self.spatial_nodes;
        let opacity_bindings = self.opacity_bindings;

        // Check equality of the PrimitiveDescriptor
        if prev != curr {
            return PrimitiveCompareResult::Descriptor;
        }

        // Check if any of the clips this prim has are different.
        // (The per-item callback always returns false: only list
        // equality is checked for clips here.)
        if !self.clip_comparer.is_same(
            prev.clip_dep_count,
            curr.clip_dep_count,
            |_| {
                false
            }
        ) {
            return PrimitiveCompareResult::Clip;
        }

        // Check if any of the transforms this prim has are different.
        if !self.transform_comparer.is_same(
            prev.transform_dep_count,
            curr.transform_dep_count,
            |curr| {
                spatial_nodes[curr].changed
            }
        ) {
            return PrimitiveCompareResult::Transform;
        }

        // Check if any of the images this prim has are different.
        if !self.image_comparer.is_same(
            prev.image_dep_count,
            curr.image_dep_count,
            |curr| {
                resource_cache.get_image_generation(curr.key) != curr.generation
            }
        ) {
            return PrimitiveCompareResult::Image;
        }

        // Check if any of the opacity bindings this prim has are different.
        // A binding that is missing from the map is treated as changed.
        if !self.opacity_comparer.is_same(
            prev.opacity_binding_dep_count,
            curr.opacity_binding_dep_count,
            |curr| {
                if let OpacityBinding::Binding(id) = curr {
                    if opacity_bindings
                        .get(id)
                        .map_or(true, |info| info.changed) {
                        return true;
                    }
                }

                false
            }
        ) {
            return PrimitiveCompareResult::OpacityBinding;
        }

        PrimitiveCompareResult::Equal
    }
}

/// Details for a node in a quadtree that tracks dirty rects for a tile.
enum TileNodeKind {
    Leaf {
        /// The index buffer of primitives that affected this tile previous frame
        prev_indices: Vec<PrimitiveDependencyIndex>,
        /// The index buffer of primitives that affect this tile on this frame
        curr_indices: Vec<PrimitiveDependencyIndex>,
        /// A bitset of which of the last 64 frames have been dirty for this leaf.
        dirty_tracker: u64,
        /// The number of frames since this node split or merged.
        frames_since_modified: usize,
    },
    Node {
        /// The four children of this node
        children: Vec<TileNode>,
    },
}

/// The kind of modification that a tile wants to do
#[derive(Copy, Clone, PartialEq, Debug)]
enum TileModification {
    Split,
    Merge,
}

/// A node in the dirty rect tracking quadtree.
struct TileNode {
    /// Leaf or internal node
    kind: TileNodeKind,
    /// Rect of this node in the same space as the tile cache picture
    rect: PictureRect,
}

impl TileNode {
    /// Construct a new leaf node, with the given primitive dependency index buffer
    fn new_leaf(curr_indices: Vec<PrimitiveDependencyIndex>) -> Self {
        TileNode {
            kind: TileNodeKind::Leaf {
                prev_indices: Vec::new(),
                curr_indices,
                dirty_tracker: 0,
                frames_since_modified: 0,
            },
            // Rect is filled in later by `clear()`.
            rect: PictureRect::zero(),
        }
    }

    /// Draw debug information about this tile node
    fn draw_debug_rects(
        &self,
        pic_to_world_mapper: &SpaceMapper<PicturePixel, WorldPixel>,
        is_opaque: bool,
        scratch: &mut PrimitiveScratchBuffer,
        global_device_pixel_scale: DevicePixelScale,
    ) {
        match self.kind {
            TileNodeKind::Leaf { dirty_tracker, .. } => {
                // Red = dirty this frame, green = opaque, yellow = alpha.
                let color = if (dirty_tracker & 1) != 0 {
                    debug_colors::RED
                } else if is_opaque {
                    debug_colors::GREEN
                } else {
                    debug_colors::YELLOW
                };

                let world_rect = pic_to_world_mapper.map(&self.rect).unwrap();
                let device_rect = world_rect * global_device_pixel_scale;

                let outer_color = color.scale_alpha(0.3);
                let inner_color = outer_color.scale_alpha(0.5);
                scratch.push_debug_rect(
                    device_rect.inflate(-3.0, -3.0),
                    outer_color,
                    inner_color
                );
            }
            TileNodeKind::Node { ref children, .. } => {
                for child in children.iter() {
                    child.draw_debug_rects(
                        pic_to_world_mapper,
                        is_opaque,
                        scratch,
                        global_device_pixel_scale,
                    );
                }
            }
        }
    }

    /// Calculate the four child rects for a given node
    fn get_child_rects(
        rect: &PictureRect,
        result: &mut [PictureRect; 4],
    ) {
        let p0 = rect.origin;
        let half_size = PictureSize::new(rect.size.width * 0.5, rect.size.height * 0.5);

        // Order: top-left, top-right, bottom-left, bottom-right.
        *result = [
            PictureRect::new(
                PicturePoint::new(p0.x, p0.y),
                half_size,
            ),
            PictureRect::new(
                PicturePoint::new(p0.x + half_size.width, p0.y),
                half_size,
            ),
            PictureRect::new(
                PicturePoint::new(p0.x, p0.y + half_size.height),
                half_size,
            ),
            PictureRect::new(
                PicturePoint::new(p0.x + half_size.width, p0.y + half_size.height),
                half_size,
            ),
        ];
    }

    /// Called during pre_update, to clear the current dependencies
    fn clear(
        &mut self,
        rect: PictureRect,
    ) {
        self.rect = rect;

        match self.kind {
            TileNodeKind::Leaf { ref mut prev_indices, ref mut curr_indices, ref mut dirty_tracker, ref mut frames_since_modified } => {
                // Swap current dependencies to be the previous frame
                mem::swap(prev_indices, curr_indices);
                curr_indices.clear();
                // Note that another frame has passed in the dirty bit trackers
                *dirty_tracker = *dirty_tracker << 1;
                *frames_since_modified += 1;
            }
            TileNodeKind::Node { ref mut children, .. } => {
                let mut child_rects = [PictureRect::zero(); 4];
                TileNode::get_child_rects(&rect, &mut child_rects);
                assert_eq!(child_rects.len(), children.len());

                for (child, rect) in children.iter_mut().zip(child_rects.iter()) {
                    child.clear(*rect);
                }
            }
        }
    }

    /// Add a primitive dependency to this node
    fn add_prim(
        &mut self,
        index: PrimitiveDependencyIndex,
        prim_rect: &PictureRect,
    ) {
        match self.kind {
            TileNodeKind::Leaf { ref mut curr_indices, .. } => {
                curr_indices.push(index);
            }
            TileNodeKind::Node { ref mut children, .. } => {
                // Recurse into every child whose rect the primitive touches.
                for child in children.iter_mut() {
                    if child.rect.intersects(prim_rect) {
                        child.add_prim(index, prim_rect);
                    }
                }
            }
        }
    }

    /// Apply a merge or split operation to this tile, if desired
    fn maybe_merge_or_split(
        &mut self,
        level: i32,
        curr_prims: &[PrimitiveDescriptor],
        max_split_levels: i32,
    ) {
        // Determine if this tile wants to split or merge
        let mut tile_mod = None;

        fn get_dirty_frames(
            dirty_tracker: u64,
            frames_since_modified: usize,
        ) -> Option<u32> {
            // Only consider splitting or merging at least 64 frames since we last changed
            if frames_since_modified > 64 {
                // Each bit in the tracker is a frame that was recently invalidated
                Some(dirty_tracker.count_ones())
            } else {
                None
            }
        }

        match self.kind {
            TileNodeKind::Leaf { dirty_tracker, frames_since_modified, .. } => {
                // Only consider splitting if the tree isn't too deep.
                if level < max_split_levels {
                    if let Some(dirty_frames) = get_dirty_frames(dirty_tracker, frames_since_modified) {
                        // If the tile has invalidated > 50% of the recent number of frames, split.
                        if dirty_frames > 32 {
                            tile_mod = Some(TileModification::Split);
                        }
                    }
                }
            }
            TileNodeKind::Node { ref children, .. } => {
                // There's two conditions that cause a node to merge its children:
                // (1) If _all_ the child nodes are constantly invalidating, then we are wasting
                //     CPU time tracking dependencies for each child, so merge them.
                // (2) If _none_ of the child nodes are recently invalid, then the page content
                //     has probably changed, and we no longer need to track fine grained dependencies here.

                let mut static_count = 0;
                let mut changing_count = 0;

                for child in children {
                    // Only consider merging nodes at the edge of the tree.
                    if let TileNodeKind::Leaf { dirty_tracker, frames_since_modified, .. } = child.kind {
                        if let Some(dirty_frames) = get_dirty_frames(dirty_tracker, frames_since_modified) {
                            if dirty_frames == 0 {
                                // Hasn't been invalidated for some time
                                static_count += 1;
                            } else if dirty_frames == 64 {
                                // Is constantly being invalidated
                                changing_count += 1;
                            }
                        }
                    }

                    // Only merge if all the child tiles are in agreement. Otherwise, we have some
                    // that are invalidating / static, and it's worthwhile tracking dependencies for
                    // them individually.
                    // NOTE(review): this check sits inside the loop over children; the
                    // counts can only reach 4 on the final iteration, so the effect is
                    // the same as checking once after the loop — confirm intent.
                    if static_count == 4 || changing_count == 4 {
                        tile_mod = Some(TileModification::Merge);
                    }
                }
            }
        }

        match tile_mod {
            Some(TileModification::Split) => {
                // To split a node, take the current dependency index buffer for this node, and
                // split it into child index buffers.
                let curr_indices = match self.kind {
                    TileNodeKind::Node { .. } => {
                        unreachable!("bug - only leaves can split");
                    }
                    TileNodeKind::Leaf { ref mut curr_indices, .. } => {
                        // take() presumably moves the Vec out, leaving it empty
                        // (project extension trait) — TODO confirm.
                        curr_indices.take()
                    }
                };

                let mut child_rects = [PictureRect::zero(); 4];
                TileNode::get_child_rects(&self.rect, &mut child_rects);

                let mut child_indices = [
                    Vec::new(),
                    Vec::new(),
                    Vec::new(),
                    Vec::new(),
                ];

                // Step through the index buffer, and add primitives to each of the children
                // that they intersect.
                for index in curr_indices {
                    let prim = &curr_prims[index.0 as usize];
                    for (child_rect, indices) in child_rects.iter().zip(child_indices.iter_mut()) {
                        let child_rect_key: RectangleKey = (*child_rect).into();
                        if prim.prim_clip_rect.intersects(&child_rect_key) {
                            indices.push(index);
                        }
                    }
                }

                // Create the child nodes and switch from leaf -> node.
                let children = child_indices
                    .iter_mut()
                    .map(|i| TileNode::new_leaf(mem::replace(i, Vec::new())))
                    .collect();

                self.kind = TileNodeKind::Node {
                    children: children,
                };
            }
            Some(TileModification::Merge) => {
                // Construct a merged index buffer by collecting the dependency index buffers
                // from each child, and merging them into a de-duplicated index buffer.
                let merged_indices = match self.kind {
                    TileNodeKind::Node { ref mut children, .. } => {
                        let mut merged_indices = Vec::new();

                        for child in children.iter() {
                            let child_indices = match child.kind {
                                TileNodeKind::Leaf { ref curr_indices, .. } => {
                                    curr_indices
                                }
                                TileNodeKind::Node { .. } => {
                                    unreachable!("bug: child is not a leaf");
                                }
                            };
                            merged_indices.extend_from_slice(child_indices);
                        }

                        merged_indices.sort();
                        merged_indices.dedup();

                        merged_indices
                    }
                    TileNodeKind::Leaf { .. } => {
                        unreachable!("bug - trying to merge a leaf");
                    }
                };

                // Switch from a node to a leaf, with the combined index buffer
                self.kind = TileNodeKind::Leaf {
                    prev_indices: Vec::new(),
                    curr_indices: merged_indices,
                    dirty_tracker: 0,
                    frames_since_modified: 0,
                };
            }
            None => {
                // If this node didn't merge / split, then recurse into children
                // to see if they want to split / merge.
                if let TileNodeKind::Node { ref mut children, .. } = self.kind {
                    for child in children.iter_mut() {
                        child.maybe_merge_or_split(
                            level+1,
                            curr_prims,
                            max_split_levels,
                        );
                    }
                }
            }
        }
    }

    /// Update the dirty state of this node, building the overall dirty rect
    fn update_dirty_rects(
        &mut self,
        prev_prims: &[PrimitiveDescriptor],
        curr_prims: &[PrimitiveDescriptor],
        prim_comparer: &mut PrimitiveComparer,
        dirty_rect: &mut PictureRect,
        compare_cache: &mut FastHashMap<PrimitiveComparisonKey, PrimitiveCompareResult>,
        invalidation_reason: &mut Option<InvalidationReason>,
    ) {
        match self.kind {
            TileNodeKind::Node { ref mut children, .. } => {
                for child in children.iter_mut() {
                    child.update_dirty_rects(
                        prev_prims,
                        curr_prims,
                        prim_comparer,
                        dirty_rect,
                        compare_cache,
                        invalidation_reason,
                    );
                }
            }
            TileNodeKind::Leaf { ref prev_indices, ref curr_indices, ref mut dirty_tracker, .. } => {
                // If the index buffers are of different length, they must be different
                if prev_indices.len() == curr_indices.len() {
                    let mut prev_i0 = 0;
                    let mut prev_i1 = 0;
                    prim_comparer.reset();

                    // Walk each index buffer, comparing primitives
                    for (prev_index, curr_index) in prev_indices.iter().zip(curr_indices.iter()) {
                        let i0 = prev_index.0 as usize;
                        let i1 = curr_index.0 as usize;

                        // Advance the dependency arrays for each primitive (this handles
                        // prims that may be skipped by these index buffers).
                        for i in prev_i0 .. i0 {
                            prim_comparer.advance_prev(&prev_prims[i]);
                        }
                        for i in prev_i1 .. i1 {
                            prim_comparer.advance_curr(&curr_prims[i]);
                        }

                        // Compare the primitives, caching the result in a hash map
                        // to save comparisons in other tree nodes.
                        let key = PrimitiveComparisonKey {
                            prev_index: *prev_index,
                            curr_index: *curr_index,
                        };

                        let prim_compare_result = *compare_cache
                            .entry(key)
                            .or_insert_with(|| {
                                let prev = &prev_prims[i0];
                                let curr = &curr_prims[i1];
                                prim_comparer.compare_prim(prev, curr)
                            });

                        // If not the same, mark this node as dirty and update the dirty rect
                        if prim_compare_result != PrimitiveCompareResult::Equal {
                            if invalidation_reason.is_none() {
                                *invalidation_reason = Some(InvalidationReason::Content {
                                    prim_compare_result,
                                });
                            }
                            *dirty_rect = self.rect.union(dirty_rect);
                            *dirty_tracker = *dirty_tracker | 1;
                            break;
                        }

                        prev_i0 = i0;
                        prev_i1 = i1;
                    }
                } else {
                    if invalidation_reason.is_none() {
                        *invalidation_reason = Some(InvalidationReason::PrimCount);
                    }
                    *dirty_rect = self.rect.union(dirty_rect);
                    *dirty_tracker = *dirty_tracker | 1;
                }
            }
        }
    }
}

impl CompositeState {
    // A helper function to destroy all native surfaces for a given list of tiles
    pub fn destroy_native_surfaces<'a, I: Iterator<Item = &'a Tile>>(
        &mut self,
        tiles_iter: I,
        resource_cache: &mut ResourceCache,
    ) {
        // Any old tiles that remain after the loop above are going to be dropped. For
        // simple composite mode, the texture cache handle will expire and be collected
        // by the texture cache. For native compositor mode, we need to explicitly
        // invoke a callback to the client to destroy that surface.
        if let CompositorKind::Native { .. } = self.compositor_kind {
            for tile in tiles_iter {
                // Only destroy native surfaces that have been allocated. It's
                // possible for display port tiles to be created that never
                // come on screen, and thus never get a native surface allocated.
                if let Some(TileSurface::Texture { descriptor: SurfaceTextureDescriptor::NativeSurface { id, .. }, .. }) = tile.surface {
                    if let Some(id) = id {
                        resource_cache.destroy_compositor_surface(id);
                    }
                }
            }
        }
    }
}
use utils::bigint::M256;
use utils::gas::Gas;
use super::commit::{AccountState, BlockhashState};
use super::errors::{RequireError, MachineError, CommitError, EvalError, PCError};
use super::{Stack, Context, BlockHeader, Patch, PC, Storage, Memory, AccountCommitment, Log};
use self::check::{check_opcode, extra_check_opcode};
use self::run::run_opcode;
use self::cost::{gas_refund, gas_stipend, gas_cost, memory_cost, memory_gas};
use self::utils::copy_into_memory;

mod cost;
mod run;
mod check;
mod utils;

/// A VM state without PC.
pub struct State<M, S> {
    // VM memory (generic so callers can plug in different implementations).
    pub memory: M,
    // Operand stack.
    pub stack: Stack,
    // Execution context (caller, address, value, gas limit, code, ...).
    pub context: Context,
    // Header of the block this transaction executes in.
    pub block: BlockHeader,
    // Hard-fork / feature configuration.
    pub patch: Patch,
    // Return data produced by the execution.
    pub out: Vec<u8>,
    // Current memory expansion cost (fed to `memory_gas`).
    pub memory_cost: Gas,
    // Gas consumed so far (excluding memory expansion gas).
    pub used_gas: Gas,
    // Gas scheduled to be refunded at the end of the transaction.
    pub refunded_gas: Gas,
    // Account/storage view, parameterized over the storage type.
    pub account_state: AccountState<S>,
    // Committed block hashes for the BLOCKHASH opcode.
    pub blockhash_state: BlockhashState,
    // Log entries emitted by LOG* opcodes.
    pub logs: Vec<Log>,
    // Current call/create depth.
    pub depth: usize,
}

impl<M, S> State<M, S> {
    /// Gas charged for the current memory expansion.
    pub fn memory_gas(&self) -> Gas {
        memory_gas(self.memory_cost)
    }

    /// Gas still available: limit minus memory gas minus used gas.
    pub fn available_gas(&self) -> Gas {
        self.context.gas_limit - self.memory_gas() - self.used_gas
    }
}

/// A VM state with PC.
pub struct Machine<M, S> {
    // Full VM state (memory, stack, gas accounting, account state, ...).
    state: State<M, S>,
    // Program counter over the context's code.
    pc: PC,
    // Current run status; also carries pending sub-call/create requests.
    status: MachineStatus,
}

/// Status of a machine: running, finished, failed, or waiting for the
/// caller to run a sub-machine (CREATE / CALL).
#[derive(Debug, Clone)]
pub enum MachineStatus {
    Running,
    ExitedOk,
    ExitedErr(MachineError),
    InvokeCreate(Context),
    // The (M256, M256) pair is the (offset, length) of the caller's
    // output buffer for the sub-call's return data.
    InvokeCall(Context, (M256, M256)),
}

/// Control effect reported by `check_opcode` before execution.
#[derive(Debug, Clone)]
pub enum ControlCheck {
    Jump(M256),
}

/// Control effect produced by running one opcode.
#[derive(Debug, Clone)]
pub enum Control {
    Stop,
    Jump(M256),
    InvokeCreate(Context),
    InvokeCall(Context, (M256, M256)),
}

impl<M: Memory + Default, S: Storage + Default + Clone> Machine<M, S> {
    /// Create a fresh machine for the given context at the given call depth.
    pub fn new(context: Context, block: BlockHeader, patch: Patch, depth: usize) -> Self {
        Machine {
            pc: PC::new(context.code.as_slice()),
            status: MachineStatus::Running,
            state: State {
                memory: M::default(),
                stack: Stack::default(),
                context: context,
                block: block,
                patch: patch,
                out: Vec::new(),
                memory_cost: Gas::zero(),
                used_gas: Gas::zero(),
                refunded_gas: Gas::zero(),
                account_state: AccountState::default(),
                blockhash_state: BlockhashState::default(),
                logs: Vec::new(),
                depth: depth,
            },
        }
    }

    /// Derive a sub-machine for a CALL/CREATE: clones the shared state
    /// (accounts, blockhashes, logs) and bumps the depth by one.
    pub fn derive(&self, context: Context) -> Self {
        Machine {
            pc: PC::new(context.code.as_slice()),
            status: MachineStatus::Running,
            state: State {
                memory: M::default(),
                stack: Stack::default(),
                context: context,
                block: self.state.block.clone(),
                patch: self.state.patch.clone(),
                out: Vec::new(),
                memory_cost: Gas::zero(),
                used_gas: Gas::zero(),
                refunded_gas: Gas::zero(),
                account_state: self.state.account_state.clone(),
                blockhash_state: self.state.blockhash_state.clone(),
                logs: self.state.logs.clone(),
                depth: self.state.depth + 1,
            },
        }
    }

    /// Feed an externally-fetched account commitment into the state.
    pub fn commit_account(&mut self, commitment: AccountCommitment<S>) -> Result<(), CommitError> {
        self.state.account_state.commit(commitment)
    }

    /// Feed an externally-fetched block hash into the state.
    pub fn commit_blockhash(&mut self, number: M256, hash: M256) -> Result<(), CommitError> {
        self.state.blockhash_state.commit(number, hash)
    }

    /// Merge a finished sub-machine back, dispatching on whether this
    /// machine was waiting on a CREATE or a CALL. Panics if not waiting.
    #[allow(unused_variables)]
    pub fn apply_sub(&mut self, sub: Machine<M, S>) {
        use std::mem::swap;
        let mut status = MachineStatus::Running;
        swap(&mut status, &mut self.status);
        match status {
            MachineStatus::InvokeCreate(_) => {
                self.apply_create(sub);
            },
            MachineStatus::InvokeCall(_, (out_start, out_len)) => {
                self.apply_call(sub, out_start, out_len);
            },
            _ => panic!(),
        }
    }

    /// Merge the result of a CREATE sub-machine.
    fn apply_create(&mut self, sub: Machine<M, S>) {
        // The sub-machine can never use more gas than we had available.
        if self.state.available_gas() < sub.state.used_gas {
            panic!();
        }

        match sub.status() {
            MachineStatus::ExitedOk => {
                // Adopt the sub-machine's view of the world, charge its gas,
                // then move the endowment and create the new account.
                self.state.account_state = sub.state.account_state;
                self.state.blockhash_state = sub.state.blockhash_state;
                self.state.logs = sub.state.logs;
                self.state.used_gas = self.state.used_gas + sub.state.used_gas;
                self.state.refunded_gas = self.state.refunded_gas + sub.state.refunded_gas;

                self.state.account_state.decrease_balance(self.state.context.address, sub.state.context.value);
                self.state.account_state.create(sub.state.context.address, sub.state.context.value, sub.state.out.as_slice());
            },
            MachineStatus::ExitedErr(_) => {
                // On failure: charge the gas and replace the pushed result
                // with zero (failure marker).
                self.state.used_gas = self.state.used_gas + sub.state.used_gas;
                self.state.stack.pop().unwrap();
                self.state.stack.push(M256::zero()).unwrap();
            },
            _ => panic!(),
        }
    }

    /// Merge the result of a CALL sub-machine, copying its return data
    /// into this machine's memory at [out_start, out_start + out_len).
    fn apply_call(&mut self, sub: Machine<M, S>, out_start: M256, out_len: M256) {
        if self.state.available_gas() < sub.state.used_gas {
            panic!();
        }

        match sub.status() {
            MachineStatus::ExitedOk => {
                self.state.account_state = sub.state.account_state;
                self.state.blockhash_state = sub.state.blockhash_state;
                self.state.logs = sub.state.logs;
                self.state.used_gas = self.state.used_gas + sub.state.used_gas;
                self.state.refunded_gas = self.state.refunded_gas + sub.state.refunded_gas;

                // Transfer the call value and copy return data out.
                self.state.account_state.decrease_balance(self.state.context.address, sub.state.context.value);
                self.state.account_state.increase_balance(sub.state.context.address, sub.state.context.value);

                copy_into_memory(&mut self.state.memory, sub.state.out.as_slice(), out_start, M256::zero(), out_len);
            },
            MachineStatus::ExitedErr(_) => {
                self.state.used_gas = self.state.used_gas + sub.state.used_gas;
                self.state.stack.pop().unwrap();
                self.state.stack.push(M256::zero()).unwrap();
            },
            _ => panic!(),
        }
    }

    /// Validate the next opcode without executing it; also validates
    /// jump destinations reported by `check_opcode`.
    pub fn check(&self) -> Result<(), EvalError> {
        let instruction = self.pc.peek()?;
        check_opcode(instruction, &self.state).and_then(|v| {
            match v {
                None => Ok(()),
                Some(ControlCheck::Jump(dest)) => {
                    if dest <= M256::from(usize::max_value()) && self.pc.is_valid(dest.into()) {
                        Ok(())
                    } else {
                        Err(EvalError::Machine(MachineError::PC(PCError::BadJumpDest)))
                    }
                }
            }
        })
    }

    /// Execute a single instruction. Returns Err only when external data
    /// (account / blockhash) must be committed first; VM-level failures
    /// are recorded in `status` and return Ok.
    pub fn step(&mut self) -> Result<(), RequireError> {
        // Only a running machine may step.
        match &self.status {
            &MachineStatus::Running => (),
            _ => panic!(),
        }

        // Falling off the end of the code is a normal stop.
        if self.pc.is_end() {
            self.status = MachineStatus::ExitedOk;
            return Ok(());
        }

        match self.check() {
            Ok(()) => (),
            Err(EvalError::Machine(error)) => {
                self.status = MachineStatus::ExitedErr(error);
                return Ok(());
            },
            Err(EvalError::Require(error)) => {
                return Err(error);
            },
        };

        // Pre-compute all gas quantities for this instruction.
        let instruction = self.pc.peek().unwrap();
        let position = self.pc.position();
        let memory_cost = memory_cost(instruction, &self.state);
        let memory_gas = memory_gas(memory_cost);
        let gas_cost = gas_cost(instruction, &self.state);
        let gas_stipend = gas_stipend(instruction, &self.state);
        let gas_refund = gas_refund(instruction, &self.state);

        // Gas that will remain after charging this instruction.
        let after_gas = self.state.context.gas_limit - memory_gas - self.state.used_gas - gas_cost + gas_stipend;

        match extra_check_opcode(instruction, &self.state, gas_stipend, after_gas) {
            Ok(()) => (),
            Err(EvalError::Machine(error)) => {
                self.status = MachineStatus::ExitedErr(error);
                return Ok(());
            },
            Err(EvalError::Require(error)) => {
                return Err(error);
            },
        }

        // Out-of-gas check before executing.
        if self.state.context.gas_limit < memory_gas + self.state.used_gas + gas_cost - gas_stipend {
            self.status = MachineStatus::ExitedErr(MachineError::EmptyGas);
            return Ok(());
        }

        // Consume the opcode and run it, then settle the gas accounting.
        let instruction = self.pc.read().unwrap();
        let result = run_opcode((instruction, position), &mut self.state, gas_stipend, after_gas);

        self.state.used_gas = self.state.used_gas + gas_cost - gas_stipend;
        self.state.memory_cost = memory_cost;
        self.state.refunded_gas = self.state.refunded_gas + gas_refund;

        match result {
            None => Ok(()),
            Some(Control::Jump(dest)) => {
                self.pc.jump(dest.into()).unwrap();
                Ok(())
            },
            Some(Control::InvokeCall(context, (from, len))) => {
                self.status = MachineStatus::InvokeCall(context, (from, len));
                Ok(())
            },
            Some(Control::InvokeCreate(context)) => {
                self.status = MachineStatus::InvokeCreate(context);
                Ok(())
            },
            Some(Control::Stop) => {
                self.status = MachineStatus::ExitedOk;
                Ok(())
            },
        }
    }

    /// Read-only access to the VM state.
    pub fn state(&self) -> &State<M, S> {
        &self.state
    }

    /// Current machine status (cloned).
    pub fn status(&self) -> MachineStatus {
        self.status.clone()
    }
}

Problem: failing tests Solution: patch for tests

// NOTE(review): the stray prose above is not valid Rust, and everything from
// here down duplicates this module with a patched copy (see the ExitedErr
// arms below) — this looks like an accidental paste of a before/after diff.
// One copy plus the prose should be removed; confirm which version is current.
use utils::bigint::M256;
use utils::gas::Gas;
use super::commit::{AccountState, BlockhashState};
use super::errors::{RequireError, MachineError, CommitError, EvalError, PCError};
use super::{Stack, Context, BlockHeader, Patch, PC, Storage, Memory, AccountCommitment, Log};
use self::check::{check_opcode, extra_check_opcode};
use self::run::run_opcode;
use self::cost::{gas_refund, gas_stipend, gas_cost, memory_cost, memory_gas};
use self::utils::copy_into_memory;

mod cost;
mod run;
mod check;
mod utils;

/// A VM state without PC.
pub struct State<M, S> {
    pub memory: M,
    pub stack: Stack,
    pub context: Context,
    pub block: BlockHeader,
    pub patch: Patch,
    pub out: Vec<u8>,
    pub memory_cost: Gas,
    pub used_gas: Gas,
    pub refunded_gas: Gas,
    pub account_state: AccountState<S>,
    pub blockhash_state: BlockhashState,
    pub logs: Vec<Log>,
    pub depth: usize,
}

impl<M, S> State<M, S> {
    /// Gas charged for the current memory expansion.
    pub fn memory_gas(&self) -> Gas {
        memory_gas(self.memory_cost)
    }

    /// Gas still available: limit minus memory gas minus used gas.
    pub fn available_gas(&self) -> Gas {
        self.context.gas_limit - self.memory_gas() - self.used_gas
    }
}

/// A VM state with PC.
// NOTE(review): this is the "patched" duplicate of the Machine impl above.
// It differs only in the ExitedErr arms of apply_create/apply_call: the
// sub-machine gas charge is commented out, and a failed call pushes 1
// instead of 0 — presumably the "patch for tests" mentioned in the stray
// prose. Confirm which behavior is intended before deduplicating.
pub struct Machine<M, S> {
    // Full VM state (memory, stack, gas accounting, account state, ...).
    state: State<M, S>,
    // Program counter over the context's code.
    pc: PC,
    // Current run status; also carries pending sub-call/create requests.
    status: MachineStatus,
}

/// Status of a machine: running, finished, failed, or waiting for the
/// caller to run a sub-machine (CREATE / CALL).
#[derive(Debug, Clone)]
pub enum MachineStatus {
    Running,
    ExitedOk,
    ExitedErr(MachineError),
    InvokeCreate(Context),
    // The (M256, M256) pair is the (offset, length) of the caller's
    // output buffer for the sub-call's return data.
    InvokeCall(Context, (M256, M256)),
}

/// Control effect reported by `check_opcode` before execution.
#[derive(Debug, Clone)]
pub enum ControlCheck {
    Jump(M256),
}

/// Control effect produced by running one opcode.
#[derive(Debug, Clone)]
pub enum Control {
    Stop,
    Jump(M256),
    InvokeCreate(Context),
    InvokeCall(Context, (M256, M256)),
}

impl<M: Memory + Default, S: Storage + Default + Clone> Machine<M, S> {
    /// Create a fresh machine for the given context at the given call depth.
    pub fn new(context: Context, block: BlockHeader, patch: Patch, depth: usize) -> Self {
        Machine {
            pc: PC::new(context.code.as_slice()),
            status: MachineStatus::Running,
            state: State {
                memory: M::default(),
                stack: Stack::default(),
                context: context,
                block: block,
                patch: patch,
                out: Vec::new(),
                memory_cost: Gas::zero(),
                used_gas: Gas::zero(),
                refunded_gas: Gas::zero(),
                account_state: AccountState::default(),
                blockhash_state: BlockhashState::default(),
                logs: Vec::new(),
                depth: depth,
            },
        }
    }

    /// Derive a sub-machine for a CALL/CREATE: clones the shared state
    /// (accounts, blockhashes, logs) and bumps the depth by one.
    pub fn derive(&self, context: Context) -> Self {
        Machine {
            pc: PC::new(context.code.as_slice()),
            status: MachineStatus::Running,
            state: State {
                memory: M::default(),
                stack: Stack::default(),
                context: context,
                block: self.state.block.clone(),
                patch: self.state.patch.clone(),
                out: Vec::new(),
                memory_cost: Gas::zero(),
                used_gas: Gas::zero(),
                refunded_gas: Gas::zero(),
                account_state: self.state.account_state.clone(),
                blockhash_state: self.state.blockhash_state.clone(),
                logs: self.state.logs.clone(),
                depth: self.state.depth + 1,
            },
        }
    }

    /// Feed an externally-fetched account commitment into the state.
    pub fn commit_account(&mut self, commitment: AccountCommitment<S>) -> Result<(), CommitError> {
        self.state.account_state.commit(commitment)
    }

    /// Feed an externally-fetched block hash into the state.
    pub fn commit_blockhash(&mut self, number: M256, hash: M256) -> Result<(), CommitError> {
        self.state.blockhash_state.commit(number, hash)
    }

    /// Merge a finished sub-machine back, dispatching on whether this
    /// machine was waiting on a CREATE or a CALL. Panics if not waiting.
    #[allow(unused_variables)]
    pub fn apply_sub(&mut self, sub: Machine<M, S>) {
        use std::mem::swap;
        let mut status = MachineStatus::Running;
        swap(&mut status, &mut self.status);
        match status {
            MachineStatus::InvokeCreate(_) => {
                self.apply_create(sub);
            },
            MachineStatus::InvokeCall(_, (out_start, out_len)) => {
                self.apply_call(sub, out_start, out_len);
            },
            _ => panic!(),
        }
    }

    /// Merge the result of a CREATE sub-machine.
    fn apply_create(&mut self, sub: Machine<M, S>) {
        // The sub-machine can never use more gas than we had available.
        if self.state.available_gas() < sub.state.used_gas {
            panic!();
        }

        match sub.status() {
            MachineStatus::ExitedOk => {
                self.state.account_state = sub.state.account_state;
                self.state.blockhash_state = sub.state.blockhash_state;
                self.state.logs = sub.state.logs;
                self.state.used_gas = self.state.used_gas + sub.state.used_gas;
                self.state.refunded_gas = self.state.refunded_gas + sub.state.refunded_gas;

                self.state.account_state.decrease_balance(self.state.context.address, sub.state.context.value);
                self.state.account_state.create(sub.state.context.address, sub.state.context.value, sub.state.out.as_slice());
            },
            MachineStatus::ExitedErr(_) => {
                // Failed create: sub gas is intentionally not charged here
                // (differs from the first copy of this file) — confirm.
                // self.state.used_gas = self.state.used_gas + sub.state.used_gas;
                self.state.stack.pop().unwrap();
                self.state.stack.push(M256::zero()).unwrap();
            },
            _ => panic!(),
        }
    }

    /// Merge the result of a CALL sub-machine, copying its return data
    /// into this machine's memory at [out_start, out_start + out_len).
    fn apply_call(&mut self, sub: Machine<M, S>, out_start: M256, out_len: M256) {
        if self.state.available_gas() < sub.state.used_gas {
            panic!();
        }

        match sub.status() {
            MachineStatus::ExitedOk => {
                self.state.account_state = sub.state.account_state;
                self.state.blockhash_state = sub.state.blockhash_state;
                self.state.logs = sub.state.logs;
                self.state.used_gas = self.state.used_gas + sub.state.used_gas;
                self.state.refunded_gas = self.state.refunded_gas + sub.state.refunded_gas;

                self.state.account_state.decrease_balance(self.state.context.address, sub.state.context.value);
                self.state.account_state.increase_balance(sub.state.context.address, sub.state.context.value);

                copy_into_memory(&mut self.state.memory, sub.state.out.as_slice(), out_start, M256::zero(), out_len);
            },
            MachineStatus::ExitedErr(_) => {
                // Failed call: gas not charged and 1 is pushed (the first
                // copy pushes 0) — confirm against intended EVM semantics.
                // self.state.used_gas = self.state.used_gas + sub.state.used_gas;
                self.state.stack.pop().unwrap();
                self.state.stack.push(M256::from(1u64)).unwrap();
            },
            _ => panic!(),
        }
    }

    /// Validate the next opcode without executing it; also validates
    /// jump destinations reported by `check_opcode`.
    pub fn check(&self) -> Result<(), EvalError> {
        let instruction = self.pc.peek()?;
        check_opcode(instruction, &self.state).and_then(|v| {
            match v {
                None => Ok(()),
                Some(ControlCheck::Jump(dest)) => {
                    if dest <= M256::from(usize::max_value()) && self.pc.is_valid(dest.into()) {
                        Ok(())
                    } else {
                        Err(EvalError::Machine(MachineError::PC(PCError::BadJumpDest)))
                    }
                }
            }
        })
    }

    /// Execute a single instruction. Returns Err only when external data
    /// (account / blockhash) must be committed first; VM-level failures
    /// are recorded in `status` and return Ok.
    pub fn step(&mut self) -> Result<(), RequireError> {
        // Only a running machine may step.
        match &self.status {
            &MachineStatus::Running => (),
            _ => panic!(),
        }

        // Falling off the end of the code is a normal stop.
        if self.pc.is_end() {
            self.status = MachineStatus::ExitedOk;
            return Ok(());
        }

        match self.check() {
            Ok(()) => (),
            Err(EvalError::Machine(error)) => {
                self.status = MachineStatus::ExitedErr(error);
                return Ok(());
            },
            Err(EvalError::Require(error)) => {
                return Err(error);
            },
        };

        // Pre-compute all gas quantities for this instruction.
        let instruction = self.pc.peek().unwrap();
        let position = self.pc.position();
        let memory_cost = memory_cost(instruction, &self.state);
        let memory_gas = memory_gas(memory_cost);
        let gas_cost = gas_cost(instruction, &self.state);
        let gas_stipend = gas_stipend(instruction, &self.state);
        let gas_refund = gas_refund(instruction, &self.state);

        // Gas that will remain after charging this instruction.
        let after_gas = self.state.context.gas_limit - memory_gas - self.state.used_gas - gas_cost + gas_stipend;

        match extra_check_opcode(instruction, &self.state, gas_stipend, after_gas) {
            Ok(()) => (),
            Err(EvalError::Machine(error)) => {
                self.status = MachineStatus::ExitedErr(error);
                return Ok(());
            },
            Err(EvalError::Require(error)) => {
                return Err(error);
            },
        }

        // Out-of-gas check before executing.
        if self.state.context.gas_limit < memory_gas + self.state.used_gas + gas_cost - gas_stipend {
            self.status = MachineStatus::ExitedErr(MachineError::EmptyGas);
            return Ok(());
        }

        // Consume the opcode and run it, then settle the gas accounting.
        let instruction = self.pc.read().unwrap();
        let result = run_opcode((instruction, position), &mut self.state, gas_stipend, after_gas);

        self.state.used_gas = self.state.used_gas + gas_cost - gas_stipend;
        self.state.memory_cost = memory_cost;
        self.state.refunded_gas = self.state.refunded_gas + gas_refund;

        match result {
            None => Ok(()),
            Some(Control::Jump(dest)) => {
                self.pc.jump(dest.into()).unwrap();
                Ok(())
            },
            Some(Control::InvokeCall(context, (from, len))) => {
                self.status = MachineStatus::InvokeCall(context, (from, len));
                Ok(())
            },
            Some(Control::InvokeCreate(context)) => {
                self.status = MachineStatus::InvokeCreate(context);
                Ok(())
            },
            Some(Control::Stop) => {
                self.status = MachineStatus::ExitedOk;
                Ok(())
            },
        }
    }

    /// Read-only access to the VM state.
    pub fn state(&self) -> &State<M, S> {
        &self.state
    }

    /// Current machine status (cloned).
    pub fn status(&self) -> MachineStatus {
        self.status.clone()
    }
}
use redox::*; // nvp implementation version pub const NV_VERSION: i32 = 0; // nvlist header //#[derive(Debug)] pub struct NvList { pub version: i32, pub nvflag: u32, // persistent flags pub pairs: Vec<(String, NvValue)>, } impl NvList { pub fn new(nvflag: u32) -> Self { NvList { version: NV_VERSION, nvflag: nvflag, pairs: Vec::new(), } } pub fn find(&self, name: &str) -> Option<&NvValue> { for pair in &self.pairs { if pair.0 == name { return Some(&pair.1); } } None } pub fn find_mut(&mut self, name: &str) -> Option<&mut NvValue> { for pair in &mut self.pairs { if pair.0 == name { return Some(&mut pair.1); } } None } } impl fmt::Debug for NvList { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { try!(write!(f, "NvList {{ version: {:X}, nvflag: {:X}, pairs: [\n", self.version, self.nvflag)); for &(ref name, ref value) in &self.pairs { if name.is_empty() { break; } try!(write!(f, "{} : {:?}\n", name, value)); } try!(write!(f, "] }}\n")); Ok(()) } } // TODO Auto implement Debug. format! 
currently crashes with big u32 values //#[derive(Debug)] pub enum NvValue { Unknown, Boolean, Byte(u8), Int16(i16), Uint16(u16), Int32(i32), Uint32(u32), Int64(i64), Uint64(u64), String(String), ByteArray(Vec<u8>), Int16Array(Vec<i16>), Uint16Array(Vec<u16>), Int32Array(Vec<i32>), Uint32Array(Vec<u32>), Int64Array(Vec<i64>), Uint64Array(Vec<u64>), StringArray(Vec<String>), HrTime(i64), NvList(Box<NvList>), NvListArray(Vec<Box<NvList>>), BooleanValue(bool), Int8(i8), Uint8(u8), BooleanArray(Vec<bool>), Int8Array(Vec<i8>), Uint8Array(Vec<u8>), } impl NvValue { pub fn data_type(&self) -> DataType { match *self { NvValue::Unknown => DataType::Unknown, NvValue::Boolean => DataType::Boolean, NvValue::Byte(_) => DataType::Byte, NvValue::Int16(_) => DataType::Int16, NvValue::Uint16(_) => DataType::Uint16, NvValue::Int32(_) => DataType::Int32, NvValue::Uint32(_) => DataType::Uint32, NvValue::Int64(_) => DataType::Int64, NvValue::Uint64(_) => DataType::Uint64, NvValue::String(_) => DataType::String, NvValue::ByteArray(_) => DataType::ByteArray, NvValue::Int16Array(_) => DataType::Int16Array, NvValue::Uint16Array(_) => DataType::Uint16Array, NvValue::Int32Array(_) => DataType::Int32Array, NvValue::Uint32Array(_) => DataType::Uint32Array, NvValue::Int64Array(_) => DataType::Int64Array, NvValue::Uint64Array(_) => DataType::Uint64Array, NvValue::StringArray(_) => DataType::StringArray, NvValue::HrTime(_) => DataType::HrTime, NvValue::NvList(_) => DataType::NvList, NvValue::NvListArray(_) => DataType::NvListArray, NvValue::BooleanValue(_) => DataType::BooleanValue, NvValue::Int8(_) => DataType::Int8, NvValue::Uint8(_) => DataType::Uint8, NvValue::BooleanArray(_) => DataType::BooleanArray, NvValue::Int8Array(_) => DataType::Int8Array, NvValue::Uint8Array(_) => DataType::Uint8Array, } } pub fn num_elements(&self) -> usize { match *self { NvValue::Unknown => 1, NvValue::Boolean => 1, NvValue::Byte(_) => 1, NvValue::Int16(_) => 1, NvValue::Uint16(_) => 1, NvValue::Int32(_) => 1, 
NvValue::Uint32(_) => 1, NvValue::Int64(_) => 1, NvValue::Uint64(_) => 1, NvValue::String(_) => 1, NvValue::ByteArray(ref a) => a.len(), NvValue::Int16Array(ref a) => a.len(), NvValue::Uint16Array(ref a) => a.len(), NvValue::Int32Array(ref a) => a.len(), NvValue::Uint32Array(ref a) => a.len(), NvValue::Int64Array(ref a) => a.len(), NvValue::Uint64Array(ref a) => a.len(), NvValue::StringArray(ref a) => a.len(), NvValue::HrTime(_) => 1, NvValue::NvList(_) => 1, NvValue::NvListArray(ref a) => a.len(), NvValue::BooleanValue(_) => 1, NvValue::Int8(_) => 1, NvValue::Uint8(_) => 1, NvValue::BooleanArray(ref a) => a.len(), NvValue::Int8Array(ref a) => a.len(), NvValue::Uint8Array(ref a) => a.len(), } } } impl fmt::Debug for NvValue { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { NvValue::Int64(v) => write!(f, "Int64(0x{:X})", v), NvValue::Uint64(v) => write!(f, "Uint64(0x{:X})", v), NvValue::NvList(ref v) => write!(f, "NvList({:?})", **v), NvValue::NvListArray(ref v) => { try!(write!(f, "NvListArray([")); for nv_list in v { try!(write!(f, "NvList({:?})", **nv_list)); } write!(f, "])") }, NvValue::String(ref v) => { write!(f, "String({})", v) }, _ => write!(f, "{:?}", self), } } } #[derive(Copy, Clone, Debug)] pub enum DataType { Unknown = 0, Boolean, Byte, Int16, Uint16, Int32, Uint32, Int64, Uint64, String, ByteArray, Int16Array, Uint16Array, Int32Array, Uint32Array, Int64Array, Uint64Array, StringArray, HrTime, NvList, NvListArray, BooleanValue, Int8, Uint8, BooleanArray, Int8Array, Uint8Array, } impl DataType { pub fn from_u8(u: u8) -> Option<DataType> { match u { 0 => Some(DataType::Unknown), 1 => Some(DataType::Boolean), 2 => Some(DataType::Byte), 3 => Some(DataType::Int16), 4 => Some(DataType::Uint16), 5 => Some(DataType::Int32), 6 => Some(DataType::Uint32), 7 => Some(DataType::Int64), 8 => Some(DataType::Uint64), 9 => Some(DataType::String), 10 => Some(DataType::ByteArray), 11 => Some(DataType::Int16Array), 12 => Some(DataType::Uint16Array), 
13 => Some(DataType::Int32Array), 14 => Some(DataType::Uint32Array), 15 => Some(DataType::Int64Array), 16 => Some(DataType::Uint64Array), 17 => Some(DataType::StringArray), 18 => Some(DataType::HrTime), 19 => Some(DataType::NvList), 20 => Some(DataType::NvListArray), 21 => Some(DataType::BooleanValue), 22 => Some(DataType::Int8), 23 => Some(DataType::Uint8), 24 => Some(DataType::BooleanArray), 25 => Some(DataType::Int8Array), 26 => Some(DataType::Uint8Array), _ => None, } } pub fn to_u8(self) -> u8 { match self { DataType::Unknown => 0, DataType::Boolean => 1, DataType::Byte => 2, DataType::Int16 => 3, DataType::Uint16 => 4, DataType::Int32 => 5, DataType::Uint32 => 6, DataType::Int64 => 7, DataType::Uint64 => 8, DataType::String => 9, DataType::ByteArray => 10, DataType::Int16Array => 11, DataType::Uint16Array => 12, DataType::Int32Array => 13, DataType::Uint32Array => 14, DataType::Int64Array => 15, DataType::Uint64Array => 16, DataType::StringArray => 17, DataType::HrTime => 18, DataType::NvList => 19, DataType::NvListArray => 20, DataType::BooleanValue => 21, DataType::Int8 => 22, DataType::Uint8 => 23, DataType::BooleanArray => 24, DataType::Int8Array => 25, DataType::Uint8Array => 26, } } } Pimped out nvpairs for more convenience use redox::*; // nvp implementation version pub const NV_VERSION: i32 = 0; // nvlist header //#[derive(Debug)] pub struct NvList { pub version: i32, pub nvflag: u32, // persistent flags pub pairs: Vec<(String, NvValue)>, } impl NvList { pub fn new(nvflag: u32) -> Self { NvList { version: NV_VERSION, nvflag: nvflag, pairs: Vec::new(), } } pub fn find(&self, name: &str) -> Option<&NvValue> { for pair in &self.pairs { if pair.0 == name { return Some(&pair.1); } } None } pub fn find_mut(&mut self, name: &str) -> Option<&mut NvValue> { for pair in &mut self.pairs { if pair.0 == name { return Some(&mut pair.1); } } None } pub fn get<'a, T: GetNvValue<'a>>(&'a self, name: &str) -> Option<T> { self.find(name).and_then(|x| GetNvValue::get(x)) 
} } impl fmt::Debug for NvList { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { try!(write!(f, "NvList {{ version: {:X}, nvflag: {:X}, pairs: [\n", self.version, self.nvflag)); for &(ref name, ref value) in &self.pairs { if name.is_empty() { break; } try!(write!(f, "{} : {:?}\n", name, value)); } try!(write!(f, "] }}\n")); Ok(()) } } // TODO Auto implement Debug. format! currently crashes with big u32 values //#[derive(Debug)] pub enum NvValue { Unknown, Boolean, Byte(u8), Int16(i16), Uint16(u16), Int32(i32), Uint32(u32), Int64(i64), Uint64(u64), String(String), ByteArray(Vec<u8>), Int16Array(Vec<i16>), Uint16Array(Vec<u16>), Int32Array(Vec<i32>), Uint32Array(Vec<u32>), Int64Array(Vec<i64>), Uint64Array(Vec<u64>), StringArray(Vec<String>), HrTime(i64), NvList(NvList), NvListArray(Vec<NvList>), BooleanValue(bool), Int8(i8), Uint8(u8), BooleanArray(Vec<bool>), Int8Array(Vec<i8>), Uint8Array(Vec<u8>), } impl NvValue { pub fn data_type(&self) -> DataType { match *self { NvValue::Unknown => DataType::Unknown, NvValue::Boolean => DataType::Boolean, NvValue::Byte(_) => DataType::Byte, NvValue::Int16(_) => DataType::Int16, NvValue::Uint16(_) => DataType::Uint16, NvValue::Int32(_) => DataType::Int32, NvValue::Uint32(_) => DataType::Uint32, NvValue::Int64(_) => DataType::Int64, NvValue::Uint64(_) => DataType::Uint64, NvValue::String(_) => DataType::String, NvValue::ByteArray(_) => DataType::ByteArray, NvValue::Int16Array(_) => DataType::Int16Array, NvValue::Uint16Array(_) => DataType::Uint16Array, NvValue::Int32Array(_) => DataType::Int32Array, NvValue::Uint32Array(_) => DataType::Uint32Array, NvValue::Int64Array(_) => DataType::Int64Array, NvValue::Uint64Array(_) => DataType::Uint64Array, NvValue::StringArray(_) => DataType::StringArray, NvValue::HrTime(_) => DataType::HrTime, NvValue::NvList(_) => DataType::NvList, NvValue::NvListArray(_) => DataType::NvListArray, NvValue::BooleanValue(_) => DataType::BooleanValue, NvValue::Int8(_) => DataType::Int8, 
NvValue::Uint8(_) => DataType::Uint8, NvValue::BooleanArray(_) => DataType::BooleanArray, NvValue::Int8Array(_) => DataType::Int8Array, NvValue::Uint8Array(_) => DataType::Uint8Array, } } pub fn num_elements(&self) -> usize { match *self { NvValue::Unknown => 1, NvValue::Boolean => 1, NvValue::Byte(_) => 1, NvValue::Int16(_) => 1, NvValue::Uint16(_) => 1, NvValue::Int32(_) => 1, NvValue::Uint32(_) => 1, NvValue::Int64(_) => 1, NvValue::Uint64(_) => 1, NvValue::String(_) => 1, NvValue::ByteArray(ref a) => a.len(), NvValue::Int16Array(ref a) => a.len(), NvValue::Uint16Array(ref a) => a.len(), NvValue::Int32Array(ref a) => a.len(), NvValue::Uint32Array(ref a) => a.len(), NvValue::Int64Array(ref a) => a.len(), NvValue::Uint64Array(ref a) => a.len(), NvValue::StringArray(ref a) => a.len(), NvValue::HrTime(_) => 1, NvValue::NvList(_) => 1, NvValue::NvListArray(ref a) => a.len(), NvValue::BooleanValue(_) => 1, NvValue::Int8(_) => 1, NvValue::Uint8(_) => 1, NvValue::BooleanArray(ref a) => a.len(), NvValue::Int8Array(ref a) => a.len(), NvValue::Uint8Array(ref a) => a.len(), } } } impl fmt::Debug for NvValue { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { NvValue::Int64(v) => write!(f, "Int64(0x{:X})", v), NvValue::Uint64(v) => write!(f, "Uint64(0x{:X})", v), NvValue::NvList(ref v) => write!(f, "NvList({:?})", v), NvValue::NvListArray(ref v) => { try!(write!(f, "NvListArray([")); for nv_list in v { try!(write!(f, "NvList({:?})", nv_list)); } write!(f, "])") }, NvValue::String(ref v) => { write!(f, "String({})", v) }, _ => write!(f, "{:?}", self), } } } #[derive(Copy, Clone, Debug)] pub enum DataType { Unknown = 0, Boolean, Byte, Int16, Uint16, Int32, Uint32, Int64, Uint64, String, ByteArray, Int16Array, Uint16Array, Int32Array, Uint32Array, Int64Array, Uint64Array, StringArray, HrTime, NvList, NvListArray, BooleanValue, Int8, Uint8, BooleanArray, Int8Array, Uint8Array, } impl DataType { pub fn from_u8(u: u8) -> Option<DataType> { match u { 0 => 
                Some(DataType::Unknown),
            1 => Some(DataType::Boolean),
            2 => Some(DataType::Byte),
            3 => Some(DataType::Int16),
            4 => Some(DataType::Uint16),
            5 => Some(DataType::Int32),
            6 => Some(DataType::Uint32),
            7 => Some(DataType::Int64),
            8 => Some(DataType::Uint64),
            9 => Some(DataType::String),
            10 => Some(DataType::ByteArray),
            11 => Some(DataType::Int16Array),
            12 => Some(DataType::Uint16Array),
            13 => Some(DataType::Int32Array),
            14 => Some(DataType::Uint32Array),
            15 => Some(DataType::Int64Array),
            16 => Some(DataType::Uint64Array),
            17 => Some(DataType::StringArray),
            18 => Some(DataType::HrTime),
            19 => Some(DataType::NvList),
            20 => Some(DataType::NvListArray),
            21 => Some(DataType::BooleanValue),
            22 => Some(DataType::Int8),
            23 => Some(DataType::Uint8),
            24 => Some(DataType::BooleanArray),
            25 => Some(DataType::Int8Array),
            26 => Some(DataType::Uint8Array),
            // Any other tag byte is not a known nvpair data type.
            _ => None,
        }
    }

    // Inverse of `from_u8`: the raw tag byte for this data type.
    pub fn to_u8(self) -> u8 {
        match self {
            DataType::Unknown => 0,
            DataType::Boolean => 1,
            DataType::Byte => 2,
            DataType::Int16 => 3,
            DataType::Uint16 => 4,
            DataType::Int32 => 5,
            DataType::Uint32 => 6,
            DataType::Int64 => 7,
            DataType::Uint64 => 8,
            DataType::String => 9,
            DataType::ByteArray => 10,
            DataType::Int16Array => 11,
            DataType::Uint16Array => 12,
            DataType::Int32Array => 13,
            DataType::Uint32Array => 14,
            DataType::Int64Array => 15,
            DataType::Uint64Array => 16,
            DataType::StringArray => 17,
            DataType::HrTime => 18,
            DataType::NvList => 19,
            DataType::NvListArray => 20,
            DataType::BooleanValue => 21,
            DataType::Int8 => 22,
            DataType::Uint8 => 23,
            DataType::BooleanArray => 24,
            DataType::Int8Array => 25,
            DataType::Uint8Array => 26,
        }
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Conversion used by `NvList::get`: each impl extracts the Rust value from
// the one `NvValue` variant it corresponds to, returning `None` on any
// other variant.
pub trait GetNvValue<'a>: Sized {
    fn get(value: &'a NvValue) -> Option<Self>;
}

impl<'a> GetNvValue<'a> for bool {
    fn get(value: &'a NvValue) -> Option<Self> {
        match *value {
            NvValue::BooleanValue(v) => Some(v),
            _ => None,
        }
    }
}

impl<'a> GetNvValue<'a> for u8 {
    // Note: extracts from `Byte`, not from `Uint8`.
    fn get(value: &'a NvValue) -> Option<Self> {
        match *value {
            NvValue::Byte(v) => Some(v),
            _ => None,
        }
    }
}

impl<'a> GetNvValue<'a> for u16 {
    fn get(value: &'a NvValue) -> Option<Self> {
        match *value {
            NvValue::Uint16(v) => Some(v),
            _ => None,
        }
    }
}

impl<'a> GetNvValue<'a> for u32 {
    fn get(value: &'a NvValue) -> Option<Self> {
        match *value {
            NvValue::Uint32(v) => Some(v),
            _ => None,
        }
    }
}

impl<'a> GetNvValue<'a> for u64 {
    fn get(value: &'a NvValue) -> Option<Self> {
        match *value {
            NvValue::Uint64(v) => Some(v),
            _ => None,
        }
    }
}

impl<'a> GetNvValue<'a> for i16 {
    fn get(value: &'a NvValue) -> Option<Self> {
        match *value {
            NvValue::Int16(v) => Some(v),
            _ => None,
        }
    }
}

impl<'a> GetNvValue<'a> for i32 {
    fn get(value: &'a NvValue) -> Option<Self> {
        match *value {
            NvValue::Int32(v) => Some(v),
            _ => None,
        }
    }
}

impl<'a> GetNvValue<'a> for i64 {
    fn get(value: &'a NvValue) -> Option<Self> {
        match *value {
            NvValue::Int64(v) => Some(v),
            _ => None,
        }
    }
}

impl<'a> GetNvValue<'a> for &'a NvList {
    fn get(value: &'a NvValue) -> Option<Self> {
        match *value {
            NvValue::NvList(ref v) => Some(v),
            _ => None,
        }
    }
}

impl<'a> GetNvValue<'a> for &'a Vec<NvList> {
    fn get(value: &'a NvValue) -> Option<Self> {
        match *value {
            NvValue::NvListArray(ref v) => Some(v),
            _ => None,
        }
    }
}
use std::collections::hash_map::Entry;
use std::fmt;
use std::mem::size_of;
use std::result::Result as StdResult;

use itertools::Itertools;

use pretty::{Arena, DocAllocator, DocBuilder};

use base::symbol::Symbol;
use base::types::{ArcType, Type, TypeEnv};
use types::*;
use base::fnv::FnvMap;
use interner::InternedStr;
use compiler::DebugInfo;
use gc::{DataDef, Gc, GcPtr, Generation, Move, Traverseable, WriteOnly};
use array::Array;
use thread::{Status, Thread};
use {Error, Result, Variants};

use self::Value::{Closure, Float, Function, Int, PartialApplication, String};

mopafy!(Userdata);

// Trait for user-supplied values stored in the VM. Cloning across GC
// generations is opt-in: the default `deep_clone` refuses.
pub trait Userdata: ::mopa::Any + Traverseable + fmt::Debug + Send + Sync {
    fn deep_clone(&self, deep_cloner: &mut Cloner) -> Result<GcPtr<Box<Userdata>>> {
        let _ = deep_cloner;
        Err(Error::Message("Userdata cannot be cloned".into()))
    }
}

impl PartialEq for Userdata {
    // Userdata values are only equal when they are the same object
    // (pointer identity).
    fn eq(&self, other: &Userdata) -> bool {
        self as *const _ == other as *const _
    }
}

// A closure: the compiled function plus its captured upvalues, allocated
// inline in the GC heap (hence the trailing `Array`).
#[derive(Debug, PartialEq)]
#[repr(C)]
pub struct ClosureData {
    pub function: GcPtr<BytecodeFunction>,
    pub upvars: Array<Value>,
}

impl Traverseable for ClosureData {
    fn traverse(&self, gc: &mut Gc) {
        self.function.traverse(gc);
        self.upvars.traverse(gc);
    }
}

// GC allocation definition for a closure whose upvalues are already known.
pub struct ClosureDataDef<'b>(pub GcPtr<BytecodeFunction>, pub &'b [Value]);

impl<'b> Traverseable for ClosureDataDef<'b> {
    fn traverse(&self, gc: &mut Gc) {
        self.0.traverse(gc);
        self.1.traverse(gc);
    }
}

unsafe impl<'b> DataDef for ClosureDataDef<'b> {
    type Value = ClosureData;
    fn size(&self) -> usize {
        size_of::<GcPtr<BytecodeFunction>>() + Array::<Value>::size_of(self.1.len())
    }
    fn initialize<'w>(self, mut result: WriteOnly<'w, ClosureData>) -> &'w mut ClosureData {
        unsafe {
            let result = &mut *result.as_mut_ptr();
            result.function = self.0;
            result.upvars.initialize(self.1.iter().cloned());
            result
        }
    }
}

// GC allocation definition for a closure with `usize` upvalue slots that
// are filled in later; the slots are zero-initialized with `Int(0)`.
pub struct ClosureInitDef(pub GcPtr<BytecodeFunction>, pub usize);

impl Traverseable for ClosureInitDef {
    fn traverse(&self, gc: &mut Gc) {
        self.0.traverse(gc);
    }
}

unsafe impl DataDef for ClosureInitDef {
    type Value = ClosureData;
    fn size(&self) -> usize {
        size_of::<GcPtr<BytecodeFunction>>() + Array::<Value>::size_of(self.1)
    }
    fn initialize<'w>(self, mut result: WriteOnly<'w, ClosureData>) -> &'w mut ClosureData {
        use std::ptr;
        unsafe {
            let result = &mut *result.as_mut_ptr();
            result.function = self.0;
            result.upvars.set_len(self.1);
            // Fill every slot so the GC never sees uninitialized values.
            for var in &mut result.upvars {
                ptr::write(var, Int(0));
            }
            result
        }
    }
}

// A compiled function: instructions plus the constant/debug tables they
// reference.
#[derive(Debug, PartialEq)]
#[cfg_attr(feature = "serde_derive", derive(DeserializeState, SerializeState))]
#[cfg_attr(feature = "serde_derive", serde(deserialize_state = "::serialization::DeSeed"))]
#[cfg_attr(feature = "serde_derive", serde(serialize_state = "::serialization::SeSeed"))]
pub struct BytecodeFunction {
    #[cfg_attr(feature = "serde_derive", serde(state_with = "::serialization::symbol"))]
    pub name: Symbol,
    pub args: VmIndex,
    pub max_stack_size: VmIndex,
    pub instructions: Vec<Instruction>,
    #[cfg_attr(feature = "serde_derive", serde(state))]
    pub inner_functions: Vec<GcPtr<BytecodeFunction>>,
    #[cfg_attr(feature = "serde_derive", serde(state))]
    pub strings: Vec<InternedStr>,
    #[cfg_attr(feature = "serde_derive", serde(state))]
    pub records: Vec<Vec<InternedStr>>,
    #[cfg_attr(feature = "serde_derive", serde(state))]
    pub debug_info: DebugInfo,
}

impl Traverseable for BytecodeFunction {
    fn traverse(&self, gc: &mut Gc) {
        self.inner_functions.traverse(gc);
    }
}

// A data value (record or variant). The highest bit of `tag` marks records;
// see `record_bit` below.
#[derive(Debug)]
#[repr(C)]
pub struct DataStruct {
    tag: VmTag,
    pub fields: Array<Value>,
}

impl Traverseable for DataStruct {
    fn traverse(&self, gc: &mut Gc) {
        self.fields.traverse(gc);
    }
}

impl PartialEq for DataStruct {
    fn eq(&self, other: &DataStruct) -> bool {
        self.tag == other.tag && self.fields == other.fields
    }
}

impl DataStruct {
    // Mask of the record marker: the most significant bit of `VmTag`.
    pub fn record_bit() -> VmTag {
        1 << ((size_of::<VmTag>() * 8) - 1)
    }

    // The tag with the record marker stripped.
    pub fn tag(&self) -> VmTag {
        self.tag & !Self::record_bit()
    }

    pub fn is_record(&self) -> bool {
        (self.tag & Self::record_bit()) != 0
    }
}

/// Definition for data values in the VM
pub
struct Def<'b> {
    pub tag: VmTag,
    pub elems: &'b [Value],
}

unsafe impl<'b> DataDef for Def<'b> {
    type Value = DataStruct;
    fn size(&self) -> usize {
        size_of::<usize>() + Array::<Value>::size_of(self.elems.len())
    }
    fn initialize<'w>(self, mut result: WriteOnly<'w, DataStruct>) -> &'w mut DataStruct {
        unsafe {
            let result = &mut *result.as_mut_ptr();
            result.tag = self.tag;
            result.fields.initialize(self.elems.iter().cloned());
            result
        }
    }
}

impl<'b> Traverseable for Def<'b> {
    fn traverse(&self, gc: &mut Gc) {
        self.elems.traverse(gc);
    }
}

// Like `Def`, but the allocated `DataStruct` is marked as a record by
// setting the most significant tag bit (see `DataStruct::record_bit`).
pub struct RecordDef<'b> {
    pub tag: VmTag,
    pub elems: &'b [Value],
}

unsafe impl<'b> DataDef for RecordDef<'b> {
    type Value = DataStruct;
    fn size(&self) -> usize {
        size_of::<usize>() + Array::<Value>::size_of(self.elems.len())
    }
    fn initialize<'w>(self, mut result: WriteOnly<'w, DataStruct>) -> &'w mut DataStruct {
        unsafe {
            let result = &mut *result.as_mut_ptr();
            // Tag plus the record marker bit.
            result.tag = self.tag | (1 << ((size_of::<VmTag>() * 8) - 1));
            result.fields.initialize(self.elems.iter().cloned());
            result
        }
    }
}

impl<'b> Traverseable for RecordDef<'b> {
    fn traverse(&self, gc: &mut Gc) {
        self.elems.traverse(gc);
    }
}

// Private module so that `GcStr` can only be constructed through the
// checked/unsafe constructors below, keeping the UTF-8 invariant.
mod gc_str {
    use super::ValueArray;
    use gc::{Gc, GcPtr, Generation, Traverseable};

    use std::fmt;
    use std::str;
    use std::ops::Deref;

    // A GC-allocated string: a byte `ValueArray` guaranteed to hold UTF-8.
    #[derive(Copy, Clone, PartialEq)]
    pub struct GcStr(GcPtr<ValueArray>);

    impl fmt::Debug for GcStr {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            f.debug_tuple("GcStr").field(&&**self).finish()
        }
    }

    impl Eq for GcStr {}

    impl GcStr {
        // Checked constructor: fails unless the array is a byte array
        // containing valid UTF-8.
        pub fn from_utf8(array: GcPtr<ValueArray>) -> Result<GcStr, ()> {
            unsafe {
                if array
                    .as_slice::<u8>()
                    .and_then(|bytes| str::from_utf8(bytes).ok())
                    .is_some()
                {
                    Ok(GcStr::from_utf8_unchecked(array))
                } else {
                    Err(())
                }
            }
        }
        // Caller must guarantee the array holds valid UTF-8 bytes.
        pub unsafe fn from_utf8_unchecked(array: GcPtr<ValueArray>) -> GcStr {
            GcStr(array)
        }
        pub fn into_inner(self) -> GcPtr<ValueArray> {
            self.0
        }
        pub fn generation(&self) -> Generation {
            self.0.generation()
        }
    }

    impl Deref for GcStr {
        type Target = str;
        fn deref(&self) -> &str {
            // UTF-8 invariant is established at construction time.
            unsafe { str::from_utf8_unchecked(self.0.as_slice::<u8>().unwrap()) }
        }
    }

    impl Traverseable for GcStr {
        fn traverse(&self, gc: &mut Gc) {
            self.0.traverse(gc)
        }
    }
}
pub use self::gc_str::GcStr;

// The VM's runtime value representation: immediates (`Byte`/`Int`/`Float`/
// `Tag`) plus GC pointers for everything else.
#[derive(Copy, Clone, PartialEq)]
#[cfg_attr(feature = "serde_derive", derive(DeserializeState, SerializeState))]
#[cfg_attr(feature = "serde_derive", serde(deserialize_state = "::serialization::DeSeed"))]
#[cfg_attr(feature = "serde_derive", serde(serialize_state = "::serialization::SeSeed"))]
pub enum Value {
    Byte(u8),
    Int(VmInt),
    Float(f64),
    String(#[cfg_attr(feature = "serde_derive", serde(deserialize_state))] GcStr),
    Tag(VmTag),
    Data(
        #[cfg_attr(feature = "serde_derive",
                   serde(deserialize_state_with = "::serialization::gc::deserialize_data"))]
        #[cfg_attr(feature = "serde_derive", serde(serialize_state))]
        GcPtr<DataStruct>,
    ),
    Array(
        #[cfg_attr(feature = "serde_derive",
                   serde(deserialize_state_with = "::serialization::gc::deserialize_array"))]
        #[cfg_attr(feature = "serde_derive", serde(serialize_state))]
        GcPtr<ValueArray>,
    ),
    Function(#[cfg_attr(feature = "serde_derive", serde(state))] GcPtr<ExternFunction>),
    Closure(
        #[cfg_attr(feature = "serde_derive", serde(state_with = "::serialization::closure"))]
        GcPtr<ClosureData>,
    ),
    PartialApplication(
        #[cfg_attr(feature = "serde_derive",
                   serde(deserialize_state_with = "::serialization::deserialize_application"))]
        #[cfg_attr(feature = "serde_derive", serde(serialize_state))]
        GcPtr<PartialApplicationData>,
    ),
    // TODO Implement serializing of userdata
    #[cfg_attr(feature = "serde_derive", serde(skip_deserializing))]
    Userdata(
        #[cfg_attr(feature = "serde_derive",
                   serde(serialize_with = "::serialization::serialize_userdata"))]
        GcPtr<Box<Userdata>>,
    ),
    #[cfg_attr(feature = "serde_derive", serde(skip_deserializing))]
    #[cfg_attr(feature = "serde_derive", serde(skip_serializing))]
    Thread(#[cfg_attr(feature = "serde_derive", serde(deserialize_state))] GcPtr<Thread>),
}

impl Value {
    // GC generation the value lives in; immediates report the default
    // generation.
    pub fn generation(self) -> Generation {
        match self
        {
            String(p) => p.generation(),
            Value::Data(p) => p.generation(),
            Function(p) => p.generation(),
            Closure(p) => p.generation(),
            Value::Array(p) => p.generation(),
            PartialApplication(p) => p.generation(),
            Value::Userdata(p) => p.generation(),
            Value::Thread(p) => p.generation(),
            Value::Tag(_) | Value::Byte(_) | Int(_) | Float(_) => Generation::default(),
        }
    }
}

// Precedence context while pretty-printing: constructor arguments may need
// parentheses (see `enclose` in `pretty_data`), top-level values do not.
#[derive(PartialEq, Copy, Clone, PartialOrd)]
enum Prec {
    Top,
    Constructor,
}
use self::Prec::*;

// Public, type-directed pretty printer for a `Value`; configure with
// `max_level` / `width` and render through its `Display` impl.
pub struct ValuePrinter<'a> {
    pub typ: &'a ArcType,
    pub env: &'a TypeEnv,
    pub value: Value,
    pub max_level: i32,
    pub width: usize,
}

impl<'t> ValuePrinter<'t> {
    pub fn new(env: &'t TypeEnv, typ: &'t ArcType, value: Value) -> ValuePrinter<'t> {
        ValuePrinter {
            typ: typ,
            env: env,
            value: value,
            max_level: i32::max_value(),
            width: 80,
        }
    }

    // Builder-style setter: maximum nesting depth before printing "..".
    pub fn max_level(&mut self, max_level: i32) -> &mut ValuePrinter<'t> {
        self.max_level = max_level;
        self
    }

    // Builder-style setter: render width in columns.
    pub fn width(&mut self, width: usize) -> &mut ValuePrinter<'t> {
        self.width = width;
        self
    }
}

// Internal recursion state for the printer; `level` counts down to 0.
struct InternalPrinter<'a, 't> {
    typ: &'t ArcType,
    env: &'t TypeEnv,
    arena: &'a Arena<'a>,
    prec: Prec,
    level: i32,
}

impl<'a> fmt::Display for ValuePrinter<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let arena = Arena::new();
        let mut s = Vec::new();
        InternalPrinter {
            typ: self.typ,
            env: self.env,
            arena: &arena,
            prec: Top,
            level: self.max_level,
        }.pretty(self.value)
            .group()
            .1
            .render(self.width, &mut s)
            .map_err(|_| fmt::Error)?;
        write!(f, "{}", ::std::str::from_utf8(&s).expect("utf-8"))
    }
}

impl<'a, 't> InternalPrinter<'a, 't> {
    // Builds the pretty-printing document for `value`, guided by `self.typ`.
    fn pretty(&self, value: Value) -> DocBuilder<'a, Arena<'a>> {
        use std::iter;

        let arena = self.arena;
        match value {
            // Depth budget exhausted.
            _ if self.level == 0 => arena.text(".."),
            Value::String(s) => arena.text(format!("{:?}", s)),
            Value::Data(ref data) => self.pretty_data(data.tag, data.fields.iter().cloned()),
            Value::Tag(tag) => self.pretty_data(tag, iter::empty()),
            Value::Function(ref function) => chain![arena;
                "<extern ",
                function.id.declared_name().to_string(),
                ">"
            ],
            Value::Closure(ref closure) => chain![arena;
                "<",
                arena.text(closure.function.name.declared_name().to_string()),
                arena.concat(closure.upvars.iter().zip(&closure.function.debug_info.upvars)
                    .map(|(field, info)| {
                        chain![arena;
                            arena.space(),
                            info.name.clone(),
                            ":",
                            arena.space(),
                            self.p(&info.typ, Top).pretty(*field)
                        ]
                    }).intersperse(arena.text(","))),
                ">"
            ],
            Value::Array(ref array) => chain![arena;
                "[",
                arena.concat(array.iter().map(|field| {
                    // Element type is the first argument of the array's
                    // type application, when available.
                    match **self.typ {
                        Type::App(_, ref args) => self.p(&args[0], Top).pretty(field),
                        _ => arena.text(format!("{:?}", field)),
                    }
                }).intersperse(arena.text(",").append(arena.space()))),
                "]"
            ],
            Value::PartialApplication(p) => arena.text(format!("{:?}", p)),
            Value::Userdata(ref data) => arena.text(format!("{:?}", data)),
            Value::Thread(thread) => arena.text(format!("{:?}", thread)),
            Value::Byte(b) => arena.text(format!("{}", b)),
            Value::Int(i) => {
                use base::types::BuiltinType;
                // An `Int` payload may represent either an integer or a char
                // depending on its static type.
                match **self.typ {
                    Type::Builtin(BuiltinType::Int) => arena.text(format!("{}", i)),
                    Type::Builtin(BuiltinType::Char) => if 0 <= i && i <= ::std::u32::MAX as isize {
                        match ::std::char::from_u32(i as u32) {
                            Some('"') => arena.text(format!("'{}'", '"')),
                            Some(c) => arena.text(format!("'{}'", c.escape_default())),
                            None => unreachable!(),
                        }
                    } else {
                        unreachable!()
                    },
                    _ => unreachable!(),
                }
            },
            Value::Float(f) => arena.text(format!("{}", f)),
        }
    }

    // Prints a tagged data value as a record, a variant constructor, or —
    // when the type gives no guidance — a generic brace-delimited list.
    fn pretty_data<I>(&self, tag: VmTag, fields: I) -> DocBuilder<'a, Arena<'a>>
    where
        I: IntoIterator<Item = Value>,
    {
        // Wraps `doc` in parentheses when the surrounding precedence
        // requires it.
        fn enclose<'a>(
            p: Prec,
            limit: Prec,
            arena: &'a Arena<'a>,
            doc: DocBuilder<'a, Arena<'a>>,
        ) -> DocBuilder<'a, Arena<'a>> {
            if p >= limit {
                chain![arena; "(", doc, ")"]
            } else {
                doc
            }
        }
        use base::resolve::remove_aliases_cow;
        use base::types::arg_iter;

        let typ = remove_aliases_cow(self.env, self.typ);
        let arena = self.arena;
        match **typ {
            Type::Record(ref row) => chain![arena;
                "{",
                arena.concat(fields.into_iter().zip(row.row_iter())
                    .map(|(field, type_field)| {
                        chain![arena;
                            arena.space(),
                            type_field.name.to_string(),
                            ":",
                            arena.space(),
                            self.p(&type_field.typ, Top).pretty(field)
                        ]
                    }).intersperse(arena.text(","))),
                arena.space(),
                "}"
            ],
            Type::Variant(ref row) => {
                // The runtime tag indexes into the variant row.
                let type_field = row.row_iter()
                    .nth(tag as usize)
                    .expect("Variant tag is out of bounds");
                let mut empty = true;
                let doc = chain![arena;
                    type_field.name.declared_name().to_string(),
                    arena.concat(fields.into_iter().zip(arg_iter(&type_field.typ))
                        .map(|(field, typ)| {
                            empty = false;
                            arena.space().append(self.p(typ, Constructor).pretty(field))
                        }))
                ];
                // Nullary constructors never need parentheses.
                if empty {
                    doc
                } else {
                    enclose(self.prec, Constructor, arena, doc)
                }
            }
            _ => chain![arena;
                "{",
                arena.concat(fields.into_iter().map(|field| {
                    arena.space().append(self.p(&Type::hole(), Top).pretty(field))
                }).intersperse(arena.text(","))),
                arena.space(),
                "}"
            ],
        }
    }

    // Child printer: same environment, new type/precedence, one level less.
    fn p(&self, typ: &'t ArcType, prec: Prec) -> InternalPrinter<'a, 't> {
        InternalPrinter {
            typ: typ,
            env: self.env,
            arena: self.arena,
            prec: prec,
            level: self.level - 1,
        }
    }
}

// A callable value: either compiled bytecode or an extern function.
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "serde_derive", derive(DeserializeState, SerializeState))]
#[cfg_attr(feature = "serde_derive", serde(deserialize_state = "::serialization::DeSeed"))]
#[cfg_attr(feature = "serde_derive", serde(serialize_state = "::serialization::SeSeed"))]
pub enum Callable {
    Closure(
        #[cfg_attr(feature = "serde_derive", serde(state_with = "::serialization::closure"))]
        GcPtr<ClosureData>,
    ),
    Extern(#[cfg_attr(feature = "serde_derive", serde(state))] GcPtr<ExternFunction>),
}

impl Callable {
    pub fn name(&self) -> &Symbol {
        match *self {
            Callable::Closure(ref closure) => &closure.function.name,
            Callable::Extern(ref ext) => &ext.id,
        }
    }

    pub fn args(&self) -> VmIndex {
        match *self {
            Callable::Closure(ref closure) => closure.function.args,
            Callable::Extern(ref ext) => ext.args,
        }
    }
}

impl PartialEq for Callable {
    // NOTE(review): callables always compare unequal, even to themselves —
    // presumably deliberate, but worth confirming against call sites.
    fn eq(&self, _: &Callable) -> bool {
        false
    }
}

impl Traverseable for Callable {
    fn traverse(&self, gc: &mut Gc) {
        match *self {
            Callable::Closure(ref closure) =>
closure.traverse(gc), Callable::Extern(ref ext) => ext.traverse(gc), } } } #[derive(Debug)] #[repr(C)] #[cfg_attr(feature = "serde_derive", derive(SerializeState))] #[cfg_attr(feature = "serde_derive", serde(serialize_state = "::serialization::SeSeed"))] pub struct PartialApplicationData { #[cfg_attr(feature = "serde_derive", serde(serialize_state))] pub function: Callable, #[cfg_attr(feature = "serde_derive", serde(serialize_state))] pub args: Array<Value>, } impl PartialEq for PartialApplicationData { fn eq(&self, _: &PartialApplicationData) -> bool { false } } impl Traverseable for PartialApplicationData { fn traverse(&self, gc: &mut Gc) { self.function.traverse(gc); self.args.traverse(gc); } } pub struct PartialApplicationDataDef<'b>(pub Callable, pub &'b [Value]); impl<'b> Traverseable for PartialApplicationDataDef<'b> { fn traverse(&self, gc: &mut Gc) { self.0.traverse(gc); self.1.traverse(gc); } } unsafe impl<'b> DataDef for PartialApplicationDataDef<'b> { type Value = PartialApplicationData; fn size(&self) -> usize { use std::mem::size_of; size_of::<Callable>() + Array::<Value>::size_of(self.1.len()) } fn initialize<'w>( self, mut result: WriteOnly<'w, PartialApplicationData>, ) -> &'w mut PartialApplicationData { unsafe { let result = &mut *result.as_mut_ptr(); result.function = self.0; result.args.initialize(self.1.iter().cloned()); result } } } impl Traverseable for Value { fn traverse(&self, gc: &mut Gc) { match *self { String(ref data) => data.traverse(gc), Value::Data(ref data) => data.traverse(gc), Value::Array(ref data) => data.traverse(gc), Function(ref data) => data.traverse(gc), Closure(ref data) => data.traverse(gc), Value::Userdata(ref data) => data.traverse(gc), PartialApplication(ref data) => data.traverse(gc), Value::Thread(ref thread) => thread.traverse(gc), Value::Tag(_) | Value::Byte(_) | Int(_) | Float(_) => (), } } } impl fmt::Debug for Value { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { struct Level<'b>(i32, &'b Value); 
struct LevelSlice<'b>(i32, &'b [Value]); impl<'b> fmt::Debug for LevelSlice<'b> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let level = self.0; if level <= 0 || self.1.is_empty() { return Ok(()); } write!(f, "{:?}", Level(level - 1, &self.1[0]))?; for v in &self.1[1..] { write!(f, ", {:?}", Level(level - 1, v))?; } Ok(()) } } impl<'b> fmt::Debug for Level<'b> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let level = self.0; if level <= 0 { return Ok(()); } match *self.1 { Value::Byte(i) => write!(f, "{:?}b", i), Int(i) => write!(f, "{:?}", i), Float(x) => write!(f, "{:?}f", x), String(x) => write!(f, "{:?}", &*x), Value::Tag(tag) => write!(f, "{{{:?}: }}", tag), Value::Data(ref data) => write!( f, "{{{:?}: {:?}}}", data.tag, LevelSlice(level - 1, &data.fields) ), Value::Array(ref array) => { let mut first = true; write!(f, "[")?; for value in array.iter() { if !first { write!(f, ", ")?; } first = false; write!(f, "{:?}", Level(level - 1, &value))?; } write!(f, "]") } Function(ref func) => write!(f, "<EXTERN {:?}>", &**func), Closure(ref closure) => { let p: *const _ = &*closure.function; write!(f, "<{:?} {:?}>", closure.function.name, p) } PartialApplication(ref app) => { let name = match app.function { Callable::Closure(_) => "<CLOSURE>", Callable::Extern(_) => "<EXTERN>", }; write!(f, "<App {:?}{:?}>", name, LevelSlice(level - 1, &app.args)) } Value::Userdata(ref data) => write!(f, "<Userdata {:?}>", &**data), Value::Thread(_) => write!(f, "<thread>"), } } } write!(f, "{:?}", Level(7, self)) } } #[cfg_attr(feature = "serde_derive", derive(SerializeState))] #[cfg_attr(feature = "serde_derive", serde(serialize_state = "::serialization::SeSeed"))] pub struct ExternFunction { #[cfg_attr(feature = "serde_derive", serde(serialize_state_with = "::serialization::symbol::serialize"))] pub id: Symbol, pub args: VmIndex, #[cfg_attr(feature = "serde_derive", serde(skip_serializing))] pub function: extern "C" fn(&Thread) -> Status, } impl Clone for 
ExternFunction {
    fn clone(&self) -> ExternFunction {
        ExternFunction {
            id: self.id.clone(),
            args: self.args,
            // `extern "C" fn` pointers are `Copy`; this is a plain pointer copy.
            function: self.function,
        }
    }
}

impl PartialEq for ExternFunction {
    fn eq(&self, other: &ExternFunction) -> bool {
        // Equal only when id, arity and the exact function pointer all match.
        self.id == other.id && self.args == other.args
            && self.function as usize == other.function as usize
    }
}

impl fmt::Debug for ExternFunction {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // `function` is a plain `extern "C" fn` pointer (no v-table involved);
        // transmute it to a raw pointer so it can be printed alongside the id,
        // letting distinct extern functions with the same name be told apart.
        let p: *const () = unsafe { ::std::mem::transmute(self.function) };
        write!(f, "{} {:?}", self.id, p)
    }
}

impl Traverseable for ExternFunction {
    // Extern functions own no GC-managed values, so there is nothing to mark.
    fn traverse(&self, _: &mut Gc) {}
}

/// Representation of values which can be stored directly in an array
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum Repr {
    Byte,
    Int,
    Float,
    String,
    Array,
    // Heterogeneous arrays store full `Value`s rather than unboxed elements.
    Unknown,
    Userdata,
    Thread,
}

// Marker trait tying an unboxed Rust element type to the `Repr` tag it may be
// read back from. Unsafe: an incorrect `matches` permits type-confused reads.
pub unsafe trait ArrayRepr {
    fn matches(repr: Repr) -> bool;
}

// For every `(type, Repr)` pair this generates the `ArrayRepr` impl plus
// `DataDef` impls for slices and vectors of that type, and finally one
// `Repr::size_of` that covers every representation exhaustively.
macro_rules! impl_repr {
    ($($id: ty, $repr: path),*) => {
        $(
        unsafe impl ArrayRepr for $id {
            fn matches(repr: Repr) -> bool {
                repr == $repr
            }
        }

        unsafe impl<'a> DataDef for &'a [$id] {
            type Value = ValueArray;
            fn size(&self) -> usize {
                use std::mem::size_of;
                // Header plus unboxed element storage.
                size_of::<ValueArray>() + self.len() * size_of::<$id>()
            }
            fn initialize<'w>(self, mut result: WriteOnly<'w, ValueArray>) -> &'w mut ValueArray {
                unsafe {
                    let result = &mut *result.as_mut_ptr();
                    result.set_repr($repr);
                    result.unsafe_array_mut::<$id>().initialize(self.iter().cloned());
                    result
                }
            }
        }

        unsafe impl DataDef for Vec<$id> {
            type Value = ValueArray;
            fn size(&self) -> usize {
                // Delegate to the slice impl above.
                DataDef::size(&&self[..])
            }
            fn initialize<'w>(self, result: WriteOnly<'w, ValueArray>) -> &'w mut ValueArray {
                DataDef::initialize(&self[..], result)
            }
        }
        )*

        impl Repr {
            // Size in bytes of one element stored with this representation.
            fn size_of(self) -> usize {
                use std::mem::size_of;
                match self {
                    $(
                    $repr => size_of::<$id>(),
                    )*
                }
            }
        }
    }
}

impl_repr!
{ u8, Repr::Byte, VmInt, Repr::Int, f64, Repr::Float, GcStr, Repr::String, GcPtr<ValueArray>, Repr::Array, Value, Repr::Unknown, GcPtr<Box<Userdata>>, Repr::Userdata, GcPtr<Thread>, Repr::Thread } impl Repr { fn from_value(value: Value) -> Repr { match value { Value::Byte(_) => Repr::Byte, Value::Int(_) => Repr::Int, Value::Float(_) => Repr::Float, Value::String(_) => Repr::String, Value::Array(_) => Repr::Array, Value::Data(_) | Value::Tag(_) | Value::Function(_) | Value::Closure(_) | Value::PartialApplication(_) => Repr::Unknown, Value::Userdata(_) => Repr::Userdata, Value::Thread(_) => Repr::Thread, } } } macro_rules! on_array { ($array: expr, $f: expr) => { { let ref array = $array; unsafe { match array.repr() { Repr::Byte => $f(array.unsafe_array::<u8>()), Repr::Int => $f(array.unsafe_array::<VmInt>()), Repr::Float => $f(array.unsafe_array::<f64>()), Repr::String => $f(array.unsafe_array::<GcStr>()), Repr::Array => $f(array.unsafe_array::<GcPtr<ValueArray>>()), Repr::Unknown => $f(array.unsafe_array::<Value>()), Repr::Userdata => $f(array.unsafe_array::<GcPtr<Box<Userdata>>>()), Repr::Thread => $f(array.unsafe_array::<GcPtr<Thread>>()), } } } } } #[repr(C)] pub struct ValueArray { repr: Repr, array: Array<()>, } impl fmt::Debug for ValueArray { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("ValueArray") .field("repr", &self.repr) .field("array", on_array!(self, |x| x as &fmt::Debug)) .finish() } } impl PartialEq for ValueArray { fn eq(&self, other: &ValueArray) -> bool { self.repr == other.repr && self.iter().zip(other.iter()).all(|(l, r)| l == r) } } pub struct Iter<'a> { array: &'a ValueArray, index: usize, } impl<'a> Iterator for Iter<'a> { type Item = Value; fn next(&mut self) -> Option<Value> { if self.index < self.array.len() { let value = self.array.get(self.index); self.index += 1; Some(value) } else { None } } fn size_hint(&self) -> (usize, Option<usize>) { let i = self.array.len() - self.index; (i, Some(i)) } } pub struct 
VariantIter<'a> {
    // The array being traversed.
    array: &'a ValueArray,
    // Index of the next element to yield.
    index: usize,
}

impl<'a> Iterator for VariantIter<'a> {
    type Item = Variants<'a>;
    fn next(&mut self) -> Option<Self::Item> {
        if self.index < self.array.len() {
            let value = self.array.get(self.index);
            self.index += 1;
            // NOTE(review): the array itself is passed as the root keeping the
            // yielded value alive -- confirm against `Variants::with_root`'s
            // safety contract.
            Some(unsafe { Variants::with_root(value, self.array) })
        } else {
            None
        }
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Exact count of remaining elements.
        let i = self.array.len() - self.index;
        (i, Some(i))
    }
}

impl Traverseable for ValueArray {
    fn traverse(&self, gc: &mut Gc) {
        // Dispatch on the runtime representation and traverse the typed array.
        on_array!(*self, |array: &Array<_>| array.traverse(gc))
    }
}

impl ValueArray {
    /// Reads the element at `index`, re-wrapping the unboxed storage in the
    /// corresponding `Value` variant.
    pub fn get(&self, index: usize) -> Value {
        unsafe {
            match self.repr {
                Repr::Byte => Value::Byte(self.unsafe_get(index)),
                Repr::Int => Value::Int(self.unsafe_get(index)),
                Repr::Float => Value::Float(self.unsafe_get(index)),
                Repr::String => Value::String(self.unsafe_get(index)),
                Repr::Array => Value::Array(self.unsafe_get(index)),
                // `Unknown` arrays store full `Value`s; no re-wrapping needed.
                Repr::Unknown => self.unsafe_get(index),
                Repr::Userdata => Value::Userdata(self.unsafe_get(index)),
                Repr::Thread => Value::Thread(self.unsafe_get(index)),
            }
        }
    }
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    pub fn len(&self) -> usize {
        self.array.len()
    }
    /// Iterator yielding each element as an owned `Value`.
    pub fn iter(&self) -> Iter {
        Iter {
            array: self,
            index: 0,
        }
    }
    /// Iterator yielding each element as a rooted `Variants`.
    pub fn variant_iter(&self) -> VariantIter {
        VariantIter {
            array: self,
            index: 0,
        }
    }
    /// Total allocation size in bytes for `len` elements stored with
    /// representation `repr` (header plus unboxed element storage).
    pub fn size_of(repr: Repr, len: usize) -> usize {
        ::std::mem::size_of::<ValueArray>() + repr.size_of() * len
    }
    pub fn repr(&self) -> Repr {
        self.repr
    }
    // Unsafe: caller must ensure the stored bytes match the new representation.
    pub unsafe fn set_repr(&mut self, repr: Repr) {
        self.repr = repr;
    }
    /// Fills the array from `iter`, unboxing each `Value` into the layout
    /// selected by `self.repr`.
    ///
    /// Unsafe: `self.repr` must already be set, and every yielded `Value` must
    /// carry that representation, otherwise the `unreachable!` below fires.
    pub unsafe fn initialize<I>(&mut self, iter: I)
    where
        I: IntoIterator<Item = Value>,
    {
        let iter = iter.into_iter();
        // Expands to one match arm per unboxed representation; each arm strips
        // the `Value` wrapper before writing into the typed backing array.
        macro_rules! initialize_variants {
            ($($id: ident)+) => {
                match self.repr {
                    $(Repr::$id => {
                        let iter = iter.map(|v| match v {
                            Value::$id(x) => x,
                            _ => unreachable!(),
                        });
                        self.unsafe_array_mut().initialize(iter);
                    })+
                    Repr::Unknown => {
                        self.unsafe_array_mut().initialize(iter);
                    }
                }
            }
        }
        initialize_variants!
{ Byte Int Float String Array Userdata Thread } } pub fn as_slice<T: ArrayRepr + Copy>(&self) -> Option<&[T]> { unsafe { // If the array is empty then it may not have the correct type representation set since // there was no value to take the correct representation from if T::matches(self.repr) || self.is_empty() { Some(self.unsafe_array::<T>()) } else { None } } } unsafe fn unsafe_get<T: Copy>(&self, index: usize) -> T { ::std::mem::transmute::<&Array<()>, &Array<T>>(&self.array)[index] } unsafe fn unsafe_array<T: Copy>(&self) -> &Array<T> { ::std::mem::transmute::<&Array<()>, &Array<T>>(&self.array) } pub unsafe fn unsafe_array_mut<T: Copy>(&mut self) -> &mut Array<T> { ::std::mem::transmute::<&mut Array<()>, &mut Array<T>>(&mut self.array) } } unsafe impl<'a> DataDef for &'a ValueArray { type Value = ValueArray; fn size(&self) -> usize { ValueArray::size_of(self.repr, self.len()) } #[allow(unused_unsafe)] fn initialize<'w>(self, mut result: WriteOnly<'w, ValueArray>) -> &'w mut ValueArray { unsafe { let result = &mut *result.as_mut_ptr(); result.repr = self.repr; on_array!(self, |array: &Array<_>| { result.unsafe_array_mut().initialize(array.iter().cloned()) }); result } } } pub struct ArrayDef<'b>(pub &'b [Value]); impl<'b> Traverseable for ArrayDef<'b> { fn traverse(&self, gc: &mut Gc) { self.0.traverse(gc); } } unsafe impl<'b> DataDef for ArrayDef<'b> { type Value = ValueArray; fn size(&self) -> usize { use std::mem::size_of; let size = match self.0.first() { Some(value) => Repr::from_value(*value).size_of() * self.0.len(), None => 0, }; size_of::<ValueArray>() + size } fn initialize<'w>(self, mut result: WriteOnly<'w, ValueArray>) -> &'w mut ValueArray { unsafe { let result = &mut *result.as_mut_ptr(); match self.0.first() { Some(value) => { result.repr = Repr::from_value(*value); result.initialize(self.0.iter().cloned()); } None => { result.repr = Repr::Unknown; result.initialize(None); } } result } } } pub struct Cloner<'t> { visited: FnvMap<*const (), 
Value>, thread: &'t Thread, gc: &'t mut Gc, receiver_generation: Generation, } impl<'t> Cloner<'t> { pub fn new(thread: &'t Thread, gc: &'t mut Gc) -> Cloner<'t> { Cloner { visited: FnvMap::default(), thread: thread, receiver_generation: gc.generation(), gc: gc, } } pub fn thread(&self) -> &'t Thread { self.thread } pub fn gc(&mut self) -> &mut Gc { self.gc } /// Deep clones the entire value doing no sharing pub fn force_full_clone(&mut self) -> &mut Self { self.receiver_generation = Generation::disjoint(); self } pub fn deep_clone(&mut self, value: Value) -> Result<Value> { // Only need to clone values which belong to a younger generation than the gc that the new // value will live in if self.receiver_generation .can_contain_values_from(value.generation()) { return Ok(value); } match value { String(data) => self.deep_clone_str(data), Value::Data(data) => self.deep_clone_data(data).map(Value::Data), Value::Array(data) => self.deep_clone_array(data).map(Value::Array), Closure(data) => self.deep_clone_closure(data).map(Value::Closure), PartialApplication(data) => self.deep_clone_app(data).map(Value::PartialApplication), Function(f) => self.gc .alloc(Move(ExternFunction::clone(&f))) .map(Value::Function), Value::Tag(i) => Ok(Value::Tag(i)), Value::Byte(i) => Ok(Value::Byte(i)), Int(i) => Ok(Int(i)), Float(f) => Ok(Float(f)), Value::Userdata(userdata) => userdata.deep_clone(self).map(Value::Userdata), Value::Thread(_) => Err(Error::Message("Threads cannot be deep cloned yet".into())), } } fn deep_clone_ptr<T, A, R>(&mut self, value: GcPtr<T>, alloc: A) -> Result<StdResult<Value, R>> where A: FnOnce(&mut Gc, &T) -> Result<(Value, R)>, { let key = &*value as *const T as *const (); let new_ptr = match self.visited.entry(key) { Entry::Occupied(entry) => return Ok(Ok(*entry.get())), Entry::Vacant(entry) => { // FIXME Should allocate the real `Value` and possibly fill it later let (value, new_ptr) = alloc(self.gc, &value)?; entry.insert(value); new_ptr } }; Ok(Err(new_ptr)) 
} fn deep_clone_str(&mut self, data: GcStr) -> Result<Value> { unsafe { Ok( self.deep_clone_ptr(data.into_inner(), |gc, data| { let ptr = GcStr::from_utf8_unchecked(gc.alloc(data)?); Ok((String(ptr), ptr)) })? .unwrap_or_else(String), ) } } fn deep_clone_data(&mut self, data: GcPtr<DataStruct>) -> Result<GcPtr<DataStruct>> { let result = self.deep_clone_ptr(data, |gc, data| { let ptr = gc.alloc(Def { tag: data.tag, elems: &data.fields, })?; Ok((Value::Data(ptr), ptr)) })?; match result { Ok(Value::Data(ptr)) => Ok(ptr), Ok(_) => unreachable!(), Err(mut new_data) => { { let new_fields = unsafe { &mut new_data.as_mut().fields }; for (new, old) in new_fields.iter_mut().zip(&data.fields) { *new = self.deep_clone(*old)?; } } Ok(new_data) } } } fn deep_clone_userdata(&mut self, ptr: GcPtr<Box<Userdata>>) -> Result<GcPtr<Box<Userdata>>> { ptr.deep_clone(self) } fn deep_clone_array(&mut self, array: GcPtr<ValueArray>) -> Result<GcPtr<ValueArray>> { unsafe fn deep_clone_elems<T, F>( mut new_array: GcPtr<ValueArray>, mut deep_clone: F, ) -> Result<()> where T: Copy, F: FnMut(T) -> Result<T>, { let new_array = new_array.as_mut().unsafe_array_mut::<T>(); for field in new_array.iter_mut() { *field = deep_clone(*field)?; } Ok(()) } let result = self.deep_clone_ptr(array, |gc, array| { let ptr = gc.alloc(array)?; Ok((Value::Array(ptr), ptr)) })?; match result { Ok(Value::Array(ptr)) => Ok(ptr), Ok(_) => unreachable!(), Err(new_array) => { unsafe { match new_array.repr() { Repr::Byte | Repr::Int | Repr::Float | Repr::String => Ok(()), Repr::Array => deep_clone_elems(new_array, |e| self.deep_clone_array(e)), Repr::Unknown => deep_clone_elems(new_array, |e| self.deep_clone(e)), Repr::Userdata => { deep_clone_elems(new_array, |e| self.deep_clone_userdata(e)) } Repr::Thread => { return Err(Error::Message("Threads cannot be deep cloned yet".into())) } }?; } Ok(new_array) } } } fn deep_clone_closure(&mut self, data: GcPtr<ClosureData>) -> Result<GcPtr<ClosureData>> { let result = 
self.deep_clone_ptr(data, |gc, data| { let ptr = gc.alloc(ClosureDataDef(data.function, &data.upvars))?; Ok((Closure(ptr), ptr)) })?; match result { Ok(Value::Closure(ptr)) => Ok(ptr), Ok(_) => unreachable!(), Err(mut new_data) => { { let new_upvars = unsafe { &mut new_data.as_mut().upvars }; for (new, old) in new_upvars.iter_mut().zip(&data.upvars) { *new = self.deep_clone(*old)?; } } Ok(new_data) } } } fn deep_clone_app( &mut self, data: GcPtr<PartialApplicationData>, ) -> Result<GcPtr<PartialApplicationData>> { let result = self.deep_clone_ptr(data, |gc, data| { let ptr = gc.alloc(PartialApplicationDataDef(data.function, &data.args))?; Ok((PartialApplication(ptr), ptr)) })?; match result { Ok(Value::PartialApplication(ptr)) => Ok(ptr), Ok(_) => unreachable!(), Err(mut new_data) => { { let new_args = unsafe { &mut new_data.as_mut().args }; for (new, old) in new_args.iter_mut().zip(&data.args) { *new = self.deep_clone(*old)?; } } Ok(new_data) } } } } #[cfg(test)] mod tests { use super::*; use gc::{Gc, Generation}; use types::VmInt; use base::kind::{ArcKind, KindEnv}; use base::types::{Alias, ArcType, Field, RecordSelector, Type, TypeEnv}; use base::symbol::{Symbol, SymbolRef}; struct MockEnv(Option<Alias<Symbol, ArcType>>); impl KindEnv for MockEnv { fn find_kind(&self, _type_name: &SymbolRef) -> Option<ArcKind> { None } } impl TypeEnv for MockEnv { fn find_type(&self, _id: &SymbolRef) -> Option<&ArcType> { None } fn find_type_info(&self, _id: &SymbolRef) -> Option<&Alias<Symbol, ArcType>> { self.0.as_ref() } fn find_record( &self, _fields: &[Symbol], _selector: RecordSelector, ) -> Option<(ArcType, ArcType)> { None } } #[test] fn pretty_variant() { let mut gc = Gc::new(Generation::default(), usize::max_value()); let list = Symbol::from("List"); let typ: ArcType = Type::variant(vec![ Field { name: Symbol::from("Cons"), typ: Type::function( vec![Type::int(), Type::ident(list.clone())], Type::ident(list.clone()), ), }, Field { name: Symbol::from("Nil"), typ: 
Type::ident(list.clone()), }, ]); let env = MockEnv(Some(Alias::new(list.clone(), typ.clone()))); let nil = Value::Tag(1); assert_eq!(format!("{}", ValuePrinter::new(&env, &typ, nil)), "Nil"); let list1 = Value::Data( gc.alloc(Def { tag: 0, elems: &[Value::Int(123), nil], }).unwrap(), ); assert_eq!( format!("{}", ValuePrinter::new(&env, &typ, list1)), "Cons 123 Nil" ); let list2 = Value::Data( gc.alloc(Def { tag: 0, elems: &[Value::Int(0), list1], }).unwrap(), ); assert_eq!( format!("{}", ValuePrinter::new(&env, &typ, list2)), "Cons 0 (Cons 123 Nil)" ); } #[test] fn pretty_array() { let mut gc = Gc::new(Generation::default(), usize::max_value()); let typ = Type::array(Type::int()); let env = MockEnv(None); let nil = Value::Array(gc.alloc(&[1 as VmInt, 2, 3][..]).unwrap()); assert_eq!( format!("{}", ValuePrinter::new(&env, &typ, nil)), "[1, 2, 3]" ); } #[test] fn closure_data_upvars_location() { use std::mem; use std::ptr; unsafe { let p: *const ClosureData = ptr::null(); assert_eq!(p as *const u8, &(*p).function as *const _ as *const u8); assert_eq!( (p as *const u8).offset(mem::size_of::<*const ()>() as isize), &(*p).upvars as *const _ as *const u8 ); } } } fix(repl): Print out Char as the "character" instead of code point Fix #395 Remove meaningless code and unreachable! macro. 
use std::collections::hash_map::Entry; use std::fmt; use std::mem::size_of; use std::result::Result as StdResult; use itertools::Itertools; use pretty::{Arena, DocAllocator, DocBuilder}; use base::symbol::Symbol; use base::types::{ArcType, Type, TypeEnv}; use types::*; use base::fnv::FnvMap; use interner::InternedStr; use compiler::DebugInfo; use gc::{DataDef, Gc, GcPtr, Generation, Move, Traverseable, WriteOnly}; use array::Array; use thread::{Status, Thread}; use {Error, Result, Variants}; use self::Value::{Closure, Float, Function, Int, PartialApplication, String}; mopafy!(Userdata); pub trait Userdata: ::mopa::Any + Traverseable + fmt::Debug + Send + Sync { fn deep_clone(&self, deep_cloner: &mut Cloner) -> Result<GcPtr<Box<Userdata>>> { let _ = deep_cloner; Err(Error::Message("Userdata cannot be cloned".into())) } } impl PartialEq for Userdata { fn eq(&self, other: &Userdata) -> bool { self as *const _ == other as *const _ } } #[derive(Debug, PartialEq)] #[repr(C)] pub struct ClosureData { pub function: GcPtr<BytecodeFunction>, pub upvars: Array<Value>, } impl Traverseable for ClosureData { fn traverse(&self, gc: &mut Gc) { self.function.traverse(gc); self.upvars.traverse(gc); } } pub struct ClosureDataDef<'b>(pub GcPtr<BytecodeFunction>, pub &'b [Value]); impl<'b> Traverseable for ClosureDataDef<'b> { fn traverse(&self, gc: &mut Gc) { self.0.traverse(gc); self.1.traverse(gc); } } unsafe impl<'b> DataDef for ClosureDataDef<'b> { type Value = ClosureData; fn size(&self) -> usize { size_of::<GcPtr<BytecodeFunction>>() + Array::<Value>::size_of(self.1.len()) } fn initialize<'w>(self, mut result: WriteOnly<'w, ClosureData>) -> &'w mut ClosureData { unsafe { let result = &mut *result.as_mut_ptr(); result.function = self.0; result.upvars.initialize(self.1.iter().cloned()); result } } } pub struct ClosureInitDef(pub GcPtr<BytecodeFunction>, pub usize); impl Traverseable for ClosureInitDef { fn traverse(&self, gc: &mut Gc) { self.0.traverse(gc); } } unsafe impl DataDef 
for ClosureInitDef { type Value = ClosureData; fn size(&self) -> usize { size_of::<GcPtr<BytecodeFunction>>() + Array::<Value>::size_of(self.1) } fn initialize<'w>(self, mut result: WriteOnly<'w, ClosureData>) -> &'w mut ClosureData { use std::ptr; unsafe { let result = &mut *result.as_mut_ptr(); result.function = self.0; result.upvars.set_len(self.1); for var in &mut result.upvars { ptr::write(var, Int(0)); } result } } } #[derive(Debug, PartialEq)] #[cfg_attr(feature = "serde_derive", derive(DeserializeState, SerializeState))] #[cfg_attr(feature = "serde_derive", serde(deserialize_state = "::serialization::DeSeed"))] #[cfg_attr(feature = "serde_derive", serde(serialize_state = "::serialization::SeSeed"))] pub struct BytecodeFunction { #[cfg_attr(feature = "serde_derive", serde(state_with = "::serialization::symbol"))] pub name: Symbol, pub args: VmIndex, pub max_stack_size: VmIndex, pub instructions: Vec<Instruction>, #[cfg_attr(feature = "serde_derive", serde(state))] pub inner_functions: Vec<GcPtr<BytecodeFunction>>, #[cfg_attr(feature = "serde_derive", serde(state))] pub strings: Vec<InternedStr>, #[cfg_attr(feature = "serde_derive", serde(state))] pub records: Vec<Vec<InternedStr>>, #[cfg_attr(feature = "serde_derive", serde(state))] pub debug_info: DebugInfo, } impl Traverseable for BytecodeFunction { fn traverse(&self, gc: &mut Gc) { self.inner_functions.traverse(gc); } } #[derive(Debug)] #[repr(C)] pub struct DataStruct { tag: VmTag, pub fields: Array<Value>, } impl Traverseable for DataStruct { fn traverse(&self, gc: &mut Gc) { self.fields.traverse(gc); } } impl PartialEq for DataStruct { fn eq(&self, other: &DataStruct) -> bool { self.tag == other.tag && self.fields == other.fields } } impl DataStruct { pub fn record_bit() -> VmTag { 1 << ((size_of::<VmTag>() * 8) - 1) } pub fn tag(&self) -> VmTag { self.tag & !Self::record_bit() } pub fn is_record(&self) -> bool { (self.tag & Self::record_bit()) != 0 } } /// Definition for data values in the VM pub 
struct Def<'b> {
    /// Tag identifying the data constructor.
    pub tag: VmTag,
    /// Field values to copy into the allocated `DataStruct`.
    pub elems: &'b [Value],
}

unsafe impl<'b> DataDef for Def<'b> {
    type Value = DataStruct;
    fn size(&self) -> usize {
        // Header word plus inline field storage.
        size_of::<usize>() + Array::<Value>::size_of(self.elems.len())
    }
    fn initialize<'w>(self, mut result: WriteOnly<'w, DataStruct>) -> &'w mut DataStruct {
        unsafe {
            let result = &mut *result.as_mut_ptr();
            result.tag = self.tag;
            result.fields.initialize(self.elems.iter().cloned());
            result
        }
    }
}

impl<'b> Traverseable for Def<'b> {
    fn traverse(&self, gc: &mut Gc) {
        self.elems.traverse(gc);
    }
}

/// Definition for record values in the VM; identical to `Def` except that the
/// allocated `DataStruct` is marked as a record via its tag's record bit.
pub struct RecordDef<'b> {
    /// Tag identifying the record shape.
    pub tag: VmTag,
    /// Field values to copy into the allocated `DataStruct`.
    pub elems: &'b [Value],
}

unsafe impl<'b> DataDef for RecordDef<'b> {
    type Value = DataStruct;
    fn size(&self) -> usize {
        // Header word plus inline field storage.
        size_of::<usize>() + Array::<Value>::size_of(self.elems.len())
    }
    fn initialize<'w>(self, mut result: WriteOnly<'w, DataStruct>) -> &'w mut DataStruct {
        unsafe {
            let result = &mut *result.as_mut_ptr();
            // Use the shared constant instead of re-deriving the high bit
            // inline so the record marker cannot drift out of sync with
            // `DataStruct::is_record` / `DataStruct::tag`.
            result.tag = self.tag | DataStruct::record_bit();
            result.fields.initialize(self.elems.iter().cloned());
            result
        }
    }
}

impl<'b> Traverseable for RecordDef<'b> {
    fn traverse(&self, gc: &mut Gc) {
        self.elems.traverse(gc);
    }
}

mod gc_str {
    use super::ValueArray;
    use gc::{Gc, GcPtr, Generation, Traverseable};

    use std::fmt;
    use std::str;
    use std::ops::Deref;

    /// A garbage collected string backed by a byte `ValueArray` which is
    /// guaranteed (by construction) to hold valid UTF-8.
    #[derive(Copy, Clone, PartialEq)]
    pub struct GcStr(GcPtr<ValueArray>);

    impl fmt::Debug for GcStr {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            f.debug_tuple("GcStr").field(&&**self).finish()
        }
    }

    impl Eq for GcStr {}

    impl GcStr {
        /// Wraps `array` as a string, validating that its bytes are UTF-8.
        pub fn from_utf8(array: GcPtr<ValueArray>) -> Result<GcStr, ()> {
            unsafe {
                if array
                    .as_slice::<u8>()
                    .and_then(|bytes| str::from_utf8(bytes).ok())
                    .is_some()
                {
                    Ok(GcStr::from_utf8_unchecked(array))
                } else {
                    Err(())
                }
            }
        }

        /// Unsafe: the caller must guarantee `array` is a byte array holding
        /// valid UTF-8.
        pub unsafe fn from_utf8_unchecked(array: GcPtr<ValueArray>) -> GcStr {
            GcStr(array)
        }

        pub fn into_inner(self) -> GcPtr<ValueArray> {
            self.0
        }

        pub fn generation(&self) -> Generation {
            self.0.generation()
        }
    }

    impl Deref for GcStr {
        type Target = str;
        fn deref(&self) ->
&str { unsafe { str::from_utf8_unchecked(self.0.as_slice::<u8>().unwrap()) } } } impl Traverseable for GcStr { fn traverse(&self, gc: &mut Gc) { self.0.traverse(gc) } } } pub use self::gc_str::GcStr; #[derive(Copy, Clone, PartialEq)] #[cfg_attr(feature = "serde_derive", derive(DeserializeState, SerializeState))] #[cfg_attr(feature = "serde_derive", serde(deserialize_state = "::serialization::DeSeed"))] #[cfg_attr(feature = "serde_derive", serde(serialize_state = "::serialization::SeSeed"))] pub enum Value { Byte(u8), Int(VmInt), Float(f64), String(#[cfg_attr(feature = "serde_derive", serde(deserialize_state))] GcStr), Tag(VmTag), Data( #[cfg_attr(feature = "serde_derive", serde(deserialize_state_with = "::serialization::gc::deserialize_data"))] #[cfg_attr(feature = "serde_derive", serde(serialize_state))] GcPtr<DataStruct>, ), Array( #[cfg_attr(feature = "serde_derive", serde(deserialize_state_with = "::serialization::gc::deserialize_array"))] #[cfg_attr(feature = "serde_derive", serde(serialize_state))] GcPtr<ValueArray>, ), Function(#[cfg_attr(feature = "serde_derive", serde(state))] GcPtr<ExternFunction>), Closure( #[cfg_attr(feature = "serde_derive", serde(state_with = "::serialization::closure"))] GcPtr<ClosureData>, ), PartialApplication( #[cfg_attr(feature = "serde_derive", serde(deserialize_state_with = "::serialization::deserialize_application"))] #[cfg_attr(feature = "serde_derive", serde(serialize_state))] GcPtr<PartialApplicationData>, ), // TODO Implement serializing of userdata #[cfg_attr(feature = "serde_derive", serde(skip_deserializing))] Userdata( #[cfg_attr(feature = "serde_derive", serde(serialize_with = "::serialization::serialize_userdata"))] GcPtr<Box<Userdata>>, ), #[cfg_attr(feature = "serde_derive", serde(skip_deserializing))] #[cfg_attr(feature = "serde_derive", serde(skip_serializing))] Thread(#[cfg_attr(feature = "serde_derive", serde(deserialize_state))] GcPtr<Thread>), } impl Value { pub fn generation(self) -> Generation { match self 
{ String(p) => p.generation(), Value::Data(p) => p.generation(), Function(p) => p.generation(), Closure(p) => p.generation(), Value::Array(p) => p.generation(), PartialApplication(p) => p.generation(), Value::Userdata(p) => p.generation(), Value::Thread(p) => p.generation(), Value::Tag(_) | Value::Byte(_) | Int(_) | Float(_) => Generation::default(), } } } #[derive(PartialEq, Copy, Clone, PartialOrd)] enum Prec { Top, Constructor, } use self::Prec::*; pub struct ValuePrinter<'a> { pub typ: &'a ArcType, pub env: &'a TypeEnv, pub value: Value, pub max_level: i32, pub width: usize, } impl<'t> ValuePrinter<'t> { pub fn new(env: &'t TypeEnv, typ: &'t ArcType, value: Value) -> ValuePrinter<'t> { ValuePrinter { typ: typ, env: env, value: value, max_level: i32::max_value(), width: 80, } } pub fn max_level(&mut self, max_level: i32) -> &mut ValuePrinter<'t> { self.max_level = max_level; self } pub fn width(&mut self, width: usize) -> &mut ValuePrinter<'t> { self.width = width; self } } struct InternalPrinter<'a, 't> { typ: &'t ArcType, env: &'t TypeEnv, arena: &'a Arena<'a>, prec: Prec, level: i32, } impl<'a> fmt::Display for ValuePrinter<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let arena = Arena::new(); let mut s = Vec::new(); InternalPrinter { typ: self.typ, env: self.env, arena: &arena, prec: Top, level: self.max_level, }.pretty(self.value) .group() .1 .render(self.width, &mut s) .map_err(|_| fmt::Error)?; write!(f, "{}", ::std::str::from_utf8(&s).expect("utf-8")) } } impl<'a, 't> InternalPrinter<'a, 't> { fn pretty(&self, value: Value) -> DocBuilder<'a, Arena<'a>> { use std::iter; let arena = self.arena; match value { _ if self.level == 0 => arena.text(".."), Value::String(s) => arena.text(format!("{:?}", s)), Value::Data(ref data) => self.pretty_data(data.tag, data.fields.iter().cloned()), Value::Tag(tag) => self.pretty_data(tag, iter::empty()), Value::Function(ref function) => chain![arena; "<extern ", function.id.declared_name().to_string(), ">" 
], Value::Closure(ref closure) => chain![arena; "<", arena.text(closure.function.name.declared_name().to_string()), arena.concat(closure.upvars.iter().zip(&closure.function.debug_info.upvars) .map(|(field, info)| { chain![arena; arena.space(), info.name.clone(), ":", arena.space(), self.p(&info.typ, Top).pretty(*field) ] }).intersperse(arena.text(","))), ">" ], Value::Array(ref array) => chain![arena; "[", arena.concat(array.iter().map(|field| { match **self.typ { Type::App(_, ref args) => self.p(&args[0], Top).pretty(field), _ => arena.text(format!("{:?}", field)), } }).intersperse(arena.text(",").append(arena.space()))), "]" ], Value::PartialApplication(p) => arena.text(format!("{:?}", p)), Value::Userdata(ref data) => arena.text(format!("{:?}", data)), Value::Thread(thread) => arena.text(format!("{:?}", thread)), Value::Byte(b) => arena.text(format!("{}", b)), Value::Int(i) => { use base::types::BuiltinType; match **self.typ { Type::Builtin(BuiltinType::Int) => arena.text(format!("{}", i)), Type::Builtin(BuiltinType::Char) => match ::std::char::from_u32(i as u32) { Some('"') => arena.text(format!("'{}'", '"')), Some(c) => arena.text(format!("'{}'", c.escape_default())), None => ice!( "Invalid character (code point {}) passed to pretty printing", i ), }, _ => arena.text(format!("{}", i)), } }, Value::Float(f) => arena.text(format!("{}", f)), } } fn pretty_data<I>(&self, tag: VmTag, fields: I) -> DocBuilder<'a, Arena<'a>> where I: IntoIterator<Item = Value>, { fn enclose<'a>( p: Prec, limit: Prec, arena: &'a Arena<'a>, doc: DocBuilder<'a, Arena<'a>>, ) -> DocBuilder<'a, Arena<'a>> { if p >= limit { chain![arena; "(", doc, ")"] } else { doc } } use base::resolve::remove_aliases_cow; use base::types::arg_iter; let typ = remove_aliases_cow(self.env, self.typ); let arena = self.arena; match **typ { Type::Record(ref row) => chain![arena; "{", arena.concat(fields.into_iter().zip(row.row_iter()) .map(|(field, type_field)| { chain![arena; arena.space(), 
type_field.name.to_string(), ":", arena.space(), self.p(&type_field.typ, Top).pretty(field) ] }).intersperse(arena.text(","))), arena.space(), "}" ], Type::Variant(ref row) => { let type_field = row.row_iter() .nth(tag as usize) .expect("Variant tag is out of bounds"); let mut empty = true; let doc = chain![arena; type_field.name.declared_name().to_string(), arena.concat(fields.into_iter().zip(arg_iter(&type_field.typ)) .map(|(field, typ)| { empty = false; arena.space().append(self.p(typ, Constructor).pretty(field)) })) ]; if empty { doc } else { enclose(self.prec, Constructor, arena, doc) } } _ => chain![arena; "{", arena.concat(fields.into_iter().map(|field| { arena.space().append(self.p(&Type::hole(), Top).pretty(field)) }).intersperse(arena.text(","))), arena.space(), "}" ], } } fn p(&self, typ: &'t ArcType, prec: Prec) -> InternalPrinter<'a, 't> { InternalPrinter { typ: typ, env: self.env, arena: self.arena, prec: prec, level: self.level - 1, } } } #[derive(Copy, Clone, Debug)] #[cfg_attr(feature = "serde_derive", derive(DeserializeState, SerializeState))] #[cfg_attr(feature = "serde_derive", serde(deserialize_state = "::serialization::DeSeed"))] #[cfg_attr(feature = "serde_derive", serde(serialize_state = "::serialization::SeSeed"))] pub enum Callable { Closure( #[cfg_attr(feature = "serde_derive", serde(state_with = "::serialization::closure"))] GcPtr<ClosureData>, ), Extern(#[cfg_attr(feature = "serde_derive", serde(state))] GcPtr<ExternFunction>), } impl Callable { pub fn name(&self) -> &Symbol { match *self { Callable::Closure(ref closure) => &closure.function.name, Callable::Extern(ref ext) => &ext.id, } } pub fn args(&self) -> VmIndex { match *self { Callable::Closure(ref closure) => closure.function.args, Callable::Extern(ref ext) => ext.args, } } } impl PartialEq for Callable { fn eq(&self, _: &Callable) -> bool { false } } impl Traverseable for Callable { fn traverse(&self, gc: &mut Gc) { match *self { Callable::Closure(ref closure) => 
closure.traverse(gc), Callable::Extern(ref ext) => ext.traverse(gc), } } } #[derive(Debug)] #[repr(C)] #[cfg_attr(feature = "serde_derive", derive(SerializeState))] #[cfg_attr(feature = "serde_derive", serde(serialize_state = "::serialization::SeSeed"))] pub struct PartialApplicationData { #[cfg_attr(feature = "serde_derive", serde(serialize_state))] pub function: Callable, #[cfg_attr(feature = "serde_derive", serde(serialize_state))] pub args: Array<Value>, } impl PartialEq for PartialApplicationData { fn eq(&self, _: &PartialApplicationData) -> bool { false } } impl Traverseable for PartialApplicationData { fn traverse(&self, gc: &mut Gc) { self.function.traverse(gc); self.args.traverse(gc); } } pub struct PartialApplicationDataDef<'b>(pub Callable, pub &'b [Value]); impl<'b> Traverseable for PartialApplicationDataDef<'b> { fn traverse(&self, gc: &mut Gc) { self.0.traverse(gc); self.1.traverse(gc); } } unsafe impl<'b> DataDef for PartialApplicationDataDef<'b> { type Value = PartialApplicationData; fn size(&self) -> usize { use std::mem::size_of; size_of::<Callable>() + Array::<Value>::size_of(self.1.len()) } fn initialize<'w>( self, mut result: WriteOnly<'w, PartialApplicationData>, ) -> &'w mut PartialApplicationData { unsafe { let result = &mut *result.as_mut_ptr(); result.function = self.0; result.args.initialize(self.1.iter().cloned()); result } } } impl Traverseable for Value { fn traverse(&self, gc: &mut Gc) { match *self { String(ref data) => data.traverse(gc), Value::Data(ref data) => data.traverse(gc), Value::Array(ref data) => data.traverse(gc), Function(ref data) => data.traverse(gc), Closure(ref data) => data.traverse(gc), Value::Userdata(ref data) => data.traverse(gc), PartialApplication(ref data) => data.traverse(gc), Value::Thread(ref thread) => thread.traverse(gc), Value::Tag(_) | Value::Byte(_) | Int(_) | Float(_) => (), } } } impl fmt::Debug for Value { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { struct Level<'b>(i32, &'b Value); 
// A slice of values paired with the remaining print depth (used for the
// fields of data values and partial applications).
struct LevelSlice<'b>(i32, &'b [Value]);
impl<'b> fmt::Debug for LevelSlice<'b> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let level = self.0;
        // Depth exhausted or nothing to print: emit nothing.
        if level <= 0 || self.1.is_empty() {
            return Ok(());
        }
        write!(f, "{:?}", Level(level - 1, &self.1[0]))?;
        for v in &self.1[1..] {
            write!(f, ", {:?}", Level(level - 1, v))?;
        }
        Ok(())
    }
}
impl<'b> fmt::Debug for Level<'b> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let level = self.0;
        if level <= 0 {
            return Ok(());
        }
        match *self.1 {
            Value::Byte(i) => write!(f, "{:?}b", i),
            Int(i) => write!(f, "{:?}", i),
            Float(x) => write!(f, "{:?}f", x),
            String(x) => write!(f, "{:?}", &*x),
            Value::Tag(tag) => write!(f, "{{{:?}: }}", tag),
            Value::Data(ref data) => write!(
                f,
                "{{{:?}: {:?}}}",
                data.tag,
                LevelSlice(level - 1, &data.fields)
            ),
            Value::Array(ref array) => {
                let mut first = true;
                write!(f, "[")?;
                for value in array.iter() {
                    if !first {
                        write!(f, ", ")?;
                    }
                    first = false;
                    write!(f, "{:?}", Level(level - 1, &value))?;
                }
                write!(f, "]")
            }
            Function(ref func) => write!(f, "<EXTERN {:?}>", &**func),
            Closure(ref closure) => {
                // Print the closure's name plus its function's address to
                // distinguish distinct closures with the same name.
                let p: *const _ = &*closure.function;
                write!(f, "<{:?} {:?}>", closure.function.name, p)
            }
            PartialApplication(ref app) => {
                let name = match app.function {
                    Callable::Closure(_) => "<CLOSURE>",
                    Callable::Extern(_) => "<EXTERN>",
                };
                write!(f, "<App {:?}{:?}>", name, LevelSlice(level - 1, &app.args))
            }
            Value::Userdata(ref data) => write!(f, "<Userdata {:?}>", &**data),
            Value::Thread(_) => write!(f, "<thread>"),
        }
    }
}
// Start at a fixed depth budget of 7.
write!(f, "{:?}", Level(7, self))
}
}

/// A native function callable from the VM: a name, an argument count and a raw
/// `extern "C"` function pointer. The pointer itself is not serializable and
/// is skipped during serialization.
#[cfg_attr(feature = "serde_derive", derive(SerializeState))]
#[cfg_attr(feature = "serde_derive", serde(serialize_state = "::serialization::SeSeed"))]
pub struct ExternFunction {
    #[cfg_attr(feature = "serde_derive", serde(serialize_state_with = "::serialization::symbol::serialize"))]
    pub id: Symbol,
    pub args: VmIndex,
    #[cfg_attr(feature = "serde_derive", serde(skip_serializing))]
    pub function: extern "C" fn(&Thread) -> Status,
}

// Manual `Clone`: function pointers and `VmIndex` are plain copies, only the
// symbol needs a real clone.
impl Clone for
ExternFunction {
    fn clone(&self) -> ExternFunction {
        ExternFunction {
            id: self.id.clone(),
            args: self.args,
            function: self.function,
        }
    }
}

// Two extern functions are equal when name, arity and the function pointer's
// address all match.
impl PartialEq for ExternFunction {
    fn eq(&self, other: &ExternFunction) -> bool {
        self.id == other.id && self.args == other.args
            && self.function as usize == other.function as usize
    }
}

impl fmt::Debug for ExternFunction {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // read the v-table pointer of the Fn(..) type and print that
        let p: *const () = unsafe { ::std::mem::transmute(self.function) };
        write!(f, "{} {:?}", self.id, p)
    }
}

// Extern functions hold no GC references, so marking is a no-op.
impl Traverseable for ExternFunction {
    fn traverse(&self, _: &mut Gc) {}
}

/// Representation of values which can be stored directly in an array
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum Repr {
    Byte,
    Int,
    Float,
    String,
    Array,
    Unknown,
    Userdata,
    Thread,
}

// Marker trait linking an element type to the `Repr` tag it may be stored
// under. `unsafe`: implementations assert that reading array memory as `Self`
// is valid whenever `matches` returns true.
pub unsafe trait ArrayRepr {
    fn matches(repr: Repr) -> bool;
}

// For each (element type, Repr tag) pair, generate:
//  * the `ArrayRepr` link between the type and its tag,
//  * `DataDef` impls so slices and `Vec`s of the type can be allocated
//    directly as an unboxed `ValueArray`,
// and finally a `Repr::size_of` that maps each tag to its element size.
macro_rules! impl_repr {
    ($($id: ty, $repr: path),*) => {
        $(
        unsafe impl ArrayRepr for $id {
            fn matches(repr: Repr) -> bool { repr == $repr }
        }

        unsafe impl<'a> DataDef for &'a [$id] {
            type Value = ValueArray;
            fn size(&self) -> usize {
                use std::mem::size_of;
                size_of::<ValueArray>() + self.len() * size_of::<$id>()
            }
            fn initialize<'w>(self, mut result: WriteOnly<'w, ValueArray>) -> &'w mut ValueArray {
                unsafe {
                    let result = &mut *result.as_mut_ptr();
                    result.set_repr($repr);
                    result.unsafe_array_mut::<$id>().initialize(self.iter().cloned());
                    result
                }
            }
        }

        unsafe impl DataDef for Vec<$id> {
            type Value = ValueArray;
            fn size(&self) -> usize {
                DataDef::size(&&self[..])
            }
            fn initialize<'w>(self, result: WriteOnly<'w, ValueArray>) -> &'w mut ValueArray {
                DataDef::initialize(&self[..], result)
            }
        }
        )*
        impl Repr {
            fn size_of(self) -> usize {
                use std::mem::size_of;
                match self {
                    $(
                        $repr => size_of::<$id>(),
                    )*
                }
            }
        }
    }
}

impl_repr!
// The concrete element-type/tag table fed to `impl_repr!` above.
{
    u8, Repr::Byte,
    VmInt, Repr::Int,
    f64, Repr::Float,
    GcStr, Repr::String,
    GcPtr<ValueArray>, Repr::Array,
    Value, Repr::Unknown,
    GcPtr<Box<Userdata>>, Repr::Userdata,
    GcPtr<Thread>, Repr::Thread
}

impl Repr {
    // Choose the array representation for a value: unboxable variants get
    // their own tag, everything else is stored as a full boxed `Value`.
    fn from_value(value: Value) -> Repr {
        match value {
            Value::Byte(_) => Repr::Byte,
            Value::Int(_) => Repr::Int,
            Value::Float(_) => Repr::Float,
            Value::String(_) => Repr::String,
            Value::Array(_) => Repr::Array,
            Value::Data(_) |
            Value::Tag(_) |
            Value::Function(_) |
            Value::Closure(_) |
            Value::PartialApplication(_) => Repr::Unknown,
            Value::Userdata(_) => Repr::Userdata,
            Value::Thread(_) => Repr::Thread,
        }
    }
}

// Dispatch `$f` on the correctly-typed view of a `ValueArray`, switching on
// its runtime `Repr` tag. The `unsafe_array` casts are sound only because the
// tag is trusted to describe the element type.
macro_rules! on_array {
    ($array: expr, $f: expr) => {
        {
            let ref array = $array;
            unsafe {
                match array.repr() {
                    Repr::Byte => $f(array.unsafe_array::<u8>()),
                    Repr::Int => $f(array.unsafe_array::<VmInt>()),
                    Repr::Float => $f(array.unsafe_array::<f64>()),
                    Repr::String => $f(array.unsafe_array::<GcStr>()),
                    Repr::Array => $f(array.unsafe_array::<GcPtr<ValueArray>>()),
                    Repr::Unknown => $f(array.unsafe_array::<Value>()),
                    Repr::Userdata => $f(array.unsafe_array::<GcPtr<Box<Userdata>>>()),
                    Repr::Thread => $f(array.unsafe_array::<GcPtr<Thread>>()),
                }
            }
        }
    }
}

/// A GC-allocated array with an unboxed element representation chosen at
/// allocation time. The `Repr` tag records how the raw `Array<()>` payload
/// must be reinterpreted.
#[repr(C)]
pub struct ValueArray {
    repr: Repr,
    array: Array<()>,
}

impl fmt::Debug for ValueArray {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("ValueArray")
            .field("repr", &self.repr)
            .field("array", on_array!(self, |x| x as &fmt::Debug))
            .finish()
    }
}

// Element-wise equality; representations must match first.
impl PartialEq for ValueArray {
    fn eq(&self, other: &ValueArray) -> bool {
        self.repr == other.repr && self.iter().zip(other.iter()).all(|(l, r)| l == r)
    }
}

/// Iterator yielding the elements of a `ValueArray` re-boxed as `Value`s.
pub struct Iter<'a> {
    array: &'a ValueArray,
    index: usize,
}

impl<'a> Iterator for Iter<'a> {
    type Item = Value;
    fn next(&mut self) -> Option<Value> {
        if self.index < self.array.len() {
            let value = self.array.get(self.index);
            self.index += 1;
            Some(value)
        } else {
            None
        }
    }
    // Exact size is known from the array length.
    fn size_hint(&self) -> (usize, Option<usize>) {
        let i = self.array.len() - self.index;
        (i, Some(i))
    }
}

pub struct
VariantIter<'a> {
    array: &'a ValueArray,
    index: usize,
}

// Like `Iter` but yields rooted `Variants` so the values stay GC-safe while
// borrowed.
impl<'a> Iterator for VariantIter<'a> {
    type Item = Variants<'a>;
    fn next(&mut self) -> Option<Self::Item> {
        if self.index < self.array.len() {
            let value = self.array.get(self.index);
            self.index += 1;
            Some(unsafe { Variants::with_root(value, self.array) })
        } else {
            None
        }
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        let i = self.array.len() - self.index;
        (i, Some(i))
    }
}

impl Traverseable for ValueArray {
    fn traverse(&self, gc: &mut Gc) {
        on_array!(*self, |array: &Array<_>| array.traverse(gc))
    }
}

impl ValueArray {
    // Read element `index`, re-boxing it into a `Value` according to the
    // array's representation tag.
    pub fn get(&self, index: usize) -> Value {
        unsafe {
            match self.repr {
                Repr::Byte => Value::Byte(self.unsafe_get(index)),
                Repr::Int => Value::Int(self.unsafe_get(index)),
                Repr::Float => Value::Float(self.unsafe_get(index)),
                Repr::String => Value::String(self.unsafe_get(index)),
                Repr::Array => Value::Array(self.unsafe_get(index)),
                Repr::Unknown => self.unsafe_get(index),
                Repr::Userdata => Value::Userdata(self.unsafe_get(index)),
                Repr::Thread => Value::Thread(self.unsafe_get(index)),
            }
        }
    }

    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    pub fn len(&self) -> usize {
        self.array.len()
    }

    pub fn iter(&self) -> Iter {
        Iter {
            array: self,
            index: 0,
        }
    }

    pub fn variant_iter(&self) -> VariantIter {
        VariantIter {
            array: self,
            index: 0,
        }
    }

    // Bytes needed for an array of `len` elements stored as `repr`.
    pub fn size_of(repr: Repr, len: usize) -> usize {
        ::std::mem::size_of::<ValueArray>() + repr.size_of() * len
    }

    pub fn repr(&self) -> Repr {
        self.repr
    }

    // `unsafe`: caller must guarantee the existing payload is valid for the
    // new representation.
    pub unsafe fn set_repr(&mut self, repr: Repr) {
        self.repr = repr;
    }

    // Fill the array from boxed `Value`s, unboxing each according to the
    // current representation. `unsafe`: the values must all match `self.repr`
    // (the generated `unreachable!` enforces this at runtime per element).
    pub unsafe fn initialize<I>(&mut self, iter: I)
    where
        I: IntoIterator<Item = Value>,
    {
        let iter = iter.into_iter();
        macro_rules! initialize_variants {
            ($($id: ident)+) => {
                match self.repr {
                    $(Repr::$id => {
                        let iter = iter.map(|v| match v {
                            Value::$id(x) => x,
                            _ => unreachable!(),
                        });
                        self.unsafe_array_mut().initialize(iter);
                    })+
                    Repr::Unknown => {
                        self.unsafe_array_mut().initialize(iter);
                    }
                }
            }
        }
        initialize_variants!
{ Byte Int Float String Array Userdata Thread }
    }

    pub fn as_slice<T: ArrayRepr + Copy>(&self) -> Option<&[T]> {
        unsafe {
            // If the array is empty then it may not have the correct type representation set since
            // there was no value to take the correct representation from
            if T::matches(self.repr) || self.is_empty() {
                Some(self.unsafe_array::<T>())
            } else {
                None
            }
        }
    }

    // SAFETY: caller must ensure `T` matches `self.repr` and `index` is in
    // bounds; the transmute reinterprets the untyped payload as `Array<T>`.
    unsafe fn unsafe_get<T: Copy>(&self, index: usize) -> T {
        ::std::mem::transmute::<&Array<()>, &Array<T>>(&self.array)[index]
    }

    unsafe fn unsafe_array<T: Copy>(&self) -> &Array<T> {
        ::std::mem::transmute::<&Array<()>, &Array<T>>(&self.array)
    }

    pub unsafe fn unsafe_array_mut<T: Copy>(&mut self) -> &mut Array<T> {
        ::std::mem::transmute::<&mut Array<()>, &mut Array<T>>(&mut self.array)
    }
}

// Allocate a copy of an existing `ValueArray`, preserving its representation
// (used e.g. when cloning arrays between GCs).
unsafe impl<'a> DataDef for &'a ValueArray {
    type Value = ValueArray;
    fn size(&self) -> usize {
        ValueArray::size_of(self.repr, self.len())
    }
    #[allow(unused_unsafe)]
    fn initialize<'w>(self, mut result: WriteOnly<'w, ValueArray>) -> &'w mut ValueArray {
        unsafe {
            let result = &mut *result.as_mut_ptr();
            result.repr = self.repr;
            on_array!(self, |array: &Array<_>| {
                result.unsafe_array_mut().initialize(array.iter().cloned())
            });
            result
        }
    }
}

/// Allocation recipe building a `ValueArray` from a slice of boxed `Value`s;
/// the representation is taken from the first element (Unknown when empty).
pub struct ArrayDef<'b>(pub &'b [Value]);

impl<'b> Traverseable for ArrayDef<'b> {
    fn traverse(&self, gc: &mut Gc) {
        self.0.traverse(gc);
    }
}

unsafe impl<'b> DataDef for ArrayDef<'b> {
    type Value = ValueArray;
    fn size(&self) -> usize {
        use std::mem::size_of;
        let size = match self.0.first() {
            Some(value) => Repr::from_value(*value).size_of() * self.0.len(),
            None => 0,
        };
        size_of::<ValueArray>() + size
    }
    fn initialize<'w>(self, mut result: WriteOnly<'w, ValueArray>) -> &'w mut ValueArray {
        unsafe {
            let result = &mut *result.as_mut_ptr();
            match self.0.first() {
                Some(value) => {
                    result.repr = Repr::from_value(*value);
                    result.initialize(self.0.iter().cloned());
                }
                None => {
                    result.repr = Repr::Unknown;
                    result.initialize(None);
                }
            }
            result
        }
    }
}

/// Deep-clones values from one GC heap into another, sharing already-copied
/// allocations via the `visited` pointer map.
pub struct Cloner<'t> {
    visited: FnvMap<*const (),
Value>,
    thread: &'t Thread,
    gc: &'t mut Gc,
    // Generation of the receiving gc; values already reachable from it need
    // no copy.
    receiver_generation: Generation,
}

impl<'t> Cloner<'t> {
    pub fn new(thread: &'t Thread, gc: &'t mut Gc) -> Cloner<'t> {
        Cloner {
            visited: FnvMap::default(),
            thread: thread,
            receiver_generation: gc.generation(),
            gc: gc,
        }
    }

    pub fn thread(&self) -> &'t Thread {
        self.thread
    }

    pub fn gc(&mut self) -> &mut Gc {
        self.gc
    }

    /// Deep clones the entire value doing no sharing
    pub fn force_full_clone(&mut self) -> &mut Self {
        // A disjoint generation can never contain the source values, so the
        // early-return below never fires and everything is copied.
        self.receiver_generation = Generation::disjoint();
        self
    }

    // Clone `value` into the receiving gc, dispatching per variant. Immediate
    // values are returned as-is; threads are not cloneable.
    pub fn deep_clone(&mut self, value: Value) -> Result<Value> {
        // Only need to clone values which belong to a younger generation than the gc that the new
        // value will live in
        if self.receiver_generation
            .can_contain_values_from(value.generation())
        {
            return Ok(value);
        }
        match value {
            String(data) => self.deep_clone_str(data),
            Value::Data(data) => self.deep_clone_data(data).map(Value::Data),
            Value::Array(data) => self.deep_clone_array(data).map(Value::Array),
            Closure(data) => self.deep_clone_closure(data).map(Value::Closure),
            PartialApplication(data) => self.deep_clone_app(data).map(Value::PartialApplication),
            Function(f) => self.gc
                .alloc(Move(ExternFunction::clone(&f)))
                .map(Value::Function),
            Value::Tag(i) => Ok(Value::Tag(i)),
            Value::Byte(i) => Ok(Value::Byte(i)),
            Int(i) => Ok(Int(i)),
            Float(f) => Ok(Float(f)),
            Value::Userdata(userdata) => userdata.deep_clone(self).map(Value::Userdata),
            Value::Thread(_) => Err(Error::Message("Threads cannot be deep cloned yet".into())),
        }
    }

    // Shared clone-with-memoization step: returns `Ok(Ok(value))` when the
    // pointer was already copied (sharing preserved), or `Ok(Err(new_ptr))`
    // when a fresh allocation was made whose contents the caller must still
    // fill in.
    fn deep_clone_ptr<T, A, R>(&mut self, value: GcPtr<T>, alloc: A) -> Result<StdResult<Value, R>>
    where
        A: FnOnce(&mut Gc, &T) -> Result<(Value, R)>,
    {
        let key = &*value as *const T as *const ();
        let new_ptr = match self.visited.entry(key) {
            Entry::Occupied(entry) => return Ok(Ok(*entry.get())),
            Entry::Vacant(entry) => {
                // FIXME Should allocate the real `Value` and possibly fill it later
                let (value, new_ptr) = alloc(self.gc, &value)?;
                entry.insert(value);
                new_ptr
            }
        };
        Ok(Err(new_ptr))
}

    fn deep_clone_str(&mut self, data: GcStr) -> Result<Value> {
        unsafe {
            // SAFETY-relevant: the source bytes come from an existing GcStr,
            // so they are already valid UTF-8.
            Ok(
                self.deep_clone_ptr(data.into_inner(), |gc, data| {
                    let ptr = GcStr::from_utf8_unchecked(gc.alloc(data)?);
                    Ok((String(ptr), ptr))
                })?
                    .unwrap_or_else(String),
            )
        }
    }

    // Clone a data value: allocate a shallow copy first (so cycles terminate
    // via the `visited` map), then clone each field in place.
    fn deep_clone_data(&mut self, data: GcPtr<DataStruct>) -> Result<GcPtr<DataStruct>> {
        let result = self.deep_clone_ptr(data, |gc, data| {
            let ptr = gc.alloc(Def {
                tag: data.tag,
                elems: &data.fields,
            })?;
            Ok((Value::Data(ptr), ptr))
        })?;
        match result {
            Ok(Value::Data(ptr)) => Ok(ptr),
            Ok(_) => unreachable!(),
            Err(mut new_data) => {
                {
                    let new_fields = unsafe { &mut new_data.as_mut().fields };
                    for (new, old) in new_fields.iter_mut().zip(&data.fields) {
                        *new = self.deep_clone(*old)?;
                    }
                }
                Ok(new_data)
            }
        }
    }

    fn deep_clone_userdata(&mut self, ptr: GcPtr<Box<Userdata>>) -> Result<GcPtr<Box<Userdata>>> {
        ptr.deep_clone(self)
    }

    fn deep_clone_array(&mut self, array: GcPtr<ValueArray>) -> Result<GcPtr<ValueArray>> {
        // Clone each element of an already shallow-copied array in place.
        // `unsafe`: `T` must match the array's representation tag.
        unsafe fn deep_clone_elems<T, F>(
            mut new_array: GcPtr<ValueArray>,
            mut deep_clone: F,
        ) -> Result<()>
        where
            T: Copy,
            F: FnMut(T) -> Result<T>,
        {
            let new_array = new_array.as_mut().unsafe_array_mut::<T>();
            for field in new_array.iter_mut() {
                *field = deep_clone(*field)?;
            }
            Ok(())
        }

        let result = self.deep_clone_ptr(array, |gc, array| {
            let ptr = gc.alloc(array)?;
            Ok((Value::Array(ptr), ptr))
        })?;
        match result {
            Ok(Value::Array(ptr)) => Ok(ptr),
            Ok(_) => unreachable!(),
            Err(new_array) => {
                unsafe {
                    match new_array.repr() {
                        // Plain/immutable element types were fully copied by
                        // the shallow clone already.
                        Repr::Byte | Repr::Int | Repr::Float | Repr::String => Ok(()),
                        Repr::Array => deep_clone_elems(new_array, |e| self.deep_clone_array(e)),
                        Repr::Unknown => deep_clone_elems(new_array, |e| self.deep_clone(e)),
                        Repr::Userdata => {
                            deep_clone_elems(new_array, |e| self.deep_clone_userdata(e))
                        }
                        Repr::Thread => {
                            return Err(Error::Message("Threads cannot be deep cloned yet".into()))
                        }
                    }?;
                }
                Ok(new_array)
            }
        }
    }

    // Same shallow-copy-then-fill scheme as `deep_clone_data`, for closures.
    fn deep_clone_closure(&mut self, data: GcPtr<ClosureData>) -> Result<GcPtr<ClosureData>> {
        let result =
self.deep_clone_ptr(data, |gc, data| {
            let ptr = gc.alloc(ClosureDataDef(data.function, &data.upvars))?;
            Ok((Closure(ptr), ptr))
        })?;
        match result {
            Ok(Value::Closure(ptr)) => Ok(ptr),
            Ok(_) => unreachable!(),
            Err(mut new_data) => {
                {
                    let new_upvars = unsafe { &mut new_data.as_mut().upvars };
                    for (new, old) in new_upvars.iter_mut().zip(&data.upvars) {
                        *new = self.deep_clone(*old)?;
                    }
                }
                Ok(new_data)
            }
        }
    }
    // Same scheme for partial applications: shallow copy, then clone each
    // collected argument in place.
    fn deep_clone_app(
        &mut self,
        data: GcPtr<PartialApplicationData>,
    ) -> Result<GcPtr<PartialApplicationData>> {
        let result = self.deep_clone_ptr(data, |gc, data| {
            let ptr = gc.alloc(PartialApplicationDataDef(data.function, &data.args))?;
            Ok((PartialApplication(ptr), ptr))
        })?;
        match result {
            Ok(Value::PartialApplication(ptr)) => Ok(ptr),
            Ok(_) => unreachable!(),
            Err(mut new_data) => {
                {
                    let new_args = unsafe { &mut new_data.as_mut().args };
                    for (new, old) in new_args.iter_mut().zip(&data.args) {
                        *new = self.deep_clone(*old)?;
                    }
                }
                Ok(new_data)
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use gc::{Gc, Generation};
    use types::VmInt;
    use base::kind::{ArcKind, KindEnv};
    use base::types::{Alias, ArcType, Field, RecordSelector, Type, TypeEnv};
    use base::symbol::{Symbol, SymbolRef};

    // Minimal type environment exposing at most one alias, enough to drive
    // the value pretty-printer.
    struct MockEnv(Option<Alias<Symbol, ArcType>>);

    impl KindEnv for MockEnv {
        fn find_kind(&self, _type_name: &SymbolRef) -> Option<ArcKind> {
            None
        }
    }

    impl TypeEnv for MockEnv {
        fn find_type(&self, _id: &SymbolRef) -> Option<&ArcType> {
            None
        }
        fn find_type_info(&self, _id: &SymbolRef) -> Option<&Alias<Symbol, ArcType>> {
            self.0.as_ref()
        }
        fn find_record(
            &self,
            _fields: &[Symbol],
            _selector: RecordSelector,
        ) -> Option<(ArcType, ArcType)> {
            None
        }
    }

    // Variant values print as constructor applications, parenthesized when
    // nested.
    #[test]
    fn pretty_variant() {
        let mut gc = Gc::new(Generation::default(), usize::max_value());

        let list = Symbol::from("List");
        let typ: ArcType = Type::variant(vec![
            Field {
                name: Symbol::from("Cons"),
                typ: Type::function(
                    vec![Type::int(), Type::ident(list.clone())],
                    Type::ident(list.clone()),
                ),
            },
            Field {
                name: Symbol::from("Nil"),
                typ:
Type::ident(list.clone()),
            },
        ]);

        let env = MockEnv(Some(Alias::new(list.clone(), typ.clone())));

        let nil = Value::Tag(1);
        assert_eq!(format!("{}", ValuePrinter::new(&env, &typ, nil)), "Nil");
        let list1 = Value::Data(
            gc.alloc(Def {
                tag: 0,
                elems: &[Value::Int(123), nil],
            }).unwrap(),
        );
        assert_eq!(
            format!("{}", ValuePrinter::new(&env, &typ, list1)),
            "Cons 123 Nil"
        );
        let list2 = Value::Data(
            gc.alloc(Def {
                tag: 0,
                elems: &[Value::Int(0), list1],
            }).unwrap(),
        );
        assert_eq!(
            format!("{}", ValuePrinter::new(&env, &typ, list2)),
            "Cons 0 (Cons 123 Nil)"
        );
    }

    // Unboxed int arrays print as bracketed lists.
    #[test]
    fn pretty_array() {
        let mut gc = Gc::new(Generation::default(), usize::max_value());

        let typ = Type::array(Type::int());

        let env = MockEnv(None);

        let nil = Value::Array(gc.alloc(&[1 as VmInt, 2, 3][..]).unwrap());
        assert_eq!(
            format!("{}", ValuePrinter::new(&env, &typ, nil)),
            "[1, 2, 3]"
        );
    }

    // Layout check: `ClosureData` must place `function` first and `upvars`
    // one pointer-size after it (other code relies on this `#[repr]` layout).
    #[test]
    fn closure_data_upvars_location() {
        use std::mem;
        use std::ptr;

        unsafe {
            let p: *const ClosureData = ptr::null();
            assert_eq!(p as *const u8, &(*p).function as *const _ as *const u8);
            assert_eq!(
                (p as *const u8).offset(mem::size_of::<*const ()>() as isize),
                &(*p).upvars as *const _ as *const u8
            );
        }
    }
}
// NOTE(review): from here on this file contains a pre-1.0 Rust program (uses
// `uint`, `deriving`, `Iterator<Token>`, `std::rand`, closure-type params);
// it will not compile on any modern toolchain and is kept as-is for history.
// Implements http://rosettacode.org/wiki/24_game
// with a recursive descent parser for a simple calculator (+ - * /)
// using the shunting yard algorithm as explained on
// http://www.engr.mun.ca/~theo/Misc/exp_parsing.htm
// It follows operator precedence (i.e. 2 + 3 * 3 = 11),
// understands negation (-5 + 6 = 1), ignores whitespace
// and allows the use of parentheses
#![feature(macro_rules)]

use std::char::to_digit;
#[cfg(not(test))]
use std::rand;

// Game loop: draw 4 random digits, read an expression, check it uses exactly
// those digits and evaluates to 24.
#[cfg(not(test))]
fn main() {
    let mut rng=rand::task_rng();
    let mut input = std::io::stdin();
    loop {
        let mut sample = rand::sample(&mut rng, range(1u, 10), 4);
        println!("make 24 by combining the following 4 numbers with + - * / or (q)uit");
        println!("{}", sample);
        let line = input.read_line().unwrap();
        match line.as_slice().trim() {
            "q" => break,
            input if check_values(sample.as_mut_slice(), input) => {
                let mut p = Parser :: new(input);
                match p.parse() {
                    Ok(i) if i==24f32 => println!("you made it!"),
                    Ok(i) => println!("you entered {}, try again!", i),
                    Err(s) => println!("{}",s)
                };
            }
            _ => println!("unrecognized input, try again")
        }
    }
}

// Returns true if the entered expression uses the values contained in sample
// (both sides are sorted first, so order does not matter; note this mutates
// `sample` in place).
fn check_values(sample:&mut [uint], input:&str) -> bool {
    let lex=Lexer::new(input);
    let mut numbers_used : Vec<uint> = lex.filter_map(|a| match a {
        Int(i) => Some(i),
        _ => None
    }).collect();
    numbers_used.sort();
    sample.sort();
    numbers_used.as_slice() == sample
}

// the tokens that our parser is going to recognize
#[deriving(PartialEq,Eq,Show)]
enum Token {LParen, RParen, Plus, Minus, Slash, Star, Int(uint)}

impl Token {
    // are tokens associated to a binary operation?
    fn is_binary(&self) -> bool {
        match self {
            &Plus | &Minus | &Slash | &Star => true,
            _ => false
        }
    }
}

#[inline]
// map a character to its corresponding token
fn single_char_to_token(ch:Option<char>) -> Option<Token> {
    match ch {
        Some('(') => Some(LParen),
        Some(')') => Some(RParen),
        Some('+') => Some(Plus),
        Some('-') => Some(Minus),
        Some('/') => Some(Slash),
        Some('*') => Some(Star),
        _ => None
    }
}

// Lexer reads an expression like (a + b) / c * d
// as an iterator on the tokens that compose it
// Int(a), LParen, Plus, Int(b), RParen...
struct Lexer<'a> {
    input_str: &'a str,
    // byte position of the next unread character in `input_str`
    position: uint
}

impl <'a> Lexer<'a> {
    fn new(input_str: &'a str) -> Lexer<'a> {
        Lexer { input_str: input_str, position: 0u }
    }

    // Consume the next token and require it to be one of `expected`.
    fn expect(&mut self, expected:&[Token]) -> Result<Token, String> {
        let n = self.position;
        match self.next() {
            Some(a) if expected.contains(&a) => Ok(a),
            other => Err(format!("Parsing error: {} was unexpected at position {}", other, n))
        }
    }
}

// NOTE(review): this is the pre-refactor version of the iterator (see the
// rewritten, offset/enumerate-based version later in this file). The
// `take_while`/`skip` bookkeeping here looks fragile — it re-uses `ch_iter`
// after handing it to adaptors — which is presumably why it was refactored.
impl <'a> Iterator<Token> for Lexer<'a> {
    fn next(&mut self) -> Option<Token> {
        let remaining_input=self.input_str.slice_from(self.position);
        let ch_iter=remaining_input.chars();
        // skip spaces and update position
        let mut spaces=ch_iter.take_while(|ch| ch.is_whitespace());
        let n_spaces=spaces.count();
        self.position+=n_spaces;
        let mut trimmed=ch_iter.skip(n_spaces);
        // read digits (note: not checking length, risks overflowing uint)
        let digits=trimmed.take_while(|d| d.is_digit());
        // transform char digits to a uint and update position
        let val=digits.map(|c| to_digit(c, 10).unwrap())
                      .fold(0u, |a, b| {
                          self.position+=1;
                          b + a * 10
                      });
        let ret = if val > 0 {
            Some(Int(val))
        } else {
            // not a value. Read other tokens
            let cur=trimmed.next();
            self.position +=1;
            single_char_to_token(cur)
        };
        ret
    }
}

// operators are a "higher level" concept than tokens
// as they define the semantics of the expression language
// e.g. token "Minus" can correspond to the unary Neg Operator (-a)
// or to the binary Sub operator (a - b)
#[deriving(PartialEq,Eq)]
enum Operator {Neg, Add, Sub, Mul, Div, Sentinel}

// Newtype so precedence comparison gets its own `PartialOrd` below.
#[deriving(PartialEq,Eq)]
struct OperatorPrecedence(Operator);

impl Operator {
    fn precedence(self) -> OperatorPrecedence {
        OperatorPrecedence(self)
    }
}

impl OperatorPrecedence {
    // Numeric precedence rank (higher binds tighter).
    fn prec(self) -> uint {
        match self {
            OperatorPrecedence(Sentinel) => 0u,
            OperatorPrecedence(Add) | OperatorPrecedence(Sub) => 1u,
            OperatorPrecedence(Neg) => 2u,
            OperatorPrecedence(Mul) | OperatorPrecedence(Div) => 3u
        }
    }
}

/* Operator precedence for binary operators:
 * if x has higher precedence than y precedence for x > precedence for y
 * if x and y have equal precedence the first one has precedence
 e.g. in expression (4 / 2 * 2) operators * and / have the same precedence,
 but the operations must be performed in the order they appear
 (division first, multiplication second) otherwise results are different */
impl PartialOrd for OperatorPrecedence {
    fn lt(&self, other: &OperatorPrecedence) -> bool {
        match (self, other) {
            // equal-precedence pairs: the earlier operator wins, so neither
            // is "less than" the other
            (&OperatorPrecedence(Mul), &OperatorPrecedence(Div)) => false,
            (&OperatorPrecedence(Div), &OperatorPrecedence(Mul)) => false,
            (&OperatorPrecedence(Add), &OperatorPrecedence(Sub)) => false,
            (&OperatorPrecedence(Sub), &OperatorPrecedence(Add)) => false,
            _ => self.prec() < other.prec()
        }
    }
    fn gt(&self, other: &OperatorPrecedence) -> bool {
        match (self, other) {
            // equal-precedence pairs are treated as "greater" so the pending
            // operator on the stack is applied first (left associativity)
            (&OperatorPrecedence(Mul), &OperatorPrecedence(Div)) => true,
            (&OperatorPrecedence(Div), &OperatorPrecedence(Mul)) => true,
            (&OperatorPrecedence(Add), &OperatorPrecedence(Sub)) => true,
            (&OperatorPrecedence(Sub), &OperatorPrecedence(Add)) => true,
            _ => self.prec() > other.prec()
        }
    }
}

// recursive descent parser
// with the shunting yard algorithm as explained on
// http://www.engr.mun.ca/~theo/Misc/exp_parsing.htm
// I followed the names of the methods as closely as possible vs the pseudo-code
// that illustrates the algorithm
struct Parser<'a> {
// Parser state for the shunting-yard evaluation: a stack of pending
// operators, a stack of computed operands, and the token source.
operators: Vec<Operator>,
    operands: Vec<f32>,
    lexer: Lexer<'a>
}

impl <'a> Parser<'a> {
    fn new(input_str: &'a str) -> Parser<'a> {
        Parser {
            operators: Vec::new(),
            operands: Vec::new(),
            lexer: Lexer::new(input_str)
        }
    }

    // Parse and evaluate the whole expression; the final result is whatever
    // remains on top of the operand stack.
    fn parse(&mut self) -> Result<f32, String> {
        self.operators.push(Sentinel);
        try!(self.e());
        let res=self.operands.last();
        let ret=match res {
            Some(&r) => Ok(r),
            None => Err("something went wrong, got no result".to_string())
        };
        ret
    }

    // E production: a primary followed by (binary-op primary)*, then reduce
    // every pending operator down to the sentinel.
    fn e(&mut self) -> Result<(), String> {
        try!(self.p());
        let mut n=self.lexer.next();
        while n.is_some() && n.unwrap().is_binary() {
            match n {
                Some(Plus) => self.push_operator(Add),
                Some(Minus) => self.push_operator(Sub),
                Some(Star) => self.push_operator(Mul),
                Some(Slash) => self.push_operator(Div),
                _ => unreachable!() //shouldn't get there (there are no other
                                    // binary operators
            };
            try!(self.p());
            let mut n_peek = self.lexer.peekable();
            if n_peek.peek().is_none() || !n_peek.peek().unwrap().is_binary() {
                break;
            }
            n = self.lexer.next();
        }
        while self.operators.last().is_some() &&
              self.operators.last().unwrap() != &Sentinel {
            self.pop_operator();
        }
        Ok(())
    }

    // P production: integer literal, parenthesized expression, or unary minus.
    fn p(&mut self) -> Result<(), String> {
        match self.lexer.next() {
            Some(Int(n)) => { self.operands.push(n as f32); },
            Some(LParen) => {
                // a fresh sentinel isolates the sub-expression's operators
                self.operators.push(Sentinel);
                try!(self.e());
                try!(self.lexer.expect(&[RParen]));
                self.operators.pop();
            },
            Some(Minus) => {
                self.push_operator(Neg);
                try!(self.p());
            },
            Some(e) => return Err(format!("unexpected token {}", e)),
            _ => return Err("unexpected end of command".to_string())
            //Some(Minus) =>
        }
        Ok(())
    }

    // Reduce: pop one operator and apply it to the operand stack.
    fn pop_operator(&mut self) {
        match self.operators.pop() {
            Some(Add) => self.binary_op(|t1,t2| t1+t2),
            Some(Sub) => self.binary_op(|t1,t2| t1-t2),
            Some(Mul) => self.binary_op(|t1,t2| t1*t2),
            Some(Div) => self.binary_op(|t1,t2| t1/t2),
            Some(Neg) => self.unary_op(|t1| -t1),
            _ => unreachable!()
        }
    }

    // Push `op`, first reducing any higher-precedence operator on the stack.
    fn push_operator(&mut self, op:Operator) {
        match self.operators.last() {
            Some(&a) if a.precedence() > op.precedence() => self.pop_operator(),
            _ => ()
        }
        self.operators.push(op);
    }

    // Apply a binary operation to the two topmost operands (note the operand
    // order: the second pop is the left-hand side).
    #[inline]
    fn binary_op(&mut self, op:|f32,f32| -> f32) {
        let t1=self.operands.pop().unwrap();
        let t2=self.operands.pop().unwrap();
        self.operands.push(op(t2,t1));
    }

    #[inline]
    fn unary_op(&mut self, op:|f32| -> f32) {
        let t1=self.operands.pop().unwrap();
        self.operands.push(op(t1));
    }
}

#[test]
fn test_precedence() {
    assert!(Mul.precedence() > Add.precedence());
    assert!(Mul.precedence() > Div.precedence());
    assert!((Mul.precedence() < Div.precedence())==false);
}

#[test]
fn lexer_iter() {
    // test read token and character's position in the iterator
    macro_rules! test_tok(
        ($tok:ident, $exp_tok:expr, $exp_pos:expr) => (
            assert_eq!(($tok.next(), $tok.position), (Some($exp_tok), $exp_pos));))

    let mut tok=Lexer::new(" 15 + 4");
    test_tok!(tok, Int(15), 4);
    test_tok!(tok, Plus, 6);
    let read=tok.expect(&[LParen,Int(4),RParen]);
    assert_eq!(read, Ok(Int(4)));
    let mut tok=Lexer::new("");
    assert_eq!(tok.next(), None);
    let mut tok=Lexer::new(" ");
    assert_eq!(tok.next(), None);
    let mut tok=Lexer::new("2 * (3+4/2)");
    test_tok!(tok, Int(2), 1);
    test_tok!(tok, Star, 3);
    test_tok!(tok, LParen, 5);
    test_tok!(tok, Int(3), 6);
    test_tok!(tok, Plus, 7);
    test_tok!(tok, Int(4), 8);
    test_tok!(tok, Slash, 9);
    test_tok!(tok, Int(2), 10);
    test_tok!(tok, RParen, 11);
}

#[test]
fn parse() {
    assert_eq!(Parser::new("2+2").parse(), Ok(4.));
    assert_eq!(Parser::new("2+3*4").parse(), Ok(14.));
    assert_eq!(Parser::new("4*(3+2)").parse(), Ok(20.));
    assert_eq!(Parser::new("5/(3+2)*3").parse(), Ok(3.));
    assert_eq!(Parser::new("2++12").parse(), Err("unexpected token Plus".to_string()));
    assert_eq!(Parser::new("-2+12").parse(), Ok(10.));
    assert_eq!(Parser::new("-2*(2+3)").parse(), Ok(-10.));
}

#[test]
fn try_check_values() {
    let m = &mut [1,2,3,4];
    assert!(check_values(m, "1+3 -(4/2)"));
}

// refactoring the Lexer iterator
// FIX(review): the line above was a stray VCS commit message ("refectoring
// the Lexer iterator") pasted as bare, non-comment text between two revisions
// of this file, which made it unparseable. It is now a comment and the typo
// is corrected; everything below is the post-refactor revision.

// Implements http://rosettacode.org/wiki/24_game
// with a recursive descent parser for a simple calculator (+ - * /)
// using the shunting yard algorithm as explained on
// http://www.engr.mun.ca/~theo/Misc/exp_parsing.htm
// It follows operator precedence (i.e. 2 + 3 * 3 = 11),
// understands negation (-5 + 6 = 1), ignores whitespace
// and allows the use of parentheses
#![feature(macro_rules)]

use std::char;
#[cfg(not(test))]
use std::rand;

// Game loop: draw 4 random digits, read an expression, check it uses exactly
// those digits and evaluates to 24.
#[cfg(not(test))]
fn main() {
    let mut rng=rand::task_rng();
    let mut input = std::io::stdin();
    loop {
        let mut sample = rand::sample(&mut rng, range(1u, 10), 4);
        println!("make 24 by combining the following 4 numbers with + - * / or (q)uit");
        println!("{}", sample);
        let line = input.read_line().unwrap();
        match line.as_slice().trim() {
            "q" => break,
            input if check_values(sample.as_mut_slice(), input) => {
                let mut p = Parser :: new(input);
                match p.parse() {
                    Ok(i) if i==24f32 => println!("you made it!"),
                    Ok(i) => println!("you entered {}, try again!", i),
                    Err(s) => println!("{}",s)
                };
            }
            _ => println!("unrecognized input, try again")
        }
    }
}

// Returns true if the entered expression uses the values contained in sample
fn check_values(sample:&mut [uint], input:&str) -> bool {
    let lex=Lexer::new(input);
    let mut numbers_used : Vec<uint> = lex.filter_map(|a| match a {
        Int(i) => Some(i),
        _ => None
    }).collect();
    numbers_used.sort();
    sample.sort();
    numbers_used.as_slice() == sample
}

// the tokens that our parser is going to recognize
#[deriving(PartialEq,Eq,Show)]
enum Token {LParen, RParen, Plus, Minus, Slash, Star, Int(uint)}

impl Token {
    // are tokens associated to a binary operation?
    fn is_binary(&self) -> bool {
        match self {
            &Plus | &Minus | &Slash | &Star => true,
            _ => false
        }
    }
}

#[inline]
// map a character to its corresponding token
// (post-refactor signature: takes a plain char instead of Option<char>)
fn single_char_to_token(ch: char) -> Option<Token> {
    match ch {
        '(' => Some(LParen),
        ')' => Some(RParen),
        '+' => Some(Plus),
        '-' => Some(Minus),
        '/' => Some(Slash),
        '*' => Some(Star),
        _ => None
    }
}

// Lexer reads an expression like (a + b) / c * d
// as an iterator on the tokens that compose it
// Int(a), LParen, Plus, Int(b), RParen...
// Holds the raw input plus a byte offset marking how much has been consumed.
struct Lexer<'a> {
    input_str: &'a str,
    offset: uint
}

impl <'a> Lexer<'a> {
    // Create a lexer positioned at the start of the input.
    fn new(input_str: &'a str) -> Lexer<'a> {
        Lexer { input_str: input_str, offset: 0u }
    }

    // Read the next token and require it to be one of `expected`; the error
    // message reports the offset captured *before* the read.
    fn expect(&mut self, expected:&[Token]) -> Result<Token, String> {
        let n = self.offset;
        match self.next() {
            Some(a) if expected.contains(&a) => Ok(a),
            other => Err(format!("Parsing error: {} was unexpected at offset {}", other, n))
        }
    }
}

impl <'a> Iterator<Token> for Lexer<'a> {
    // Produce the next token, skipping whitespace; None at end of input.
    fn next(&mut self) -> Option<Token> {
        // slice the original string starting from the current offset
        let remaining_input=self.input_str.slice_from(self.offset);
        // keep track of the offset while advancing chars, with enumerate()
        let ch_iter=remaining_input.chars().enumerate();
        // advance to the next non-whitespace character
        let mut trimmed=ch_iter.skip_while(|&(_, ch)| ch.is_whitespace());
        let (tok, cur_offset) = match trimmed.next() {
            // found digit, check if there are others
            // and transform them to a uint
            Some((o, d)) if d.is_digit() => {
                let (mut val, mut offset)=(char::to_digit(d, 10).unwrap(), o);
                for (idx, ch) in trimmed {
                    if ch.is_digit() {val=val*10 + char::to_digit(ch, 10).unwrap();}
                    else { offset=idx; break; }
                }
                // NOTE(review): if the digit run reaches end of input the loop
                // exhausts without hitting the `break`, so `offset` still holds
                // the first digit's index and the number is NOT consumed; a
                // subsequent call re-reads it. The parser's lookahead happens
                // to tolerate this — TODO confirm it is intentional.
                (Some(Int(val)), offset)
            },
            // found non-digit, try transforming it to the corresponding token
            Some((o, t)) => (single_char_to_token(t), o+1),
            _ => (None, 0u)
        };
        // update the offset for the next iteration
        self.offset += cur_offset;
        tok
    }
}

// operators are a "higher level" concept than tokens
// as they define the semantics of the expression language
// e.g.
// token "Minus" can correspond to the unary Neg Operator (-a)
// or to the binary Sub operator (a - b)
#[deriving(PartialEq,Eq)]
enum Operator {Neg, Add, Sub, Mul, Div, Sentinel}

// Newtype wrapper so precedence comparison gets its own (partial) ordering
// rules, distinct from plain Operator equality.
#[deriving(PartialEq,Eq)]
struct OperatorPrecedence(Operator);

impl Operator {
    // wrap the operator for precedence comparison
    fn precedence(self) -> OperatorPrecedence {
        OperatorPrecedence(self)
    }
}

impl OperatorPrecedence {
    // numeric precedence level: Sentinel < Add/Sub < Neg < Mul/Div
    fn prec(self) -> uint {
        match self {
            OperatorPrecedence(Sentinel) => 0u,
            OperatorPrecedence(Add) | OperatorPrecedence(Sub) => 1u,
            OperatorPrecedence(Neg) => 2u,
            OperatorPrecedence(Mul) | OperatorPrecedence(Div) => 3u
        }
    }
}

/* Operator precedence for binary operators:
 * if x has higher precedence than y precedence for x > precedence for y
 * if x and y have equal precedence the first one has precedence
 e.g. in expression (4 / 2 * 2) operators * and / have the same precedence,
 but the operations must be performed in the order they appear (division
 first, multiplication second) otherwise results are different */
impl PartialOrd for OperatorPrecedence {
    // Note the deliberate asymmetry: pairs of equal numeric precedence
    // (Mul/Div, Add/Sub) report `gt == true` and `lt == false` in BOTH
    // directions, so the operator already on the stack always wins — this is
    // how left associativity is encoded.
    fn lt(&self, other: &OperatorPrecedence) -> bool {
        match (self, other) {
            (&OperatorPrecedence(Mul), &OperatorPrecedence(Div)) => false,
            (&OperatorPrecedence(Div), &OperatorPrecedence(Mul)) => false,
            (&OperatorPrecedence(Add), &OperatorPrecedence(Sub)) => false,
            (&OperatorPrecedence(Sub), &OperatorPrecedence(Add)) => false,
            _ => self.prec() < other.prec()
        }
    }
    fn gt(&self, other: &OperatorPrecedence) -> bool {
        match (self, other) {
            (&OperatorPrecedence(Mul), &OperatorPrecedence(Div)) => true,
            (&OperatorPrecedence(Div), &OperatorPrecedence(Mul)) => true,
            (&OperatorPrecedence(Add), &OperatorPrecedence(Sub)) => true,
            (&OperatorPrecedence(Sub), &OperatorPrecedence(Add)) => true,
            _ => self.prec() > other.prec()
        }
    }
}

// recursive descent parser
// with the shunting yard algorithm as explained on
// http://www.engr.mun.ca/~theo/Misc/exp_parsing.htm
// I followed the names of the methods as closely as possible vs the
// pseudo-code that illustrates the algorithm
struct Parser<'a> {
    operators: Vec<Operator>,  // operator stack; Sentinel marks expression boundaries
    operands: Vec<f32>,        // operand stack holding intermediate results
    lexer: Lexer<'a>
}

impl <'a> Parser<'a> {
    fn new(input_str: &'a str) -> Parser<'a> {
        Parser {
            operators: Vec::new(),
            operands: Vec::new(),
            lexer: Lexer::new(input_str)
        }
    }

    // Parse the whole input; the final result is whatever remains on top of
    // the operand stack.
    fn parse(&mut self) -> Result<f32, String> {
        self.operators.push(Sentinel);
        try!(self.e());
        let res=self.operands.last();
        let ret=match res {
            Some(&r) => Ok(r),
            None => Err("something went wrong, got no result".to_string())
        };
        ret
    }

    // e: parse an expression — a primary followed by (binary-op primary)* —
    // then apply every operator stacked since the enclosing Sentinel.
    fn e(&mut self) -> Result<(), String> {
        try!(self.p());
        let mut n=self.lexer.next();
        while n.is_some() && n.unwrap().is_binary() {
            match n {
                Some(Plus) => self.push_operator(Add),
                Some(Minus) => self.push_operator(Sub),
                Some(Star) => self.push_operator(Mul),
                Some(Slash) => self.push_operator(Div),
                _ => unreachable!() //shouldn't get there (there are no other
                                    // binary operators
            };
            try!(self.p());
            // one-token lookahead: stop when the next token is not a binary op.
            // NOTE(review): `peekable()` here wraps the lexer just for this
            // check; verify it does not lose a consumed token on this dialect.
            let mut n_peek = self.lexer.peekable();
            if n_peek.peek().is_none() || !n_peek.peek().unwrap().is_binary() {
                break;
            }
            n = self.lexer.next();
        }
        // unwind operators down to (but not including) the matching Sentinel
        while self.operators.last().is_some() &&
              self.operators.last().unwrap() != &Sentinel {
            self.pop_operator();
        }
        Ok(())
    }

    // p: parse a primary — an integer literal, a parenthesized expression,
    // or a unary minus applied to a primary.
    fn p(&mut self) -> Result<(), String> {
        match self.lexer.next() {
            Some(Int(n)) => { self.operands.push(n as f32); },
            Some(LParen) => {
                // a fresh Sentinel isolates the sub-expression's operators
                self.operators.push(Sentinel);
                try!(self.e());
                try!(self.lexer.expect(&[RParen]));
                self.operators.pop();
            },
            Some(Minus) => {
                self.push_operator(Neg);
                try!(self.p());
            },
            Some(e) => return Err(format!("unexpected token {}", e)),
            _ => return Err("unexpected end of command".to_string())
            //Some(Minus) =>
        }
        Ok(())
    }

    // pop the top operator and apply it to the operand stack
    fn pop_operator(&mut self) {
        match self.operators.pop() {
            Some(Add) => self.binary_op(|t1,t2| t1+t2),
            Some(Sub) => self.binary_op(|t1,t2| t1-t2),
            Some(Mul) => self.binary_op(|t1,t2| t1*t2),
            Some(Div) => self.binary_op(|t1,t2| t1/t2),
            Some(Neg) => self.unary_op(|t1| -t1),
            // Sentinel is never popped here: callers stop at it (`e`) or pop
            // it directly (`p` after a closing paren)
            _ => unreachable!()
        }
    }

    // push `op`, first applying a stacked operator of higher precedence
    // (equal precedence also counts as higher — see OperatorPrecedence::gt)
    fn push_operator(&mut self, op:Operator) {
        match self.operators.last() {
            Some(&a) if a.precedence() > op.precedence() => self.pop_operator(),
            _ => ()
        }
        self.operators.push(op);
    }

    #[inline]
    // pop two operands and push the result; t2 was pushed first, so it is
    // the left-hand side of the operation
    fn binary_op(&mut self, op:|f32,f32| -> f32) {
        let t1=self.operands.pop().unwrap();
        let t2=self.operands.pop().unwrap();
        self.operands.push(op(t2,t1));
    }

    #[inline]
    // pop one operand and push the result of the unary operation
    fn unary_op(&mut self, op:|f32| -> f32) {
        let t1=self.operands.pop().unwrap();
        self.operands.push(op(t1));
    }
}

#[test]
fn test_precedence() {
    assert!(Mul.precedence() > Add.precedence());
    assert!(Mul.precedence() > Div.precedence());
    // equal-precedence pairs must never compare as `lt` (left associativity)
    assert!((Mul.precedence() < Div.precedence())==false);
}

#[test]
fn lexer_iter() {
    // test read token and character's offset in the iterator
    macro_rules! test_tok(
        ($tok:ident, $exp_tok:expr, $exp_pos:expr) => (
            assert_eq!(($tok.next(), $tok.offset), (Some($exp_tok), $exp_pos));))

    let mut tok=Lexer::new(" 15 + 4");
    test_tok!(tok, Int(15), 4);
    test_tok!(tok, Plus, 6);
    let read=tok.expect(&[LParen,Int(4),RParen]);
    assert_eq!(read, Ok(Int(4)));

    // empty and all-whitespace inputs yield no tokens
    let mut tok=Lexer::new("");
    assert_eq!(tok.next(), None);
    let mut tok=Lexer::new(" ");
    assert_eq!(tok.next(), None);

    let mut tok=Lexer::new("2 * (3+4/2)");
    test_tok!(tok, Int(2), 1);
    test_tok!(tok, Star, 3);
    test_tok!(tok, LParen, 5);
    test_tok!(tok, Int(3), 6);
    test_tok!(tok, Plus, 7);
    test_tok!(tok, Int(4), 8);
    test_tok!(tok, Slash, 9);
    test_tok!(tok, Int(2), 10);
    test_tok!(tok, RParen, 11);
}

#[test]
fn parse() {
    assert_eq!(Parser::new("2+2").parse(), Ok(4.));
    assert_eq!(Parser::new("2+3*4").parse(), Ok(14.));
    assert_eq!(Parser::new("4*(3+2)").parse(), Ok(20.));
    assert_eq!(Parser::new("5/(3+2)*3").parse(), Ok(3.));
    assert_eq!(Parser::new("2++12").parse(), Err("unexpected token Plus".to_string()));
    assert_eq!(Parser::new("-2+12").parse(), Ok(10.));
    assert_eq!(Parser::new("-2*(2+3)").parse(), Ok(-10.));
}

#[test]
fn try_check_values() {
    let m = &mut [1,2,3,4];
    assert!(check_values(m, "1+3 -(4/2)"));
}
use ide_db::helpers::FamousDefs;
use ide_db::RootDatabase;
use syntax::ast::{self, AstNode, NameOwner};
use test_utils::mark;

use crate::{AssistContext, AssistId, AssistKind, Assists};

// Assist: generate_default_from_enum_variant
//
// Adds a Default impl for an enum using a variant.
//
// ```
// enum Version {
//     Undefined,
//     Minor<|>,
//     Major,
// }
// ```
// ->
// ```
// enum Version {
//     Undefined,
//     Minor,
//     Major,
// }
//
// impl Default for Version {
//     fn default() -> Self {
//         Self::Minor
//     }
// }
// ```
pub(crate) fn generate_default_from_enum_variant(
    acc: &mut Assists,
    ctx: &AssistContext,
) -> Option<()> {
    let variant = ctx.find_node_at_offset::<ast::Variant>()?;
    let variant_name = variant.name()?;
    let enum_name = variant.parent_enum().name()?;
    // only unit variants can be named as `Self::Variant` without arguments
    if !matches!(variant.kind(), ast::StructKind::Unit) {
        mark::hit!(test_gen_default_on_non_unit_variant_not_implemented);
        return None;
    }
    // don't offer the assist when a `Default` impl already exists
    if existing_default_impl(&ctx.sema, &variant).is_some() {
        mark::hit!(test_gen_default_impl_already_exists);
        return None;
    }

    let target = variant.syntax().text_range();
    acc.add(
        AssistId("generate_default_from_enum_variant", AssistKind::Generate),
        "Generate `Default` impl from this enum variant",
        target,
        |edit| {
            // insert the generated impl immediately after the enum definition
            let start_offset = variant.parent_enum().syntax().text_range().end();
            let buf = format!(
                r#"

impl Default for {0} {{
    fn default() -> Self {{
        Self::{1}
    }}
}}"#,
                enum_name, variant_name
            );
            edit.insert(start_offset, buf);
        },
    )
}

// Returns Some(()) when the enum containing `variant` already implements
// `core::default::Default`, None otherwise (including when resolution fails).
fn existing_default_impl(
    sema: &'_ hir::Semantics<'_, RootDatabase>,
    variant: &ast::Variant,
) -> Option<()> {
    let variant = sema.to_def(variant)?;
    let enum_ = variant.parent_enum(sema.db);
    let krate = enum_.module(sema.db).krate();

    let default_trait = FamousDefs(sema, Some(krate)).core_default_Default()?;
    let enum_type = enum_.ty(sema.db);

    if enum_type.impls_trait(sema.db, default_trait, &[]) {
        Some(())
    } else {
        None
    }
}

#[cfg(test)]
mod tests {
    use test_utils::mark;

    use crate::tests::{check_assist, check_assist_not_applicable};

    use super::*;
fn check_not_applicable(ra_fixture: &str) { let fixture = format!("//- /main.rs crate:main deps:core\n{}\n{}", ra_fixture, FamousDefs::FIXTURE); check_assist_not_applicable(generate_default_from_enum_variant, &fixture) } #[test] fn test_generate_default_from_variant() { check_assist( generate_default_from_enum_variant, r#"enum Variant { Undefined, Minor<|>, Major, }"#, r#"enum Variant { Undefined, Minor, Major, } impl Default for Variant { fn default() -> Self { Self::Minor } }"#, ); } #[test] fn test_generate_default_already_implemented() { mark::check!(test_gen_default_impl_already_exists); check_not_applicable( r#"enum Variant { Undefined, Minor<|>, Major, } impl Default for Variant { fn default() -> Self { Self::Minor } }"#, ); } #[test] fn test_add_from_impl_no_element() { mark::check!(test_gen_default_on_non_unit_variant_not_implemented); check_not_applicable( r#"enum Variant { Undefined, Minor(u32)<|>, Major, }"#, ); } #[test] fn test_generate_default_from_variant_with_one_variant() { check_assist( generate_default_from_enum_variant, r#"enum Variant { Undefi<|>ned }"#, r#"enum Variant { Undefined } impl Default for Variant { fn default() -> Self { Self::Undefined } }"#, ); } } generate default implementation for an enum from an enum variant #6860 Signed-off-by: Benjamin Coenen <481f8cf7e6db3368f7a1a2be85b2c44f6c1b4e1e@users.noreply.github.com> use ide_db::helpers::FamousDefs; use ide_db::RootDatabase; use syntax::ast::{self, AstNode, NameOwner}; use test_utils::mark; use crate::{AssistContext, AssistId, AssistKind, Assists}; // Assist: generate_default_from_enum_variant // // Adds a Default impl for an enum using a variant. 
// // ``` // enum Version { // Undefined, // Minor<|>, // Major, // } // ``` // -> // ``` // enum Version { // Undefined, // Minor, // Major, // } // // impl Default for Version { // fn default() -> Self { // Self::Minor // } // } // ``` pub(crate) fn generate_default_from_enum_variant( acc: &mut Assists, ctx: &AssistContext, ) -> Option<()> { let variant = ctx.find_node_at_offset::<ast::Variant>()?; let variant_name = variant.name()?; let enum_name = variant.parent_enum().name()?; if !matches!(variant.kind(), ast::StructKind::Unit) { mark::hit!(test_gen_default_on_non_unit_variant_not_implemented); return None; } if existing_default_impl(&ctx.sema, &variant).is_some() { mark::hit!(test_gen_default_impl_already_exists); return None; } let target = variant.syntax().text_range(); acc.add( AssistId("generate_default_from_enum_variant", AssistKind::Generate), "Generate `Default` impl from this enum variant", target, |edit| { let start_offset = variant.parent_enum().syntax().text_range().end(); let buf = format!( r#" impl Default for {0} {{ fn default() -> Self {{ Self::{1} }} }}"#, enum_name, variant_name ); edit.insert(start_offset, buf); }, ) } fn existing_default_impl( sema: &'_ hir::Semantics<'_, RootDatabase>, variant: &ast::Variant, ) -> Option<()> { let variant = sema.to_def(variant)?; let enum_ = variant.parent_enum(sema.db); let krate = enum_.module(sema.db).krate(); let default_trait = FamousDefs(sema, Some(krate)).core_default_Default()?; let enum_type = enum_.ty(sema.db); if enum_type.impls_trait(sema.db, default_trait, &[]) { Some(()) } else { None } } #[cfg(test)] mod tests { use test_utils::mark; use crate::tests::{check_assist, check_assist_not_applicable}; use super::*; fn check_not_applicable(ra_fixture: &str) { let fixture = format!("//- /main.rs crate:main deps:core\n{}\n{}", ra_fixture, FamousDefs::FIXTURE); check_assist_not_applicable(generate_default_from_enum_variant, &fixture) } #[test] fn test_generate_default_from_variant() { check_assist( 
generate_default_from_enum_variant, r#"enum Variant { Undefined, Minor<|>, Major, }"#, r#"enum Variant { Undefined, Minor, Major, } impl Default for Variant { fn default() -> Self { Self::Minor } }"#, ); } #[test] fn test_generate_default_already_implemented() { mark::check!(test_gen_default_impl_already_exists); check_not_applicable( r#"enum Variant { Undefined, Minor<|>, Major, } impl Default for Variant { fn default() -> Self { Self::Minor } }"#, ); } #[test] fn test_add_from_impl_no_element() { mark::check!(test_gen_default_on_non_unit_variant_not_implemented); check_not_applicable( r#"enum Variant { Undefined, Minor(u32)<|>, Major, }"#, ); } #[test] fn test_generate_default_from_variant_with_one_variant() { check_assist( generate_default_from_enum_variant, r#"enum Variant { Undefi<|>ned }"#, r#"enum Variant { Undefined } impl Default for Variant { fn default() -> Self { Self::Undefined } }"#, ); } }
use std::vec::IntoIter; use crate::BuildMethod; use darling::util::{Flag, PathList}; use darling::{self, FromMeta}; use proc_macro2::Span; use syn::{self, spanned::Spanned, Attribute, Generics, Ident, Path, Visibility}; use crate::{ Builder, BuilderField, BuilderPattern, DefaultExpression, DeprecationNotes, Each, Initializer, Setter, }; /// `derive_builder` uses separate sibling keywords to represent /// mutually-exclusive visibility states. This trait requires implementers to /// expose those flags and provides a method to compute any explicit visibility /// bounds. trait FlagVisibility { fn public(&self) -> &Flag; fn private(&self) -> &Flag; /// Get the explicitly-expressed visibility preference from the attribute. /// This returns `None` if the input didn't include either keyword. /// /// # Panics /// This method panics if the input specifies both `public` and `private`. fn as_expressed_vis(&self) -> Option<Visibility> { match (self.public().is_some(), self.private().is_some()) { (true, true) => panic!("A field cannot be both public and private"), (true, false) => Some(syn::parse_str("pub").unwrap()), (false, true) => Some(Visibility::Inherited), (false, false) => None, } } } /// Options for the `build_fn` property in struct-level builder options. /// There is no inheritance for these settings from struct-level to field-level, /// so we don't bother using `Option` for values in this struct. #[derive(Debug, Clone, FromMeta)] #[darling(default)] pub struct BuildFn { skip: bool, name: Ident, validate: Option<Path>, public: Flag, private: Flag, /// The path to an existing error type that the build method should return. /// /// Setting this will prevent `derive_builder` from generating an error type for the build /// method. /// /// # Type Bounds /// This type's bounds depend on other settings of the builder. /// /// * If uninitialized fields cause `build()` to fail, then this type /// must `impl From<UninitializedFieldError>`. 
    /// Uninitialized fields do not cause errors
    /// when default values are provided for every field or at the struct level.
    /// * If `validate` is specified, then this type must provide a conversion from the specified
    ///   function's error type.
    error: Option<Path>,
}

impl Default for BuildFn {
    fn default() -> Self {
        BuildFn {
            skip: false,
            // `build` is the conventional name for the finishing method
            name: Ident::new("build", Span::call_site()),
            validate: None,
            public: Default::default(),
            private: Default::default(),
            error: None,
        }
    }
}

impl FlagVisibility for BuildFn {
    fn public(&self) -> &Flag {
        &self.public
    }

    fn private(&self) -> &Flag {
        &self.private
    }
}

/// Contents of the `field` meta in `builder` attributes.
#[derive(Debug, Clone, Default, FromMeta)]
#[darling(default)]
pub struct FieldMeta {
    public: Flag,
    private: Flag,
}

impl FlagVisibility for FieldMeta {
    fn public(&self) -> &Flag {
        &self.public
    }

    fn private(&self) -> &Flag {
        &self.private
    }
}

/// Struct-level `setter(...)` options; each field acts as a fallback that
/// the corresponding field-level setting may override.
#[derive(Debug, Clone, Default, FromMeta)]
#[darling(default)]
pub struct StructLevelSetter {
    prefix: Option<Ident>,
    into: Option<bool>,
    strip_option: Option<bool>,
    skip: Option<bool>,
}

impl StructLevelSetter {
    /// Check if setters are explicitly enabled or disabled at
    /// the struct level. Returns `None` when `skip` was not specified.
    pub fn enabled(&self) -> Option<bool> {
        self.skip.map(|x| !x)
    }
}

/// Adapter that enables:
///
/// 1. Use of a derived `FromMeta` on `Each`,
/// 2. Support for `each = "..."` and `each(name = "...")`
/// 3. The rest of the builder crate to directly access fields on `Each`
struct EachLongOrShort(Each);

/// Create `Each` from an attribute's `Meta`.
/// /// Two formats are supported: /// /// * `each = "..."`, which provides the name of the `each` setter and otherwise uses default values /// * `each(name = "...")`, which allows setting additional options on the `each` setter impl FromMeta for EachLongOrShort { fn from_value(value: &syn::Lit) -> darling::Result<Self> { if let syn::Lit::Str(v) = value { v.parse::<Ident>() .map(Each::from) .map(Self) .map_err(|_| darling::Error::unknown_value(&v.value()).with_span(value)) } else { Err(darling::Error::unexpected_lit_type(value)) } } fn from_list(items: &[syn::NestedMeta]) -> darling::Result<Self> { Each::from_list(items).map(Self) } } fn unpack_each_shorthand(input: Option<EachLongOrShort>) -> Option<Each> { input.map(|v| v.0) } /// The `setter` meta item on fields in the input type. /// Unlike the `setter` meta item at the struct level, this allows specific /// name overrides. #[derive(Debug, Clone, Default, FromMeta)] #[darling(default)] pub struct FieldLevelSetter { prefix: Option<Ident>, name: Option<Ident>, into: Option<bool>, strip_option: Option<bool>, skip: Option<bool>, custom: Option<bool>, #[darling(map = "unpack_each_shorthand")] each: Option<Each>, } impl FieldLevelSetter { /// Get whether the setter should be emitted. The rules are the same as /// for `field_enabled`, except we only skip the setter if `setter(custom)` is present. pub fn setter_enabled(&self) -> Option<bool> { if self.custom.is_some() { return self.custom.map(|x| !x); } self.field_enabled() } /// Get whether or not this field-level setter indicates a setter and /// field should be emitted. The setter shorthand rules are that the /// presence of a `setter` with _any_ properties set forces the setter /// to be emitted. 
pub fn field_enabled(&self) -> Option<bool> { if self.skip.is_some() { return self.skip.map(|x| !x); } if self.prefix.is_some() || self.name.is_some() || self.into.is_some() || self.strip_option.is_some() || self.each.is_some() { return Some(true); } None } } /// `derive_builder` allows the calling code to use `setter` as a word to enable /// setters when they've been disabled at the struct level. /// `darling` doesn't provide that out of the box, so we read the user input /// into this enum then convert it into the `FieldLevelSetter`. #[derive(Debug, Clone)] enum FieldSetterMeta { /// The keyword in isolation. /// This is equivalent to `setter(skip = false)`. Shorthand, Longhand(FieldLevelSetter), } impl From<FieldSetterMeta> for FieldLevelSetter { fn from(v: FieldSetterMeta) -> Self { match v { FieldSetterMeta::Shorthand => FieldLevelSetter { skip: Some(false), ..Default::default() }, FieldSetterMeta::Longhand(val) => val, } } } impl FromMeta for FieldSetterMeta { fn from_word() -> darling::Result<Self> { Ok(FieldSetterMeta::Shorthand) } fn from_meta(value: &syn::Meta) -> darling::Result<Self> { if let syn::Meta::Path(_) = *value { FieldSetterMeta::from_word() } else { FieldLevelSetter::from_meta(value).map(FieldSetterMeta::Longhand) } } } /// Data extracted from the fields of the input struct. #[derive(Debug, Clone, FromField)] #[darling(attributes(builder), forward_attrs(doc, cfg, allow))] pub struct Field { ident: Option<Ident>, attrs: Vec<Attribute>, ty: syn::Type, /// Field-level override for builder pattern. /// Note that setting this may force the builder to derive `Clone`. #[darling(default)] pattern: Option<BuilderPattern>, #[darling(default)] public: Flag, #[darling(default)] private: Flag, // See the documentation for `FieldSetterMeta` to understand how `darling` // is interpreting this field. #[darling(default, map = "FieldSetterMeta::into")] setter: FieldLevelSetter, /// The value for this field if the setter is never invoked. 
/// /// A field can get its default one of three ways: /// /// 1. An explicit `default = "..."` expression /// 2. An explicit `default` word, in which case the field type's `Default::default()` /// value is used /// 3. Inherited from the field's value in the struct's `default` value. /// /// This property only captures the first two, the third is computed in `FieldWithDefaults`. #[darling(default)] default: Option<DefaultExpression>, #[darling(default)] try_setter: Flag, #[darling(default)] field: FieldMeta, } impl FlagVisibility for Field { fn public(&self) -> &Flag { &self.public } fn private(&self) -> &Flag { &self.private } } #[derive(Debug, Clone, FromDeriveInput)] #[darling( attributes(builder), forward_attrs(doc, cfg, allow), supports(struct_named) )] pub struct Options { ident: Ident, // These are currently unused, but that means the generated builder cannot have // inherited the cfg or allow attributes from the base struct. // see https://github.com/colin-kiegel/rust-derive-builder/issues/222 // attrs: Vec<Attribute>, vis: Visibility, generics: Generics, /// The name of the generated builder. Defaults to `#{ident}Builder`. #[darling(default)] name: Option<Ident>, #[darling(default)] pattern: BuilderPattern, #[darling(default)] build_fn: BuildFn, /// Additional traits to derive on the builder. #[darling(default)] derive: PathList, /// Setter options applied to all field setters in the struct. #[darling(default)] setter: StructLevelSetter, /// Struct-level value to use in place of any unfilled fields #[darling(default)] default: Option<DefaultExpression>, #[darling(default)] public: Flag, #[darling(default)] private: Flag, /// The parsed body of the derived struct. data: darling::ast::Data<darling::util::Ignored, Field>, #[darling(default)] no_std: Flag, /// When present, emit additional fallible setters alongside each regular /// setter. 
#[darling(default)] try_setter: Flag, #[darling(default)] field: FieldMeta, #[darling(skip, default)] deprecation_notes: DeprecationNotes, } impl FlagVisibility for Options { fn public(&self) -> &Flag { &self.public } fn private(&self) -> &Flag { &self.private } } /// Accessors for parsed properties. impl Options { pub fn builder_ident(&self) -> Ident { if let Some(ref custom) = self.name { return custom.clone(); } syn::parse_str(&format!("{}Builder", self.ident)) .expect("Struct name with Builder suffix should be an ident") } pub fn builder_error_ident(&self) -> Path { if let Some(existing) = self.build_fn.error.as_ref() { existing.clone() } else if let Some(ref custom) = self.name { format_ident!("{}Error", custom).into() } else { format_ident!("{}BuilderError", self.ident).into() } } /// The visibility of the builder struct. /// If a visibility was declared in attributes, that will be used; /// otherwise the struct's own visibility will be used. pub fn builder_vis(&self) -> Visibility { self.as_expressed_vis().unwrap_or_else(|| self.vis.clone()) } /// Get the visibility of the emitted `build` method. /// This defaults to the visibility of the parent builder, but can be overridden. pub fn build_method_vis(&self) -> Visibility { self.build_fn .as_expressed_vis() .unwrap_or_else(|| self.builder_vis()) } pub fn raw_fields(&self) -> Vec<&Field> { self.data .as_ref() .take_struct() .expect("Only structs supported") .fields } /// A builder requires `Clone` to be derived if its build method or any of its setters /// use the mutable or immutable pattern. pub fn requires_clone(&self) -> bool { self.pattern.requires_clone() || self.fields().any(|f| f.pattern().requires_clone()) } /// Get an iterator over the input struct's fields which pulls fallback /// values from struct-level settings. 
pub fn fields(&self) -> FieldIter { FieldIter(self, self.raw_fields().into_iter()) } pub fn field_count(&self) -> usize { self.raw_fields().len() } } /// Converters to codegen structs impl Options { pub fn as_builder(&self) -> Builder { Builder { enabled: true, ident: self.builder_ident(), pattern: self.pattern, derives: &self.derive, generics: Some(&self.generics), visibility: self.builder_vis(), fields: Vec::with_capacity(self.field_count()), field_initializers: Vec::with_capacity(self.field_count()), functions: Vec::with_capacity(self.field_count()), generate_error: self.build_fn.error.is_none(), must_derive_clone: self.requires_clone(), doc_comment: None, deprecation_notes: Default::default(), std: { let no_std: bool = self.no_std.into(); !no_std }, } } pub fn as_build_method(&self) -> BuildMethod { let (_, ty_generics, _) = self.generics.split_for_impl(); BuildMethod { enabled: !self.build_fn.skip, ident: &self.build_fn.name, visibility: self.build_method_vis(), pattern: self.pattern, target_ty: &self.ident, target_ty_generics: Some(ty_generics), error_ty: self.builder_error_ident(), initializers: Vec::with_capacity(self.field_count()), doc_comment: None, default_struct: self.default.as_ref(), validate_fn: self.build_fn.validate.as_ref(), } } } /// Accessor for field data which can pull through options from the parent /// struct. pub struct FieldWithDefaults<'a> { parent: &'a Options, field: &'a Field, } /// Accessors for parsed properties, with transparent pull-through from the /// parent struct's configuration. impl<'a> FieldWithDefaults<'a> { /// Check if this field should emit a setter. pub fn setter_enabled(&self) -> bool { self.field .setter .setter_enabled() .or_else(|| self.parent.setter.enabled()) .unwrap_or(true) } pub fn field_enabled(&self) -> bool { self.field .setter .field_enabled() .or_else(|| self.parent.setter.enabled()) .unwrap_or(true) } /// Check if this field should emit a fallible setter. 
/// This depends on the `TryFrom` trait, which hasn't yet stabilized. pub fn try_setter(&self) -> bool { self.field.try_setter.is_some() || self.parent.try_setter.is_some() } /// Get the prefix that should be applied to the field name to produce /// the setter ident, if any. pub fn setter_prefix(&self) -> Option<&Ident> { self.field .setter .prefix .as_ref() .or(self.parent.setter.prefix.as_ref()) } /// Get the ident of the emitted setter method pub fn setter_ident(&self) -> syn::Ident { if let Some(ref custom) = self.field.setter.name { return custom.clone(); } let ident = &self.field.ident; if let Some(ref prefix) = self.setter_prefix() { return syn::parse_str(&format!("{}_{}", prefix, ident.as_ref().unwrap())).unwrap(); } ident.clone().unwrap() } /// Checks if the emitted setter should be generic over types that impl /// `Into<FieldType>`. pub fn setter_into(&self) -> bool { self.field .setter .into .or(self.parent.setter.into) .unwrap_or_default() } /// Checks if the emitted setter should strip the wrapper Option over types that impl /// `Option<FieldType>`. pub fn setter_strip_option(&self) -> bool { self.field .setter .strip_option .or(self.parent.setter.strip_option) .unwrap_or_default() } /// Get the visibility of the emitted setter, if there will be one. pub fn setter_vis(&self) -> Visibility { self.field .as_expressed_vis() .or_else(|| self.parent.as_expressed_vis()) .unwrap_or_else(|| syn::parse_str("pub").unwrap()) } /// Get the ident of the input field. This is also used as the ident of the /// emitted field. 
pub fn field_ident(&self) -> &syn::Ident { self.field .ident .as_ref() .expect("Tuple structs are not supported") } pub fn field_vis(&self) -> Visibility { self.field .field .as_expressed_vis() .or_else(|| self.parent.field.as_expressed_vis()) .unwrap_or(Visibility::Inherited) } pub fn pattern(&self) -> BuilderPattern { self.field.pattern.unwrap_or(self.parent.pattern) } pub fn use_parent_default(&self) -> bool { self.field.default.is_none() && self.parent.default.is_some() } pub fn deprecation_notes(&self) -> &DeprecationNotes { &self.parent.deprecation_notes } } /// Converters to codegen structs impl<'a> FieldWithDefaults<'a> { /// Returns a `Setter` according to the options. pub fn as_setter(&'a self) -> Setter<'a> { Setter { setter_enabled: self.setter_enabled(), try_setter: self.try_setter(), visibility: self.setter_vis(), pattern: self.pattern(), attrs: &self.field.attrs, ident: self.setter_ident(), field_ident: self.field_ident(), field_type: &self.field.ty, generic_into: self.setter_into(), strip_option: self.setter_strip_option(), deprecation_notes: self.deprecation_notes(), each: self.field.setter.each.as_ref(), } } /// Returns an `Initializer` according to the options. /// /// # Panics /// /// if `default_expression` can not be parsed as `Block`. 
pub fn as_initializer(&'a self) -> Initializer<'a> { Initializer { field_enabled: self.field_enabled(), field_ident: self.field_ident(), builder_pattern: self.pattern(), default_value: self.field.default.as_ref(), use_default_struct: self.use_parent_default(), custom_error_type_span: self .parent .build_fn .error .as_ref() .map(|err_ty| err_ty.span()), } } pub fn as_builder_field(&'a self) -> BuilderField<'a> { BuilderField { field_ident: self.field_ident(), field_type: &self.field.ty, field_enabled: self.field_enabled(), field_visibility: self.field_vis(), attrs: &self.field.attrs, } } } pub struct FieldIter<'a>(&'a Options, IntoIter<&'a Field>); impl<'a> Iterator for FieldIter<'a> { type Item = FieldWithDefaults<'a>; fn next(&mut self) -> Option<Self::Item> { self.1.next().map(|field| FieldWithDefaults { parent: self.0, field, }) } } Clean up `each` using `with` use std::vec::IntoIter; use crate::BuildMethod; use darling::util::{Flag, PathList}; use darling::{self, FromMeta}; use proc_macro2::Span; use syn::Meta; use syn::{self, spanned::Spanned, Attribute, Generics, Ident, Path, Visibility}; use crate::{ Builder, BuilderField, BuilderPattern, DefaultExpression, DeprecationNotes, Each, Initializer, Setter, }; /// `derive_builder` uses separate sibling keywords to represent /// mutually-exclusive visibility states. This trait requires implementers to /// expose those flags and provides a method to compute any explicit visibility /// bounds. trait FlagVisibility { fn public(&self) -> &Flag; fn private(&self) -> &Flag; /// Get the explicitly-expressed visibility preference from the attribute. /// This returns `None` if the input didn't include either keyword. /// /// # Panics /// This method panics if the input specifies both `public` and `private`. 
fn as_expressed_vis(&self) -> Option<Visibility> { match (self.public().is_some(), self.private().is_some()) { (true, true) => panic!("A field cannot be both public and private"), (true, false) => Some(syn::parse_str("pub").unwrap()), (false, true) => Some(Visibility::Inherited), (false, false) => None, } } } /// Options for the `build_fn` property in struct-level builder options. /// There is no inheritance for these settings from struct-level to field-level, /// so we don't bother using `Option` for values in this struct. #[derive(Debug, Clone, FromMeta)] #[darling(default)] pub struct BuildFn { skip: bool, name: Ident, validate: Option<Path>, public: Flag, private: Flag, /// The path to an existing error type that the build method should return. /// /// Setting this will prevent `derive_builder` from generating an error type for the build /// method. /// /// # Type Bounds /// This type's bounds depend on other settings of the builder. /// /// * If uninitialized fields cause `build()` to fail, then this type /// must `impl From<UninitializedFieldError>`. Uninitialized fields do not cause errors /// when default values are provided for every field or at the struct level. /// * If `validate` is specified, then this type must provide a conversion from the specified /// function's error type. error: Option<Path>, } impl Default for BuildFn { fn default() -> Self { BuildFn { skip: false, name: Ident::new("build", Span::call_site()), validate: None, public: Default::default(), private: Default::default(), error: None, } } } impl FlagVisibility for BuildFn { fn public(&self) -> &Flag { &self.public } fn private(&self) -> &Flag { &self.private } } /// Contents of the `field` meta in `builder` attributes. 
#[derive(Debug, Clone, Default, FromMeta)] #[darling(default)] pub struct FieldMeta { public: Flag, private: Flag, } impl FlagVisibility for FieldMeta { fn public(&self) -> &Flag { &self.public } fn private(&self) -> &Flag { &self.private } } #[derive(Debug, Clone, Default, FromMeta)] #[darling(default)] pub struct StructLevelSetter { prefix: Option<Ident>, into: Option<bool>, strip_option: Option<bool>, skip: Option<bool>, } impl StructLevelSetter { /// Check if setters are explicitly enabled or disabled at /// the struct level. pub fn enabled(&self) -> Option<bool> { self.skip.map(|x| !x) } } /// Create `Each` from an attribute's `Meta`. /// /// Two formats are supported: /// /// * `each = "..."`, which provides the name of the `each` setter and otherwise uses default values /// * `each(name = "...")`, which allows setting additional options on the `each` setter fn parse_each(meta: &Meta) -> darling::Result<Option<Each>> { if let Meta::NameValue(mnv) = meta { if let syn::Lit::Str(v) = &mnv.lit { v.parse::<Ident>() .map(Each::from) .map(Some) .map_err(|_| darling::Error::unknown_value(&v.value()).with_span(v)) } else { Err(darling::Error::unexpected_lit_type(&mnv.lit)) } } else { Each::from_meta(meta).map(Some) } } /// The `setter` meta item on fields in the input type. /// Unlike the `setter` meta item at the struct level, this allows specific /// name overrides. #[derive(Debug, Clone, Default, FromMeta)] #[darling(default)] pub struct FieldLevelSetter { prefix: Option<Ident>, name: Option<Ident>, into: Option<bool>, strip_option: Option<bool>, skip: Option<bool>, custom: Option<bool>, #[darling(with = "parse_each")] each: Option<Each>, } impl FieldLevelSetter { /// Get whether the setter should be emitted. The rules are the same as /// for `field_enabled`, except we only skip the setter if `setter(custom)` is present. 
pub fn setter_enabled(&self) -> Option<bool> { if self.custom.is_some() { return self.custom.map(|x| !x); } self.field_enabled() } /// Get whether or not this field-level setter indicates a setter and /// field should be emitted. The setter shorthand rules are that the /// presence of a `setter` with _any_ properties set forces the setter /// to be emitted. pub fn field_enabled(&self) -> Option<bool> { if self.skip.is_some() { return self.skip.map(|x| !x); } if self.prefix.is_some() || self.name.is_some() || self.into.is_some() || self.strip_option.is_some() || self.each.is_some() { return Some(true); } None } } /// `derive_builder` allows the calling code to use `setter` as a word to enable /// setters when they've been disabled at the struct level. /// `darling` doesn't provide that out of the box, so we read the user input /// into this enum then convert it into the `FieldLevelSetter`. #[derive(Debug, Clone)] enum FieldSetterMeta { /// The keyword in isolation. /// This is equivalent to `setter(skip = false)`. Shorthand, Longhand(FieldLevelSetter), } impl From<FieldSetterMeta> for FieldLevelSetter { fn from(v: FieldSetterMeta) -> Self { match v { FieldSetterMeta::Shorthand => FieldLevelSetter { skip: Some(false), ..Default::default() }, FieldSetterMeta::Longhand(val) => val, } } } impl FromMeta for FieldSetterMeta { fn from_word() -> darling::Result<Self> { Ok(FieldSetterMeta::Shorthand) } fn from_meta(value: &syn::Meta) -> darling::Result<Self> { if let syn::Meta::Path(_) = *value { FieldSetterMeta::from_word() } else { FieldLevelSetter::from_meta(value).map(FieldSetterMeta::Longhand) } } } /// Data extracted from the fields of the input struct. #[derive(Debug, Clone, FromField)] #[darling(attributes(builder), forward_attrs(doc, cfg, allow))] pub struct Field { ident: Option<Ident>, attrs: Vec<Attribute>, ty: syn::Type, /// Field-level override for builder pattern. /// Note that setting this may force the builder to derive `Clone`. 
#[darling(default)] pattern: Option<BuilderPattern>, #[darling(default)] public: Flag, #[darling(default)] private: Flag, // See the documentation for `FieldSetterMeta` to understand how `darling` // is interpreting this field. #[darling(default, map = "FieldSetterMeta::into")] setter: FieldLevelSetter, /// The value for this field if the setter is never invoked. /// /// A field can get its default one of three ways: /// /// 1. An explicit `default = "..."` expression /// 2. An explicit `default` word, in which case the field type's `Default::default()` /// value is used /// 3. Inherited from the field's value in the struct's `default` value. /// /// This property only captures the first two, the third is computed in `FieldWithDefaults`. #[darling(default)] default: Option<DefaultExpression>, #[darling(default)] try_setter: Flag, #[darling(default)] field: FieldMeta, } impl FlagVisibility for Field { fn public(&self) -> &Flag { &self.public } fn private(&self) -> &Flag { &self.private } } #[derive(Debug, Clone, FromDeriveInput)] #[darling( attributes(builder), forward_attrs(doc, cfg, allow), supports(struct_named) )] pub struct Options { ident: Ident, // These are currently unused, but that means the generated builder cannot have // inherited the cfg or allow attributes from the base struct. // see https://github.com/colin-kiegel/rust-derive-builder/issues/222 // attrs: Vec<Attribute>, vis: Visibility, generics: Generics, /// The name of the generated builder. Defaults to `#{ident}Builder`. #[darling(default)] name: Option<Ident>, #[darling(default)] pattern: BuilderPattern, #[darling(default)] build_fn: BuildFn, /// Additional traits to derive on the builder. #[darling(default)] derive: PathList, /// Setter options applied to all field setters in the struct. 
#[darling(default)] setter: StructLevelSetter, /// Struct-level value to use in place of any unfilled fields #[darling(default)] default: Option<DefaultExpression>, #[darling(default)] public: Flag, #[darling(default)] private: Flag, /// The parsed body of the derived struct. data: darling::ast::Data<darling::util::Ignored, Field>, #[darling(default)] no_std: Flag, /// When present, emit additional fallible setters alongside each regular /// setter. #[darling(default)] try_setter: Flag, #[darling(default)] field: FieldMeta, #[darling(skip, default)] deprecation_notes: DeprecationNotes, } impl FlagVisibility for Options { fn public(&self) -> &Flag { &self.public } fn private(&self) -> &Flag { &self.private } } /// Accessors for parsed properties. impl Options { pub fn builder_ident(&self) -> Ident { if let Some(ref custom) = self.name { return custom.clone(); } syn::parse_str(&format!("{}Builder", self.ident)) .expect("Struct name with Builder suffix should be an ident") } pub fn builder_error_ident(&self) -> Path { if let Some(existing) = self.build_fn.error.as_ref() { existing.clone() } else if let Some(ref custom) = self.name { format_ident!("{}Error", custom).into() } else { format_ident!("{}BuilderError", self.ident).into() } } /// The visibility of the builder struct. /// If a visibility was declared in attributes, that will be used; /// otherwise the struct's own visibility will be used. pub fn builder_vis(&self) -> Visibility { self.as_expressed_vis().unwrap_or_else(|| self.vis.clone()) } /// Get the visibility of the emitted `build` method. /// This defaults to the visibility of the parent builder, but can be overridden. 
pub fn build_method_vis(&self) -> Visibility { self.build_fn .as_expressed_vis() .unwrap_or_else(|| self.builder_vis()) } pub fn raw_fields(&self) -> Vec<&Field> { self.data .as_ref() .take_struct() .expect("Only structs supported") .fields } /// A builder requires `Clone` to be derived if its build method or any of its setters /// use the mutable or immutable pattern. pub fn requires_clone(&self) -> bool { self.pattern.requires_clone() || self.fields().any(|f| f.pattern().requires_clone()) } /// Get an iterator over the input struct's fields which pulls fallback /// values from struct-level settings. pub fn fields(&self) -> FieldIter { FieldIter(self, self.raw_fields().into_iter()) } pub fn field_count(&self) -> usize { self.raw_fields().len() } } /// Converters to codegen structs impl Options { pub fn as_builder(&self) -> Builder { Builder { enabled: true, ident: self.builder_ident(), pattern: self.pattern, derives: &self.derive, generics: Some(&self.generics), visibility: self.builder_vis(), fields: Vec::with_capacity(self.field_count()), field_initializers: Vec::with_capacity(self.field_count()), functions: Vec::with_capacity(self.field_count()), generate_error: self.build_fn.error.is_none(), must_derive_clone: self.requires_clone(), doc_comment: None, deprecation_notes: Default::default(), std: { let no_std: bool = self.no_std.into(); !no_std }, } } pub fn as_build_method(&self) -> BuildMethod { let (_, ty_generics, _) = self.generics.split_for_impl(); BuildMethod { enabled: !self.build_fn.skip, ident: &self.build_fn.name, visibility: self.build_method_vis(), pattern: self.pattern, target_ty: &self.ident, target_ty_generics: Some(ty_generics), error_ty: self.builder_error_ident(), initializers: Vec::with_capacity(self.field_count()), doc_comment: None, default_struct: self.default.as_ref(), validate_fn: self.build_fn.validate.as_ref(), } } } /// Accessor for field data which can pull through options from the parent /// struct. 
/// A single parsed [`Field`] bundled with its parent [`Options`], so that
/// field-level settings can transparently fall back to struct-level defaults.
pub struct FieldWithDefaults<'a> {
    parent: &'a Options,
    field: &'a Field,
}

/// Accessors for parsed properties, with transparent pull-through from the
/// parent struct's configuration.
impl<'a> FieldWithDefaults<'a> {
    /// Check if this field should emit a setter.
    pub fn setter_enabled(&self) -> bool {
        // Field-level `setter(...)` takes precedence over the struct-level
        // `setter(skip)` state; setters are enabled by default.
        self.field
            .setter
            .setter_enabled()
            .or_else(|| self.parent.setter.enabled())
            .unwrap_or(true)
    }

    /// Check if this field should be stored in the builder struct at all.
    /// Same fallback chain as `setter_enabled`, but driven by `field_enabled`
    /// on the field-level setter options.
    pub fn field_enabled(&self) -> bool {
        self.field
            .setter
            .field_enabled()
            .or_else(|| self.parent.setter.enabled())
            .unwrap_or(true)
    }

    /// Check if this field should emit a fallible setter.
    /// This depends on the `TryFrom` trait, which hasn't yet stabilized.
    pub fn try_setter(&self) -> bool {
        // `try_setter` is additive: either the field or the struct may opt in.
        self.field.try_setter.is_some() || self.parent.try_setter.is_some()
    }

    /// Get the prefix that should be applied to the field name to produce
    /// the setter ident, if any.
    pub fn setter_prefix(&self) -> Option<&Ident> {
        self.field
            .setter
            .prefix
            .as_ref()
            .or(self.parent.setter.prefix.as_ref())
    }

    /// Get the ident of the emitted setter method
    pub fn setter_ident(&self) -> syn::Ident {
        // Precedence: explicit `setter(name = "...")` override, then
        // `{prefix}_{field}`, then the bare field name.
        if let Some(ref custom) = self.field.setter.name {
            return custom.clone();
        }
        let ident = &self.field.ident;
        if let Some(ref prefix) = self.setter_prefix() {
            // `unwrap` relies on named fields; tuple structs are rejected in
            // `field_ident` below.
            return syn::parse_str(&format!("{}_{}", prefix, ident.as_ref().unwrap())).unwrap();
        }
        ident.clone().unwrap()
    }

    /// Checks if the emitted setter should be generic over types that impl
    /// `Into<FieldType>`.
    pub fn setter_into(&self) -> bool {
        self.field
            .setter
            .into
            .or(self.parent.setter.into)
            .unwrap_or_default()
    }

    /// Checks if the emitted setter should strip the wrapper Option over types that impl
    /// `Option<FieldType>`.
    pub fn setter_strip_option(&self) -> bool {
        self.field
            .setter
            .strip_option
            .or(self.parent.setter.strip_option)
            .unwrap_or_default()
    }

    /// Get the visibility of the emitted setter, if there will be one.
    pub fn setter_vis(&self) -> Visibility {
        // Explicit field-level visibility wins, then struct-level; setters
        // default to `pub` when neither is declared.
        self.field
            .as_expressed_vis()
            .or_else(|| self.parent.as_expressed_vis())
            .unwrap_or_else(|| syn::parse_str("pub").unwrap())
    }

    /// Get the ident of the input field. This is also used as the ident of the
    /// emitted field.
    pub fn field_ident(&self) -> &syn::Ident {
        self.field
            .ident
            .as_ref()
            .expect("Tuple structs are not supported")
    }

    /// Visibility of the emitted builder field: field-level `field(...)`
    /// visibility, then struct-level, defaulting to private (inherited).
    pub fn field_vis(&self) -> Visibility {
        self.field
            .field
            .as_expressed_vis()
            .or_else(|| self.parent.field.as_expressed_vis())
            .unwrap_or(Visibility::Inherited)
    }

    /// Builder pattern for this field, falling back to the struct-level
    /// pattern when the field does not override it.
    pub fn pattern(&self) -> BuilderPattern {
        self.field.pattern.unwrap_or(self.parent.pattern)
    }

    /// True when the field has no explicit default of its own but the struct
    /// declares a struct-level `default` to draw the value from.
    pub fn use_parent_default(&self) -> bool {
        self.field.default.is_none() && self.parent.default.is_some()
    }

    /// Deprecation notes accumulated on the parent options.
    pub fn deprecation_notes(&self) -> &DeprecationNotes {
        &self.parent.deprecation_notes
    }
}

/// Converters to codegen structs
impl<'a> FieldWithDefaults<'a> {
    /// Returns a `Setter` according to the options.
    pub fn as_setter(&'a self) -> Setter<'a> {
        Setter {
            setter_enabled: self.setter_enabled(),
            try_setter: self.try_setter(),
            visibility: self.setter_vis(),
            pattern: self.pattern(),
            attrs: &self.field.attrs,
            ident: self.setter_ident(),
            field_ident: self.field_ident(),
            field_type: &self.field.ty,
            generic_into: self.setter_into(),
            strip_option: self.setter_strip_option(),
            deprecation_notes: self.deprecation_notes(),
            each: self.field.setter.each.as_ref(),
        }
    }

    /// Returns an `Initializer` according to the options.
    ///
    /// # Panics
    ///
    /// if `default_expression` can not be parsed as `Block`.
    pub fn as_initializer(&'a self) -> Initializer<'a> {
        Initializer {
            field_enabled: self.field_enabled(),
            field_ident: self.field_ident(),
            builder_pattern: self.pattern(),
            default_value: self.field.default.as_ref(),
            use_default_struct: self.use_parent_default(),
            // Span of a custom `build_fn(error = "...")` type, used for
            // error reporting against the user's own type.
            custom_error_type_span: self
                .parent
                .build_fn
                .error
                .as_ref()
                .map(|err_ty| err_ty.span()),
        }
    }

    /// Returns a `BuilderField` describing how this field appears in the
    /// generated builder struct.
    pub fn as_builder_field(&'a self) -> BuilderField<'a> {
        BuilderField {
            field_ident: self.field_ident(),
            field_type: &self.field.ty,
            field_enabled: self.field_enabled(),
            field_visibility: self.field_vis(),
            attrs: &self.field.attrs,
        }
    }
}

/// Iterator adapter pairing each raw `Field` with the parent `Options`,
/// yielding `FieldWithDefaults` values.
pub struct FieldIter<'a>(&'a Options, IntoIter<&'a Field>);

impl<'a> Iterator for FieldIter<'a> {
    type Item = FieldWithDefaults<'a>;

    fn next(&mut self) -> Option<Self::Item> {
        self.1.next().map(|field| FieldWithDefaults {
            parent: self.0,
            field,
        })
    }
}
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use assert_matches::assert_matches;
use exonum_crypto::KeyPair;
use exonum_merkledb::{
    access::{AccessExt, CopyAccessExt},
    migration::Migration,
    HashTag, ObjectHash, SystemSchema, TemporaryDB,
};

use std::time::Duration;

use super::{
    panic, remove_local_migration_result, rollback_migration, thread, Arc, ArtifactId, Blockchain,
    Database, Dispatcher, ExecutionError, Fork, Hash, HashMap, InstanceId, InstanceMigration,
    InstanceSpec, InstanceState, InstanceStatus, Mailbox, MigrationContext, MigrationScript,
    MigrationThread, MigrationType, Runtime, RuntimeFeature, Snapshot, Version,
};
use crate::{
    blockchain::{ApiSender, Block, BlockParams, BlockchainMut},
    helpers::{Height, ValidatorId},
    runtime::{
        execution_context::TopLevelContext,
        migrations::{InitMigrationError, MigrationError},
        oneshot::Receiver,
        BlockchainData, CoreError, DispatcherSchema, ErrorMatch, ExecutionContext, MethodId,
        RuntimeIdentifier, SnapshotExt, WellKnownRuntime,
    },
};

/// Base duration the stub migration scripts sleep for; tests below wait in
/// multiples of this delay to synchronize with the scripts.
const DELAY: Duration = Duration::from_millis(150);

/// Stub runtime that selects a migration script by artifact name (see
/// `Runtime::migrate` below); all other runtime hooks are trivial no-ops.
#[derive(Default, Debug, Clone)]
struct MigrationRuntime {
    /// Flag to run good or erroneous migration script for `good-or-not-good` artifact.
    run_good_script: bool,
}

impl MigrationRuntime {
    /// Constructs the runtime with the given `good-or-not-good` script flag.
    fn with_script_flag(flag: bool) -> Self {
        Self {
            run_good_script: flag,
        }
    }
}

impl WellKnownRuntime for MigrationRuntime {
    const ID: u32 = 2;
}

impl Runtime for MigrationRuntime {
    // We use service freezing in some tests.
    fn is_supported(&self, feature: &RuntimeFeature) -> bool {
        match feature {
            RuntimeFeature::FreezingServices => true,
        }
    }

    // Artifact deployment always succeeds immediately.
    fn deploy_artifact(&mut self, _artifact: ArtifactId, _deploy_spec: Vec<u8>) -> Receiver {
        Receiver::with_result(Ok(()))
    }

    fn is_artifact_deployed(&self, _id: &ArtifactId) -> bool {
        true
    }

    fn initiate_adding_service(
        &self,
        _context: ExecutionContext<'_>,
        _artifact: &ArtifactId,
        _parameters: Vec<u8>,
    ) -> Result<(), ExecutionError> {
        Ok(())
    }

    fn initiate_resuming_service(
        &self,
        _context: ExecutionContext<'_>,
        _artifact: &ArtifactId,
        _parameters: Vec<u8>,
    ) -> Result<(), ExecutionError> {
        Ok(())
    }

    fn update_service_status(&mut self, _snapshot: &dyn Snapshot, _state: &InstanceState) {}

    /// Resolves a migration script based on the target artifact's name.
    /// Returns `Ok(None)` for fast-forward cases and
    /// `Err(InitMigrationError::NotSupported)` for unknown artifacts.
    fn migrate(
        &self,
        new_artifact: &ArtifactId,
        data_version: &Version,
    ) -> Result<Option<MigrationScript>, InitMigrationError> {
        // By default a script migrates data up to the target's minor version
        // (patch component zeroed); the `complex` artifact overrides this
        // per migration stage.
        let mut end_version = new_artifact.version.clone();
        end_version.patch = 0;

        let script = match new_artifact.name.as_str() {
            "good" => simple_delayed_migration,
            "complex" => {
                // Two-stage migration: 0.x -> 0.2.0 -> 0.3.0, chosen by the
                // current data version and the target artifact version.
                let version1 = Version::new(0, 2, 0);
                let version2 = Version::new(0, 3, 0);
                if *data_version < version1 {
                    end_version = version1;
                    complex_migration_part1
                } else if *data_version < version2 && new_artifact.version >= version2 {
                    end_version = version2;
                    complex_migration_part2
                } else {
                    return Ok(None);
                }
            }
            "not-good" => erroneous_migration,
            "bad" => panicking_migration,
            "with-state" => migration_modifying_state_hash,
            "none" => return Ok(None),
            "good-or-not-good" => {
                if self.run_good_script {
                    simple_delayed_migration
                } else {
                    erroneous_migration
                }
            }
            _ => return Err(InitMigrationError::NotSupported),
        };
        let script = MigrationScript::new(script, end_version);
        Ok(Some(script))
    }

    fn execute(
        &self,
        _context: ExecutionContext<'_>,
        _method_id: MethodId,
        _arguments: &[u8],
    ) -> Result<(), ExecutionError> {
        Ok(())
    }

    fn before_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
        Ok(())
    }

    fn after_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
        Ok(())
    }

    fn after_commit(&mut self, _snapshot: &dyn Snapshot, _mailbox: &mut Mailbox) {}
}

/// Migration script that sleeps for `DELAY` and then succeeds.
fn simple_delayed_migration(_ctx: &mut MigrationContext) -> Result<(), MigrationError> {
    thread::sleep(DELAY);
    Ok(())
}

/// Migration script that sleeps for `DELAY` and then fails with an error.
fn erroneous_migration(_ctx: &mut MigrationContext) -> Result<(), MigrationError> {
    thread::sleep(DELAY);
    Err(MigrationError::new("This migration is unsuccessful!"))
}

/// Migration script that sleeps for `DELAY` and then panics.
fn panicking_migration(_ctx: &mut MigrationContext) -> Result<(), MigrationError> {
    thread::sleep(DELAY);
    panic!("This migration is unsuccessful!");
}

/// Migration script that merges intermediate data twice, changing the
/// migration state hash on each merge; used by abort-timing tests.
fn migration_modifying_state_hash(ctx: &mut MigrationContext) -> Result<(), MigrationError> {
    for i in 1_u32..=2 {
        ctx.helper.new_data().get_proof_entry("entry").set(i);
        thread::sleep(DELAY / 2);
        ctx.helper.merge()?;
    }
    Ok(())
}

/// First stage of the `complex` migration (targets data version 0.2.0).
fn complex_migration_part1(ctx: &mut MigrationContext) -> Result<(), MigrationError> {
    assert!(ctx.data_version < Version::new(0, 2, 0));
    ctx.helper.new_data().get_proof_entry("entry").set(1_u32);
    Ok(())
}

/// Second stage of the `complex` migration (0.2.x -> 0.3.0).
fn complex_migration_part2(ctx: &mut MigrationContext) -> Result<(), MigrationError> {
    assert!(ctx.data_version >= Version::new(0, 2, 0));
    assert!(ctx.data_version < Version::new(0, 3, 0));
    ctx.helper.new_data().get_proof_entry("entry").set(2_u32);
    Ok(())
}

/// Policy describing how far a test lets a local migration result progress
/// before making its assertions.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum LocalResult {
    None,
    InMemory,
    Saved,
    SavedWithNodeRestart,
}

/// Test rig encapsulating typical tasks for migration tests, such as artifact deployment
/// and service instantiation.
#[derive(Debug)]
struct Rig {
    blockchain: BlockchainMut,
    // Next instance id to assign; starts at 100 (see `with_db_and_flag`),
    // which is why error messages below mention `100:good`.
    next_service_id: InstanceId,
}

impl Rig {
    /// Creates a rig over a fresh in-memory DB with `run_good_script = false`.
    fn new() -> Self {
        Self::with_db_and_flag(Arc::new(TemporaryDB::new()), false)
    }

    /// Creates a rig over the given DB; `flag` selects the script used for
    /// the `good-or-not-good` artifact.
    fn with_db_and_flag(db: Arc<TemporaryDB>, flag: bool) -> Self {
        let blockchain = Blockchain::new(
            db as Arc<dyn Database>,
            KeyPair::random(),
            ApiSender::closed(),
        );
        let blockchain = blockchain
            .into_mut_with_dummy_config()
            .with_runtime(MigrationRuntime::with_script_flag(flag))
            .build();
        Self {
            blockchain,
            next_service_id: 100,
        }
    }

    /// Computes expected state hash of a migration.
    fn migration_hash(&self, indexes: &[(&str, Hash)]) -> Hash {
        // Mirrors the aggregation performed for migrated data: a proof map
        // from index name to index hash, hashed as a whole.
        let fork = self.blockchain.fork();
        let mut aggregator = fork.get_proof_map::<_, str, Hash>("_aggregator");
        for &(index_name, hash) in indexes {
            aggregator.put(index_name, hash);
        }
        aggregator.object_hash()
    }

    /// Emulates node stopping.
    fn stop(self) -> Blockchain {
        self.blockchain.immutable_view()
    }

    /// Emulates node restart by recreating the dispatcher.
    fn restart(&mut self) {
        let blockchain = self.blockchain.as_ref().clone();
        let blockchain = blockchain
            .into_mut_with_dummy_config()
            .with_runtime(MigrationRuntime::default())
            .build();
        self.blockchain = blockchain;
    }

    /// Mutable access to the dispatcher under test.
    fn dispatcher(&mut self) -> &mut Dispatcher {
        self.blockchain.dispatcher()
    }

    /// Currently running migration threads, keyed by service instance name.
    fn migration_threads(&mut self) -> &HashMap<String, MigrationThread> {
        &self.dispatcher().migrations.threads
    }

    /// Asserts that no migration scripts are currently being executed.
    fn assert_no_migration_threads(&mut self) {
        assert!(self.migration_threads().is_empty());
    }

    /// Waits for migration scripts to finish according to the specified policy.
    fn wait_migration_threads(&mut self, local_result: LocalResult) {
        if local_result == LocalResult::None {
            // Don't wait at all.
        } else {
            // Wait for the script to finish.
            thread::sleep(DELAY * 3);
            if local_result == LocalResult::InMemory {
                // Keep the local result in memory.
            } else {
                // Commit a block so the local result is persisted.
                self.create_block(self.blockchain.fork());
                assert!(self.dispatcher().migrations.threads.is_empty());
                if local_result == LocalResult::SavedWithNodeRestart {
                    self.restart();
                }
            }
        }
    }

    /// Commits a block executing no transactions and returns it.
    fn create_block(&mut self, fork: Fork) -> Block {
        let block_params = BlockParams::new(ValidatorId(0), Height(100), &[]);
        let patch = self
            .blockchain
            .create_patch_inner(fork, &block_params, &[], &());
        self.blockchain.commit(patch, vec![]).unwrap();
        self.blockchain.as_ref().last_block()
    }

    /// Deploys an artifact in the stub runtime and commits a block.
    fn deploy_artifact(&mut self, name: &str, version: Version) -> ArtifactId {
        let artifact = ArtifactId::from_raw_parts(MigrationRuntime::ID, name.into(), version);
        let fork = self.blockchain.fork();
        Dispatcher::commit_artifact(&fork, &artifact, vec![]);
        self.create_block(fork);
        artifact
    }

    /// Instantiates a service for the artifact and commits a block; ids are
    /// assigned sequentially from `next_service_id`.
    fn initialize_service(&mut self, artifact: ArtifactId, name: &str) -> InstanceSpec {
        let service = InstanceSpec::from_raw_parts(self.next_service_id, name.to_owned(), artifact);
        self.next_service_id += 1;
        let mut fork = self.blockchain.fork();
        TopLevelContext::for_block_call(self.dispatcher(), &mut fork, service.as_descriptor())
            .call(|mut ctx| ctx.initiate_adding_service(service.clone(), vec![]))
            .expect("`initiate_adding_service` failed");
        self.create_block(fork);
        service
    }

    /// Stops the service and commits a block.
    fn stop_service(&mut self, spec: &InstanceSpec) {
        let fork = self.blockchain.fork();
        Dispatcher::initiate_stopping_service(&fork, spec.id).unwrap();
        self.create_block(fork);
    }

    /// Freezes the service and commits a block.
    fn freeze_service(&mut self, spec: &InstanceSpec) {
        let fork = self.blockchain.fork();
        self.dispatcher()
            .initiate_freezing_service(&fork, spec.id)
            .unwrap();
        self.create_block(fork);
    }
}

/// Drives a full async migration of the `good` artifact, first taking the
/// service out of the active state either by freezing or by stopping it.
fn test_migration_workflow(freeze_service: bool) {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("good", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("good", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact.clone(), "good");

    // Since service is not stopped, the migration should fail.
    let fork = rig.blockchain.fork();
    let err = rig
        .dispatcher()
        .initiate_migration(&fork, new_artifact.clone(), &service.name)
        .unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::InvalidServiceTransition)
            .with_description_containing("Data migration cannot be initiated")
    );

    // Stop or freeze the service.
    if freeze_service {
        rig.freeze_service(&service);
    } else {
        rig.stop_service(&service);
    }

    // Now, the migration start should succeed.
    let fork = rig.blockchain.fork();
    let ty = rig
        .dispatcher()
        .initiate_migration(&fork, new_artifact.clone(), &service.name)
        .unwrap();
    assert_matches!(ty, MigrationType::Async);
    // Migration scripts should not start executing immediately, but only on block commit.
    assert!(!rig.migration_threads().contains_key(&service.name));

    // Check that the migration target cannot be unloaded.
    let err = Dispatcher::unload_artifact(&fork, &new_artifact).unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::CannotUnloadArtifact)
            .with_description_containing("`100:good` references it as the data migration target")
    );

    rig.create_block(fork);
    // Check that the migration was initiated.
    assert!(rig.migration_threads().contains_key(&service.name));

    // Check that the old service data can be accessed.
    let snapshot = rig.blockchain.snapshot();
    assert!(snapshot.for_service(service.id).is_some());

    // Check that it is now impossible to unload either the old or the new artifact.
    let fork = rig.blockchain.fork();
    let err = Dispatcher::unload_artifact(&fork, &old_artifact).unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::CannotUnloadArtifact)
            .with_description_containing("`100:good` references it as the current artifact")
    );
    let err = Dispatcher::unload_artifact(&fork, &new_artifact).unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::CannotUnloadArtifact)
            .with_description_containing("`100:good` references it as the data migration target")
    );

    // Create several more blocks before the migration is complete and check that
    // we don't spawn multiple migration scripts at once (this check is performed in `Migrations`).
    for _ in 0..3 {
        rig.create_block(rig.blockchain.fork());
    }

    // Wait until the migration script is completed and check that its result is recorded.
    thread::sleep(DELAY * 3);
    rig.create_block(rig.blockchain.fork());
    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    let state = schema.get_instance(service.id).unwrap();
    let end_version = match state.status.unwrap() {
        InstanceStatus::Migrating(migration) => migration.end_version,
        status => panic!("Unexpected service status: {:?}", status),
    };
    // `good` maps to `simple_delayed_migration`, whose end version is the
    // target's minor version (0.5.2 with patch zeroed).
    assert_eq!(end_version, Version::new(0, 5, 0));
    let res = schema.local_migration_result(&service.name).unwrap();
    assert_eq!(res.0, Ok(HashTag::empty_map_hash()));
    assert!(!rig.migration_threads().contains_key(&service.name));

    // Create couple more blocks to check that the migration script is not launched again,
    // and the migration result is not overridden (these checks are `debug_assert`s
    // in the `Dispatcher` code).
    for _ in 0..3 {
        rig.create_block(rig.blockchain.fork());
    }
    assert!(!rig.migration_threads().contains_key(&service.name));
}

/// Tests basic workflow of migration initiation.
#[test]
fn migration_workflow() {
    // Stopped-service variant.
    test_migration_workflow(false);
}

#[test]
fn migration_workflow_with_frozen_service() {
    // Frozen-service variant.
    test_migration_workflow(true);
}

/// Tests that migration cannot target an artifact marked for unloading.
#[test]
fn migration_after_artifact_unloading() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("good", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("good", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "good");
    // Stop the service.
    rig.stop_service(&service);

    // Mark the new artifact for unload. This is valid because so far, no services are
    // associated with it.
    let fork = rig.blockchain.fork();
    Dispatcher::unload_artifact(&fork, &new_artifact).unwrap();
    // However, unloading means that we cannot initiate migration to the artifact.
    let err = rig
        .dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap_err();
    let expected_msg =
        "artifact `2:good:0.5.2` for data migration of service `100:good` is not active";
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::ArtifactNotDeployed)
            .with_description_containing(expected_msg)
    );
}

/// Drives a fast-forward migration: the `none` artifact provides no script,
/// so the version bump happens without running a migration thread.
fn test_fast_forward_migration(freeze_service: bool) {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("none", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("none", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact.clone(), "service");

    if freeze_service {
        rig.freeze_service(&service);
    } else {
        rig.stop_service(&service);
    }
    let fork = rig.blockchain.fork();
    let ty = rig
        .dispatcher()
        .initiate_migration(&fork, new_artifact.clone(), &service.name)
        .unwrap();
    assert_matches!(ty, MigrationType::FastForward);
    rig.create_block(fork);

    // Service version should be updated when the block is merged.
    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    let state = schema.get_instance(service.id).unwrap();
    assert_eq!(state.status, Some(InstanceStatus::Stopped));
    assert_eq!(state.pending_status, None);
    assert_eq!(state.spec.artifact, new_artifact);
    assert_eq!(state.data_version, None);

    // Check that the old artifact can now be unloaded.
    let fork = rig.blockchain.fork();
    Dispatcher::unload_artifact(&fork, &old_artifact).unwrap();
    rig.create_block(fork);
    let snapshot = rig.blockchain.snapshot();
    assert!(DispatcherSchema::new(&snapshot)
        .get_artifact(&old_artifact)
        .is_none());
}

/// Tests fast-forwarding a migration.
#[test]
fn fast_forward_migration() {
    test_fast_forward_migration(false);
}

#[test]
fn fast_forward_migration_with_service_freezing() {
    test_fast_forward_migration(true);
}

/// Tests checks performed by the dispatcher during migration initiation.
#[test]
fn migration_immediate_errors() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("good", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("good", "0.5.2".parse().unwrap());
    let unrelated_artifact = rig.deploy_artifact("unrelated", "1.0.1".parse().unwrap());
    let old_service = rig.initialize_service(old_artifact.clone(), "old");
    rig.stop_service(&old_service);
    let new_service = rig.initialize_service(new_artifact.clone(), "new");
    rig.stop_service(&new_service);

    let fork = rig.blockchain.fork();

    // Attempt to upgrade service to an unrelated artifact.
    let err = rig
        .dispatcher()
        .initiate_migration(&fork, unrelated_artifact, &old_service.name)
        .unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::CannotUpgradeService).with_any_description()
    );

    // Attempt to downgrade service.
    let err = rig
        .dispatcher()
        .initiate_migration(&fork, old_artifact, &new_service.name)
        .unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::CannotUpgradeService).with_any_description()
    );

    // Attempt to migrate to the same version.
    let err = rig
        .dispatcher()
        .initiate_migration(&fork, new_artifact.clone(), &new_service.name)
        .unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::CannotUpgradeService).with_any_description()
    );

    // Attempt to migrate unknown service.
    let err = rig
        .dispatcher()
        .initiate_migration(&fork, new_artifact, "bogus-service")
        .unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::IncorrectInstanceId)
            .with_description_containing("for non-existing service `bogus-service`")
    );

    // Attempt to migrate to unknown artifact.
    let unknown_artifact = ArtifactId::from_raw_parts(
        RuntimeIdentifier::Rust as _,
        "good".into(),
        Version::new(0, 6, 0),
    );
    let err = rig
        .dispatcher()
        .initiate_migration(&fork, unknown_artifact.clone(), &old_service.name)
        .unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::UnknownArtifactId).with_any_description()
    );

    // Mark the artifact as pending.
    Dispatcher::commit_artifact(&fork, &unknown_artifact, vec![]);
    let err = rig
        .dispatcher()
        .initiate_migration(&fork, unknown_artifact, &old_service.name)
        .unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::ArtifactNotDeployed).with_any_description()
    );
}

/// Tests that an unfinished migration script is restarted on node restart.
#[test]
fn migration_is_resumed_after_node_restart() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("good", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("good", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "good");
    rig.stop_service(&service);

    // Start migration.
    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap();
    rig.create_block(fork);

    // Emulate node restart. Note that the old migration thread will continue running
    // as a detached thread, but since `Dispatcher.migrations` is dropped, the migration
    // will be aborted.
    rig.restart();
    assert!(rig.migration_threads().contains_key(&service.name));

    thread::sleep(DELAY * 3);
    rig.create_block(rig.blockchain.fork());
    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    let res = schema.local_migration_result(&service.name).unwrap();
    assert_eq!(res.0, Ok(HashTag::empty_map_hash()));
}

/// Tests that migration scripts are timely aborted on node stop.
#[test]
fn migration_threads_are_timely_aborted() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("with-state", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("with-state", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "good");
    rig.stop_service(&service);

    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap();
    rig.create_block(fork);
    // Stop the node partway through the script's first merge interval.
    thread::sleep(DELAY * 2 / 3);

    let blockchain = rig.stop();
    thread::sleep(DELAY * 10);
    let snapshot = blockchain.snapshot();
    let migration = Migration::new(&service.name, &snapshot);
    // The `migration_modifying_state_hash` script should complete the 0 or 1 merge, but not
    // 2 merges.
    let val = migration
        .get_proof_entry::<_, u32>("entry")
        .get()
        .unwrap_or(0);
    assert!(val < 2);

    // New merges should not be added with time.
    thread::sleep(DELAY * 2);
    let snapshot = blockchain.snapshot();
    let migration = Migration::new(&service.name, &snapshot);
    let new_val = migration
        .get_proof_entry::<_, u32>("entry")
        .get()
        .unwrap_or(0);
    assert_eq!(val, new_val);
}

/// Tests that a completed migration script is not launched again.
#[test]
fn completed_migration_is_not_resumed_after_node_restart() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("good", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("good", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "good");
    rig.stop_service(&service);

    // Start migration.
let fork = rig.blockchain.fork(); rig.dispatcher() .initiate_migration(&fork, new_artifact, &service.name) .unwrap(); rig.create_block(fork); thread::sleep(DELAY * 3); rig.create_block(rig.blockchain.fork()); // Migration should be completed. rig.assert_no_migration_threads(); // Check that the local migration result is persisted. let snapshot = rig.blockchain.snapshot(); let schema = DispatcherSchema::new(&snapshot); assert!(schema.local_migration_result(&service.name).is_some()); // Therefore, the script should not resume after blockchain restart. rig.restart(); rig.assert_no_migration_threads(); } /// Tests that an error in a migration script is reflected in the local migration result. fn test_erroneous_migration(artifact_name: &str) { let mut rig = Rig::new(); let old_artifact = rig.deploy_artifact(artifact_name, "0.3.0".parse().unwrap()); let new_artifact = rig.deploy_artifact(artifact_name, "0.5.2".parse().unwrap()); let service = rig.initialize_service(old_artifact, "service"); rig.stop_service(&service); // Start migration. let fork = rig.blockchain.fork(); rig.dispatcher() .initiate_migration(&fork, new_artifact, &service.name) .unwrap(); rig.create_block(fork); // Wait for the migration script to complete. let res = loop { thread::sleep(DELAY * 3); rig.create_block(rig.blockchain.fork()); let snapshot = rig.blockchain.snapshot(); let schema = DispatcherSchema::new(&snapshot); if let Some(res) = schema.local_migration_result(&service.name) { break res; } }; assert!(res .0 .unwrap_err() .contains("This migration is unsuccessful!")); } #[test] fn migration_with_error() { test_erroneous_migration("not-good"); } #[test] fn migration_with_panic() { test_erroneous_migration("bad"); } /// Tests that concurrent migrations with the same artifact are independent. 
#[test]
fn concurrent_migrations_to_same_artifact() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("good", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("good", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact.clone(), "service");
    rig.stop_service(&service);
    let other_service = rig.initialize_service(old_artifact.clone(), "other-service");
    rig.stop_service(&other_service);
    let another_service = rig.initialize_service(old_artifact, "another-service");
    rig.stop_service(&another_service);

    // Place two migration starts in the same block.
    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact.clone(), &service.name)
        .unwrap();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact.clone(), &other_service.name)
        .unwrap();
    rig.create_block(fork);

    // Only the two initiated migrations should have running script threads.
    let threads = rig.migration_threads();
    assert!(threads.contains_key(&service.name));
    assert!(threads.contains_key(&other_service.name));
    assert!(!threads.contains_key(&another_service.name));

    // ...and one more in the following block.
    thread::sleep(DELAY * 2 / 3);
    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &another_service.name)
        .unwrap();
    rig.create_block(fork);
    assert!(rig.migration_threads().contains_key(&another_service.name));

    // Wait for first two migrations to finish.
    thread::sleep(DELAY / 2);
    rig.create_block(rig.blockchain.fork());
    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    let res = schema.local_migration_result(&service.name).unwrap();
    assert_eq!(res.0, Ok(HashTag::empty_map_hash()));
    let res = schema.local_migration_result(&other_service.name).unwrap();
    assert_eq!(res.0, Ok(HashTag::empty_map_hash()));

    // Wait for the third migration to finish.
    thread::sleep(DELAY);
    rig.create_block(rig.blockchain.fork());
    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    let res = schema
        .local_migration_result(&another_service.name)
        .unwrap();
    assert_eq!(res.0, Ok(HashTag::empty_map_hash()));
    rig.assert_no_migration_threads();
}

/// Tests that migration workflow changes state hash as expected.
#[test]
fn migration_influencing_state_hash() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("with-state", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("with-state", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "service");
    rig.stop_service(&service);

    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap();
    let state_hash = rig.create_block(fork).state_hash;

    for _ in 0..2 {
        // The sleeping interval is chosen to be larger than the interval of DB merges
        // in the migration script.
        thread::sleep(DELAY * 2 / 3);

        let fork = rig.blockchain.fork();
        // Check that we can access the old service data from outside.
        let blockchain_data = BlockchainData::new(&fork, "test");
        assert!(!blockchain_data
            .for_service(service.id)
            .unwrap()
            .get_proof_entry::<_, u32>("entry")
            .exists());
        // Check that the state during migration does not influence the default `state_hash`.
        let new_state_hash = rig.create_block(fork).state_hash;
        assert_eq!(state_hash, new_state_hash);
    }

    // After completion, the locally recorded migration hash must match the state hash
    // of the migrated data, and the aggregator must contain exactly the migrated entry.
    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    let res = schema.local_migration_result(&service.name).unwrap();
    let migration_hash = res.0.unwrap();
    let migration = Migration::new(&service.name, &snapshot);
    assert_eq!(migration_hash, migration.state_hash());
    let aggregator = migration.state_aggregator();
    assert_eq!(
        aggregator.keys().collect::<Vec<_>>(),
        vec!["service.entry".to_owned()]
    );
    assert_eq!(aggregator.get("service.entry"), Some(2_u32.object_hash()));
}

/// Tests the basic workflow of migration rollback.
#[test]
fn migration_rollback_workflow() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("good", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("good", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "good");
    rig.stop_service(&service);

    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap();
    rig.create_block(fork);

    // Wait until the migration is finished locally.
    thread::sleep(DELAY * 3);
    rig.create_block(rig.blockchain.fork());
    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    schema.local_migration_result(&service.name).unwrap();
    rig.assert_no_migration_threads();

    // Signal the rollback.
    let fork = rig.blockchain.fork();
    Dispatcher::rollback_migration(&fork, &service.name).unwrap();
    rig.create_block(fork);

    // Check that local migration result is erased.
    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    assert!(schema.local_migration_result(&service.name).is_none());
    let state = schema.get_instance(service.id).unwrap();
    assert_eq!(state.status, Some(InstanceStatus::Stopped));
    // The artifact version hasn't changed.
    assert_eq!(state.data_version, None);
}

/// Tests the checks performed by the dispatcher during migration rollback.
#[test]
fn migration_rollback_invariants() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("good", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("good", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "good");

    // Non-existing service.
    let fork = rig.blockchain.fork();
    let err = Dispatcher::rollback_migration(&fork, "bogus").unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::IncorrectInstanceId)
            .with_description_containing("Cannot rollback migration for unknown service `bogus`")
    );

    // Service is not stopped.
    let err = Dispatcher::rollback_migration(&fork, &service.name).unwrap_err();
    let no_migration_match = ErrorMatch::from_fail(&CoreError::NoMigration)
        .with_description_containing("it has no ongoing migration");
    assert_eq!(err, no_migration_match);
    rig.stop_service(&service);

    // Service is stopped, but there is no migration happening.
    let fork = rig.blockchain.fork();
    let err = Dispatcher::rollback_migration(&fork, &service.name).unwrap_err();
    assert_eq!(err, no_migration_match);

    // Start migration and commit its result, thus making the rollback impossible.
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap();
    rig.create_block(fork);
    let fork = rig.blockchain.fork();
    Dispatcher::commit_migration(&fork, &service.name, HashTag::empty_map_hash()).unwrap();
    // In the same block, we'll get an error because the service already has
    // a pending status update.
    let err = Dispatcher::rollback_migration(&fork, &service.name).unwrap_err();
    assert_eq!(err, ErrorMatch::from_fail(&CoreError::ServicePending));
    rig.create_block(fork);

    // ...In the next block, we'll get another error.
    let fork = rig.blockchain.fork();
    let err = Dispatcher::rollback_migration(&fork, &service.name).unwrap_err();
    assert_eq!(err, no_migration_match);
}

/// Tests that migration rollback aborts locally executed migration script.
#[test]
fn migration_rollback_aborts_migration_script() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("with-state", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("with-state", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "good");
    rig.stop_service(&service);

    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap();
    rig.create_block(fork);

    // Rollback the migration without waiting for the migration script to succeed locally.
    let fork = rig.blockchain.fork();
    Dispatcher::rollback_migration(&fork, &service.name).unwrap();
    rig.create_block(fork);

    // Neither a local result nor migration data should remain.
    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    assert!(schema.local_migration_result(&service.name).is_none());
    rig.assert_no_migration_threads();
    let migration = Migration::new(&service.name, &snapshot);
    assert!(!migration.get_proof_entry::<_, u32>("entry").exists());

    // Wait some time to ensure that script doesn't merge changes to the DB.
    thread::sleep(DELAY);
    let snapshot = rig.blockchain.snapshot();
    let migration = Migration::new(&service.name, &snapshot);
    assert!(!migration.get_proof_entry::<_, u32>("entry").exists());
}

/// Tests that migration rollback erases data created by the migration script.
#[test]
fn migration_rollback_erases_migration_data() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("with-state", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("with-state", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "good");
    rig.stop_service(&service);

    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap();
    rig.create_block(fork);

    // Wait until the migration is finished locally.
    thread::sleep(DELAY * 10);
    rig.create_block(rig.blockchain.fork());
    let snapshot = rig.blockchain.snapshot();
    let migration = Migration::new(&service.name, &snapshot);
    assert_eq!(migration.get_proof_entry::<_, u32>("entry").get(), Some(2));

    let fork = rig.blockchain.fork();
    Dispatcher::rollback_migration(&fork, &service.name).unwrap();
    rig.create_block(fork);

    // Migration data should be dropped now.
    let snapshot = rig.blockchain.snapshot();
    let migration = Migration::new(&service.name, &snapshot);
    assert!(!migration.get_proof_entry::<_, u32>("entry").exists());
}

/// Tests basic migration commit workflow.
#[test]
fn migration_commit_workflow() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("good", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("good", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "good");
    rig.stop_service(&service);

    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact.clone(), &service.name)
        .unwrap();
    rig.create_block(fork);

    // Wait until the migration is finished locally.
    thread::sleep(DELAY * 3);
    rig.create_block(rig.blockchain.fork());
    let fork = rig.blockchain.fork();
    Dispatcher::commit_migration(&fork, &service.name, HashTag::empty_map_hash()).unwrap();
    rig.create_block(fork);

    // Check that the local migration result agrees with the committed hash.
    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    let res = schema.local_migration_result(&service.name).unwrap();
    assert_eq!(res.0.unwrap(), HashTag::empty_map_hash());
    let state = schema.get_instance(service.id).unwrap();
    let expected_status = InstanceStatus::migrating(InstanceMigration::from_raw_parts(
        new_artifact,
        Version::new(0, 5, 0),
        Some(HashTag::empty_map_hash()),
    ));
    assert_eq!(state.status, Some(expected_status));
}

/// Tests checks performed by the dispatcher during migration commit.
#[test]
fn migration_commit_invariants() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("good", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("good", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "good");

    // Non-existing service.
    let fork = rig.blockchain.fork();
    let err = Dispatcher::commit_migration(&fork, "bogus", Hash::zero()).unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::IncorrectInstanceId)
            .with_description_containing("Cannot commit migration for unknown service `bogus`")
    );

    // Service is not stopped.
    let err = Dispatcher::commit_migration(&fork, &service.name, Hash::zero()).unwrap_err();
    let no_migration_match = ErrorMatch::from_fail(&CoreError::NoMigration)
        .with_description_containing("Cannot commit migration for service `100:good`");
    assert_eq!(err, no_migration_match);
    rig.stop_service(&service);

    // Service is stopped, but there is no migration happening.
    let fork = rig.blockchain.fork();
    let err = Dispatcher::commit_migration(&fork, &service.name, Hash::zero()).unwrap_err();
    assert_eq!(err, no_migration_match);

    // Start migration and commit its result, making the second commit impossible.
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap();
    rig.create_block(fork);
    let fork = rig.blockchain.fork();
    let migration_hash = HashTag::empty_map_hash();
    Dispatcher::commit_migration(&fork, &service.name, migration_hash).unwrap();
    // In the same block, we'll get an error because the service already has
    // a pending status update.
    let err = Dispatcher::commit_migration(&fork, &service.name, migration_hash).unwrap_err();
    assert_eq!(err, ErrorMatch::from_fail(&CoreError::ServicePending));
    rig.create_block(fork);

    // ...In the next block, we'll get another error.
    let fork = rig.blockchain.fork();
    let err = Dispatcher::commit_migration(&fork, &service.name, migration_hash).unwrap_err();
    assert_eq!(err, no_migration_match);
}

/// Tests that a migration commit after the migration script finished locally with an error
/// leads to node stopping.
fn test_migration_commit_with_local_error(
    rig: &mut Rig,
    local_result: LocalResult,
    artifact_name: &str,
) {
    let old_artifact = rig.deploy_artifact(artifact_name, "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact(artifact_name, "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "service");
    rig.stop_service(&service);

    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap();
    rig.create_block(fork);
    rig.wait_migration_threads(local_result);

    let fork = rig.blockchain.fork();
    Dispatcher::commit_migration(&fork, &service.name, Hash::zero()).unwrap();
    rig.create_block(fork); // << should panic
}

#[test]
#[should_panic(expected = "locally it has finished with an error: This migration is unsuccessful")]
fn migration_commit_with_local_error_blocking() {
    test_migration_commit_with_local_error(&mut Rig::new(), LocalResult::None, "not-good");
}

#[test]
#[should_panic(expected = "locally it has finished with an error: This migration is unsuccessful")]
fn migration_commit_with_local_error_in_memory() {
    test_migration_commit_with_local_error(&mut Rig::new(), LocalResult::InMemory, "not-good");
}

#[test]
#[should_panic(expected = "locally it has finished with an error: This migration is unsuccessful")]
fn migration_commit_with_local_error_saved() {
    test_migration_commit_with_local_error(&mut Rig::new(), LocalResult::Saved, "not-good");
}

#[test]
#[should_panic(expected = "locally it has finished with an error: This migration is unsuccessful")]
fn migration_commit_with_local_error_saved_and_node_restart() {
    test_migration_commit_with_local_error(
        &mut Rig::new(),
        LocalResult::SavedWithNodeRestart,
        "not-good",
    );
}

/// Tests that a failed migration can be retried after its local result is removed
/// and the script is switched to the successful mode.
#[test]
fn test_migration_restart() {
    let artifact_name = "good-or-not-good";
    let service_name = "service";
    let db = Arc::new(TemporaryDB::new());

    // Running migration that should fail.
    std::panic::catch_unwind(|| {
        // Set script flag to fail migration.
        let mut rig = Rig::with_db_and_flag(Arc::clone(&db), false);
        test_migration_commit_with_local_error(&mut rig, LocalResult::Saved, artifact_name);
    })
    .expect_err("Node should panic on unsuccessful migration commit");

    // Check that we have failed result locally.
    let snapshot = db.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    let res = schema
        .local_migration_result(service_name)
        .expect("Schema does not have local result");
    assert_eq!(res.0.unwrap_err(), "This migration is unsuccessful!");

    // Remove local migration result.
    let mut fork = db.fork();
    rollback_migration(&mut fork, service_name);
    remove_local_migration_result(&fork, service_name);
    db.merge_sync(fork.into_patch())
        .expect("Failed to merge patch after local migration result remove");

    // Check that local result is removed.
    let snapshot = db.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    assert!(schema.local_migration_result(service_name).is_none());

    // Set script flag to migrate successfully.
    let mut rig = Rig::with_db_and_flag(Arc::clone(&db), true);
    let fork = rig.blockchain.fork();
    Dispatcher::commit_migration(&fork, service_name, HashTag::empty_map_hash())
        .expect("Failed to commit migration");
    rig.create_block(fork);
    // Check that the migration script has finished.
    rig.assert_no_migration_threads();

    // The failed local result should have been replaced with the successful hash.
    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    let res = schema.local_migration_result(service_name).unwrap();
    assert_eq!(res.0.unwrap(), HashTag::empty_map_hash());

    // Check current instance migration status.
    let state = schema.get_instance(100).unwrap();
    let artifact = ArtifactId::from_raw_parts(
        MigrationRuntime::ID,
        artifact_name.to_string(),
        "0.5.2".parse().unwrap(),
    );
    let expected_status = InstanceStatus::migrating(InstanceMigration::from_raw_parts(
        artifact,
        Version::new(0, 5, 0),
        Some(HashTag::empty_map_hash()),
    ));
    assert_eq!(state.status, Some(expected_status));
}

/// Tests that a migration commit after the migration script finished locally with another hash
/// leads to node stopping.
fn test_migration_commit_with_differing_hash(local_result: LocalResult) { let mut rig = Rig::new(); let old_artifact = rig.deploy_artifact("good", "0.3.0".parse().unwrap()); let new_artifact = rig.deploy_artifact("good", "0.5.2".parse().unwrap()); let service = rig.initialize_service(old_artifact, "service"); rig.stop_service(&service); let fork = rig.blockchain.fork(); rig.dispatcher() .initiate_migration(&fork, new_artifact, &service.name) .unwrap(); rig.create_block(fork); rig.wait_migration_threads(local_result); let fork = rig.blockchain.fork(); Dispatcher::commit_migration(&fork, &service.name, Hash::zero()).unwrap(); rig.create_block(fork); // << should panic } #[test] #[should_panic(expected = "locally it has finished with another hash")] fn migration_commit_with_differing_hash_blocking() { test_migration_commit_with_differing_hash(LocalResult::None); } #[test] #[should_panic(expected = "locally it has finished with another hash")] fn migration_commit_with_differing_hash_in_memory() { test_migration_commit_with_differing_hash(LocalResult::InMemory); } #[test] #[should_panic(expected = "locally it has finished with another hash")] fn migration_commit_with_differing_hash_saved() { test_migration_commit_with_differing_hash(LocalResult::Saved); } #[test] #[should_panic(expected = "locally it has finished with another hash")] fn migration_commit_with_differing_hash_saved_and_node_restarted() { test_migration_commit_with_differing_hash(LocalResult::SavedWithNodeRestart); } /// Tests that committing a migration with a locally running migration script leads to the node /// waiting until the script is completed. 
#[test]
fn migration_commit_without_completing_script_locally() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("with-state", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("with-state", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "test");
    rig.stop_service(&service);

    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact.clone(), &service.name)
        .unwrap();
    rig.create_block(fork);

    // Compute migration hash using the knowledge about the end state of migrated data.
    let migration_hash = rig.migration_hash(&[("test.entry", 2_u32.object_hash())]);
    let fork = rig.blockchain.fork();
    Dispatcher::commit_migration(&fork, &service.name, migration_hash).unwrap();
    rig.create_block(fork);
    // Check that the migration script has finished.
    rig.assert_no_migration_threads();

    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    let state = schema.get_instance(service.id).unwrap();
    let expected_status = InstanceStatus::migrating(InstanceMigration::from_raw_parts(
        new_artifact,
        Version::new(0, 5, 0),
        Some(migration_hash),
    ));
    assert_eq!(state.status, Some(expected_status));

    // Flush the migration.
    let mut fork = rig.blockchain.fork();
    Dispatcher::flush_migration(&mut fork, &service.name).unwrap();
    let state_hash = rig.create_block(fork).state_hash;

    // The artifact version should be updated.
    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    let state = schema.get_instance(service.id).unwrap();
    assert_eq!(state.data_version, Some(Version::new(0, 5, 0)));
    assert_eq!(state.status, Some(InstanceStatus::Stopped));
    assert!(schema.local_migration_result(&service.name).is_none());

    // Check that service data has been updated.
    let entry = snapshot.get_proof_entry::<_, u32>("test.entry");
    assert_eq!(entry.get(), Some(2));
    // Check state aggregation.
    let aggregator = SystemSchema::new(&snapshot).state_aggregator();
    assert_eq!(aggregator.get("test.entry"), Some(2_u32.object_hash()));
    assert_eq!(aggregator.object_hash(), state_hash);
}

/// Tests that the migration workflow is applicable to a migration spanning multiple scripts.
#[test]
fn two_part_migration() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("complex", "0.1.1".parse().unwrap());
    let new_artifact = rig.deploy_artifact("complex", "0.3.7".parse().unwrap());
    let service = rig.initialize_service(old_artifact.clone(), "test");
    rig.stop_service(&service);

    // First part of migration.
    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact.clone(), &service.name)
        .unwrap();
    rig.create_block(fork);
    let migration_hash = rig.migration_hash(&[("test.entry", 1_u32.object_hash())]);
    let fork = rig.blockchain.fork();
    Dispatcher::commit_migration(&fork, &service.name, migration_hash).unwrap();
    rig.create_block(fork);
    let mut fork = rig.blockchain.fork();
    Dispatcher::flush_migration(&mut fork, &service.name).unwrap();
    rig.create_block(fork);

    // Check service data and metadata: the first script migrates to 0.2.0.
    let snapshot = rig.blockchain.snapshot();
    assert_eq!(
        snapshot.get_proof_entry::<_, u32>("test.entry").get(),
        Some(1)
    );
    let schema = DispatcherSchema::new(&snapshot);
    let instance_state = schema.get_instance(service.id).unwrap();
    assert_eq!(instance_state.data_version, Some(Version::new(0, 2, 0)));

    // The old artifact can now be unloaded, since it's no longer associated with the service.
    // In other words, the service cannot be started with the old artifact due to a different
    // data layout, so it can be removed from the blockchain.
    // (The fork is discarded below; we only check that unloading is possible.)
    let fork = rig.blockchain.fork();
    Dispatcher::unload_artifact(&fork, &old_artifact).unwrap();

    // Second part of migration.
    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact.clone(), &service.name)
        .unwrap();
    rig.create_block(fork);
    let migration_hash = rig.migration_hash(&[("test.entry", 2_u32.object_hash())]);
    let fork = rig.blockchain.fork();
    Dispatcher::commit_migration(&fork, &service.name, migration_hash).unwrap();
    rig.create_block(fork);
    let mut fork = rig.blockchain.fork();
    Dispatcher::flush_migration(&mut fork, &service.name).unwrap();
    rig.create_block(fork);

    // Check service data and metadata: the second script migrates to 0.3.0.
    let snapshot = rig.blockchain.snapshot();
    assert_eq!(
        snapshot.get_proof_entry::<_, u32>("test.entry").get(),
        Some(2)
    );
    let schema = DispatcherSchema::new(&snapshot);
    let instance_state = schema.get_instance(service.id).unwrap();
    assert_eq!(instance_state.data_version, Some(Version::new(0, 3, 0)));

    // Check that the new artifact can be unloaded.
    let fork = rig.blockchain.fork();
    Dispatcher::unload_artifact(&fork, &new_artifact).unwrap();
    rig.create_block(fork);
}

/// Same as `two_part_migration`, but the service is re-associated with an intermediate
/// artifact version between the two migration parts via a fast-forward migration.
#[test]
fn two_part_migration_with_intermediate_artifact() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("complex", "0.1.1".parse().unwrap());
    let intermediate_artifact = rig.deploy_artifact("complex", "0.2.2".parse().unwrap());
    let new_artifact = rig.deploy_artifact("complex", "0.3.7".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "test");
    rig.stop_service(&service);

    // First part of migration.
    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact.clone(), &service.name)
        .unwrap();
    rig.create_block(fork);
    let migration_hash = rig.migration_hash(&[("test.entry", 1_u32.object_hash())]);
    let fork = rig.blockchain.fork();
    Dispatcher::commit_migration(&fork, &service.name, migration_hash).unwrap();
    rig.create_block(fork);
    let mut fork = rig.blockchain.fork();
    Dispatcher::flush_migration(&mut fork, &service.name).unwrap();
    rig.create_block(fork);

    // Use a fast-forward migration to associate the service with an intermediate artifact.
    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, intermediate_artifact.clone(), &service.name)
        .unwrap();
    rig.create_block(fork);

    // Check service data and metadata.
    let snapshot = rig.blockchain.snapshot();
    assert_eq!(
        snapshot.get_proof_entry::<_, u32>("test.entry").get(),
        Some(1)
    );
    let schema = DispatcherSchema::new(&snapshot);
    let instance_state = schema.get_instance(service.id).unwrap();
    assert_eq!(instance_state.status, Some(InstanceStatus::Stopped));
    assert_eq!(instance_state.spec.artifact, intermediate_artifact);
    assert_eq!(instance_state.data_version, None);

    // Second part of migration. Since we've associated the service with a newer artifact,
    // the state will indicate that read endpoints may be retained for the service.
    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap();
    rig.create_block(fork);
    thread::sleep(DELAY * 5);
    let migration_hash = rig.migration_hash(&[("test.entry", 2_u32.object_hash())]);
    let fork = rig.blockchain.fork();

    // Check that intermediate blockchain data can be accessed.
    let blockchain_data = BlockchainData::new(&fork, "other");
    let entry_value = blockchain_data
        .for_service(service.id)
        .unwrap()
        .get_proof_entry::<_, u32>("entry")
        .get();
    assert_eq!(entry_value, Some(1));

    Dispatcher::commit_migration(&fork, &service.name, migration_hash).unwrap();
    rig.create_block(fork);
    let mut fork = rig.blockchain.fork();
    Dispatcher::flush_migration(&mut fork, &service.name).unwrap();
    rig.create_block(fork);

    // Check service data and metadata.
    let snapshot = rig.blockchain.snapshot();
    assert_eq!(
        snapshot.get_proof_entry::<_, u32>("test.entry").get(),
        Some(2)
    );
    let schema = DispatcherSchema::new(&snapshot);
    let instance_state = schema.get_instance(service.id).unwrap();
    assert_eq!(instance_state.data_version, Some(Version::new(0, 3, 0)));
}

Increase delays in migration tests to fix them on macos host on CI
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use assert_matches::assert_matches;
use exonum_crypto::KeyPair;
use exonum_merkledb::{
    access::{AccessExt, CopyAccessExt},
    migration::Migration,
    HashTag, ObjectHash, SystemSchema, TemporaryDB,
};

use std::time::Duration;

use super::{
    panic, remove_local_migration_result, rollback_migration, thread, Arc, ArtifactId, Blockchain,
    Database, Dispatcher, ExecutionError, Fork, Hash, HashMap, InstanceId, InstanceMigration,
    InstanceSpec, InstanceState, InstanceStatus, Mailbox, MigrationContext, MigrationScript,
    MigrationThread, MigrationType, Runtime, RuntimeFeature, Snapshot, Version,
};
use crate::{
    blockchain::{ApiSender, Block, BlockParams, BlockchainMut},
    helpers::{Height, ValidatorId},
    runtime::{
        execution_context::TopLevelContext,
        migrations::{InitMigrationError, MigrationError},
        oneshot::Receiver,
        BlockchainData, CoreError, DispatcherSchema, ErrorMatch, ExecutionContext, MethodId,
        RuntimeIdentifier, SnapshotExt, WellKnownRuntime,
    },
};

/// Base time unit for all sleeps in these tests. Migration scripts sleep for `DELAY`
/// (or a multiple of it) and the tests wait for multiples of `DELAY` before asserting,
/// so increasing this single constant slows everything down proportionally
/// (done deliberately to avoid flakiness on slow CI hosts).
const DELAY: Duration = Duration::from_millis(150);

/// Mock runtime used to exercise data migrations. Artifact *names* act as a dispatch key
/// selecting which migration script (if any) `migrate` returns; see `migrate` below.
#[derive(Default, Debug, Clone)]
struct MigrationRuntime {
    /// Flag to run good or erroneous migration script for `good-or-not-good` artifact.
    run_good_script: bool,
}

impl MigrationRuntime {
    /// Creates a runtime with the `good-or-not-good` behavior switch set to `flag`.
    fn with_script_flag(flag: bool) -> Self {
        Self {
            run_good_script: flag,
        }
    }
}

impl WellKnownRuntime for MigrationRuntime {
    const ID: u32 = 2;
}

impl Runtime for MigrationRuntime {
    // We use service freezing in some tests.
    fn is_supported(&self, feature: &RuntimeFeature) -> bool {
        match feature {
            RuntimeFeature::FreezingServices => true,
        }
    }

    // Deployment always succeeds immediately; these tests target migrations only.
    fn deploy_artifact(&mut self, _artifact: ArtifactId, _deploy_spec: Vec<u8>) -> Receiver {
        Receiver::with_result(Ok(()))
    }

    fn is_artifact_deployed(&self, _id: &ArtifactId) -> bool {
        true
    }

    fn initiate_adding_service(
        &self,
        _context: ExecutionContext<'_>,
        _artifact: &ArtifactId,
        _parameters: Vec<u8>,
    ) -> Result<(), ExecutionError> {
        Ok(())
    }

    fn initiate_resuming_service(
        &self,
        _context: ExecutionContext<'_>,
        _artifact: &ArtifactId,
        _parameters: Vec<u8>,
    ) -> Result<(), ExecutionError> {
        Ok(())
    }

    fn update_service_status(&mut self, _snapshot: &dyn Snapshot, _state: &InstanceState) {}

    /// Selects a migration script based on the artifact name:
    ///
    /// - `good` — a script that sleeps and succeeds;
    /// - `complex` — a two-step migration (to 0.2.0, then to 0.3.0);
    /// - `not-good` / `bad` — scripts that return an error / panic;
    /// - `with-state` — a script that merges data into the migration state;
    /// - `none` — no migration (fast-forward);
    /// - `good-or-not-good` — behavior controlled by `self.run_good_script`;
    /// - anything else — `InitMigrationError::NotSupported`.
    fn migrate(
        &self,
        new_artifact: &ArtifactId,
        data_version: &Version,
    ) -> Result<Option<MigrationScript>, InitMigrationError> {
        // By default, a script migrates to the new artifact version with the patch
        // number zeroed out (e.g. 0.5.2 -> 0.5.0).
        let mut end_version = new_artifact.version.clone();
        end_version.patch = 0;

        let script = match new_artifact.name.as_str() {
            "good" => simple_delayed_migration,
            "complex" => {
                let version1 = Version::new(0, 2, 0);
                let version2 = Version::new(0, 3, 0);
                if *data_version < version1 {
                    end_version = version1;
                    complex_migration_part1
                } else if *data_version < version2 && new_artifact.version >= version2 {
                    end_version = version2;
                    complex_migration_part2
                } else {
                    return Ok(None);
                }
            }
            "not-good" => erroneous_migration,
            "bad" => panicking_migration,
            "with-state" => migration_modifying_state_hash,
            "none" => return Ok(None),
            "good-or-not-good" => {
                if self.run_good_script {
                    simple_delayed_migration
                } else {
                    erroneous_migration
                }
            }
            _ => return Err(InitMigrationError::NotSupported),
        };
        let script = MigrationScript::new(script, end_version);
        Ok(Some(script))
    }

    fn execute(
        &self,
        _context: ExecutionContext<'_>,
        _method_id: MethodId,
        _arguments: &[u8],
    ) -> Result<(), ExecutionError> {
        Ok(())
    }

    fn before_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
        Ok(())
    }

    fn after_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
        Ok(())
    }

    fn after_commit(&mut self, _snapshot: &dyn Snapshot, _mailbox: &mut Mailbox) {}
}

/// Migration script that sleeps for `DELAY` and then succeeds without touching data.
fn simple_delayed_migration(_ctx: &mut MigrationContext) -> Result<(), MigrationError> {
    thread::sleep(DELAY);
    Ok(())
}

/// Migration script that sleeps for `DELAY` and then fails with a custom error.
fn erroneous_migration(_ctx: &mut MigrationContext) -> Result<(), MigrationError> {
    thread::sleep(DELAY);
    Err(MigrationError::new("This migration is unsuccessful!"))
}

/// Migration script that sleeps for `DELAY` and then panics.
fn panicking_migration(_ctx: &mut MigrationContext) -> Result<(), MigrationError> {
    thread::sleep(DELAY);
    panic!("This migration is unsuccessful!");
}

/// Migration script that writes `entry = 1`, then `entry = 2` into the migration data,
/// merging after each write with a `DELAY` sleep in between. The sleeps give the tests
/// well-defined points at which the script can be aborted mid-way.
fn migration_modifying_state_hash(ctx: &mut MigrationContext) -> Result<(), MigrationError> {
    for i in 1_u32..=2 {
        ctx.helper.new_data().get_proof_entry("entry").set(i);
        thread::sleep(DELAY);
        ctx.helper.merge()?;
    }
    Ok(())
}

/// First step of the `complex` migration: brings data to version 0.2.0.
fn complex_migration_part1(ctx: &mut MigrationContext) -> Result<(), MigrationError> {
    assert!(ctx.data_version < Version::new(0, 2, 0));
    ctx.helper.new_data().get_proof_entry("entry").set(1_u32);
    Ok(())
}

/// Second step of the `complex` migration: brings data from 0.2.x to version 0.3.0.
fn complex_migration_part2(ctx: &mut MigrationContext) -> Result<(), MigrationError> {
    assert!(ctx.data_version >= Version::new(0, 2, 0));
    assert!(ctx.data_version < Version::new(0, 3, 0));
    ctx.helper.new_data().get_proof_entry("entry").set(2_u32);
    Ok(())
}

/// Policy describing how far a local migration script is allowed to progress
/// before the test proceeds; consumed by `Rig::wait_migration_threads`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum LocalResult {
    /// Don't wait for the script at all.
    None,
    /// Wait for the script to finish, but don't persist its result in a block.
    InMemory,
    /// Wait for the script to finish and persist its result in a block.
    Saved,
    /// Like `Saved`, but additionally restart the node afterwards.
    SavedWithNodeRestart,
}

/// Test rig encapsulating typical tasks for migration tests, such as artifact deployment
/// and service instantiation.
#[derive(Debug)]
struct Rig {
    blockchain: BlockchainMut,
    // Monotonically increasing id assigned to the next instantiated service;
    // starts at 100, so the first service is always `100:<name>`.
    next_service_id: InstanceId,
}

impl Rig {
    fn new() -> Self {
        Self::with_db_and_flag(Arc::new(TemporaryDB::new()), false)
    }

    /// Creates a rig over the given DB; `flag` is forwarded to
    /// `MigrationRuntime::with_script_flag` (controls the `good-or-not-good` script).
    fn with_db_and_flag(db: Arc<TemporaryDB>, flag: bool) -> Self {
        let blockchain = Blockchain::new(
            db as Arc<dyn Database>,
            KeyPair::random(),
            ApiSender::closed(),
        );
        let blockchain = blockchain
            .into_mut_with_dummy_config()
            .with_runtime(MigrationRuntime::with_script_flag(flag))
            .build();
        Self {
            blockchain,
            next_service_id: 100,
        }
    }

    /// Computes expected state hash of a migration.
    fn migration_hash(&self, indexes: &[(&str, Hash)]) -> Hash {
        // Builds a throw-away proof map mirroring the migration state aggregator
        // and returns its object hash; the fork is never merged.
        let fork = self.blockchain.fork();
        let mut aggregator = fork.get_proof_map::<_, str, Hash>("_aggregator");
        for &(index_name, hash) in indexes {
            aggregator.put(index_name, hash);
        }
        aggregator.object_hash()
    }

    /// Emulates node stopping.
    fn stop(self) -> Blockchain {
        self.blockchain.immutable_view()
    }

    /// Emulates node restart by recreating the dispatcher.
    fn restart(&mut self) {
        let blockchain = self.blockchain.as_ref().clone();
        let blockchain = blockchain
            .into_mut_with_dummy_config()
            .with_runtime(MigrationRuntime::default())
            .build();
        self.blockchain = blockchain;
    }

    fn dispatcher(&mut self) -> &mut Dispatcher {
        self.blockchain.dispatcher()
    }

    /// Returns the map of currently running migration threads, keyed by service name.
    fn migration_threads(&mut self) -> &HashMap<String, MigrationThread> {
        &self.dispatcher().migrations.threads
    }

    /// Asserts that no migration scripts are currently being executed.
    fn assert_no_migration_threads(&mut self) {
        assert!(self.migration_threads().is_empty());
    }

    /// Waits for migration scripts to finish according to the specified policy.
    fn wait_migration_threads(&mut self, local_result: LocalResult) {
        if local_result == LocalResult::None {
            // Don't wait at all.
        } else {
            // Wait for the script to finish.
            thread::sleep(DELAY * 3);
            if local_result == LocalResult::InMemory {
                // Keep the local result in memory.
            } else {
                // Committing a block persists the local result and reaps the thread.
                self.create_block(self.blockchain.fork());
                assert!(self.dispatcher().migrations.threads.is_empty());
                if local_result == LocalResult::SavedWithNodeRestart {
                    self.restart();
                }
            }
        }
    }

    /// Creates and commits a block from the given fork, returning the committed block.
    /// Proposer and height are fixed constants; the tests don't depend on their values.
    fn create_block(&mut self, fork: Fork) -> Block {
        let block_params = BlockParams::new(ValidatorId(0), Height(100), &[]);
        let patch = self
            .blockchain
            .create_patch_inner(fork, &block_params, &[], &());
        self.blockchain.commit(patch, vec![]).unwrap();
        self.blockchain.as_ref().last_block()
    }

    /// Deploys an artifact in `MigrationRuntime` and commits the deployment in a block.
    fn deploy_artifact(&mut self, name: &str, version: Version) -> ArtifactId {
        let artifact = ArtifactId::from_raw_parts(MigrationRuntime::ID, name.into(), version);
        let fork = self.blockchain.fork();
        Dispatcher::commit_artifact(&fork, &artifact, vec![]);
        self.create_block(fork);
        artifact
    }

    /// Instantiates a service from `artifact` with the next free instance id
    /// and commits the instantiation in a block.
    fn initialize_service(&mut self, artifact: ArtifactId, name: &str) -> InstanceSpec {
        let service = InstanceSpec::from_raw_parts(self.next_service_id, name.to_owned(), artifact);
        self.next_service_id += 1;
        let mut fork = self.blockchain.fork();
        TopLevelContext::for_block_call(self.dispatcher(), &mut fork, service.as_descriptor())
            .call(|mut ctx| ctx.initiate_adding_service(service.clone(), vec![]))
            .expect("`initiate_adding_service` failed");
        self.create_block(fork);
        service
    }

    /// Stops the service and commits the status change in a block.
    fn stop_service(&mut self, spec: &InstanceSpec) {
        let fork = self.blockchain.fork();
        Dispatcher::initiate_stopping_service(&fork, spec.id).unwrap();
        self.create_block(fork);
    }

    /// Freezes the service and commits the status change in a block.
    fn freeze_service(&mut self, spec: &InstanceSpec) {
        let fork = self.blockchain.fork();
        self.dispatcher()
            .initiate_freezing_service(&fork, spec.id)
            .unwrap();
        self.create_block(fork);
    }
}

/// Shared body for `migration_workflow` / `migration_workflow_with_frozen_service`:
/// drives a full async migration from initiation to locally recorded result,
/// checking dispatcher invariants along the way.
fn test_migration_workflow(freeze_service: bool) {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("good", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("good", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact.clone(), "good");

    // Since service is not stopped, the migration should fail.
    let fork = rig.blockchain.fork();
    let err = rig
        .dispatcher()
        .initiate_migration(&fork, new_artifact.clone(), &service.name)
        .unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::InvalidServiceTransition)
            .with_description_containing("Data migration cannot be initiated")
    );

    // Stop or freeze the service.
    if freeze_service {
        rig.freeze_service(&service);
    } else {
        rig.stop_service(&service);
    }

    // Now, the migration start should succeed.
    let fork = rig.blockchain.fork();
    let ty = rig
        .dispatcher()
        .initiate_migration(&fork, new_artifact.clone(), &service.name)
        .unwrap();
    assert_matches!(ty, MigrationType::Async);
    // Migration scripts should not start executing immediately, but only on block commit.
    assert!(!rig.migration_threads().contains_key(&service.name));

    // Check that the migration target cannot be unloaded.
    let err = Dispatcher::unload_artifact(&fork, &new_artifact).unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::CannotUnloadArtifact)
            .with_description_containing("`100:good` references it as the data migration target")
    );
    rig.create_block(fork);
    // Check that the migration was initiated.
    assert!(rig.migration_threads().contains_key(&service.name));

    // Check that the old service data can be accessed.
    let snapshot = rig.blockchain.snapshot();
    assert!(snapshot.for_service(service.id).is_some());

    // Check that it is now impossible to unload either the old or the new artifact.
    let fork = rig.blockchain.fork();
    let err = Dispatcher::unload_artifact(&fork, &old_artifact).unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::CannotUnloadArtifact)
            .with_description_containing("`100:good` references it as the current artifact")
    );
    let err = Dispatcher::unload_artifact(&fork, &new_artifact).unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::CannotUnloadArtifact)
            .with_description_containing("`100:good` references it as the data migration target")
    );

    // Create several more blocks before the migration is complete and check that
    // we don't spawn multiple migration scripts at once (this check is performed in `Migrations`).
    for _ in 0..3 {
        rig.create_block(rig.blockchain.fork());
    }

    // Wait until the migration script is completed and check that its result is recorded.
    thread::sleep(DELAY * 3);
    rig.create_block(rig.blockchain.fork());

    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    let state = schema.get_instance(service.id).unwrap();
    let end_version = match state.status.unwrap() {
        InstanceStatus::Migrating(migration) => migration.end_version,
        status => panic!("Unexpected service status: {:?}", status),
    };
    // The `good` script targets the new artifact version with patch zeroed: 0.5.2 -> 0.5.0.
    assert_eq!(end_version, Version::new(0, 5, 0));
    let res = schema.local_migration_result(&service.name).unwrap();
    assert_eq!(res.0, Ok(HashTag::empty_map_hash()));
    assert!(!rig.migration_threads().contains_key(&service.name));

    // Create couple more blocks to check that the migration script is not launched again,
    // and the migration result is not overridden (these checks are `debug_assert`s
    // in the `Dispatcher` code).
    for _ in 0..3 {
        rig.create_block(rig.blockchain.fork());
    }
    assert!(!rig.migration_threads().contains_key(&service.name));
}

/// Tests basic workflow of migration initiation.
#[test]
fn migration_workflow() {
    test_migration_workflow(false);
}

#[test]
fn migration_workflow_with_frozen_service() {
    test_migration_workflow(true);
}

/// Tests that migration cannot target an artifact that has been marked for unloading.
#[test]
fn migration_after_artifact_unloading() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("good", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("good", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "good");
    // Stop the service.
    rig.stop_service(&service);

    // Mark the new artifact for unload. This is valid because so far, no services are
    // associated with it.
    let fork = rig.blockchain.fork();
    Dispatcher::unload_artifact(&fork, &new_artifact).unwrap();
    // However, unloading means that we cannot initiate migration to the artifact.
    let err = rig
        .dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap_err();
    let expected_msg =
        "artifact `2:good:0.5.2` for data migration of service `100:good` is not active";
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::ArtifactNotDeployed)
            .with_description_containing(expected_msg)
    );
}

/// Shared body for the fast-forward tests: the `none` artifact yields no migration
/// script, so the service should be re-associated with the new artifact immediately.
fn test_fast_forward_migration(freeze_service: bool) {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("none", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("none", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact.clone(), "service");
    if freeze_service {
        rig.freeze_service(&service);
    } else {
        rig.stop_service(&service);
    }

    let fork = rig.blockchain.fork();
    let ty = rig
        .dispatcher()
        .initiate_migration(&fork, new_artifact.clone(), &service.name)
        .unwrap();
    assert_matches!(ty, MigrationType::FastForward);
    rig.create_block(fork);

    // Service version should be updated when the block is merged.
    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    let state = schema.get_instance(service.id).unwrap();
    assert_eq!(state.status, Some(InstanceStatus::Stopped));
    assert_eq!(state.pending_status, None);
    assert_eq!(state.spec.artifact, new_artifact);
    assert_eq!(state.data_version, None);

    // Check that the old artifact can now be unloaded.
    let fork = rig.blockchain.fork();
    Dispatcher::unload_artifact(&fork, &old_artifact).unwrap();
    rig.create_block(fork);
    let snapshot = rig.blockchain.snapshot();
    assert!(DispatcherSchema::new(&snapshot)
        .get_artifact(&old_artifact)
        .is_none());
}

/// Tests fast-forwarding a migration.
#[test]
fn fast_forward_migration() {
    test_fast_forward_migration(false);
}

#[test]
fn fast_forward_migration_with_service_freezing() {
    test_fast_forward_migration(true);
}

/// Tests checks performed by the dispatcher during migration initiation.
#[test]
fn migration_immediate_errors() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("good", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("good", "0.5.2".parse().unwrap());
    let unrelated_artifact = rig.deploy_artifact("unrelated", "1.0.1".parse().unwrap());
    let old_service = rig.initialize_service(old_artifact.clone(), "old");
    rig.stop_service(&old_service);
    let new_service = rig.initialize_service(new_artifact.clone(), "new");
    rig.stop_service(&new_service);

    let fork = rig.blockchain.fork();

    // Attempt to upgrade service to an unrelated artifact.
    let err = rig
        .dispatcher()
        .initiate_migration(&fork, unrelated_artifact, &old_service.name)
        .unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::CannotUpgradeService).with_any_description()
    );

    // Attempt to downgrade service.
    let err = rig
        .dispatcher()
        .initiate_migration(&fork, old_artifact, &new_service.name)
        .unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::CannotUpgradeService).with_any_description()
    );

    // Attempt to migrate to the same version.
    let err = rig
        .dispatcher()
        .initiate_migration(&fork, new_artifact.clone(), &new_service.name)
        .unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::CannotUpgradeService).with_any_description()
    );

    // Attempt to migrate unknown service.
    let err = rig
        .dispatcher()
        .initiate_migration(&fork, new_artifact, "bogus-service")
        .unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::IncorrectInstanceId)
            .with_description_containing("for non-existing service `bogus-service`")
    );

    // Attempt to migrate to unknown artifact.
    let unknown_artifact = ArtifactId::from_raw_parts(
        RuntimeIdentifier::Rust as _,
        "good".into(),
        Version::new(0, 6, 0),
    );
    let err = rig
        .dispatcher()
        .initiate_migration(&fork, unknown_artifact.clone(), &old_service.name)
        .unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::UnknownArtifactId).with_any_description()
    );

    // Mark the artifact as pending.
    Dispatcher::commit_artifact(&fork, &unknown_artifact, vec![]);
    let err = rig
        .dispatcher()
        .initiate_migration(&fork, unknown_artifact, &old_service.name)
        .unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::ArtifactNotDeployed).with_any_description()
    );
}

/// Tests that an unfinished migration script is restarted on node restart.
#[test]
fn migration_is_resumed_after_node_restart() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("good", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("good", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "good");
    rig.stop_service(&service);

    // Start migration.
    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap();
    rig.create_block(fork);

    // Emulate node restart. Note that the old migration thread will continue running
    // as a detached thread, but since `Dispatcher.migrations` is dropped, the migration
    // will be aborted.
    rig.restart();
    assert!(rig.migration_threads().contains_key(&service.name));

    thread::sleep(DELAY * 3);
    rig.create_block(rig.blockchain.fork());
    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    let res = schema.local_migration_result(&service.name).unwrap();
    assert_eq!(res.0, Ok(HashTag::empty_map_hash()));
}

/// Tests that migration scripts are timely aborted on node stop.
#[test]
fn migration_threads_are_timely_aborted() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("with-state", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("with-state", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "good");
    rig.stop_service(&service);

    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap();
    rig.create_block(fork);

    thread::sleep(DELAY);
    let blockchain = rig.stop();
    thread::sleep(DELAY * 10);
    let snapshot = blockchain.snapshot();
    let migration = Migration::new(&service.name, &snapshot);
    // The `migration_modifying_state_hash` script should have completed 0 or 1 merges
    // by now, but not both merges.
    let val = migration
        .get_proof_entry::<_, u32>("entry")
        .get()
        .unwrap_or(0);
    assert!(val < 2);

    // New merges should not be added with time.
    thread::sleep(DELAY * 2);
    let snapshot = blockchain.snapshot();
    let migration = Migration::new(&service.name, &snapshot);
    let new_val = migration
        .get_proof_entry::<_, u32>("entry")
        .get()
        .unwrap_or(0);
    assert_eq!(val, new_val);
}

/// Tests that a completed migration script is not launched again.
#[test]
fn completed_migration_is_not_resumed_after_node_restart() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("good", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("good", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "good");
    rig.stop_service(&service);

    // Start migration.
    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap();
    rig.create_block(fork);

    thread::sleep(DELAY * 3);
    rig.create_block(rig.blockchain.fork());
    // Migration should be completed.
    rig.assert_no_migration_threads();
    // Check that the local migration result is persisted.
    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    assert!(schema.local_migration_result(&service.name).is_some());

    // Therefore, the script should not resume after blockchain restart.
    rig.restart();
    rig.assert_no_migration_threads();
}

/// Tests that an error in a migration script is reflected in the local migration result.
fn test_erroneous_migration(artifact_name: &str) {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact(artifact_name, "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact(artifact_name, "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "service");
    rig.stop_service(&service);

    // Start migration.
    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap();
    rig.create_block(fork);

    // Wait for the migration script to complete.
    let res = loop {
        thread::sleep(DELAY * 3);
        rig.create_block(rig.blockchain.fork());
        let snapshot = rig.blockchain.snapshot();
        let schema = DispatcherSchema::new(&snapshot);
        if let Some(res) = schema.local_migration_result(&service.name) {
            break res;
        }
    };
    assert!(res
        .0
        .unwrap_err()
        .contains("This migration is unsuccessful!"));
}

#[test]
fn migration_with_error() {
    test_erroneous_migration("not-good");
}

#[test]
fn migration_with_panic() {
    test_erroneous_migration("bad");
}

/// Tests that concurrent migrations with the same artifact are independent.
#[test]
fn concurrent_migrations_to_same_artifact() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("good", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("good", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact.clone(), "service");
    rig.stop_service(&service);
    let other_service = rig.initialize_service(old_artifact.clone(), "other-service");
    rig.stop_service(&other_service);
    let another_service = rig.initialize_service(old_artifact, "another-service");
    rig.stop_service(&another_service);

    // Place two migration starts in the same block.
    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact.clone(), &service.name)
        .unwrap();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact.clone(), &other_service.name)
        .unwrap();
    rig.create_block(fork);

    let threads = rig.migration_threads();
    assert!(threads.contains_key(&service.name));
    assert!(threads.contains_key(&other_service.name));
    assert!(!threads.contains_key(&another_service.name));

    // ...and one more in the following block.
    thread::sleep(DELAY);
    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &another_service.name)
        .unwrap();
    rig.create_block(fork);

    assert!(rig.migration_threads().contains_key(&another_service.name));

    // Wait for first two migrations to finish.
    thread::sleep(DELAY);
    rig.create_block(rig.blockchain.fork());
    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    let res = schema.local_migration_result(&service.name).unwrap();
    assert_eq!(res.0, Ok(HashTag::empty_map_hash()));
    let res = schema.local_migration_result(&other_service.name).unwrap();
    assert_eq!(res.0, Ok(HashTag::empty_map_hash()));

    // Wait for the third migration to finish.
    thread::sleep(DELAY);
    rig.create_block(rig.blockchain.fork());
    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    let res = schema
        .local_migration_result(&another_service.name)
        .unwrap();
    assert_eq!(res.0, Ok(HashTag::empty_map_hash()));

    rig.assert_no_migration_threads();
}

/// Tests that migration workflow changes state hash as expected.
#[test]
fn migration_influencing_state_hash() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("with-state", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("with-state", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "service");
    rig.stop_service(&service);

    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap();
    let state_hash = rig.create_block(fork).state_hash;

    for _ in 0..2 {
        // The sleeping interval is chosen to be larger than the interval of DB merges
        // in the migration script.
        thread::sleep(DELAY * 2);

        let fork = rig.blockchain.fork();
        // Check that we can access the old service data from outside.
        let blockchain_data = BlockchainData::new(&fork, "test");
        assert!(!blockchain_data
            .for_service(service.id)
            .unwrap()
            .get_proof_entry::<_, u32>("entry")
            .exists());
        // Check that the state during migration does not influence the default `state_hash`.
        let new_state_hash = rig.create_block(fork).state_hash;
        assert_eq!(state_hash, new_state_hash);
    }

    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    let res = schema.local_migration_result(&service.name).unwrap();
    let migration_hash = res.0.unwrap();
    let migration = Migration::new(&service.name, &snapshot);
    assert_eq!(migration_hash, migration.state_hash());

    // The aggregator should contain exactly one index: the migrated `entry`
    // (namespaced by the service name), with its final value `2`.
    let aggregator = migration.state_aggregator();
    assert_eq!(
        aggregator.keys().collect::<Vec<_>>(),
        vec!["service.entry".to_owned()]
    );
    assert_eq!(aggregator.get("service.entry"), Some(2_u32.object_hash()));
}

/// Tests the basic workflow of migration rollback.
#[test]
fn migration_rollback_workflow() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("good", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("good", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "good");
    rig.stop_service(&service);

    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap();
    rig.create_block(fork);

    // Wait until the migration is finished locally.
    thread::sleep(DELAY * 3);
    rig.create_block(rig.blockchain.fork());
    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    schema.local_migration_result(&service.name).unwrap();
    rig.assert_no_migration_threads();

    // Signal the rollback.
    let fork = rig.blockchain.fork();
    Dispatcher::rollback_migration(&fork, &service.name).unwrap();
    rig.create_block(fork);

    // Check that local migration result is erased.
    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    assert!(schema.local_migration_result(&service.name).is_none());
    let state = schema.get_instance(service.id).unwrap();
    assert_eq!(state.status, Some(InstanceStatus::Stopped));
    // The artifact version hasn't changed.
    assert_eq!(state.data_version, None);
}

/// Tests the checks performed by the dispatcher during migration rollback.
#[test]
fn migration_rollback_invariants() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("good", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("good", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "good");

    // Non-existing service.
    let fork = rig.blockchain.fork();
    let err = Dispatcher::rollback_migration(&fork, "bogus").unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::IncorrectInstanceId)
            .with_description_containing("Cannot rollback migration for unknown service `bogus`")
    );

    // Service is not stopped.
    let err = Dispatcher::rollback_migration(&fork, &service.name).unwrap_err();
    let no_migration_match = ErrorMatch::from_fail(&CoreError::NoMigration)
        .with_description_containing("it has no ongoing migration");
    assert_eq!(err, no_migration_match);

    rig.stop_service(&service);

    // Service is stopped, but there is no migration happening.
    let fork = rig.blockchain.fork();
    let err = Dispatcher::rollback_migration(&fork, &service.name).unwrap_err();
    assert_eq!(err, no_migration_match);

    // Start migration and commit its result, thus making the rollback impossible.
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap();
    rig.create_block(fork);
    let fork = rig.blockchain.fork();
    Dispatcher::commit_migration(&fork, &service.name, HashTag::empty_map_hash()).unwrap();
    // In the same block, we'll get an error because the service already has
    // a pending status update.
    let err = Dispatcher::rollback_migration(&fork, &service.name).unwrap_err();
    assert_eq!(err, ErrorMatch::from_fail(&CoreError::ServicePending));
    rig.create_block(fork);

    // ...In the next block, we'll get another error.
    let fork = rig.blockchain.fork();
    let err = Dispatcher::rollback_migration(&fork, &service.name).unwrap_err();
    assert_eq!(err, no_migration_match);
}

/// Tests that migration rollback aborts locally executed migration script.
#[test]
fn migration_rollback_aborts_migration_script() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("with-state", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("with-state", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "good");
    rig.stop_service(&service);

    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap();
    rig.create_block(fork);

    // Rollback the migration without waiting for the migration script to succeed locally.
    let fork = rig.blockchain.fork();
    Dispatcher::rollback_migration(&fork, &service.name).unwrap();
    rig.create_block(fork);

    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    assert!(schema.local_migration_result(&service.name).is_none());
    rig.assert_no_migration_threads();
    let migration = Migration::new(&service.name, &snapshot);
    assert!(!migration.get_proof_entry::<_, u32>("entry").exists());

    // Wait some time to ensure that script doesn't merge changes to the DB.
    thread::sleep(DELAY);
    let snapshot = rig.blockchain.snapshot();
    let migration = Migration::new(&service.name, &snapshot);
    assert!(!migration.get_proof_entry::<_, u32>("entry").exists());
}

/// Tests that migration rollback erases data created by the migration script.
#[test]
fn migration_rollback_erases_migration_data() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("with-state", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("with-state", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "good");
    rig.stop_service(&service);

    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap();
    rig.create_block(fork);

    // Wait until the migration is finished locally.
    thread::sleep(DELAY * 10);
    rig.create_block(rig.blockchain.fork());
    let snapshot = rig.blockchain.snapshot();
    let migration = Migration::new(&service.name, &snapshot);
    assert_eq!(migration.get_proof_entry::<_, u32>("entry").get(), Some(2));

    let fork = rig.blockchain.fork();
    Dispatcher::rollback_migration(&fork, &service.name).unwrap();
    rig.create_block(fork);

    // Migration data should be dropped now.
    let snapshot = rig.blockchain.snapshot();
    let migration = Migration::new(&service.name, &snapshot);
    assert!(!migration.get_proof_entry::<_, u32>("entry").exists());
}

/// Tests basic migration commit workflow.
#[test]
fn migration_commit_workflow() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("good", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("good", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "good");
    rig.stop_service(&service);

    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact.clone(), &service.name)
        .unwrap();
    rig.create_block(fork);

    // Wait until the migration is finished locally.
    thread::sleep(DELAY * 3);
    rig.create_block(rig.blockchain.fork());
    let fork = rig.blockchain.fork();
    Dispatcher::commit_migration(&fork, &service.name, HashTag::empty_map_hash()).unwrap();
    rig.create_block(fork);

    // Check that the local migration result is retained after the commit
    // and that the service status now reflects the committed migration.
    let snapshot = rig.blockchain.snapshot();
    let schema = DispatcherSchema::new(&snapshot);
    let res = schema.local_migration_result(&service.name).unwrap();
    assert_eq!(res.0.unwrap(), HashTag::empty_map_hash());
    let state = schema.get_instance(service.id).unwrap();
    let expected_status = InstanceStatus::migrating(InstanceMigration::from_raw_parts(
        new_artifact,
        Version::new(0, 5, 0),
        Some(HashTag::empty_map_hash()),
    ));
    assert_eq!(state.status, Some(expected_status));
}

/// Tests checks performed by the dispatcher during migration commit.
#[test]
fn migration_commit_invariants() {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("good", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("good", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "good");

    // Non-existing service.
    let fork = rig.blockchain.fork();
    let err = Dispatcher::commit_migration(&fork, "bogus", Hash::zero()).unwrap_err();
    assert_eq!(
        err,
        ErrorMatch::from_fail(&CoreError::IncorrectInstanceId)
            .with_description_containing("Cannot commit migration for unknown service `bogus`")
    );

    // Service is not stopped.
    let err = Dispatcher::commit_migration(&fork, &service.name, Hash::zero()).unwrap_err();
    let no_migration_match = ErrorMatch::from_fail(&CoreError::NoMigration)
        .with_description_containing("Cannot commit migration for service `100:good`");
    assert_eq!(err, no_migration_match);

    rig.stop_service(&service);

    // Service is stopped, but there is no migration happening.
    let fork = rig.blockchain.fork();
    let err = Dispatcher::commit_migration(&fork, &service.name, Hash::zero()).unwrap_err();
    assert_eq!(err, no_migration_match);

    // Start migration and commit its result, making the second commit impossible.
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap();
    rig.create_block(fork);
    let fork = rig.blockchain.fork();
    let migration_hash = HashTag::empty_map_hash();
    Dispatcher::commit_migration(&fork, &service.name, migration_hash).unwrap();
    // In the same block, we'll get an error because the service already has
    // a pending status update.
    let err = Dispatcher::commit_migration(&fork, &service.name, migration_hash).unwrap_err();
    assert_eq!(err, ErrorMatch::from_fail(&CoreError::ServicePending));
    rig.create_block(fork);

    // ...In the next block, we'll get another error.
    let fork = rig.blockchain.fork();
    let err = Dispatcher::commit_migration(&fork, &service.name, migration_hash).unwrap_err();
    assert_eq!(err, no_migration_match);
}

/// Tests that a migration commit after the migration script finished locally with an error
/// leads to node stopping.
fn test_migration_commit_with_local_error(
    rig: &mut Rig,
    local_result: LocalResult,
    artifact_name: &str,
) {
    let old_artifact = rig.deploy_artifact(artifact_name, "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact(artifact_name, "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "service");
    rig.stop_service(&service);

    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap();
    rig.create_block(fork);

    rig.wait_migration_threads(local_result);

    // Committing a hash contradicting the local (erroneous) result must panic the node.
    let fork = rig.blockchain.fork();
    Dispatcher::commit_migration(&fork, &service.name, Hash::zero()).unwrap();
    rig.create_block(fork); // << should panic
}

#[test]
#[should_panic(expected = "locally it has finished with an error: This migration is unsuccessful")]
fn migration_commit_with_local_error_blocking() {
    test_migration_commit_with_local_error(&mut Rig::new(), LocalResult::None, "not-good");
}

#[test]
#[should_panic(expected = "locally it has finished with an error: This migration is unsuccessful")]
fn migration_commit_with_local_error_in_memory() {
test_migration_commit_with_local_error(&mut Rig::new(), LocalResult::InMemory, "not-good"); } #[test] #[should_panic(expected = "locally it has finished with an error: This migration is unsuccessful")] fn migration_commit_with_local_error_saved() { test_migration_commit_with_local_error(&mut Rig::new(), LocalResult::Saved, "not-good"); } #[test] #[should_panic(expected = "locally it has finished with an error: This migration is unsuccessful")] fn migration_commit_with_local_error_saved_and_node_restart() { test_migration_commit_with_local_error( &mut Rig::new(), LocalResult::SavedWithNodeRestart, "not-good", ); } #[test] fn test_migration_restart() { let artifact_name = "good-or-not-good"; let service_name = "service"; let db = Arc::new(TemporaryDB::new()); // Running migration that should fail. std::panic::catch_unwind(|| { // Set script flag to fail migration. let mut rig = Rig::with_db_and_flag(Arc::clone(&db), false); test_migration_commit_with_local_error(&mut rig, LocalResult::Saved, artifact_name); }) .expect_err("Node should panic on unsuccessful migration commit"); // Check that we have failed result locally. let snapshot = db.snapshot(); let schema = DispatcherSchema::new(&snapshot); let res = schema .local_migration_result(service_name) .expect("Schema does not have local result"); assert_eq!(res.0.unwrap_err(), "This migration is unsuccessful!"); // Remove local migration result. let mut fork = db.fork(); rollback_migration(&mut fork, service_name); remove_local_migration_result(&fork, service_name); db.merge_sync(fork.into_patch()) .expect("Failed to merge patch after local migration result remove"); // Check that local result is removed. let snapshot = db.snapshot(); let schema = DispatcherSchema::new(&snapshot); assert!(schema.local_migration_result(service_name).is_none()); // Set script flag to migrate successfully. 
let mut rig = Rig::with_db_and_flag(Arc::clone(&db), true); let fork = rig.blockchain.fork(); Dispatcher::commit_migration(&fork, service_name, HashTag::empty_map_hash()) .expect("Failed to commit migration"); rig.create_block(fork); // Check that the migration script has finished. rig.assert_no_migration_threads(); // Check that local migration result is erased. let snapshot = rig.blockchain.snapshot(); let schema = DispatcherSchema::new(&snapshot); let res = schema.local_migration_result(service_name).unwrap(); assert_eq!(res.0.unwrap(), HashTag::empty_map_hash()); // Check current instance migration status. let state = schema.get_instance(100).unwrap(); let artifact = ArtifactId::from_raw_parts( MigrationRuntime::ID, artifact_name.to_string(), "0.5.2".parse().unwrap(), ); let expected_status = InstanceStatus::migrating(InstanceMigration::from_raw_parts( artifact, Version::new(0, 5, 0), Some(HashTag::empty_map_hash()), )); assert_eq!(state.status, Some(expected_status)); } /// Tests that a migration commit after the migration script finished locally with another hash /// leads to node stopping. 
fn test_migration_commit_with_differing_hash(local_result: LocalResult) {
    let mut rig = Rig::new();
    let old_artifact = rig.deploy_artifact("good", "0.3.0".parse().unwrap());
    let new_artifact = rig.deploy_artifact("good", "0.5.2".parse().unwrap());
    let service = rig.initialize_service(old_artifact, "service");
    // A service must be stopped before a migration can be initiated.
    rig.stop_service(&service);

    let fork = rig.blockchain.fork();
    rig.dispatcher()
        .initiate_migration(&fork, new_artifact, &service.name)
        .unwrap();
    rig.create_block(fork);
    // Drive the local migration script into the state requested by the caller
    // (`None` / `InMemory` / `Saved` / `SavedWithNodeRestart`, per the wrappers below).
    rig.wait_migration_threads(local_result);

    let fork = rig.blockchain.fork();
    // `Hash::zero()` never matches the hash produced by the local migration
    // script, so committing it must abort the node when the block is created.
    Dispatcher::commit_migration(&fork, &service.name, Hash::zero()).unwrap();
    rig.create_block(fork); // << should panic
}

#[test]
#[should_panic(expected = "locally it has finished with another hash")]
fn migration_commit_with_differing_hash_blocking() {
    test_migration_commit_with_differing_hash(LocalResult::None);
}

#[test]
#[should_panic(expected = "locally it has finished with another hash")]
fn migration_commit_with_differing_hash_in_memory() {
    test_migration_commit_with_differing_hash(LocalResult::InMemory);
}

#[test]
#[should_panic(expected = "locally it has finished with another hash")]
fn migration_commit_with_differing_hash_saved() {
    test_migration_commit_with_differing_hash(LocalResult::Saved);
}

#[test]
#[should_panic(expected = "locally it has finished with another hash")]
fn migration_commit_with_differing_hash_saved_and_node_restarted() {
    test_migration_commit_with_differing_hash(LocalResult::SavedWithNodeRestart);
}

/// Tests that committing a migration with a locally running migration script leads to the node
/// waiting until the script is completed.
#[test] fn migration_commit_without_completing_script_locally() { let mut rig = Rig::new(); let old_artifact = rig.deploy_artifact("with-state", "0.3.0".parse().unwrap()); let new_artifact = rig.deploy_artifact("with-state", "0.5.2".parse().unwrap()); let service = rig.initialize_service(old_artifact, "test"); rig.stop_service(&service); let fork = rig.blockchain.fork(); rig.dispatcher() .initiate_migration(&fork, new_artifact.clone(), &service.name) .unwrap(); rig.create_block(fork); // Compute migration hash using the knowledge about the end state of migrated data. let migration_hash = rig.migration_hash(&[("test.entry", 2_u32.object_hash())]); let fork = rig.blockchain.fork(); Dispatcher::commit_migration(&fork, &service.name, migration_hash).unwrap(); rig.create_block(fork); // Check that the migration script has finished. rig.assert_no_migration_threads(); let snapshot = rig.blockchain.snapshot(); let schema = DispatcherSchema::new(&snapshot); let state = schema.get_instance(service.id).unwrap(); let expected_status = InstanceStatus::migrating(InstanceMigration::from_raw_parts( new_artifact, Version::new(0, 5, 0), Some(migration_hash), )); assert_eq!(state.status, Some(expected_status)); // Flush the migration. let mut fork = rig.blockchain.fork(); Dispatcher::flush_migration(&mut fork, &service.name).unwrap(); let state_hash = rig.create_block(fork).state_hash; // The artifact version should be updated. let snapshot = rig.blockchain.snapshot(); let schema = DispatcherSchema::new(&snapshot); let state = schema.get_instance(service.id).unwrap(); assert_eq!(state.data_version, Some(Version::new(0, 5, 0))); assert_eq!(state.status, Some(InstanceStatus::Stopped)); assert!(schema.local_migration_result(&service.name).is_none()); // Check that service data has been updated. let entry = snapshot.get_proof_entry::<_, u32>("test.entry"); assert_eq!(entry.get(), Some(2)); // Check state aggregation. 
let aggregator = SystemSchema::new(&snapshot).state_aggregator(); assert_eq!(aggregator.get("test.entry"), Some(2_u32.object_hash())); assert_eq!(aggregator.object_hash(), state_hash); } /// Tests that the migration workflow is applicable to a migration spanning multiple scripts. #[test] fn two_part_migration() { let mut rig = Rig::new(); let old_artifact = rig.deploy_artifact("complex", "0.1.1".parse().unwrap()); let new_artifact = rig.deploy_artifact("complex", "0.3.7".parse().unwrap()); let service = rig.initialize_service(old_artifact.clone(), "test"); rig.stop_service(&service); // First part of migration. let fork = rig.blockchain.fork(); rig.dispatcher() .initiate_migration(&fork, new_artifact.clone(), &service.name) .unwrap(); rig.create_block(fork); let migration_hash = rig.migration_hash(&[("test.entry", 1_u32.object_hash())]); let fork = rig.blockchain.fork(); Dispatcher::commit_migration(&fork, &service.name, migration_hash).unwrap(); rig.create_block(fork); let mut fork = rig.blockchain.fork(); Dispatcher::flush_migration(&mut fork, &service.name).unwrap(); rig.create_block(fork); // Check service data and metadata. let snapshot = rig.blockchain.snapshot(); assert_eq!( snapshot.get_proof_entry::<_, u32>("test.entry").get(), Some(1) ); let schema = DispatcherSchema::new(&snapshot); let instance_state = schema.get_instance(service.id).unwrap(); assert_eq!(instance_state.data_version, Some(Version::new(0, 2, 0))); // The old artifact can now be unloaded, since it's no longer associated with the service. // In other words, the service cannot be started with the old artifact due to a different // data layout, so it can be removed from the blockchain. let fork = rig.blockchain.fork(); Dispatcher::unload_artifact(&fork, &old_artifact).unwrap(); // Second part of migration. 
let fork = rig.blockchain.fork(); rig.dispatcher() .initiate_migration(&fork, new_artifact.clone(), &service.name) .unwrap(); rig.create_block(fork); let migration_hash = rig.migration_hash(&[("test.entry", 2_u32.object_hash())]); let fork = rig.blockchain.fork(); Dispatcher::commit_migration(&fork, &service.name, migration_hash).unwrap(); rig.create_block(fork); let mut fork = rig.blockchain.fork(); Dispatcher::flush_migration(&mut fork, &service.name).unwrap(); rig.create_block(fork); // Check service data and metadata. let snapshot = rig.blockchain.snapshot(); assert_eq!( snapshot.get_proof_entry::<_, u32>("test.entry").get(), Some(2) ); let schema = DispatcherSchema::new(&snapshot); let instance_state = schema.get_instance(service.id).unwrap(); assert_eq!(instance_state.data_version, Some(Version::new(0, 3, 0))); // Check that the new artifact can be unloaded. let fork = rig.blockchain.fork(); Dispatcher::unload_artifact(&fork, &new_artifact).unwrap(); rig.create_block(fork); } #[test] fn two_part_migration_with_intermediate_artifact() { let mut rig = Rig::new(); let old_artifact = rig.deploy_artifact("complex", "0.1.1".parse().unwrap()); let intermediate_artifact = rig.deploy_artifact("complex", "0.2.2".parse().unwrap()); let new_artifact = rig.deploy_artifact("complex", "0.3.7".parse().unwrap()); let service = rig.initialize_service(old_artifact, "test"); rig.stop_service(&service); // First part of migration. 
let fork = rig.blockchain.fork(); rig.dispatcher() .initiate_migration(&fork, new_artifact.clone(), &service.name) .unwrap(); rig.create_block(fork); let migration_hash = rig.migration_hash(&[("test.entry", 1_u32.object_hash())]); let fork = rig.blockchain.fork(); Dispatcher::commit_migration(&fork, &service.name, migration_hash).unwrap(); rig.create_block(fork); let mut fork = rig.blockchain.fork(); Dispatcher::flush_migration(&mut fork, &service.name).unwrap(); rig.create_block(fork); // Use a fast-forward migration to associate the service with an intermediate artifact. let fork = rig.blockchain.fork(); rig.dispatcher() .initiate_migration(&fork, intermediate_artifact.clone(), &service.name) .unwrap(); rig.create_block(fork); // Check service data and metadata. let snapshot = rig.blockchain.snapshot(); assert_eq!( snapshot.get_proof_entry::<_, u32>("test.entry").get(), Some(1) ); let schema = DispatcherSchema::new(&snapshot); let instance_state = schema.get_instance(service.id).unwrap(); assert_eq!(instance_state.status, Some(InstanceStatus::Stopped)); assert_eq!(instance_state.spec.artifact, intermediate_artifact); assert_eq!(instance_state.data_version, None); // Second part of migration. Since we've associated the service with a newer artifact, // the state will indicate that read endpoints may be retained for the service. let fork = rig.blockchain.fork(); rig.dispatcher() .initiate_migration(&fork, new_artifact, &service.name) .unwrap(); rig.create_block(fork); thread::sleep(DELAY * 5); let migration_hash = rig.migration_hash(&[("test.entry", 2_u32.object_hash())]); let fork = rig.blockchain.fork(); // Check that intermediate blockchain data can be accessed. 
let blockchain_data = BlockchainData::new(&fork, "other"); let entry_value = blockchain_data .for_service(service.id) .unwrap() .get_proof_entry::<_, u32>("entry") .get(); assert_eq!(entry_value, Some(1)); Dispatcher::commit_migration(&fork, &service.name, migration_hash).unwrap(); rig.create_block(fork); let mut fork = rig.blockchain.fork(); Dispatcher::flush_migration(&mut fork, &service.name).unwrap(); rig.create_block(fork); // Check service data and metadata. let snapshot = rig.blockchain.snapshot(); assert_eq!( snapshot.get_proof_entry::<_, u32>("test.entry").get(), Some(2) ); let schema = DispatcherSchema::new(&snapshot); let instance_state = schema.get_instance(service.id).unwrap(); assert_eq!(instance_state.data_version, Some(Version::new(0, 3, 0))); }
use gl; use yaglw::gl_context::GLContext; use yaglw::shader::Shader; pub struct T<'a> { #[allow(missing_docs)] pub shader: Shader<'a>, } pub fn new<'a, 'b:'a>(gl: &'a GLContext) -> T<'b> { let components = vec!( (gl::VERTEX_SHADER, " #version 330 core void main() { if (gl_VertexID == 0) { gl_Position = vec4(1, -1, 0, 1); } else if (gl_VertexID == 1) { gl_Position = vec4(1, 1, 0, 1); } else if (gl_VertexID == 2) { gl_Position = vec4(-1, -1, 0, 1); } else if (gl_VertexID == 3) { gl_Position = vec4(-1, 1, 0, 1); } }".to_owned()), (gl::FRAGMENT_SHADER, format!(r#" #version 330 core uniform vec2 window_size; uniform struct Sun {{ vec3 direction; vec3 intensity; }} sun; const float sun_angular_radius = 3.14/32; uniform mat4 projection_matrix; uniform vec3 eye_position; uniform float time_ms; out vec4 frag_color; // include depth fog {} // include cnoise {} vec3 pixel_direction(vec2 pixel) {{ // Scale to [0, 1] pixel /= window_size; // Scale to [-1, 1] pixel = 2*pixel - 1; vec4 p = inverse(projection_matrix) * vec4(pixel, -1, 1); return normalize(vec3(p / p.w) - eye_position); }} float cloud_noise(vec3 seed) {{ float f = cnoise(seed + vec3(0, time_ms / 8000, 0)); return f; }} float cloud_density(vec3 seed) {{ float f = (2.0*cloud_noise(seed / 2) + cloud_noise(seed) + 0.5*cloud_noise(2.0 * seed) + 0.25*cloud_noise(4.0*seed)) / 3.75; return (f + 1) / 2; }} void main() {{ vec3 c = sun.intensity; vec3 direction = pixel_direction(gl_FragCoord.xy); const int HEIGHTS = 2; float heights[HEIGHTS] = float[](150, 1000); vec3 offsets[HEIGHTS] = vec3[](vec3(12,553,239), vec3(-10, 103, 10004)); float sunniness = exp(64 * (dot(sun.direction, direction) - cos(sun_angular_radius))); c = mix(c, vec3(1), sunniness); float alpha = 0; for (int i = 0; i < HEIGHTS; ++i) {{ float cloud_height = heights[i]; float dist = (cloud_height - eye_position.y) / direction.y; if (dist <= 0 || dist > 1000000) {{ continue; }} else {{ vec3 seed = (eye_position + dist * direction + offsets[i]) / 1000 * vec3(1, 
4, 1); float f = cloud_density(seed); alpha += f * (1 - fog_density(dist / 64)); }} }} alpha = alpha / HEIGHTS; float min_cloud = 0.4; float max_cloud = 0.8; alpha = (alpha - min_cloud) / (max_cloud - min_cloud); alpha = min(max(alpha, 0), 1); vec3 cloud_color = mix(vec3(0.4), vec3(1), (exp(1 - alpha) - 1) / exp(1)); c = mix(c, cloud_color, alpha); frag_color = min(vec4(c, 1), vec4(1)); }}"#, ::shaders::depth_fog::to_string(), ::shaders::noise::cnoise(), ) ), ); T { shader: Shader::new(gl, components.into_iter()), } } different sky color model use gl; use yaglw::gl_context::GLContext; use yaglw::shader::Shader; pub struct T<'a> { #[allow(missing_docs)] pub shader: Shader<'a>, } pub fn new<'a, 'b:'a>(gl: &'a GLContext) -> T<'b> { let components = vec!( (gl::VERTEX_SHADER, " #version 330 core void main() { if (gl_VertexID == 0) { gl_Position = vec4(1, -1, 0, 1); } else if (gl_VertexID == 1) { gl_Position = vec4(1, 1, 0, 1); } else if (gl_VertexID == 2) { gl_Position = vec4(-1, -1, 0, 1); } else if (gl_VertexID == 3) { gl_Position = vec4(-1, 1, 0, 1); } }".to_owned()), (gl::FRAGMENT_SHADER, format!(r#" #version 330 core uniform vec2 window_size; uniform struct Sun {{ vec3 direction; vec3 intensity; }} sun; const float sun_angular_radius = 3.14/32; uniform mat4 projection_matrix; uniform vec3 eye_position; uniform float time_ms; out vec4 frag_color; // include depth fog {} // include cnoise {} vec3 pixel_direction(vec2 pixel) {{ // Scale to [0, 1] pixel /= window_size; // Scale to [-1, 1] pixel = 2*pixel - 1; vec4 p = inverse(projection_matrix) * vec4(pixel, -1, 1); return normalize(vec3(p / p.w) - eye_position); }} float cloud_noise(vec3 seed) {{ float f = cnoise(seed + vec3(0, time_ms / 8000, 0)); return f; }} float cloud_density(vec3 seed) {{ float d = (2.0*cloud_noise(seed / 2) + cloud_noise(seed) + 0.5*cloud_noise(2.0 * seed) + 0.25*cloud_noise(4.0*seed)) / 3.75; d = (d + 1) / 2; float min_cloud = 0.4; float max_cloud = 0.8; d = (d - min_cloud) / (max_cloud - 
min_cloud); d = min(max(d, 0), 1); return d; }} void main() {{ vec3 direction = pixel_direction(gl_FragCoord.xy); const int HEIGHTS = 2; float heights[HEIGHTS] = float[](150, 1000); vec3 offsets[HEIGHTS] = vec3[](vec3(12,553,239), vec3(-10, 103, 10004)); vec3 c = vec3(0); float alpha = 1; for (int i = 0; i < HEIGHTS; ++i) {{ float cloud_height = heights[i]; float dist = (cloud_height - eye_position.y) / direction.y; if (dist <= 0 || dist > 1000000) {{ continue; }} else {{ vec3 seed = (eye_position + dist * direction + offsets[i]) / 1000 * vec3(1, 4, 1); float depth_alpha = fog_density(dist / 64); float density = cloud_density(seed); float cloud_alpha = density * (1 - depth_alpha); c += alpha * cloud_alpha * vec3(mix(0.4, 1, (exp(1 - density) - 1) / (exp(1) - 1))); alpha *= (1 - cloud_alpha); }} }} float sunniness = exp(64 * (dot(sun.direction, direction) - cos(sun_angular_radius))); vec3 infinity_color = mix(sun.intensity, vec3(1), sunniness); c += alpha * infinity_color; frag_color = min(vec4(c, 1), vec4(1)); }}"#, ::shaders::depth_fog::to_string(), ::shaders::noise::cnoise(), ) ), ); T { shader: Shader::new(gl, components.into_iter()), } }
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /// The location of something in an address space. /// /// This is used to provide a location of a [`Function`], [`Instruction`], /// or other item. /// /// The meaning of this address is flexibly interpreted by the rest of this /// library as the meaning depends upon the application embedding and using /// this library. It may be an actual machine address or it might be something /// as simple as the offset of an instruction into an array. /// /// XXX: Should this have any indication for what type of address it is? /// An address might be an address within a file, a resolved address /// after being loaded, etc. /// /// XXX: Should this include any information about the address space /// that it is from? /// /// [`Function`]: struct.Function.html /// [`Instruction`]: trait.Instruction.html #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Address { address: u64, } impl Address { /// Construct an `Address`. pub fn new(address: u64) -> Self { Address { address: address } } } #[cfg(test)] mod tests { use super::Address; #[test] fn address_comparison() { let addr1 = Address::new(5); let addr2 = Address::new(3); let addr3 = Address::new(5); assert!(addr2 < addr1); assert_eq!(addr1, addr3); } } impl fmt::{Binary,Octal,LowerHex,UpperHex} for Address. This improves the usability of printing addresses. // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. 
use std::fmt; /// The location of something in an address space. /// /// This is used to provide a location of a [`Function`], [`Instruction`], /// or other item. /// /// The meaning of this address is flexibly interpreted by the rest of this /// library as the meaning depends upon the application embedding and using /// this library. It may be an actual machine address or it might be something /// as simple as the offset of an instruction into an array. /// /// XXX: Should this have any indication for what type of address it is? /// An address might be an address within a file, a resolved address /// after being loaded, etc. /// /// XXX: Should this include any information about the address space /// that it is from? /// /// ## Formatting /// /// `Address` implements the `fmt::Binary`, `fmt::Octal`, `fmt::LowerHex` /// and `fmt::UpperHex` traits from `std::fmt`. This makes it integrate /// readily with Rust's standard I/O facilities: /// /// ``` /// # use disassemble::Address; /// let a = Address::new(0x6502); /// // Print with 0x in hex. /// assert_eq!("0x6502", format!("{:#x}", a)); /// // Print with 0x, zero padded, 10 characters wide, in hex. /// assert_eq!("0x00006502", format!("{:#010x}", a)); /// ``` /// /// [`Function`]: struct.Function.html /// [`Instruction`]: trait.Instruction.html #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Address { address: u64, } impl Address { /// Construct an `Address`. 
pub fn new(address: u64) -> Self { Address { address: address } } } impl fmt::Binary for Address { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.address.fmt(f) } } impl fmt::Octal for Address { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.address.fmt(f) } } impl fmt::LowerHex for Address { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.address.fmt(f) } } impl fmt::UpperHex for Address { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.address.fmt(f) } } #[cfg(test)] mod tests { use super::Address; #[test] fn address_comparison() { let addr1 = Address::new(5); let addr2 = Address::new(3); let addr3 = Address::new(5); assert!(addr2 < addr1); assert_eq!(addr1, addr3); } #[test] fn binary_fmt() { let a = Address::new(4); assert_eq!("100", format!("{:b}", a)); } #[test] fn octal_fmt() { let a = Address::new(10); assert_eq!("12", format!("{:o}", a)); } #[test] fn lower_hex_fmt() { let a = Address::new(0xc1); assert_eq!("0xc1", format!("{:#x}", a)); assert_eq!(" 0xc1", format!("{:#6x}", a)); assert_eq!(" c1", format!("{:6x}", a)); assert_eq!("0000c1", format!("{:06x}", a)); } #[test] fn upper_hex_fmt() { let a = Address::new(0xc1); assert_eq!("0xC1", format!("{:#X}", a)); assert_eq!(" 0xC1", format!("{:#6X}", a)); assert_eq!(" C1", format!("{:6X}", a)); assert_eq!("0000C1", format!("{:06X}", a)); } }
/* * Copyright (c) 2017 Boucher, Antoni <bouanto@zoho.com> * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ use app::App; use app::Msg::CreateWindow; use webview::Msg::PageOpen; use url::{Url, Position}; use urls::{offset, get_filename}; impl App { /// Open the given URL in the web view. pub fn open(&self, url: &str) { let url = self.transform_url(url); self.webview.emit(PageOpen(url)); } /// Open the given URL in a new window. pub fn open_in_new_window(&mut self, url: &str) { let url = self.transform_url(url); self.model.relm.stream().emit(CreateWindow(url)); } /// Open in a new window the url from the system clipboard. pub fn win_paste_url(&mut self) { if let Some(url) = self.get_url_from_clipboard() { self.open_in_new_window(&url); } } /// Go up one directory in url. 
pub fn go_parent_directory(&self) { if let Some(ref url) = self.webview.widget().get_uri() { let mut parent = String::new(); // TODO: Do manually without use of get_filename if let Some(filename) = get_filename(url) { if filename.is_empty() { if let Ok(base_url) = Url::parse(url) { parent = base_url.join("../").unwrap().to_string(); } } else { parent = url[..url.len()-filename.len()].to_string(); } } if !parent.is_empty() { self.open(&parent); } } } /// Go to the root directory or url hostname. pub fn go_root_directory(&self) { if let Some(ref url) = self.webview.widget().get_uri() { if let Ok(base_url) = Url::parse(url) { let root = &base_url[..Position::BeforePath]; if !root.is_empty() { self.open(root); } } } } pub fn url_increment(&self) { if let Some(ref url) = self.webview.widget().get_uri() { if let Some(url) = offset(url, 1) { self.open(&url); } } } pub fn url_decrement(&self) { if let Some(ref url) = self.webview.widget().get_uri() { if let Some(url) = offset(url, -1) { self.open(&url); } } } } Simply go_parent_directory() /* * Copyright (c) 2017 Boucher, Antoni <bouanto@zoho.com> * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ use app::App; use app::Msg::CreateWindow; use webview::Msg::PageOpen; use url::{Url, Position}; use urls::offset; impl App { /// Open the given URL in the web view. pub fn open(&self, url: &str) { let url = self.transform_url(url); self.webview.emit(PageOpen(url)); } /// Open the given URL in a new window. pub fn open_in_new_window(&mut self, url: &str) { let url = self.transform_url(url); self.model.relm.stream().emit(CreateWindow(url)); } /// Open in a new window the url from the system clipboard. pub fn win_paste_url(&mut self) { if let Some(url) = self.get_url_from_clipboard() { self.open_in_new_window(&url); } } /// Go up one directory in url. pub fn go_parent_directory(&self) { if let Some(ref url) = self.webview.widget().get_uri() { if let Ok(mut url) = Url::parse(url) { match url.path_segments_mut() { Ok(mut segments) => { let _ = segments.pop_if_empty() .pop(); }, Err(_) => return, } self.open(url.as_str()); } } } /// Go to the root directory or url hostname. pub fn go_root_directory(&self) { if let Some(ref url) = self.webview.widget().get_uri() { if let Ok(base_url) = Url::parse(url) { let root = &base_url[..Position::BeforePath]; if !root.is_empty() { self.open(root); } } } } pub fn url_increment(&self) { if let Some(ref url) = self.webview.widget().get_uri() { if let Some(url) = offset(url, 1) { self.open(&url); } } } pub fn url_decrement(&self) { if let Some(ref url) = self.webview.widget().get_uri() { if let Some(url) = offset(url, -1) { self.open(&url); } } } }
//! Data flow graph tracking Instructions, Values, and EBBs. use entity_map::{EntityMap, PrimaryEntityData}; use ir::builder::{InsertBuilder, ReplaceBuilder}; use ir::extfunc::ExtFuncData; use ir::instructions::{Opcode, InstructionData, CallInfo}; use ir::layout::Cursor; use ir::types; use ir::{Ebb, Inst, Value, Type, SigRef, Signature, FuncRef, ValueList, ValueListPool}; use write::write_operands; use std::fmt; use std::iter; use std::ops::{Index, IndexMut}; use std::u16; /// A data flow graph defines all instructions and extended basic blocks in a function as well as /// the data flow dependencies between them. The DFG also tracks values which can be either /// instruction results or EBB arguments. /// /// The layout of EBBs in the function and of instructions in each EBB is recorded by the /// `FunctionLayout` data structure which form the other half of the function representation. /// #[derive(Clone)] pub struct DataFlowGraph { /// Data about all of the instructions in the function, including opcodes and operands. /// The instructions in this map are not in program order. That is tracked by `Layout`, along /// with the EBB containing each instruction. insts: EntityMap<Inst, InstructionData>, /// List of result values for each instruction. /// /// This map gets resized automatically by `make_inst()` so it is always in sync with the /// primary `insts` map. results: EntityMap<Inst, ValueList>, /// Extended basic blocks in the function and their arguments. /// This map is not in program order. That is handled by `Layout`, and so is the sequence of /// instructions contained in each EBB. ebbs: EntityMap<Ebb, EbbData>, /// Memory pool of value lists. /// /// The `ValueList` references into this pool appear in many places: /// /// - Instructions in `insts` that don't have room for their entire argument list inline. /// - Instruction result values in `results`. /// - EBB arguments in `ebbs`. 
pub value_lists: ValueListPool, /// Primary value table with entries for all values. values: EntityMap<Value, ValueData>, /// Function signature table. These signatures are referenced by indirect call instructions as /// well as the external function references. pub signatures: EntityMap<SigRef, Signature>, /// External function references. These are functions that can be called directly. pub ext_funcs: EntityMap<FuncRef, ExtFuncData>, } impl PrimaryEntityData for InstructionData {} impl PrimaryEntityData for EbbData {} impl PrimaryEntityData for ValueData {} impl PrimaryEntityData for Signature {} impl PrimaryEntityData for ExtFuncData {} impl DataFlowGraph { /// Create a new empty `DataFlowGraph`. pub fn new() -> DataFlowGraph { DataFlowGraph { insts: EntityMap::new(), results: EntityMap::new(), ebbs: EntityMap::new(), value_lists: ValueListPool::new(), values: EntityMap::new(), signatures: EntityMap::new(), ext_funcs: EntityMap::new(), } } /// Get the total number of instructions created in this function, whether they are currently /// inserted in the layout or not. /// /// This is intended for use with `EntityMap::with_capacity`. pub fn num_insts(&self) -> usize { self.insts.len() } /// Returns `true` if the given instruction reference is valid. pub fn inst_is_valid(&self, inst: Inst) -> bool { self.insts.is_valid(inst) } /// Get the total number of extended basic blocks created in this function, whether they are /// currently inserted in the layout or not. /// /// This is intended for use with `EntityMap::with_capacity`. pub fn num_ebbs(&self) -> usize { self.ebbs.len() } /// Returns `true` if the given ebb reference is valid. pub fn ebb_is_valid(&self, ebb: Ebb) -> bool { self.ebbs.is_valid(ebb) } } /// Resolve value aliases. /// /// Find the original SSA value that `value` aliases. fn resolve_aliases(values: &EntityMap<Value, ValueData>, value: Value) -> Value { let mut v = value; // Note that values may be empty here. 
for _ in 0..1 + values.len() { if let ValueData::Alias { original, .. } = values[v] { v = original; } else { return v; } } panic!("Value alias loop detected for {}", value); } /// Handling values. /// /// Values are either EBB arguments or instruction results. impl DataFlowGraph { // Allocate an extended value entry. fn make_value(&mut self, data: ValueData) -> Value { self.values.push(data) } /// Check if a value reference is valid. pub fn value_is_valid(&self, v: Value) -> bool { self.values.is_valid(v) } /// Get the type of a value. pub fn value_type(&self, v: Value) -> Type { match self.values[v] { ValueData::Inst { ty, .. } | ValueData::Arg { ty, .. } | ValueData::Alias { ty, .. } => ty, } } /// Get the definition of a value. /// /// This is either the instruction that defined it or the Ebb that has the value as an /// argument. pub fn value_def(&self, v: Value) -> ValueDef { match self.values[v] { ValueData::Inst { inst, num, .. } => { assert_eq!(Some(v), self.results[inst].get(num as usize, &self.value_lists), "Dangling result value {}: {}", v, self.display_inst(inst)); ValueDef::Res(inst, num as usize) } ValueData::Arg { ebb, num, .. } => { assert_eq!(Some(v), self.ebbs[ebb].args.get(num as usize, &self.value_lists), "Dangling EBB argument value"); ValueDef::Arg(ebb, num as usize) } ValueData::Alias { original, .. } => { // Make sure we only recurse one level. `resolve_aliases` has safeguards to // detect alias loops without overrunning the stack. self.value_def(self.resolve_aliases(original)) } } } /// Determine if `v` is an attached instruction result / EBB argument. /// /// An attached value can't be attached to something else without first being detached. /// /// Value aliases are not considered to be attached to anything. Use `resolve_aliases()` to /// determine if the original aliased value is attached. pub fn value_is_attached(&self, v: Value) -> bool { use self::ValueData::*; match self.values[v] { Inst { inst, num, .. 
} => Some(&v) == self.inst_results(inst).get(num as usize), Arg { ebb, num, .. } => Some(&v) == self.ebb_args(ebb).get(num as usize), Alias { .. } => false, } } /// Resolve value aliases. /// /// Find the original SSA value that `value` aliases. pub fn resolve_aliases(&self, value: Value) -> Value { resolve_aliases(&self.values, value) } /// Resolve value copies. /// /// Find the original definition of a value, looking through value aliases as well as /// copy/spill/fill instructions. pub fn resolve_copies(&self, value: Value) -> Value { let mut v = value; for _ in 0..self.insts.len() { v = self.resolve_aliases(v); v = match self.value_def(v) { ValueDef::Res(inst, 0) => { match self[inst] { InstructionData::Unary { opcode, arg, .. } => { match opcode { Opcode::Copy | Opcode::Spill | Opcode::Fill => arg, _ => return v, } } _ => return v, } } _ => return v, }; } panic!("Copy loop detected for {}", value); } /// Resolve all aliases among inst's arguments. /// /// For each argument of inst which is defined by an alias, replace the /// alias with the aliased value. pub fn resolve_aliases_in_arguments(&mut self, inst: Inst) { for arg in self.insts[inst].arguments_mut(&mut self.value_lists) { let resolved = resolve_aliases(&self.values, *arg); if resolved != *arg { *arg = resolved; } } } /// Turn a value into an alias of another. /// /// Change the `dest` value to behave as an alias of `src`. This means that all uses of `dest` /// will behave as if they used that value `src`. /// /// The `dest` value can't be attached to an instruction or EBB. pub fn change_to_alias(&mut self, dest: Value, src: Value) { assert!(!self.value_is_attached(dest)); // Try to create short alias chains by finding the original source value. // This also avoids the creation of loops. 
let original = self.resolve_aliases(src); assert!(dest != original, "Aliasing {} to {} would create a loop", dest, src); let ty = self.value_type(original); assert_eq!(self.value_type(dest), ty, "Aliasing {} to {} would change its type {} to {}", dest, src, self.value_type(dest), ty); self.values[dest] = ValueData::Alias { ty, original }; } /// Replace the results of one instruction with aliases to the results of another. /// /// Change all the results of `dest_inst` to behave as aliases of /// corresponding results of `src_inst`, as if calling change_to_alias for /// each. /// /// After calling this instruction, `dest_inst` will have had its results /// cleared, so it likely needs to be removed from the graph. /// pub fn replace_with_aliases(&mut self, dest_inst: Inst, src_inst: Inst) { debug_assert_ne!(dest_inst, src_inst, "Replacing {} with itself would create a loop", dest_inst); debug_assert_eq!(self.results[dest_inst].len(&self.value_lists), self.results[src_inst].len(&self.value_lists), "Replacing {} with {} would produce a different number of results.", dest_inst, src_inst); for (&dest, &src) in self.results[dest_inst] .as_slice(&self.value_lists) .iter() .zip(self.results[src_inst].as_slice(&self.value_lists)) { let original = src; let ty = self.value_type(original); assert_eq!(self.value_type(dest), ty, "Aliasing {} to {} would change its type {} to {}", dest, src, self.value_type(dest), ty); self.values[dest] = ValueData::Alias { ty, original }; } self.clear_results(dest_inst); } /// Create a new value alias. /// /// Note that this function should only be called by the parser. pub fn make_value_alias(&mut self, src: Value) -> Value { let ty = self.value_type(src); let data = ValueData::Alias { ty, original: src }; self.make_value(data) } } /// Where did a value come from? #[derive(Debug, PartialEq, Eq)] pub enum ValueDef { /// Value is the n'th result of an instruction. Res(Inst, usize), /// Value is the n'th argument to an EBB. 
Arg(Ebb, usize), } // Internal table storage for extended values. #[derive(Clone, Debug)] enum ValueData { // Value is defined by an instruction. Inst { ty: Type, num: u16, inst: Inst }, // Value is an EBB argument. Arg { ty: Type, num: u16, ebb: Ebb }, // Value is an alias of another value. // An alias value can't be linked as an instruction result or EBB argument. It is used as a // placeholder when the original instruction or EBB has been rewritten or modified. Alias { ty: Type, original: Value }, } /// Instructions. /// impl DataFlowGraph { /// Create a new instruction. /// /// The type of the first result is indicated by `data.ty`. If the instruction produces /// multiple results, also call `make_inst_results` to allocate value table entries. pub fn make_inst(&mut self, data: InstructionData) -> Inst { let n = self.num_insts() + 1; self.results.resize(n); self.insts.push(data) } /// Get the instruction reference that will be assigned to the next instruction created by /// `make_inst`. /// /// This is only really useful to the parser. pub fn next_inst(&self) -> Inst { self.insts.next_key() } /// Returns an object that displays `inst`. pub fn display_inst(&self, inst: Inst) -> DisplayInst { DisplayInst(self, inst) } /// Get all value arguments on `inst` as a slice. pub fn inst_args(&self, inst: Inst) -> &[Value] { self.insts[inst].arguments(&self.value_lists) } /// Get all value arguments on `inst` as a mutable slice. pub fn inst_args_mut(&mut self, inst: Inst) -> &mut [Value] { self.insts[inst].arguments_mut(&mut self.value_lists) } /// Get the fixed value arguments on `inst` as a slice. pub fn inst_fixed_args(&self, inst: Inst) -> &[Value] { let fixed_args = self[inst].opcode().constraints().fixed_value_arguments(); &self.inst_args(inst)[..fixed_args] } /// Get the fixed value arguments on `inst` as a mutable slice. 
pub fn inst_fixed_args_mut(&mut self, inst: Inst) -> &mut [Value] { let fixed_args = self[inst].opcode().constraints().fixed_value_arguments(); &mut self.inst_args_mut(inst)[..fixed_args] } /// Get the variable value arguments on `inst` as a slice. pub fn inst_variable_args(&self, inst: Inst) -> &[Value] { let fixed_args = self[inst].opcode().constraints().fixed_value_arguments(); &self.inst_args(inst)[fixed_args..] } /// Get the variable value arguments on `inst` as a mutable slice. pub fn inst_variable_args_mut(&mut self, inst: Inst) -> &mut [Value] { let fixed_args = self[inst].opcode().constraints().fixed_value_arguments(); &mut self.inst_args_mut(inst)[fixed_args..] } /// Create result values for an instruction that produces multiple results. /// /// Instructions that produce no result values only need to be created with `make_inst`, /// otherwise call `make_inst_results` to allocate value table entries for the results. /// /// The result value types are determined from the instruction's value type constraints and the /// provided `ctrl_typevar` type for polymorphic instructions. For non-polymorphic /// instructions, `ctrl_typevar` is ignored, and `VOID` can be used. /// /// The type of the first result value is also set, even if it was already set in the /// `InstructionData` passed to `make_inst`. If this function is called with a single-result /// instruction, that is the only effect. pub fn make_inst_results(&mut self, inst: Inst, ctrl_typevar: Type) -> usize { self.make_inst_results_reusing(inst, ctrl_typevar, iter::empty()) } /// Create result values for `inst`, reusing the provided detached values. /// /// Create a new set of result values for `inst` using `ctrl_typevar` to determine the result /// types. Any values provided by `reuse` will be reused. When `reuse` is exhausted or when it /// produces `None`, a new value is created. 
pub fn make_inst_results_reusing<I>(&mut self, inst: Inst, ctrl_typevar: Type, reuse: I) -> usize where I: Iterator<Item = Option<Value>> { let mut reuse = reuse.fuse(); let constraints = self.insts[inst].opcode().constraints(); let fixed_results = constraints.fixed_results(); let mut total_results = fixed_results; self.results[inst].clear(&mut self.value_lists); // The fixed results will appear at the front of the list. for res_idx in 0..fixed_results { let ty = constraints.result_type(res_idx, ctrl_typevar); if let Some(Some(v)) = reuse.next() { debug_assert_eq!(self.value_type(v), ty, "Reused {} is wrong type", ty); self.attach_result(inst, v); } else { self.append_result(inst, ty); } } // Get the call signature if this is a function call. if let Some(sig) = self.call_signature(inst) { // Create result values corresponding to the call return types. let var_results = self.signatures[sig].return_types.len(); total_results += var_results; for res_idx in 0..var_results { let ty = self.signatures[sig].return_types[res_idx].value_type; if let Some(Some(v)) = reuse.next() { debug_assert_eq!(self.value_type(v), ty, "Reused {} is wrong type", ty); self.attach_result(inst, v); } else { self.append_result(inst, ty); } } } total_results } /// Create an `InsertBuilder` that will insert an instruction at the cursor's current position. pub fn ins<'c, 'fc: 'c, 'fd>(&'fd mut self, at: &'c mut Cursor<'fc>) -> InsertBuilder<'c, 'fc, 'fd> { InsertBuilder::new(self, at) } /// Create a `ReplaceBuilder` that will replace `inst` with a new instruction in place. pub fn replace(&mut self, inst: Inst) -> ReplaceBuilder { ReplaceBuilder::new(self, inst) } /// Detach the list of result values from `inst` and return it. /// /// This leaves `inst` without any result values. New result values can be created by calling /// `make_inst_results` or by using a `replace(inst)` builder. 
pub fn detach_results(&mut self, inst: Inst) -> ValueList { self.results[inst].take() } /// Clear the list of result values from `inst`. /// /// This leaves `inst` without any result values. New result values can be created by calling /// `make_inst_results` or by using a `replace(inst)` builder. pub fn clear_results(&mut self, inst: Inst) { self.results[inst].clear(&mut self.value_lists) } /// Attach an existing value to the result value list for `inst`. /// /// The `res` value is appended to the end of the result list. /// /// This is a very low-level operation. Usually, instruction results with the correct types are /// created automatically. The `res` value must not be attached to anything else. pub fn attach_result(&mut self, inst: Inst, res: Value) { assert!(!self.value_is_attached(res)); let num = self.results[inst].push(res, &mut self.value_lists); assert!(num <= u16::MAX as usize, "Too many result values"); let ty = self.value_type(res); self.values[res] = ValueData::Inst { ty, num: num as u16, inst, }; } /// Append a new instruction result value to `inst`. pub fn append_result(&mut self, inst: Inst, ty: Type) -> Value { let res = self.values.next_key(); let num = self.results[inst].push(res, &mut self.value_lists); assert!(num <= u16::MAX as usize, "Too many result values"); self.make_value(ValueData::Inst { ty, inst, num: num as u16, }) } /// Get the first result of an instruction. /// /// This function panics if the instruction doesn't have any result. pub fn first_result(&self, inst: Inst) -> Value { self.results[inst] .first(&self.value_lists) .expect("Instruction has no results") } /// Test if `inst` has any result values currently. pub fn has_results(&self, inst: Inst) -> bool { !self.results[inst].is_empty() } /// Return all the results of an instruction. pub fn inst_results(&self, inst: Inst) -> &[Value] { self.results[inst].as_slice(&self.value_lists) } /// Get the call signature of a direct or indirect call instruction. 
/// Returns `None` if `inst` is not a call instruction. pub fn call_signature(&self, inst: Inst) -> Option<SigRef> { match self.insts[inst].analyze_call(&self.value_lists) { CallInfo::NotACall => None, CallInfo::Direct(f, _) => Some(self.ext_funcs[f].signature), CallInfo::Indirect(s, _) => Some(s), } } /// Compute the type of an instruction result from opcode constraints and call signatures. /// /// This computes the same sequence of result types that `make_inst_results()` above would /// assign to the created result values, but it does not depend on `make_inst_results()` being /// called first. /// /// Returns `None` if asked about a result index that is too large. pub fn compute_result_type(&self, inst: Inst, result_idx: usize, ctrl_typevar: Type) -> Option<Type> { let constraints = self.insts[inst].opcode().constraints(); let fixed_results = constraints.fixed_results(); if result_idx < fixed_results { return Some(constraints.result_type(result_idx, ctrl_typevar)); } // Not a fixed result, try to extract a return type from the call signature. self.call_signature(inst) .and_then(|sigref| { self.signatures[sigref] .return_types .get(result_idx - fixed_results) .map(|&arg| arg.value_type) }) } /// Get the controlling type variable, or `VOID` if `inst` isn't polymorphic. pub fn ctrl_typevar(&self, inst: Inst) -> Type { let constraints = self[inst].opcode().constraints(); if !constraints.is_polymorphic() { types::VOID } else if constraints.requires_typevar_operand() { // Not all instruction formats have a designated operand, but in that case // `requires_typevar_operand()` should never be true. self.value_type(self[inst].typevar_operand(&self.value_lists) .expect("Instruction format doesn't have a designated operand, bad opcode.")) } else { self.value_type(self.first_result(inst)) } } } /// Allow immutable access to instructions via indexing. 
impl Index<Inst> for DataFlowGraph { type Output = InstructionData; fn index<'a>(&'a self, inst: Inst) -> &'a InstructionData { &self.insts[inst] } } /// Allow mutable access to instructions via indexing. impl IndexMut<Inst> for DataFlowGraph { fn index_mut<'a>(&'a mut self, inst: Inst) -> &'a mut InstructionData { &mut self.insts[inst] } } /// Extended basic blocks. impl DataFlowGraph { /// Create a new basic block. pub fn make_ebb(&mut self) -> Ebb { self.ebbs.push(EbbData::new()) } /// Get the number of arguments on `ebb`. pub fn num_ebb_args(&self, ebb: Ebb) -> usize { self.ebbs[ebb].args.len(&self.value_lists) } /// Get the arguments to an EBB. pub fn ebb_args(&self, ebb: Ebb) -> &[Value] { self.ebbs[ebb].args.as_slice(&self.value_lists) } /// Append an argument with type `ty` to `ebb`. pub fn append_ebb_arg(&mut self, ebb: Ebb, ty: Type) -> Value { let arg = self.values.next_key(); let num = self.ebbs[ebb].args.push(arg, &mut self.value_lists); assert!(num <= u16::MAX as usize, "Too many arguments to EBB"); self.make_value(ValueData::Arg { ty, num: num as u16, ebb, }) } /// Append an existing argument value to `ebb`. /// /// The appended value can't already be attached to something else. /// /// In almost all cases, you should be using `append_ebb_arg()` instead of this method. pub fn attach_ebb_arg(&mut self, ebb: Ebb, arg: Value) { assert!(!self.value_is_attached(arg)); let num = self.ebbs[ebb].args.push(arg, &mut self.value_lists); assert!(num <= u16::MAX as usize, "Too many arguments to EBB"); let ty = self.value_type(arg); self.values[arg] = ValueData::Arg { ty, num: num as u16, ebb, }; } /// Replace an EBB argument with a new value of type `ty`. /// /// The `old_value` must be an attached EBB argument. It is removed from its place in the list /// of arguments and replaced by a new value of type `new_type`. The new value gets the same /// position in the list, and other arguments are not disturbed. 
/// /// The old value is left detached, so it should probably be changed into something else. /// /// Returns the new value. pub fn replace_ebb_arg(&mut self, old_arg: Value, new_type: Type) -> Value { // Create new value identical to the old one except for the type. let (ebb, num) = if let ValueData::Arg { num, ebb, .. } = self.values[old_arg] { (ebb, num) } else { panic!("{} must be an EBB argument", old_arg); }; let new_arg = self.make_value(ValueData::Arg { ty: new_type, num, ebb, }); self.ebbs[ebb].args.as_mut_slice(&mut self.value_lists)[num as usize] = new_arg; new_arg } /// Detach all the arguments from `ebb` and return them as a `ValueList`. /// /// This is a quite low-level operation. Sensible things to do with the detached EBB arguments /// is to put them back on the same EBB with `attach_ebb_arg()` or change them into aliases /// with `change_to_alias()`. pub fn detach_ebb_args(&mut self, ebb: Ebb) -> ValueList { self.ebbs[ebb].args.take() } } // Contents of an extended basic block. // // Arguments for an extended basic block are values that dominate everything in the EBB. All // branches to this EBB must provide matching arguments, and the arguments to the entry EBB must // match the function arguments. #[derive(Clone)] struct EbbData { // List of arguments to this EBB. args: ValueList, } impl EbbData { fn new() -> EbbData { EbbData { args: ValueList::new() } } } /// Object that can display an instruction. 
pub struct DisplayInst<'a>(&'a DataFlowGraph, Inst); impl<'a> fmt::Display for DisplayInst<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let dfg = self.0; let inst = &dfg[self.1]; if let Some((first, rest)) = dfg.inst_results(self.1).split_first() { write!(f, "{}", first)?; for v in rest { write!(f, ", {}", v)?; } write!(f, " = ")?; } let typevar = dfg.ctrl_typevar(self.1); if typevar.is_void() { write!(f, "{}", inst.opcode())?; } else { write!(f, "{}.{}", inst.opcode(), typevar)?; } write_operands(f, dfg, None, self.1) } } #[cfg(test)] mod tests { use super::*; use ir::types; use ir::{Function, Cursor, Opcode, InstructionData}; #[test] fn make_inst() { let mut dfg = DataFlowGraph::new(); let idata = InstructionData::Nullary { opcode: Opcode::Iconst }; let inst = dfg.make_inst(idata); dfg.make_inst_results(inst, types::I32); assert_eq!(inst.to_string(), "inst0"); assert_eq!(dfg.display_inst(inst).to_string(), "v0 = iconst.i32"); // Immutable reference resolution. { let immdfg = &dfg; let ins = &immdfg[inst]; assert_eq!(ins.opcode(), Opcode::Iconst); } // Results. let val = dfg.first_result(inst); assert_eq!(dfg.inst_results(inst), &[val]); assert_eq!(dfg.value_def(val), ValueDef::Res(inst, 0)); assert_eq!(dfg.value_type(val), types::I32); } #[test] fn no_results() { let mut dfg = DataFlowGraph::new(); let idata = InstructionData::Nullary { opcode: Opcode::Trap }; let inst = dfg.make_inst(idata); assert_eq!(dfg.display_inst(inst).to_string(), "trap"); // Result slice should be empty. 
assert_eq!(dfg.inst_results(inst), &[]); } #[test] fn ebb() { let mut dfg = DataFlowGraph::new(); let ebb = dfg.make_ebb(); assert_eq!(ebb.to_string(), "ebb0"); assert_eq!(dfg.num_ebb_args(ebb), 0); assert_eq!(dfg.ebb_args(ebb), &[]); assert!(dfg.detach_ebb_args(ebb).is_empty()); assert_eq!(dfg.num_ebb_args(ebb), 0); assert_eq!(dfg.ebb_args(ebb), &[]); let arg1 = dfg.append_ebb_arg(ebb, types::F32); assert_eq!(arg1.to_string(), "v0"); assert_eq!(dfg.num_ebb_args(ebb), 1); assert_eq!(dfg.ebb_args(ebb), &[arg1]); let arg2 = dfg.append_ebb_arg(ebb, types::I16); assert_eq!(arg2.to_string(), "v1"); assert_eq!(dfg.num_ebb_args(ebb), 2); assert_eq!(dfg.ebb_args(ebb), &[arg1, arg2]); assert_eq!(dfg.value_def(arg1), ValueDef::Arg(ebb, 0)); assert_eq!(dfg.value_def(arg2), ValueDef::Arg(ebb, 1)); assert_eq!(dfg.value_type(arg1), types::F32); assert_eq!(dfg.value_type(arg2), types::I16); // Swap the two EBB arguments. let vlist = dfg.detach_ebb_args(ebb); assert_eq!(dfg.num_ebb_args(ebb), 0); assert_eq!(dfg.ebb_args(ebb), &[]); assert_eq!(vlist.as_slice(&dfg.value_lists), &[arg1, arg2]); dfg.attach_ebb_arg(ebb, arg2); let arg3 = dfg.append_ebb_arg(ebb, types::I32); dfg.attach_ebb_arg(ebb, arg1); assert_eq!(dfg.ebb_args(ebb), &[arg2, arg3, arg1]); } #[test] fn replace_ebb_arguments() { let mut dfg = DataFlowGraph::new(); let ebb = dfg.make_ebb(); let arg1 = dfg.append_ebb_arg(ebb, types::F32); let new1 = dfg.replace_ebb_arg(arg1, types::I64); assert_eq!(dfg.value_type(arg1), types::F32); assert_eq!(dfg.value_type(new1), types::I64); assert_eq!(dfg.ebb_args(ebb), &[new1]); dfg.attach_ebb_arg(ebb, arg1); assert_eq!(dfg.ebb_args(ebb), &[new1, arg1]); let new2 = dfg.replace_ebb_arg(arg1, types::I8); assert_eq!(dfg.value_type(arg1), types::F32); assert_eq!(dfg.value_type(new2), types::I8); assert_eq!(dfg.ebb_args(ebb), &[new1, new2]); dfg.attach_ebb_arg(ebb, arg1); assert_eq!(dfg.ebb_args(ebb), &[new1, new2, arg1]); let new3 = dfg.replace_ebb_arg(new2, types::I16); 
assert_eq!(dfg.value_type(new1), types::I64); assert_eq!(dfg.value_type(new2), types::I8); assert_eq!(dfg.value_type(new3), types::I16); assert_eq!(dfg.ebb_args(ebb), &[new1, new3, arg1]); } #[test] fn aliases() { use ir::InstBuilder; use ir::condcodes::IntCC; let mut func = Function::new(); let dfg = &mut func.dfg; let ebb0 = dfg.make_ebb(); let pos = &mut Cursor::new(&mut func.layout); pos.insert_ebb(ebb0); // Build a little test program. let v1 = dfg.ins(pos).iconst(types::I32, 42); // Make sure we can resolve value aliases even when values is empty. assert_eq!(dfg.resolve_aliases(v1), v1); let arg0 = dfg.append_ebb_arg(ebb0, types::I32); let (s, c) = dfg.ins(pos).iadd_cout(v1, arg0); let iadd = match dfg.value_def(s) { ValueDef::Res(i, 0) => i, _ => panic!(), }; // Remove `c` from the result list. dfg.clear_results(iadd); dfg.attach_result(iadd, s); // Replace `iadd_cout` with a normal `iadd` and an `icmp`. dfg.replace(iadd).iadd(v1, arg0); let c2 = dfg.ins(pos).icmp(IntCC::UnsignedLessThan, s, v1); dfg.change_to_alias(c, c2); assert_eq!(dfg.resolve_aliases(c2), c2); assert_eq!(dfg.resolve_aliases(c), c2); // Make a copy of the alias. let c3 = dfg.ins(pos).copy(c); // This does not see through copies. assert_eq!(dfg.resolve_aliases(c3), c3); // But this goes through both copies and aliases. assert_eq!(dfg.resolve_copies(c3), c2); } } Add a dfg::replace_result() method. This is analogous to replace_ebb_arg(). It replaces an instruction result value with a new value, leaving the old value in a detached state. //! Data flow graph tracking Instructions, Values, and EBBs. 
use entity_map::{EntityMap, PrimaryEntityData}; use ir::builder::{InsertBuilder, ReplaceBuilder}; use ir::extfunc::ExtFuncData; use ir::instructions::{Opcode, InstructionData, CallInfo}; use ir::layout::Cursor; use ir::types; use ir::{Ebb, Inst, Value, Type, SigRef, Signature, FuncRef, ValueList, ValueListPool}; use write::write_operands; use std::fmt; use std::iter; use std::mem; use std::ops::{Index, IndexMut}; use std::u16; /// A data flow graph defines all instructions and extended basic blocks in a function as well as /// the data flow dependencies between them. The DFG also tracks values which can be either /// instruction results or EBB arguments. /// /// The layout of EBBs in the function and of instructions in each EBB is recorded by the /// `FunctionLayout` data structure which form the other half of the function representation. /// #[derive(Clone)] pub struct DataFlowGraph { /// Data about all of the instructions in the function, including opcodes and operands. /// The instructions in this map are not in program order. That is tracked by `Layout`, along /// with the EBB containing each instruction. insts: EntityMap<Inst, InstructionData>, /// List of result values for each instruction. /// /// This map gets resized automatically by `make_inst()` so it is always in sync with the /// primary `insts` map. results: EntityMap<Inst, ValueList>, /// Extended basic blocks in the function and their arguments. /// This map is not in program order. That is handled by `Layout`, and so is the sequence of /// instructions contained in each EBB. ebbs: EntityMap<Ebb, EbbData>, /// Memory pool of value lists. /// /// The `ValueList` references into this pool appear in many places: /// /// - Instructions in `insts` that don't have room for their entire argument list inline. /// - Instruction result values in `results`. /// - EBB arguments in `ebbs`. pub value_lists: ValueListPool, /// Primary value table with entries for all values. 
values: EntityMap<Value, ValueData>, /// Function signature table. These signatures are referenced by indirect call instructions as /// well as the external function references. pub signatures: EntityMap<SigRef, Signature>, /// External function references. These are functions that can be called directly. pub ext_funcs: EntityMap<FuncRef, ExtFuncData>, } impl PrimaryEntityData for InstructionData {} impl PrimaryEntityData for EbbData {} impl PrimaryEntityData for ValueData {} impl PrimaryEntityData for Signature {} impl PrimaryEntityData for ExtFuncData {} impl DataFlowGraph { /// Create a new empty `DataFlowGraph`. pub fn new() -> DataFlowGraph { DataFlowGraph { insts: EntityMap::new(), results: EntityMap::new(), ebbs: EntityMap::new(), value_lists: ValueListPool::new(), values: EntityMap::new(), signatures: EntityMap::new(), ext_funcs: EntityMap::new(), } } /// Get the total number of instructions created in this function, whether they are currently /// inserted in the layout or not. /// /// This is intended for use with `EntityMap::with_capacity`. pub fn num_insts(&self) -> usize { self.insts.len() } /// Returns `true` if the given instruction reference is valid. pub fn inst_is_valid(&self, inst: Inst) -> bool { self.insts.is_valid(inst) } /// Get the total number of extended basic blocks created in this function, whether they are /// currently inserted in the layout or not. /// /// This is intended for use with `EntityMap::with_capacity`. pub fn num_ebbs(&self) -> usize { self.ebbs.len() } /// Returns `true` if the given ebb reference is valid. pub fn ebb_is_valid(&self, ebb: Ebb) -> bool { self.ebbs.is_valid(ebb) } } /// Resolve value aliases. /// /// Find the original SSA value that `value` aliases. fn resolve_aliases(values: &EntityMap<Value, ValueData>, value: Value) -> Value { let mut v = value; // Note that values may be empty here. for _ in 0..1 + values.len() { if let ValueData::Alias { original, .. 
} = values[v] { v = original; } else { return v; } } panic!("Value alias loop detected for {}", value); } /// Handling values. /// /// Values are either EBB arguments or instruction results. impl DataFlowGraph { // Allocate an extended value entry. fn make_value(&mut self, data: ValueData) -> Value { self.values.push(data) } /// Check if a value reference is valid. pub fn value_is_valid(&self, v: Value) -> bool { self.values.is_valid(v) } /// Get the type of a value. pub fn value_type(&self, v: Value) -> Type { match self.values[v] { ValueData::Inst { ty, .. } | ValueData::Arg { ty, .. } | ValueData::Alias { ty, .. } => ty, } } /// Get the definition of a value. /// /// This is either the instruction that defined it or the Ebb that has the value as an /// argument. pub fn value_def(&self, v: Value) -> ValueDef { match self.values[v] { ValueData::Inst { inst, num, .. } => { assert_eq!(Some(v), self.results[inst].get(num as usize, &self.value_lists), "Dangling result value {}: {}", v, self.display_inst(inst)); ValueDef::Res(inst, num as usize) } ValueData::Arg { ebb, num, .. } => { assert_eq!(Some(v), self.ebbs[ebb].args.get(num as usize, &self.value_lists), "Dangling EBB argument value"); ValueDef::Arg(ebb, num as usize) } ValueData::Alias { original, .. } => { // Make sure we only recurse one level. `resolve_aliases` has safeguards to // detect alias loops without overrunning the stack. self.value_def(self.resolve_aliases(original)) } } } /// Determine if `v` is an attached instruction result / EBB argument. /// /// An attached value can't be attached to something else without first being detached. /// /// Value aliases are not considered to be attached to anything. Use `resolve_aliases()` to /// determine if the original aliased value is attached. pub fn value_is_attached(&self, v: Value) -> bool { use self::ValueData::*; match self.values[v] { Inst { inst, num, .. } => Some(&v) == self.inst_results(inst).get(num as usize), Arg { ebb, num, .. 
} => Some(&v) == self.ebb_args(ebb).get(num as usize), Alias { .. } => false, } } /// Resolve value aliases. /// /// Find the original SSA value that `value` aliases. pub fn resolve_aliases(&self, value: Value) -> Value { resolve_aliases(&self.values, value) } /// Resolve value copies. /// /// Find the original definition of a value, looking through value aliases as well as /// copy/spill/fill instructions. pub fn resolve_copies(&self, value: Value) -> Value { let mut v = value; for _ in 0..self.insts.len() { v = self.resolve_aliases(v); v = match self.value_def(v) { ValueDef::Res(inst, 0) => { match self[inst] { InstructionData::Unary { opcode, arg, .. } => { match opcode { Opcode::Copy | Opcode::Spill | Opcode::Fill => arg, _ => return v, } } _ => return v, } } _ => return v, }; } panic!("Copy loop detected for {}", value); } /// Resolve all aliases among inst's arguments. /// /// For each argument of inst which is defined by an alias, replace the /// alias with the aliased value. pub fn resolve_aliases_in_arguments(&mut self, inst: Inst) { for arg in self.insts[inst].arguments_mut(&mut self.value_lists) { let resolved = resolve_aliases(&self.values, *arg); if resolved != *arg { *arg = resolved; } } } /// Turn a value into an alias of another. /// /// Change the `dest` value to behave as an alias of `src`. This means that all uses of `dest` /// will behave as if they used that value `src`. /// /// The `dest` value can't be attached to an instruction or EBB. pub fn change_to_alias(&mut self, dest: Value, src: Value) { assert!(!self.value_is_attached(dest)); // Try to create short alias chains by finding the original source value. // This also avoids the creation of loops. 
let original = self.resolve_aliases(src); assert!(dest != original, "Aliasing {} to {} would create a loop", dest, src); let ty = self.value_type(original); assert_eq!(self.value_type(dest), ty, "Aliasing {} to {} would change its type {} to {}", dest, src, self.value_type(dest), ty); self.values[dest] = ValueData::Alias { ty, original }; } /// Replace the results of one instruction with aliases to the results of another. /// /// Change all the results of `dest_inst` to behave as aliases of /// corresponding results of `src_inst`, as if calling change_to_alias for /// each. /// /// After calling this instruction, `dest_inst` will have had its results /// cleared, so it likely needs to be removed from the graph. /// pub fn replace_with_aliases(&mut self, dest_inst: Inst, src_inst: Inst) { debug_assert_ne!(dest_inst, src_inst, "Replacing {} with itself would create a loop", dest_inst); debug_assert_eq!(self.results[dest_inst].len(&self.value_lists), self.results[src_inst].len(&self.value_lists), "Replacing {} with {} would produce a different number of results.", dest_inst, src_inst); for (&dest, &src) in self.results[dest_inst] .as_slice(&self.value_lists) .iter() .zip(self.results[src_inst].as_slice(&self.value_lists)) { let original = src; let ty = self.value_type(original); assert_eq!(self.value_type(dest), ty, "Aliasing {} to {} would change its type {} to {}", dest, src, self.value_type(dest), ty); self.values[dest] = ValueData::Alias { ty, original }; } self.clear_results(dest_inst); } /// Create a new value alias. /// /// Note that this function should only be called by the parser. pub fn make_value_alias(&mut self, src: Value) -> Value { let ty = self.value_type(src); let data = ValueData::Alias { ty, original: src }; self.make_value(data) } } /// Where did a value come from? #[derive(Debug, PartialEq, Eq)] pub enum ValueDef { /// Value is the n'th result of an instruction. Res(Inst, usize), /// Value is the n'th argument to an EBB. 
Arg(Ebb, usize), } // Internal table storage for extended values. #[derive(Clone, Debug)] enum ValueData { // Value is defined by an instruction. Inst { ty: Type, num: u16, inst: Inst }, // Value is an EBB argument. Arg { ty: Type, num: u16, ebb: Ebb }, // Value is an alias of another value. // An alias value can't be linked as an instruction result or EBB argument. It is used as a // placeholder when the original instruction or EBB has been rewritten or modified. Alias { ty: Type, original: Value }, } /// Instructions. /// impl DataFlowGraph { /// Create a new instruction. /// /// The type of the first result is indicated by `data.ty`. If the instruction produces /// multiple results, also call `make_inst_results` to allocate value table entries. pub fn make_inst(&mut self, data: InstructionData) -> Inst { let n = self.num_insts() + 1; self.results.resize(n); self.insts.push(data) } /// Get the instruction reference that will be assigned to the next instruction created by /// `make_inst`. /// /// This is only really useful to the parser. pub fn next_inst(&self) -> Inst { self.insts.next_key() } /// Returns an object that displays `inst`. pub fn display_inst(&self, inst: Inst) -> DisplayInst { DisplayInst(self, inst) } /// Get all value arguments on `inst` as a slice. pub fn inst_args(&self, inst: Inst) -> &[Value] { self.insts[inst].arguments(&self.value_lists) } /// Get all value arguments on `inst` as a mutable slice. pub fn inst_args_mut(&mut self, inst: Inst) -> &mut [Value] { self.insts[inst].arguments_mut(&mut self.value_lists) } /// Get the fixed value arguments on `inst` as a slice. pub fn inst_fixed_args(&self, inst: Inst) -> &[Value] { let fixed_args = self[inst].opcode().constraints().fixed_value_arguments(); &self.inst_args(inst)[..fixed_args] } /// Get the fixed value arguments on `inst` as a mutable slice. 
pub fn inst_fixed_args_mut(&mut self, inst: Inst) -> &mut [Value] { let fixed_args = self[inst].opcode().constraints().fixed_value_arguments(); &mut self.inst_args_mut(inst)[..fixed_args] } /// Get the variable value arguments on `inst` as a slice. pub fn inst_variable_args(&self, inst: Inst) -> &[Value] { let fixed_args = self[inst].opcode().constraints().fixed_value_arguments(); &self.inst_args(inst)[fixed_args..] } /// Get the variable value arguments on `inst` as a mutable slice. pub fn inst_variable_args_mut(&mut self, inst: Inst) -> &mut [Value] { let fixed_args = self[inst].opcode().constraints().fixed_value_arguments(); &mut self.inst_args_mut(inst)[fixed_args..] } /// Create result values for an instruction that produces multiple results. /// /// Instructions that produce no result values only need to be created with `make_inst`, /// otherwise call `make_inst_results` to allocate value table entries for the results. /// /// The result value types are determined from the instruction's value type constraints and the /// provided `ctrl_typevar` type for polymorphic instructions. For non-polymorphic /// instructions, `ctrl_typevar` is ignored, and `VOID` can be used. /// /// The type of the first result value is also set, even if it was already set in the /// `InstructionData` passed to `make_inst`. If this function is called with a single-result /// instruction, that is the only effect. pub fn make_inst_results(&mut self, inst: Inst, ctrl_typevar: Type) -> usize { self.make_inst_results_reusing(inst, ctrl_typevar, iter::empty()) } /// Create result values for `inst`, reusing the provided detached values. /// /// Create a new set of result values for `inst` using `ctrl_typevar` to determine the result /// types. Any values provided by `reuse` will be reused. When `reuse` is exhausted or when it /// produces `None`, a new value is created. 
pub fn make_inst_results_reusing<I>(&mut self, inst: Inst, ctrl_typevar: Type, reuse: I) -> usize where I: Iterator<Item = Option<Value>> { let mut reuse = reuse.fuse(); let constraints = self.insts[inst].opcode().constraints(); let fixed_results = constraints.fixed_results(); let mut total_results = fixed_results; self.results[inst].clear(&mut self.value_lists); // The fixed results will appear at the front of the list. for res_idx in 0..fixed_results { let ty = constraints.result_type(res_idx, ctrl_typevar); if let Some(Some(v)) = reuse.next() { debug_assert_eq!(self.value_type(v), ty, "Reused {} is wrong type", ty); self.attach_result(inst, v); } else { self.append_result(inst, ty); } } // Get the call signature if this is a function call. if let Some(sig) = self.call_signature(inst) { // Create result values corresponding to the call return types. let var_results = self.signatures[sig].return_types.len(); total_results += var_results; for res_idx in 0..var_results { let ty = self.signatures[sig].return_types[res_idx].value_type; if let Some(Some(v)) = reuse.next() { debug_assert_eq!(self.value_type(v), ty, "Reused {} is wrong type", ty); self.attach_result(inst, v); } else { self.append_result(inst, ty); } } } total_results } /// Create an `InsertBuilder` that will insert an instruction at the cursor's current position. pub fn ins<'c, 'fc: 'c, 'fd>(&'fd mut self, at: &'c mut Cursor<'fc>) -> InsertBuilder<'c, 'fc, 'fd> { InsertBuilder::new(self, at) } /// Create a `ReplaceBuilder` that will replace `inst` with a new instruction in place. pub fn replace(&mut self, inst: Inst) -> ReplaceBuilder { ReplaceBuilder::new(self, inst) } /// Detach the list of result values from `inst` and return it. /// /// This leaves `inst` without any result values. New result values can be created by calling /// `make_inst_results` or by using a `replace(inst)` builder. 
pub fn detach_results(&mut self, inst: Inst) -> ValueList { self.results[inst].take() } /// Clear the list of result values from `inst`. /// /// This leaves `inst` without any result values. New result values can be created by calling /// `make_inst_results` or by using a `replace(inst)` builder. pub fn clear_results(&mut self, inst: Inst) { self.results[inst].clear(&mut self.value_lists) } /// Attach an existing value to the result value list for `inst`. /// /// The `res` value is appended to the end of the result list. /// /// This is a very low-level operation. Usually, instruction results with the correct types are /// created automatically. The `res` value must not be attached to anything else. pub fn attach_result(&mut self, inst: Inst, res: Value) { assert!(!self.value_is_attached(res)); let num = self.results[inst].push(res, &mut self.value_lists); assert!(num <= u16::MAX as usize, "Too many result values"); let ty = self.value_type(res); self.values[res] = ValueData::Inst { ty, num: num as u16, inst, }; } /// Replace an instruction result with a new value of type `new_type`. /// /// The `old_value` must be an attached instruction result. /// /// The old value is left detached, so it should probably be changed into something else. /// /// Returns the new value. pub fn replace_result(&mut self, old_value: Value, new_type: Type) -> Value { let (num, inst) = match self.values[old_value] { ValueData::Inst { num, inst, .. } => (num, inst), _ => panic!("{} is not an instruction result value", old_value), }; let new_value = self.make_value(ValueData::Inst { ty: new_type, num, inst, }); let num = num as usize; let attached = mem::replace(self.results[inst] .get_mut(num, &mut self.value_lists) .expect("Replacing detached result"), new_value); assert_eq!(attached, old_value, "{} wasn't detached from {}", old_value, self.display_inst(inst)); new_value } /// Append a new instruction result value to `inst`. 
pub fn append_result(&mut self, inst: Inst, ty: Type) -> Value { let res = self.values.next_key(); let num = self.results[inst].push(res, &mut self.value_lists); assert!(num <= u16::MAX as usize, "Too many result values"); self.make_value(ValueData::Inst { ty, inst, num: num as u16, }) } /// Get the first result of an instruction. /// /// This function panics if the instruction doesn't have any result. pub fn first_result(&self, inst: Inst) -> Value { self.results[inst] .first(&self.value_lists) .expect("Instruction has no results") } /// Test if `inst` has any result values currently. pub fn has_results(&self, inst: Inst) -> bool { !self.results[inst].is_empty() } /// Return all the results of an instruction. pub fn inst_results(&self, inst: Inst) -> &[Value] { self.results[inst].as_slice(&self.value_lists) } /// Get the call signature of a direct or indirect call instruction. /// Returns `None` if `inst` is not a call instruction. pub fn call_signature(&self, inst: Inst) -> Option<SigRef> { match self.insts[inst].analyze_call(&self.value_lists) { CallInfo::NotACall => None, CallInfo::Direct(f, _) => Some(self.ext_funcs[f].signature), CallInfo::Indirect(s, _) => Some(s), } } /// Compute the type of an instruction result from opcode constraints and call signatures. /// /// This computes the same sequence of result types that `make_inst_results()` above would /// assign to the created result values, but it does not depend on `make_inst_results()` being /// called first. /// /// Returns `None` if asked about a result index that is too large. pub fn compute_result_type(&self, inst: Inst, result_idx: usize, ctrl_typevar: Type) -> Option<Type> { let constraints = self.insts[inst].opcode().constraints(); let fixed_results = constraints.fixed_results(); if result_idx < fixed_results { return Some(constraints.result_type(result_idx, ctrl_typevar)); } // Not a fixed result, try to extract a return type from the call signature. 
self.call_signature(inst) .and_then(|sigref| { self.signatures[sigref] .return_types .get(result_idx - fixed_results) .map(|&arg| arg.value_type) }) } /// Get the controlling type variable, or `VOID` if `inst` isn't polymorphic. pub fn ctrl_typevar(&self, inst: Inst) -> Type { let constraints = self[inst].opcode().constraints(); if !constraints.is_polymorphic() { types::VOID } else if constraints.requires_typevar_operand() { // Not all instruction formats have a designated operand, but in that case // `requires_typevar_operand()` should never be true. self.value_type(self[inst].typevar_operand(&self.value_lists) .expect("Instruction format doesn't have a designated operand, bad opcode.")) } else { self.value_type(self.first_result(inst)) } } } /// Allow immutable access to instructions via indexing. impl Index<Inst> for DataFlowGraph { type Output = InstructionData; fn index<'a>(&'a self, inst: Inst) -> &'a InstructionData { &self.insts[inst] } } /// Allow mutable access to instructions via indexing. impl IndexMut<Inst> for DataFlowGraph { fn index_mut<'a>(&'a mut self, inst: Inst) -> &'a mut InstructionData { &mut self.insts[inst] } } /// Extended basic blocks. impl DataFlowGraph { /// Create a new basic block. pub fn make_ebb(&mut self) -> Ebb { self.ebbs.push(EbbData::new()) } /// Get the number of arguments on `ebb`. pub fn num_ebb_args(&self, ebb: Ebb) -> usize { self.ebbs[ebb].args.len(&self.value_lists) } /// Get the arguments to an EBB. pub fn ebb_args(&self, ebb: Ebb) -> &[Value] { self.ebbs[ebb].args.as_slice(&self.value_lists) } /// Append an argument with type `ty` to `ebb`. pub fn append_ebb_arg(&mut self, ebb: Ebb, ty: Type) -> Value { let arg = self.values.next_key(); let num = self.ebbs[ebb].args.push(arg, &mut self.value_lists); assert!(num <= u16::MAX as usize, "Too many arguments to EBB"); self.make_value(ValueData::Arg { ty, num: num as u16, ebb, }) } /// Append an existing argument value to `ebb`. 
/// /// The appended value can't already be attached to something else. /// /// In almost all cases, you should be using `append_ebb_arg()` instead of this method. pub fn attach_ebb_arg(&mut self, ebb: Ebb, arg: Value) { assert!(!self.value_is_attached(arg)); let num = self.ebbs[ebb].args.push(arg, &mut self.value_lists); assert!(num <= u16::MAX as usize, "Too many arguments to EBB"); let ty = self.value_type(arg); self.values[arg] = ValueData::Arg { ty, num: num as u16, ebb, }; } /// Replace an EBB argument with a new value of type `ty`. /// /// The `old_value` must be an attached EBB argument. It is removed from its place in the list /// of arguments and replaced by a new value of type `new_type`. The new value gets the same /// position in the list, and other arguments are not disturbed. /// /// The old value is left detached, so it should probably be changed into something else. /// /// Returns the new value. pub fn replace_ebb_arg(&mut self, old_arg: Value, new_type: Type) -> Value { // Create new value identical to the old one except for the type. let (ebb, num) = if let ValueData::Arg { num, ebb, .. } = self.values[old_arg] { (ebb, num) } else { panic!("{} must be an EBB argument", old_arg); }; let new_arg = self.make_value(ValueData::Arg { ty: new_type, num, ebb, }); self.ebbs[ebb].args.as_mut_slice(&mut self.value_lists)[num as usize] = new_arg; new_arg } /// Detach all the arguments from `ebb` and return them as a `ValueList`. /// /// This is a quite low-level operation. Sensible things to do with the detached EBB arguments /// is to put them back on the same EBB with `attach_ebb_arg()` or change them into aliases /// with `change_to_alias()`. pub fn detach_ebb_args(&mut self, ebb: Ebb) -> ValueList { self.ebbs[ebb].args.take() } } // Contents of an extended basic block. // // Arguments for an extended basic block are values that dominate everything in the EBB. 
All // branches to this EBB must provide matching arguments, and the arguments to the entry EBB must // match the function arguments. #[derive(Clone)] struct EbbData { // List of arguments to this EBB. args: ValueList, } impl EbbData { fn new() -> EbbData { EbbData { args: ValueList::new() } } } /// Object that can display an instruction. pub struct DisplayInst<'a>(&'a DataFlowGraph, Inst); impl<'a> fmt::Display for DisplayInst<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let dfg = self.0; let inst = &dfg[self.1]; if let Some((first, rest)) = dfg.inst_results(self.1).split_first() { write!(f, "{}", first)?; for v in rest { write!(f, ", {}", v)?; } write!(f, " = ")?; } let typevar = dfg.ctrl_typevar(self.1); if typevar.is_void() { write!(f, "{}", inst.opcode())?; } else { write!(f, "{}.{}", inst.opcode(), typevar)?; } write_operands(f, dfg, None, self.1) } } #[cfg(test)] mod tests { use super::*; use ir::types; use ir::{Function, Cursor, Opcode, InstructionData}; #[test] fn make_inst() { let mut dfg = DataFlowGraph::new(); let idata = InstructionData::Nullary { opcode: Opcode::Iconst }; let inst = dfg.make_inst(idata); dfg.make_inst_results(inst, types::I32); assert_eq!(inst.to_string(), "inst0"); assert_eq!(dfg.display_inst(inst).to_string(), "v0 = iconst.i32"); // Immutable reference resolution. { let immdfg = &dfg; let ins = &immdfg[inst]; assert_eq!(ins.opcode(), Opcode::Iconst); } // Results. let val = dfg.first_result(inst); assert_eq!(dfg.inst_results(inst), &[val]); assert_eq!(dfg.value_def(val), ValueDef::Res(inst, 0)); assert_eq!(dfg.value_type(val), types::I32); // Replacing results. 
assert!(dfg.value_is_attached(val)); let v2 = dfg.replace_result(val, types::F64); assert!(!dfg.value_is_attached(val)); assert!(dfg.value_is_attached(v2)); assert_eq!(dfg.inst_results(inst), &[v2]); assert_eq!(dfg.value_def(v2), ValueDef::Res(inst, 0)); assert_eq!(dfg.value_type(v2), types::F64); } #[test] fn no_results() { let mut dfg = DataFlowGraph::new(); let idata = InstructionData::Nullary { opcode: Opcode::Trap }; let inst = dfg.make_inst(idata); assert_eq!(dfg.display_inst(inst).to_string(), "trap"); // Result slice should be empty. assert_eq!(dfg.inst_results(inst), &[]); } #[test] fn ebb() { let mut dfg = DataFlowGraph::new(); let ebb = dfg.make_ebb(); assert_eq!(ebb.to_string(), "ebb0"); assert_eq!(dfg.num_ebb_args(ebb), 0); assert_eq!(dfg.ebb_args(ebb), &[]); assert!(dfg.detach_ebb_args(ebb).is_empty()); assert_eq!(dfg.num_ebb_args(ebb), 0); assert_eq!(dfg.ebb_args(ebb), &[]); let arg1 = dfg.append_ebb_arg(ebb, types::F32); assert_eq!(arg1.to_string(), "v0"); assert_eq!(dfg.num_ebb_args(ebb), 1); assert_eq!(dfg.ebb_args(ebb), &[arg1]); let arg2 = dfg.append_ebb_arg(ebb, types::I16); assert_eq!(arg2.to_string(), "v1"); assert_eq!(dfg.num_ebb_args(ebb), 2); assert_eq!(dfg.ebb_args(ebb), &[arg1, arg2]); assert_eq!(dfg.value_def(arg1), ValueDef::Arg(ebb, 0)); assert_eq!(dfg.value_def(arg2), ValueDef::Arg(ebb, 1)); assert_eq!(dfg.value_type(arg1), types::F32); assert_eq!(dfg.value_type(arg2), types::I16); // Swap the two EBB arguments. 
let vlist = dfg.detach_ebb_args(ebb); assert_eq!(dfg.num_ebb_args(ebb), 0); assert_eq!(dfg.ebb_args(ebb), &[]); assert_eq!(vlist.as_slice(&dfg.value_lists), &[arg1, arg2]); dfg.attach_ebb_arg(ebb, arg2); let arg3 = dfg.append_ebb_arg(ebb, types::I32); dfg.attach_ebb_arg(ebb, arg1); assert_eq!(dfg.ebb_args(ebb), &[arg2, arg3, arg1]); } #[test] fn replace_ebb_arguments() { let mut dfg = DataFlowGraph::new(); let ebb = dfg.make_ebb(); let arg1 = dfg.append_ebb_arg(ebb, types::F32); let new1 = dfg.replace_ebb_arg(arg1, types::I64); assert_eq!(dfg.value_type(arg1), types::F32); assert_eq!(dfg.value_type(new1), types::I64); assert_eq!(dfg.ebb_args(ebb), &[new1]); dfg.attach_ebb_arg(ebb, arg1); assert_eq!(dfg.ebb_args(ebb), &[new1, arg1]); let new2 = dfg.replace_ebb_arg(arg1, types::I8); assert_eq!(dfg.value_type(arg1), types::F32); assert_eq!(dfg.value_type(new2), types::I8); assert_eq!(dfg.ebb_args(ebb), &[new1, new2]); dfg.attach_ebb_arg(ebb, arg1); assert_eq!(dfg.ebb_args(ebb), &[new1, new2, arg1]); let new3 = dfg.replace_ebb_arg(new2, types::I16); assert_eq!(dfg.value_type(new1), types::I64); assert_eq!(dfg.value_type(new2), types::I8); assert_eq!(dfg.value_type(new3), types::I16); assert_eq!(dfg.ebb_args(ebb), &[new1, new3, arg1]); } #[test] fn aliases() { use ir::InstBuilder; use ir::condcodes::IntCC; let mut func = Function::new(); let dfg = &mut func.dfg; let ebb0 = dfg.make_ebb(); let pos = &mut Cursor::new(&mut func.layout); pos.insert_ebb(ebb0); // Build a little test program. let v1 = dfg.ins(pos).iconst(types::I32, 42); // Make sure we can resolve value aliases even when values is empty. assert_eq!(dfg.resolve_aliases(v1), v1); let arg0 = dfg.append_ebb_arg(ebb0, types::I32); let (s, c) = dfg.ins(pos).iadd_cout(v1, arg0); let iadd = match dfg.value_def(s) { ValueDef::Res(i, 0) => i, _ => panic!(), }; // Remove `c` from the result list. dfg.clear_results(iadd); dfg.attach_result(iadd, s); // Replace `iadd_cout` with a normal `iadd` and an `icmp`. 
dfg.replace(iadd).iadd(v1, arg0); let c2 = dfg.ins(pos).icmp(IntCC::UnsignedLessThan, s, v1); dfg.change_to_alias(c, c2); assert_eq!(dfg.resolve_aliases(c2), c2); assert_eq!(dfg.resolve_aliases(c), c2); // Make a copy of the alias. let c3 = dfg.ins(pos).copy(c); // This does not see through copies. assert_eq!(dfg.resolve_aliases(c3), c3); // But this goes through both copies and aliases. assert_eq!(dfg.resolve_copies(c3), c2); } }
use ffmpeg_sys; use libc::*; use std::cmp; use std::ffi::{ CStr, CString }; use std::ops::Deref; use errors::*; #[derive(Debug, Clone, Copy)] pub struct Rational { pub numerator: i32, pub denominator: i32, } #[derive(Clone, Copy)] pub struct Codec { ptr: *mut ffmpeg_sys::AVCodec, } unsafe impl Send for Codec {} unsafe impl Sync for Codec {} pub struct PixelFormats { codec: Codec, index: isize, } pub struct Context { ptr: *mut ffmpeg_sys::AVCodecContext, } impl From<(i32, i32)> for Rational { fn from((numerator, denominator): (i32, i32)) -> Self { Self { numerator, denominator, } } } impl From<ffmpeg_sys::AVRational> for Rational { fn from(rational: ffmpeg_sys::AVRational) -> Self { Self { numerator: rational.num, denominator: rational.den, } } } impl From<Rational> for ffmpeg_sys::AVRational { fn from(rational: Rational) -> Self { Self { num: rational.numerator, den: rational.denominator, } } } impl Codec { pub fn description(self) -> String { unsafe { CStr::from_ptr((*self.ptr).long_name).to_string_lossy().into_owned() } } pub fn kind(self) -> ffmpeg_sys::AVMediaType { unsafe { (*self.ptr).kind } } pub fn is_video(self) -> bool { self.kind() == ffmpeg_sys::AVMediaType::AVMEDIA_TYPE_VIDEO } pub fn pixel_formats(self) -> Option<PixelFormats> { unsafe { if (*self.ptr).pix_fmts.is_null() { None } else { Some(PixelFormats::new(self)) } } } pub fn context(self) -> Result<Context> { let ptr = unsafe { ffmpeg_sys::avcodec_alloc_context3(self.ptr) }; ensure!(!ptr.is_null(), "unable to allocate the codec context"); Ok(Context { ptr }) } } impl PixelFormats { fn new(codec: Codec) -> Self { Self { codec, index: 0, } } } impl Iterator for PixelFormats { type Item = ffmpeg_sys::AVPixelFormat; fn next(&mut self) -> Option<Self::Item> { let format = unsafe { *(*self.codec.ptr).pix_fmts.offset(self.index) }; if format == ffmpeg_sys::AVPixelFormat::AV_PIX_FMT_NONE { None } else { self.index += 1; Some(format) } } } impl Context { pub fn width(&self) -> u32 { unsafe { cmp::max(0, 
(*self.ptr).width) as u32 } } pub fn set_width(&mut self, width: u32) { unsafe { (*self.ptr).width = cmp::min(width, c_int::max_value() as u32) as c_int; } } pub fn height(&self) -> u32 { unsafe { cmp::max(0, (*self.ptr).height) as u32 } } pub fn set_height(&mut self, height: u32) { unsafe { (*self.ptr).height = cmp::min(height, c_int::max_value() as u32) as c_int; } } pub fn time_base(&self) -> Rational { unsafe { (*self.ptr).time_base.into() } } pub fn set_time_base(&mut self, time_base: &Rational) { unsafe { (*self.ptr).time_base = (*time_base).into(); } } } impl Drop for Context { fn drop(&mut self) { unsafe { ffmpeg_sys::avcodec_free_context(&mut self.ptr); } } } pub fn initialize() { unsafe { ffmpeg_sys::avcodec_register_all(); } } pub fn find_encoder_by_name(name: &str) -> Result<Option<Codec>> { let name = CString::new(name).chain_err(|| "could not convert name to CString")?; let codec = unsafe { ffmpeg_sys::avcodec_find_encoder_by_name(name.as_ptr()) }; if codec.is_null() { Ok(None) } else { Ok(Some(Codec { ptr: codec })) } } Mark stuff as inline use ffmpeg_sys; use libc::*; use std::cmp; use std::ffi::{ CStr, CString }; use std::ops::Deref; use errors::*; #[derive(Debug, Clone, Copy)] pub struct Rational { pub numerator: i32, pub denominator: i32, } #[derive(Clone, Copy)] pub struct Codec { ptr: *mut ffmpeg_sys::AVCodec, } unsafe impl Send for Codec {} unsafe impl Sync for Codec {} pub struct PixelFormats { codec: Codec, index: isize, } pub struct Context { ptr: *mut ffmpeg_sys::AVCodecContext, } impl From<(i32, i32)> for Rational { fn from((numerator, denominator): (i32, i32)) -> Self { Self { numerator, denominator, } } } impl From<ffmpeg_sys::AVRational> for Rational { fn from(rational: ffmpeg_sys::AVRational) -> Self { Self { numerator: rational.num, denominator: rational.den, } } } impl From<Rational> for ffmpeg_sys::AVRational { fn from(rational: Rational) -> Self { Self { num: rational.numerator, den: rational.denominator, } } } impl Codec { pub fn 
description(self) -> String { unsafe { CStr::from_ptr((*self.ptr).long_name).to_string_lossy().into_owned() } } #[inline] pub fn kind(self) -> ffmpeg_sys::AVMediaType { unsafe { (*self.ptr).kind } } #[inline] pub fn is_video(self) -> bool { self.kind() == ffmpeg_sys::AVMediaType::AVMEDIA_TYPE_VIDEO } pub fn pixel_formats(self) -> Option<PixelFormats> { unsafe { if (*self.ptr).pix_fmts.is_null() { None } else { Some(PixelFormats::new(self)) } } } pub fn context(self) -> Result<Context> { let ptr = unsafe { ffmpeg_sys::avcodec_alloc_context3(self.ptr) }; ensure!(!ptr.is_null(), "unable to allocate the codec context"); Ok(Context { ptr }) } } impl PixelFormats { fn new(codec: Codec) -> Self { Self { codec, index: 0, } } } impl Iterator for PixelFormats { type Item = ffmpeg_sys::AVPixelFormat; fn next(&mut self) -> Option<Self::Item> { let format = unsafe { *(*self.codec.ptr).pix_fmts.offset(self.index) }; if format == ffmpeg_sys::AVPixelFormat::AV_PIX_FMT_NONE { None } else { self.index += 1; Some(format) } } } impl Context { #[inline] pub fn width(&self) -> u32 { unsafe { cmp::max(0, (*self.ptr).width) as u32 } } #[inline] pub fn set_width(&mut self, width: u32) { unsafe { (*self.ptr).width = cmp::min(width, c_int::max_value() as u32) as c_int; } } #[inline] pub fn height(&self) -> u32 { unsafe { cmp::max(0, (*self.ptr).height) as u32 } } #[inline] pub fn set_height(&mut self, height: u32) { unsafe { (*self.ptr).height = cmp::min(height, c_int::max_value() as u32) as c_int; } } #[inline] pub fn time_base(&self) -> Rational { unsafe { (*self.ptr).time_base.into() } } #[inline] pub fn set_time_base(&mut self, time_base: &Rational) { unsafe { (*self.ptr).time_base = (*time_base).into(); } } } impl Drop for Context { fn drop(&mut self) { unsafe { ffmpeg_sys::avcodec_free_context(&mut self.ptr); } } } pub fn initialize() { unsafe { ffmpeg_sys::avcodec_register_all(); } } pub fn find_encoder_by_name(name: &str) -> Result<Option<Codec>> { let name = 
CString::new(name).chain_err(|| "could not convert name to CString")?; let codec = unsafe { ffmpeg_sys::avcodec_find_encoder_by_name(name.as_ptr()) }; if codec.is_null() { Ok(None) } else { Ok(Some(Codec { ptr: codec })) } }
use std::collections::HashMap; use rustc::session::Session; use cranelift_module::{FuncId, Module}; use faerie::*; #[cfg(feature = "backend_object")] use object::{SectionKind, RelocationKind, RelocationEncoding}; #[cfg(feature = "backend_object")] use object::write::*; use cranelift_faerie::{FaerieBackend, FaerieBuilder, FaerieProduct, FaerieTrapCollection}; #[cfg(feature = "backend_object")] use cranelift_object::*; use gimli::SectionId; use crate::debuginfo::{DebugReloc, DebugRelocName}; pub trait WriteMetadata { fn add_rustc_section(&mut self, symbol_name: String, data: Vec<u8>, is_like_osx: bool); } impl WriteMetadata for faerie::Artifact { fn add_rustc_section(&mut self, symbol_name: String, data: Vec<u8>, is_like_osx: bool) { self .declare(".rustc", faerie::Decl::section(faerie::SectionKind::Data)) .unwrap(); self .define_with_symbols(".rustc", data, { let mut map = std::collections::BTreeMap::new(); // FIXME implement faerie elf backend section custom symbols // For MachO this is necessary to prevent the linker from throwing away the .rustc section, // but for ELF it isn't. if is_like_osx { map.insert( symbol_name, 0, ); } map }) .unwrap(); } } #[cfg(feature = "backend_object")] impl WriteMetadata for object::write::Object { fn add_rustc_section(&mut self, symbol_name: String, data: Vec<u8>, _is_like_osx: bool) { let segment = self.segment_name(object::write::StandardSegment::Data).to_vec(); let section_id = self.add_section(segment, b".rustc".to_vec(), object::SectionKind::Data); let offset = self.append_section_data(section_id, &data, 1); // For MachO and probably PE this is necessary to prevent the linker from throwing away the // .rustc section. For ELF this isn't necessary, but it also doesn't harm. 
self.add_symbol(object::write::Symbol { name: symbol_name.into_bytes(), value: offset, size: data.len() as u64, kind: object::SymbolKind::Data, scope: object::SymbolScope::Compilation, weak: false, section: Some(section_id), }); } } pub trait WriteDebugInfo { type SectionId; fn add_debug_section(&mut self, name: SectionId, data: Vec<u8>) -> Self::SectionId; fn add_debug_reloc( &mut self, section_map: &HashMap<SectionId, Self::SectionId>, symbol_map: &indexmap::IndexMap<FuncId, String>, from: &Self::SectionId, reloc: &DebugReloc, ); } impl WriteDebugInfo for FaerieProduct { type SectionId = SectionId; fn add_debug_section(&mut self, id: SectionId, data: Vec<u8>) -> SectionId { self.artifact.declare_with(id.name(), Decl::section(faerie::SectionKind::Debug), data).unwrap(); id } fn add_debug_reloc( &mut self, _section_map: &HashMap<SectionId, Self::SectionId>, symbol_map: &indexmap::IndexMap<FuncId, String>, from: &Self::SectionId, reloc: &DebugReloc, ) { self .artifact .link_with( faerie::Link { from: from.name(), to: match reloc.name { DebugRelocName::Section(id) => id.name(), DebugRelocName::Symbol(index) => &symbol_map.get_index(index).unwrap().1, }, at: u64::from(reloc.offset), }, faerie::Reloc::Debug { size: reloc.size, addend: reloc.addend as i32, }, ) .expect("faerie relocation error"); } } #[cfg(feature = "backend_object")] impl WriteDebugInfo for ObjectProduct { type SectionId = (object::write::SectionId, object::write::SymbolId); fn add_debug_section( &mut self, id: SectionId, data: Vec<u8>, ) -> (object::write::SectionId, object::write::SymbolId) { let segment = self.object.segment_name(StandardSegment::Debug).to_vec(); let name = id.name().as_bytes().to_vec(); let section_id = self.object.add_section(segment, name, SectionKind::Debug); self.object.section_mut(section_id).set_data(data, 1); let symbol_id = self.object.section_symbol(section_id); (section_id, symbol_id) } fn add_debug_reloc( &mut self, section_map: &HashMap<SectionId, Self::SectionId>, 
symbol_map: &indexmap::IndexMap<FuncId, String>, from: &Self::SectionId, reloc: &DebugReloc, ) { let symbol = match reloc.name { DebugRelocName::Section(id) => section_map.get(&id).unwrap().1, DebugRelocName::Symbol(id) => { self.function_symbol(*symbol_map.get_index(id).unwrap().0) } }; self.object.add_relocation(from.0, Relocation { offset: u64::from(reloc.offset), symbol, kind: RelocationKind::Absolute, encoding: RelocationEncoding::Generic, size: reloc.size * 8, addend: reloc.addend, }).unwrap(); } } pub trait Emit { fn emit(self) -> Vec<u8>; } impl Emit for FaerieProduct { fn emit(self) -> Vec<u8> { self.artifact.emit().unwrap() } } #[cfg(feature = "backend_object")] impl Emit for ObjectProduct { fn emit(self) -> Vec<u8> { self.object.write().unwrap() } } #[cfg(not(feature = "backend_object"))] pub fn with_object(sess: &Session, name: &str, f: impl FnOnce(&mut Artifact)) -> Vec<u8> { let mut metadata_artifact = faerie::Artifact::new( crate::build_isa(sess, true).triple().clone(), name.to_string(), ); f(&mut metadata_artifact); metadata_artifact.emit().unwrap() } #[cfg(feature = "backend_object")] pub fn with_object(sess: &Session, name: &str, f: impl FnOnce(&mut Object)) -> Vec<u8> { let triple = crate::build_isa(sess, true).triple().clone(); let mut metadata_object = object::write::Object::new(triple.binary_format, triple.architecture); metadata_object.add_file_symbol(name.as_bytes().to_vec()); f(&mut metadata_object); metadata_object.write().unwrap() } pub type Backend = impl cranelift_module::Backend<Product: Emit + WriteDebugInfo>; #[cfg(not(feature = "backend_object"))] pub fn make_module(sess: &Session, name: String) -> Module<Backend> { let module: Module<FaerieBackend> = Module::new( FaerieBuilder::new( crate::build_isa(sess, true), name + ".o", FaerieTrapCollection::Disabled, cranelift_module::default_libcall_names(), ) .unwrap(), ); module } #[cfg(feature = "backend_object")] pub fn make_module(sess: &Session, name: String) -> Module<Backend> { let 
module: Module<ObjectBackend> = Module::new( ObjectBuilder::new( crate::build_isa(sess, true), name + ".o", ObjectTrapCollection::Disabled, cranelift_module::default_libcall_names(), ) .unwrap(), ); module } Fix metadata symbol scope It should be exported from the generated dylib use std::collections::HashMap; use rustc::session::Session; use cranelift_module::{FuncId, Module}; use faerie::*; #[cfg(feature = "backend_object")] use object::{SectionKind, RelocationKind, RelocationEncoding}; #[cfg(feature = "backend_object")] use object::write::*; use cranelift_faerie::{FaerieBackend, FaerieBuilder, FaerieProduct, FaerieTrapCollection}; #[cfg(feature = "backend_object")] use cranelift_object::*; use gimli::SectionId; use crate::debuginfo::{DebugReloc, DebugRelocName}; pub trait WriteMetadata { fn add_rustc_section(&mut self, symbol_name: String, data: Vec<u8>, is_like_osx: bool); } impl WriteMetadata for faerie::Artifact { fn add_rustc_section(&mut self, symbol_name: String, data: Vec<u8>, is_like_osx: bool) { self .declare(".rustc", faerie::Decl::section(faerie::SectionKind::Data)) .unwrap(); self .define_with_symbols(".rustc", data, { let mut map = std::collections::BTreeMap::new(); // FIXME implement faerie elf backend section custom symbols // For MachO this is necessary to prevent the linker from throwing away the .rustc section, // but for ELF it isn't. if is_like_osx { map.insert( symbol_name, 0, ); } map }) .unwrap(); } } #[cfg(feature = "backend_object")] impl WriteMetadata for object::write::Object { fn add_rustc_section(&mut self, symbol_name: String, data: Vec<u8>, _is_like_osx: bool) { let segment = self.segment_name(object::write::StandardSegment::Data).to_vec(); let section_id = self.add_section(segment, b".rustc".to_vec(), object::SectionKind::Data); let offset = self.append_section_data(section_id, &data, 1); // For MachO and probably PE this is necessary to prevent the linker from throwing away the // .rustc section. 
For ELF this isn't necessary, but it also doesn't harm. self.add_symbol(object::write::Symbol { name: symbol_name.into_bytes(), value: offset, size: data.len() as u64, kind: object::SymbolKind::Data, scope: object::SymbolScope::Dynamic, weak: false, section: Some(section_id), }); } } pub trait WriteDebugInfo { type SectionId; fn add_debug_section(&mut self, name: SectionId, data: Vec<u8>) -> Self::SectionId; fn add_debug_reloc( &mut self, section_map: &HashMap<SectionId, Self::SectionId>, symbol_map: &indexmap::IndexMap<FuncId, String>, from: &Self::SectionId, reloc: &DebugReloc, ); } impl WriteDebugInfo for FaerieProduct { type SectionId = SectionId; fn add_debug_section(&mut self, id: SectionId, data: Vec<u8>) -> SectionId { self.artifact.declare_with(id.name(), Decl::section(faerie::SectionKind::Debug), data).unwrap(); id } fn add_debug_reloc( &mut self, _section_map: &HashMap<SectionId, Self::SectionId>, symbol_map: &indexmap::IndexMap<FuncId, String>, from: &Self::SectionId, reloc: &DebugReloc, ) { self .artifact .link_with( faerie::Link { from: from.name(), to: match reloc.name { DebugRelocName::Section(id) => id.name(), DebugRelocName::Symbol(index) => &symbol_map.get_index(index).unwrap().1, }, at: u64::from(reloc.offset), }, faerie::Reloc::Debug { size: reloc.size, addend: reloc.addend as i32, }, ) .expect("faerie relocation error"); } } #[cfg(feature = "backend_object")] impl WriteDebugInfo for ObjectProduct { type SectionId = (object::write::SectionId, object::write::SymbolId); fn add_debug_section( &mut self, id: SectionId, data: Vec<u8>, ) -> (object::write::SectionId, object::write::SymbolId) { let segment = self.object.segment_name(StandardSegment::Debug).to_vec(); let name = id.name().as_bytes().to_vec(); let section_id = self.object.add_section(segment, name, SectionKind::Debug); self.object.section_mut(section_id).set_data(data, 1); let symbol_id = self.object.section_symbol(section_id); (section_id, symbol_id) } fn add_debug_reloc( &mut self, 
section_map: &HashMap<SectionId, Self::SectionId>, symbol_map: &indexmap::IndexMap<FuncId, String>, from: &Self::SectionId, reloc: &DebugReloc, ) { let symbol = match reloc.name { DebugRelocName::Section(id) => section_map.get(&id).unwrap().1, DebugRelocName::Symbol(id) => { self.function_symbol(*symbol_map.get_index(id).unwrap().0) } }; self.object.add_relocation(from.0, Relocation { offset: u64::from(reloc.offset), symbol, kind: RelocationKind::Absolute, encoding: RelocationEncoding::Generic, size: reloc.size * 8, addend: reloc.addend, }).unwrap(); } } pub trait Emit { fn emit(self) -> Vec<u8>; } impl Emit for FaerieProduct { fn emit(self) -> Vec<u8> { self.artifact.emit().unwrap() } } #[cfg(feature = "backend_object")] impl Emit for ObjectProduct { fn emit(self) -> Vec<u8> { self.object.write().unwrap() } } #[cfg(not(feature = "backend_object"))] pub fn with_object(sess: &Session, name: &str, f: impl FnOnce(&mut Artifact)) -> Vec<u8> { let mut metadata_artifact = faerie::Artifact::new( crate::build_isa(sess, true).triple().clone(), name.to_string(), ); f(&mut metadata_artifact); metadata_artifact.emit().unwrap() } #[cfg(feature = "backend_object")] pub fn with_object(sess: &Session, name: &str, f: impl FnOnce(&mut Object)) -> Vec<u8> { let triple = crate::build_isa(sess, true).triple().clone(); let mut metadata_object = object::write::Object::new(triple.binary_format, triple.architecture); metadata_object.add_file_symbol(name.as_bytes().to_vec()); f(&mut metadata_object); metadata_object.write().unwrap() } pub type Backend = impl cranelift_module::Backend<Product: Emit + WriteDebugInfo>; #[cfg(not(feature = "backend_object"))] pub fn make_module(sess: &Session, name: String) -> Module<Backend> { let module: Module<FaerieBackend> = Module::new( FaerieBuilder::new( crate::build_isa(sess, true), name + ".o", FaerieTrapCollection::Disabled, cranelift_module::default_libcall_names(), ) .unwrap(), ); module } #[cfg(feature = "backend_object")] pub fn make_module(sess: 
&Session, name: String) -> Module<Backend> { let module: Module<ObjectBackend> = Module::new( ObjectBuilder::new( crate::build_isa(sess, true), name + ".o", ObjectTrapCollection::Disabled, cranelift_module::default_libcall_names(), ) .unwrap(), ); module }
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! The `Fragment` type, which represents the leaves of the layout tree. #![deny(unsafe_code)] use app_units::Au; use canvas_traits::CanvasMsg; use context::{LayoutContext, SharedLayoutContext}; use euclid::{Point2D, Rect, Size2D}; use floats::ClearType; use flow::{self, ImmutableFlowUtils}; use flow_ref::{self, FlowRef}; use gfx; use gfx::display_list::{BLUR_INFLATION_FACTOR, OpaqueNode}; use gfx::text::glyph::ByteIndex; use gfx::text::text_run::{TextRun, TextRunSlice}; use gfx_traits::{FragmentType, LayerId, LayerType, StackingContextId}; use inline::{FIRST_FRAGMENT_OF_ELEMENT, InlineFragmentContext, InlineFragmentNodeInfo}; use inline::{InlineMetrics, LAST_FRAGMENT_OF_ELEMENT}; use ipc_channel::ipc::IpcSender; #[cfg(debug_assertions)] use layout_debug; use model::{self, Direction, IntrinsicISizes, IntrinsicISizesContribution, MaybeAuto}; use msg::constellation_msg::PipelineId; use net_traits::image::base::{Image, ImageMetadata}; use net_traits::image_cache_thread::{ImageOrMetadataAvailable, UsePlaceholder}; use range::*; use rustc_serialize::{Encodable, Encoder}; use script_layout_interface::HTMLCanvasData; use script_layout_interface::restyle_damage::{RECONSTRUCT_FLOW, RestyleDamage}; use script_layout_interface::wrapper_traits::{PseudoElementType, ThreadSafeLayoutElement, ThreadSafeLayoutNode}; use std::borrow::ToOwned; use std::cmp::{max, min}; use std::collections::LinkedList; use std::fmt; use std::sync::{Arc, Mutex}; use style::arc_ptr_eq; use style::computed_values::{border_collapse, box_sizing, clear, color, display, mix_blend_mode}; use style::computed_values::{overflow_wrap, overflow_x, position, text_decoration}; use style::computed_values::{transform_style, vertical_align, white_space, word_break, z_index}; use 
style::computed_values::content::ContentItem; use style::context::SharedStyleContext; use style::dom::TRestyleDamage; use style::logical_geometry::{LogicalMargin, LogicalRect, LogicalSize, WritingMode}; use style::properties::ServoComputedValues; use style::str::char_is_whitespace; use style::values::computed::{LengthOrPercentage, LengthOrPercentageOrAuto}; use style::values::computed::LengthOrPercentageOrNone; use text; use text::TextRunScanner; use url::Url; /// Fragments (`struct Fragment`) are the leaves of the layout tree. They cannot position /// themselves. In general, fragments do not have a simple correspondence with CSS fragments in the /// specification: /// /// * Several fragments may correspond to the same CSS box or DOM node. For example, a CSS text box /// broken across two lines is represented by two fragments. /// /// * Some CSS fragments are not created at all, such as some anonymous block fragments induced by /// inline fragments with block-level sibling fragments. In that case, Servo uses an `InlineFlow` /// with `BlockFlow` siblings; the `InlineFlow` is block-level, but not a block container. It is /// positioned as if it were a block fragment, but its children are positioned according to /// inline flow. /// /// A `SpecificFragmentInfo::Generic` is an empty fragment that contributes only borders, margins, /// padding, and backgrounds. It is analogous to a CSS nonreplaced content box. /// /// A fragment's type influences how its styles are interpreted during layout. For example, /// replaced content such as images are resized differently from tables, text, or other content. /// Different types of fragments may also contain custom data; for example, text fragments contain /// text. /// /// Do not add fields to this structure unless they're really really mega necessary! Fragments get /// moved around a lot and thus their size impacts performance of layout quite a bit. 
/// /// FIXME(#2260, pcwalton): This can be slimmed down some by (at least) moving `inline_context` /// to be on `InlineFlow` only. #[derive(Clone)] pub struct Fragment { /// An opaque reference to the DOM node that this `Fragment` originates from. pub node: OpaqueNode, /// The CSS style of this fragment. pub style: Arc<ServoComputedValues>, /// The CSS style of this fragment when it's selected pub selected_style: Arc<ServoComputedValues>, /// The position of this fragment relative to its owning flow. The size includes padding and /// border, but not margin. /// /// NB: This does not account for relative positioning. /// NB: Collapsed borders are not included in this. pub border_box: LogicalRect<Au>, /// The sum of border and padding; i.e. the distance from the edge of the border box to the /// content edge of the fragment. pub border_padding: LogicalMargin<Au>, /// The margin of the content box. pub margin: LogicalMargin<Au>, /// Info specific to the kind of fragment. Keep this enum small. pub specific: SpecificFragmentInfo, /// Holds the style context information for fragments that are part of an inline formatting /// context. pub inline_context: Option<InlineFragmentContext>, /// How damaged this fragment is since last reflow. pub restyle_damage: RestyleDamage, /// The pseudo-element that this fragment represents. pub pseudo: PseudoElementType<()>, /// Various flags for this fragment. pub flags: FragmentFlags, /// A debug ID that is consistent for the life of this fragment (via transform etc). /// This ID should not be considered stable across multiple layouts or fragment /// manipulations. debug_id: DebugId, /// The ID of the StackingContext that contains this fragment. This is initialized /// to 0, but it assigned during the collect_stacking_contexts phase of display /// list construction. 
pub stacking_context_id: StackingContextId, } impl Encodable for Fragment { fn encode<S: Encoder>(&self, e: &mut S) -> Result<(), S::Error> { e.emit_struct("fragment", 3, |e| { try!(e.emit_struct_field("id", 0, |e| self.debug_id.encode(e))); try!(e.emit_struct_field("border_box", 1, |e| self.border_box.encode(e))); e.emit_struct_field("margin", 2, |e| self.margin.encode(e)) }) } } /// Info specific to the kind of fragment. /// /// Keep this enum small. As in, no more than one word. Or pcwalton will yell at you. #[derive(Clone)] pub enum SpecificFragmentInfo { Generic, /// A piece of generated content that cannot be resolved into `ScannedText` until the generated /// content resolution phase (e.g. an ordered list item marker). GeneratedContent(Box<GeneratedContentInfo>), Iframe(IframeFragmentInfo), Image(Box<ImageFragmentInfo>), Canvas(Box<CanvasFragmentInfo>), /// A hypothetical box (see CSS 2.1 § 10.3.7) for an absolutely-positioned block that was /// declared with `display: inline;`. InlineAbsoluteHypothetical(InlineAbsoluteHypotheticalFragmentInfo), InlineBlock(InlineBlockFragmentInfo), /// An inline fragment that establishes an absolute containing block for its descendants (i.e. /// a positioned inline fragment). 
InlineAbsolute(InlineAbsoluteFragmentInfo), ScannedText(Box<ScannedTextFragmentInfo>), Table, TableCell, TableColumn(TableColumnFragmentInfo), TableRow, TableWrapper, Multicol, MulticolColumn, UnscannedText(Box<UnscannedTextFragmentInfo>), } impl SpecificFragmentInfo { fn restyle_damage(&self) -> RestyleDamage { let flow = match *self { SpecificFragmentInfo::Canvas(_) | SpecificFragmentInfo::GeneratedContent(_) | SpecificFragmentInfo::Iframe(_) | SpecificFragmentInfo::Image(_) | SpecificFragmentInfo::ScannedText(_) | SpecificFragmentInfo::Table | SpecificFragmentInfo::TableCell | SpecificFragmentInfo::TableColumn(_) | SpecificFragmentInfo::TableRow | SpecificFragmentInfo::TableWrapper | SpecificFragmentInfo::Multicol | SpecificFragmentInfo::MulticolColumn | SpecificFragmentInfo::UnscannedText(_) | SpecificFragmentInfo::Generic => return RestyleDamage::empty(), SpecificFragmentInfo::InlineAbsoluteHypothetical(ref info) => &info.flow_ref, SpecificFragmentInfo::InlineAbsolute(ref info) => &info.flow_ref, SpecificFragmentInfo::InlineBlock(ref info) => &info.flow_ref, }; flow::base(&**flow).restyle_damage } pub fn get_type(&self) -> &'static str { match *self { SpecificFragmentInfo::Canvas(_) => "SpecificFragmentInfo::Canvas", SpecificFragmentInfo::Generic => "SpecificFragmentInfo::Generic", SpecificFragmentInfo::GeneratedContent(_) => "SpecificFragmentInfo::GeneratedContent", SpecificFragmentInfo::Iframe(_) => "SpecificFragmentInfo::Iframe", SpecificFragmentInfo::Image(_) => "SpecificFragmentInfo::Image", SpecificFragmentInfo::InlineAbsolute(_) => "SpecificFragmentInfo::InlineAbsolute", SpecificFragmentInfo::InlineAbsoluteHypothetical(_) => { "SpecificFragmentInfo::InlineAbsoluteHypothetical" } SpecificFragmentInfo::InlineBlock(_) => "SpecificFragmentInfo::InlineBlock", SpecificFragmentInfo::ScannedText(_) => "SpecificFragmentInfo::ScannedText", SpecificFragmentInfo::Table => "SpecificFragmentInfo::Table", SpecificFragmentInfo::TableCell => 
"SpecificFragmentInfo::TableCell", SpecificFragmentInfo::TableColumn(_) => "SpecificFragmentInfo::TableColumn", SpecificFragmentInfo::TableRow => "SpecificFragmentInfo::TableRow", SpecificFragmentInfo::TableWrapper => "SpecificFragmentInfo::TableWrapper", SpecificFragmentInfo::Multicol => "SpecificFragmentInfo::Multicol", SpecificFragmentInfo::MulticolColumn => "SpecificFragmentInfo::MulticolColumn", SpecificFragmentInfo::UnscannedText(_) => "SpecificFragmentInfo::UnscannedText", } } } impl fmt::Debug for SpecificFragmentInfo { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { SpecificFragmentInfo::ScannedText(ref info) => write!(f, "{:?}", info.text()), SpecificFragmentInfo::UnscannedText(ref info) => write!(f, "{:?}", info.text), _ => Ok(()) } } } /// Clamp a value obtained from style_length, based on min / max lengths. fn clamp_size(size: Au, min_size: LengthOrPercentage, max_size: LengthOrPercentageOrNone, container_size: Au) -> Au { let min_size = model::specified(min_size, container_size); let max_size = model::specified_or_none(max_size, container_size); max(min_size, match max_size { None => size, Some(max_size) => min(size, max_size), }) } /// Information for generated content. #[derive(Clone)] pub enum GeneratedContentInfo { ListItem, ContentItem(ContentItem), /// Placeholder for elements with generated content that did not generate any fragments. Empty, } /// A hypothetical box (see CSS 2.1 § 10.3.7) for an absolutely-positioned block that was declared /// with `display: inline;`. /// /// FIXME(pcwalton): Stop leaking this `FlowRef` to layout; that is not memory safe because layout /// can clone it. #[derive(Clone)] pub struct InlineAbsoluteHypotheticalFragmentInfo { pub flow_ref: FlowRef, } impl InlineAbsoluteHypotheticalFragmentInfo { pub fn new(flow_ref: FlowRef) -> InlineAbsoluteHypotheticalFragmentInfo { InlineAbsoluteHypotheticalFragmentInfo { flow_ref: flow_ref, } } } /// A fragment that represents an inline-block element. 
/// /// FIXME(pcwalton): Stop leaking this `FlowRef` to layout; that is not memory safe because layout /// can clone it. #[derive(Clone)] pub struct InlineBlockFragmentInfo { pub flow_ref: FlowRef, } impl InlineBlockFragmentInfo { pub fn new(flow_ref: FlowRef) -> InlineBlockFragmentInfo { InlineBlockFragmentInfo { flow_ref: flow_ref, } } } /// An inline fragment that establishes an absolute containing block for its descendants (i.e. /// a positioned inline fragment). /// /// FIXME(pcwalton): Stop leaking this `FlowRef` to layout; that is not memory safe because layout /// can clone it. #[derive(Clone)] pub struct InlineAbsoluteFragmentInfo { pub flow_ref: FlowRef, } impl InlineAbsoluteFragmentInfo { pub fn new(flow_ref: FlowRef) -> InlineAbsoluteFragmentInfo { InlineAbsoluteFragmentInfo { flow_ref: flow_ref, } } } #[derive(Clone)] pub struct CanvasFragmentInfo { pub replaced_image_fragment_info: ReplacedImageFragmentInfo, pub ipc_renderer: Option<Arc<Mutex<IpcSender<CanvasMsg>>>>, pub dom_width: Au, pub dom_height: Au, } impl CanvasFragmentInfo { pub fn new<N: ThreadSafeLayoutNode>(node: &N, data: HTMLCanvasData, ctx: &SharedStyleContext) -> CanvasFragmentInfo { CanvasFragmentInfo { replaced_image_fragment_info: ReplacedImageFragmentInfo::new(node, ctx), ipc_renderer: data.ipc_renderer .map(|renderer| Arc::new(Mutex::new(renderer))), dom_width: Au::from_px(data.width as i32), dom_height: Au::from_px(data.height as i32), } } /// Returns the original inline-size of the canvas. pub fn canvas_inline_size(&self) -> Au { if self.replaced_image_fragment_info.writing_mode_is_vertical { self.dom_height } else { self.dom_width } } /// Returns the original block-size of the canvas. pub fn canvas_block_size(&self) -> Au { if self.replaced_image_fragment_info.writing_mode_is_vertical { self.dom_width } else { self.dom_height } } } /// A fragment that represents a replaced content image and its accompanying borders, shadows, etc. 
#[derive(Clone)] pub struct ImageFragmentInfo { /// The image held within this fragment. pub replaced_image_fragment_info: ReplacedImageFragmentInfo, pub image: Option<Arc<Image>>, pub metadata: Option<ImageMetadata>, } impl ImageFragmentInfo { /// Creates a new image fragment from the given URL and local image cache. /// /// FIXME(pcwalton): The fact that image fragments store the cache in the fragment makes little /// sense to me. pub fn new<N: ThreadSafeLayoutNode>(node: &N, url: Option<Url>, shared_layout_context: &SharedLayoutContext) -> ImageFragmentInfo { let image_or_metadata = url.and_then(|url| { shared_layout_context.get_or_request_image_or_meta(url, UsePlaceholder::Yes) }); let (image, metadata) = match image_or_metadata { Some(ImageOrMetadataAvailable::ImageAvailable(i)) => { (Some(i.clone()), Some(ImageMetadata { height: i.height, width: i.width } )) } Some(ImageOrMetadataAvailable::MetadataAvailable(m)) => { (None, Some(m)) } None => { (None, None) } }; ImageFragmentInfo { replaced_image_fragment_info: ReplacedImageFragmentInfo::new(node, &shared_layout_context.style_context), image: image, metadata: metadata, } } /// Returns the original inline-size of the image. pub fn image_inline_size(&mut self) -> Au { match self.metadata { Some(ref metadata) => { Au::from_px(if self.replaced_image_fragment_info.writing_mode_is_vertical { metadata.height } else { metadata.width } as i32) } None => Au(0) } } /// Returns the original block-size of the image. 
pub fn image_block_size(&mut self) -> Au { match self.metadata { Some(ref metadata) => { Au::from_px(if self.replaced_image_fragment_info.writing_mode_is_vertical { metadata.width } else { metadata.height } as i32) } None => Au(0) } } pub fn tile_image_round(position: &mut Au, size: &mut Au, absolute_anchor_origin: Au, image_size: &mut Au) { if *size == Au(0) || *image_size == Au(0) { *position = Au(0); *size =Au(0); return; } let number_of_tiles = (size.to_f32_px() / image_size.to_f32_px()).round().max(1.0); *image_size = *size / (number_of_tiles as i32); ImageFragmentInfo::tile_image(position, size, absolute_anchor_origin, *image_size); } pub fn tile_image_spaced(position: &mut Au, size: &mut Au, tile_spacing: &mut Au, absolute_anchor_origin: Au, image_size: Au) { if *size == Au(0) || image_size == Au(0) { *position = Au(0); *size = Au(0); *tile_spacing = Au(0); return; } // Per the spec, if the space available is not enough for two images, just tile as // normal but only display a single tile. if image_size * 2 >= *size { ImageFragmentInfo::tile_image(position, size, absolute_anchor_origin, image_size); *tile_spacing = Au(0); *size = image_size;; return; } // Take the box size, remove room for two tiles on the edges, and then calculate how many // other tiles fit in between them. let size_remaining = *size - (image_size * 2); let num_middle_tiles = (size_remaining.to_f32_px() / image_size.to_f32_px()).floor() as i32; // Allocate the remaining space as padding between tiles. background-position is ignored // as per the spec, so the position is just the box origin. We are also ignoring // background-attachment here, which seems unspecced when combined with // background-repeat: space. 
let space_for_middle_tiles = image_size * num_middle_tiles; *tile_spacing = (size_remaining - space_for_middle_tiles) / (num_middle_tiles + 1); } /// Tile an image pub fn tile_image(position: &mut Au, size: &mut Au, absolute_anchor_origin: Au, image_size: Au) { // Avoid division by zero below! if image_size == Au(0) { return } let delta_pixels = absolute_anchor_origin - *position; let image_size_px = image_size.to_f32_px(); let tile_count = ((delta_pixels.to_f32_px() + image_size_px - 1.0) / image_size_px).floor(); let offset = image_size * (tile_count as i32); let new_position = absolute_anchor_origin - offset; *size = *position - new_position + *size; *position = new_position; } } #[derive(Clone)] pub struct ReplacedImageFragmentInfo { pub computed_inline_size: Option<Au>, pub computed_block_size: Option<Au>, pub writing_mode_is_vertical: bool, } impl ReplacedImageFragmentInfo { pub fn new<N>(node: &N, ctx: &SharedStyleContext) -> ReplacedImageFragmentInfo where N: ThreadSafeLayoutNode { let is_vertical = node.style(ctx).writing_mode.is_vertical(); ReplacedImageFragmentInfo { computed_inline_size: None, computed_block_size: None, writing_mode_is_vertical: is_vertical, } } /// Returns the calculated inline-size of the image, accounting for the inline-size attribute. pub fn computed_inline_size(&self) -> Au { self.computed_inline_size.expect("image inline_size is not computed yet!") } /// Returns the calculated block-size of the image, accounting for the block-size attribute. pub fn computed_block_size(&self) -> Au { self.computed_block_size.expect("image block_size is not computed yet!") } // Return used value for inline-size or block-size. // // `dom_length`: inline-size or block-size as specified in the `img` tag. 
    // `style_length`: inline-size as given in the CSS.
    //
    // Resolves `style_length` against an optional container size. Lengths are used as-is;
    // percentages and calc() need a definite `container_size` and otherwise resolve to `Auto`.
    pub fn style_length(style_length: LengthOrPercentageOrAuto,
                        container_size: Option<Au>) -> MaybeAuto {
        match (style_length, container_size) {
            (LengthOrPercentageOrAuto::Length(length), _) => MaybeAuto::Specified(length),
            (LengthOrPercentageOrAuto::Percentage(pc), Some(container_size)) => {
                MaybeAuto::Specified(container_size.scale_by(pc))
            }
            (LengthOrPercentageOrAuto::Percentage(_), None) => MaybeAuto::Auto,
            (LengthOrPercentageOrAuto::Calc(calc), Some(container_size)) => {
                MaybeAuto::Specified(calc.length() + container_size.scale_by(calc.percentage()))
            }
            (LengthOrPercentageOrAuto::Calc(_), None) => MaybeAuto::Auto,
            (LengthOrPercentageOrAuto::Auto, _) => MaybeAuto::Auto,
        }
    }

    /// Computes the used inline-size of a replaced image and stores it in
    /// `self.computed_inline_size`.
    ///
    /// If the style gives a definite inline-size, that is used; otherwise the inline-size is
    /// derived from the intrinsic aspect ratio and the (possibly specified) block-size. The
    /// result is clamped by `min/max-inline-size` and returned with `noncontent_inline_size`
    /// added.
    pub fn calculate_replaced_inline_size(&mut self,
                                          style: &ServoComputedValues,
                                          noncontent_inline_size: Au,
                                          container_inline_size: Au,
                                          container_block_size: Option<Au>,
                                          fragment_inline_size: Au,
                                          fragment_block_size: Au)
                                          -> Au {
        let style_inline_size = style.content_inline_size();
        let style_block_size = style.content_block_size();
        let style_min_inline_size = style.min_inline_size();
        let style_max_inline_size = style.max_inline_size();
        let style_min_block_size = style.min_block_size();
        let style_max_block_size = style.max_block_size();

        // TODO(ksh8281): compute border,margin
        let inline_size = ReplacedImageFragmentInfo::style_length(
            style_inline_size,
            Some(container_inline_size));

        let inline_size = match inline_size {
            MaybeAuto::Auto => {
                // `inline-size: auto`: derive it from the intrinsic aspect ratio and the
                // used block-size.
                let intrinsic_width = fragment_inline_size;
                let intrinsic_height = fragment_block_size;
                if intrinsic_height == Au(0) {
                    // Degenerate intrinsic height: fall back to the intrinsic width to avoid
                    // dividing by zero below.
                    intrinsic_width
                } else {
                    let ratio = intrinsic_width.to_f32_px() /
                                intrinsic_height.to_f32_px();

                    let specified_height = ReplacedImageFragmentInfo::style_length(
                        style_block_size,
                        container_block_size);
                    let specified_height = match specified_height {
                        MaybeAuto::Auto => intrinsic_height,
                        MaybeAuto::Specified(h) => h,
                    };
                    // Clamp the block-size first, then apply the ratio to it.
                    let specified_height = clamp_size(specified_height,
                                                      style_min_block_size,
                                                      style_max_block_size,
                                                      Au(0));
                    Au::from_f32_px(specified_height.to_f32_px() * ratio)
                }
            },
            MaybeAuto::Specified(w) => w,
        };

        // Clamp by min/max-inline-size; percentages there resolve against the container.
        let inline_size = clamp_size(inline_size,
                                     style_min_inline_size,
                                     style_max_inline_size,
                                     container_inline_size);

        self.computed_inline_size = Some(inline_size);
        inline_size + noncontent_inline_size
    }

    /// Computes the used block-size of a replaced image and stores it in
    /// `self.computed_block_size`.
    ///
    /// Must be called after `calculate_replaced_inline_size` (it panics otherwise), since an
    /// `auto` block-size is derived from the computed inline-size via the intrinsic aspect
    /// ratio. The result is clamped by `min/max-block-size` and returned with
    /// `noncontent_block_size` added.
    pub fn calculate_replaced_block_size(&mut self,
                                         style: &ServoComputedValues,
                                         noncontent_block_size: Au,
                                         containing_block_block_size: Option<Au>,
                                         fragment_inline_size: Au,
                                         fragment_block_size: Au)
                                         -> Au {
        // TODO(ksh8281): compute border,margin,padding
        let style_block_size = style.content_block_size();
        let style_min_block_size = style.min_block_size();
        let style_max_block_size = style.max_block_size();

        // Panics if the inline-size has not been computed yet.
        let inline_size = self.computed_inline_size();
        let block_size = ReplacedImageFragmentInfo::style_length(
            style_block_size,
            containing_block_block_size);

        let block_size = match block_size {
            MaybeAuto::Auto => {
                // `block-size: auto`: preserve the intrinsic aspect ratio relative to the
                // already-computed inline-size.
                let intrinsic_width = fragment_inline_size;
                let intrinsic_height = fragment_block_size;
                let scale = intrinsic_width.to_f32_px() / inline_size.to_f32_px();
                Au::from_f32_px(intrinsic_height.to_f32_px() / scale)
            },
            MaybeAuto::Specified(h) => {
                h
            }
        };

        let block_size = clamp_size(block_size,
                                    style_min_block_size,
                                    style_max_block_size,
                                    Au(0));

        self.computed_block_size = Some(block_size);
        block_size + noncontent_block_size
    }
}

/// A fragment that represents an inline frame (iframe). This stores the pipeline ID so that the
/// size of this iframe can be communicated via the constellation to the iframe's own layout thread.
#[derive(Clone)]
pub struct IframeFragmentInfo {
    /// The pipeline ID of this iframe.
    pub pipeline_id: PipelineId,
}

impl IframeFragmentInfo {
    /// Creates the information specific to an iframe fragment.
    pub fn new<N: ThreadSafeLayoutNode>(node: &N) -> IframeFragmentInfo {
        let pipeline_id = node.iframe_pipeline_id();
        IframeFragmentInfo {
            pipeline_id: pipeline_id,
        }
    }

    /// Computes the used inline-size of the iframe; defaults to 300px when neither the style
    /// nor the containing block determines one.
    #[inline]
    pub fn calculate_replaced_inline_size(&self, style: &ServoComputedValues, containing_size: Au)
                                          -> Au {
        // Calculate the replaced inline size (or default) as per CSS 2.1 § 10.3.2
        IframeFragmentInfo::calculate_replaced_size(style.content_inline_size(),
                                                    style.min_inline_size(),
                                                    style.max_inline_size(),
                                                    Some(containing_size),
                                                    Au::from_px(300))
    }

    /// Computes the used block-size of the iframe; defaults to 150px when neither the style
    /// nor the containing block determines one.
    #[inline]
    pub fn calculate_replaced_block_size(&self, style: &ServoComputedValues,
                                         containing_size: Option<Au>) -> Au {
        // Calculate the replaced block size (or default) as per CSS 2.1 § 10.3.2
        IframeFragmentInfo::calculate_replaced_size(style.content_block_size(),
                                                    style.min_block_size(),
                                                    style.max_block_size(),
                                                    containing_size,
                                                    Au::from_px(150))
    }

    /// Shared size computation: resolves `content_size` against the (optional) containing
    /// size, substituting `default_size` when a percentage cannot be resolved or the size is
    /// `auto`, then clamps by min/max (whose percentages resolve against the containing size,
    /// or zero when there is none).
    fn calculate_replaced_size(content_size: LengthOrPercentageOrAuto,
                               style_min_size: LengthOrPercentage,
                               style_max_size: LengthOrPercentageOrNone,
                               containing_size: Option<Au>,
                               default_size: Au) -> Au {
        let computed_size = match (content_size, containing_size) {
            (LengthOrPercentageOrAuto::Length(length), _) => length,
            (LengthOrPercentageOrAuto::Percentage(pc), Some(container_size)) => container_size.scale_by(pc),
            (LengthOrPercentageOrAuto::Calc(calc), Some(container_size)) => {
                container_size.scale_by(calc.percentage()) + calc.length()
            },
            // calc() without a containing size keeps only its fixed length part.
            (LengthOrPercentageOrAuto::Calc(calc), None) => calc.length(),
            (LengthOrPercentageOrAuto::Percentage(_), None) => default_size,
            (LengthOrPercentageOrAuto::Auto, _) => default_size,
        };

        let containing_size = containing_size.unwrap_or(Au(0));
        clamp_size(computed_size,
                   style_min_size,
                   style_max_size,
                   containing_size)
    }
}

/// A scanned text fragment represents a single run of text with a distinct style. A `TextFragment`
/// may be split into two or more fragments across line breaks. Several `TextFragment`s may
/// correspond to a single DOM text node. Split text fragments are implemented by referring to
/// subsets of a single `TextRun` object.
#[derive(Clone)]
pub struct ScannedTextFragmentInfo {
    /// The text run that this represents.
    pub run: Arc<TextRun>,

    /// The intrinsic size of the text fragment.
    pub content_size: LogicalSize<Au>,

    /// The byte offset of the insertion point, if any.
    pub insertion_point: Option<ByteIndex>,

    /// The range within the above text run that this represents.
    pub range: Range<ByteIndex>,

    /// The endpoint of the above range, including whitespace that was stripped out. This exists
    /// so that we can restore the range to its original value (before line breaking occurred) when
    /// performing incremental reflow.
    pub range_end_including_stripped_whitespace: ByteIndex,

    /// Flags for this scanned text fragment; see `ScannedTextFlags`.
    pub flags: ScannedTextFlags,
}

bitflags! {
    pub flags ScannedTextFlags: u8 {
        /// Whether a line break is required after this fragment if wrapping on newlines (e.g. if
        /// `white-space: pre` is in effect).
        const REQUIRES_LINE_BREAK_AFTERWARD_IF_WRAPPING_ON_NEWLINES = 0x01,

        /// Is this fragment selected?
        const SELECTED = 0x02,
    }
}

impl ScannedTextFragmentInfo {
    /// Creates the information specific to a scanned text fragment from a range and a text run.
    pub fn new(run: Arc<TextRun>,
               range: Range<ByteIndex>,
               content_size: LogicalSize<Au>,
               insertion_point: Option<ByteIndex>,
               flags: ScannedTextFlags)
               -> ScannedTextFragmentInfo {
        ScannedTextFragmentInfo {
            run: run,
            range: range,
            insertion_point: insertion_point,
            content_size: content_size,
            // Initially no whitespace has been stripped, so the stored endpoint is just the
            // range's own end.
            range_end_including_stripped_whitespace: range.end(),
            flags: flags,
        }
    }

    /// Returns the slice of the underlying text run's text covered by this fragment's range.
    pub fn text(&self) -> &str {
        &self.run.text[self.range.begin().to_usize() .. self.range.end().to_usize()]
    }

    /// True if a line break must follow this fragment when wrapping on newlines.
    pub fn requires_line_break_afterward_if_wrapping_on_newlines(&self) -> bool {
        self.flags.contains(REQUIRES_LINE_BREAK_AFTERWARD_IF_WRAPPING_ON_NEWLINES)
    }

    /// True if this fragment is part of the selection.
    pub fn selected(&self) -> bool {
        self.flags.contains(SELECTED)
    }
}

/// Describes how to split a fragment. This is used during line breaking as part of the return
/// value of `find_split_info_for_inline_size()`.
#[derive(Debug, Clone)]
pub struct SplitInfo {
    // TODO(bjz): this should only need to be a single character index, but both values are
    // currently needed for splitting in the `inline::try_append_*` functions.
    pub range: Range<ByteIndex>,
    pub inline_size: Au,
}

impl SplitInfo {
    /// Creates split info for `range`, measuring its advance in `info`'s text run.
    fn new(range: Range<ByteIndex>, info: &ScannedTextFragmentInfo) -> SplitInfo {
        let inline_size = info.run.advance_for_range(&range);
        SplitInfo {
            range: range,
            inline_size: inline_size,
        }
    }
}

/// Describes how to split a fragment into two. This contains up to two `SplitInfo`s.
pub struct SplitResult {
    /// The part of the fragment that goes on the first line.
    pub inline_start: Option<SplitInfo>,
    /// The part of the fragment that goes on the second line.
    pub inline_end: Option<SplitInfo>,
    /// The text run which is being split.
    pub text_run: Arc<TextRun>,
}

/// Describes how a fragment should be truncated.
pub struct TruncationResult {
    /// The part of the fragment remaining after truncation.
    pub split: SplitInfo,
    /// The text run which is being truncated.
    pub text_run: Arc<TextRun>,
}

/// Data for an unscanned text fragment. Unscanned text fragments are the results of flow
/// construction that have not yet had their inline-size determined.
#[derive(Clone)]
pub struct UnscannedTextFragmentInfo {
    /// The text inside the fragment.
    pub text: Box<str>,

    /// The selected text range.  An empty range represents the insertion point.
    pub selection: Option<Range<ByteIndex>>,
}

impl UnscannedTextFragmentInfo {
    /// Creates a new instance of `UnscannedTextFragmentInfo` from the given text.
    #[inline]
    pub fn new(text: String, selection: Option<Range<ByteIndex>>) -> UnscannedTextFragmentInfo {
        UnscannedTextFragmentInfo {
            text: text.into_boxed_str(),
            selection: selection,
        }
    }
}

/// A fragment that represents a table column.
#[derive(Copy, Clone)] pub struct TableColumnFragmentInfo { /// the number of columns a <col> element should span pub span: u32, } impl TableColumnFragmentInfo { /// Create the information specific to an table column fragment. pub fn new<N: ThreadSafeLayoutNode>(node: &N) -> TableColumnFragmentInfo { let element = node.as_element(); let span = element.get_attr(&ns!(), &atom!("span")) .and_then(|string| string.parse().ok()) .unwrap_or(0); TableColumnFragmentInfo { span: span, } } } impl Fragment { /// Constructs a new `Fragment` instance. pub fn new<N: ThreadSafeLayoutNode>(node: &N, specific: SpecificFragmentInfo, ctx: &LayoutContext) -> Fragment { let style_context = ctx.style_context(); let style = node.style(style_context).clone(); let writing_mode = style.writing_mode; let mut restyle_damage = node.restyle_damage(); restyle_damage.remove(RECONSTRUCT_FLOW); Fragment { node: node.opaque(), style: style, selected_style: node.selected_style(style_context).clone(), restyle_damage: restyle_damage, border_box: LogicalRect::zero(writing_mode), border_padding: LogicalMargin::zero(writing_mode), margin: LogicalMargin::zero(writing_mode), specific: specific, inline_context: None, pseudo: node.get_pseudo_element_type().strip(), flags: FragmentFlags::empty(), debug_id: DebugId::new(), stacking_context_id: StackingContextId::new(0), } } /// Constructs a new `Fragment` instance from an opaque node. 
    pub fn from_opaque_node_and_style(node: OpaqueNode,
                                      pseudo: PseudoElementType<()>,
                                      style: Arc<ServoComputedValues>,
                                      selected_style: Arc<ServoComputedValues>,
                                      mut restyle_damage: RestyleDamage,
                                      specific: SpecificFragmentInfo)
                                      -> Fragment {
        let writing_mode = style.writing_mode;

        // Constructing the fragment is itself the flow reconstruction.
        restyle_damage.remove(RECONSTRUCT_FLOW);

        Fragment {
            node: node,
            style: style,
            selected_style: selected_style,
            restyle_damage: restyle_damage,
            border_box: LogicalRect::zero(writing_mode),
            border_padding: LogicalMargin::zero(writing_mode),
            margin: LogicalMargin::zero(writing_mode),
            specific: specific,
            inline_context: None,
            pseudo: pseudo,
            flags: FragmentFlags::empty(),
            debug_id: DebugId::new(),
            stacking_context_id: StackingContextId::new(0),
        }
    }

    /// Transforms this fragment into another fragment of the given type, with the given size,
    /// preserving all the other data.
    pub fn transform(&self, size: LogicalSize<Au>, info: SpecificFragmentInfo)
                     -> Fragment {
        // Keep the same origin; only the size changes.
        let new_border_box = LogicalRect::from_point_size(self.style.writing_mode,
                                                          self.border_box.start,
                                                          size);

        // The transformed fragment needs a full rebuild/reflow, minus flow reconstruction.
        let mut restyle_damage = RestyleDamage::rebuild_and_reflow();
        restyle_damage.remove(RECONSTRUCT_FLOW);

        Fragment {
            node: self.node,
            style: self.style.clone(),
            selected_style: self.selected_style.clone(),
            restyle_damage: restyle_damage,
            border_box: new_border_box,
            border_padding: self.border_padding,
            margin: self.margin,
            specific: info,
            inline_context: self.inline_context.clone(),
            pseudo: self.pseudo.clone(),
            flags: FragmentFlags::empty(),
            debug_id: self.debug_id.clone(),
            stacking_context_id: StackingContextId::new(0),
        }
    }

    /// Transforms this fragment using the given `SplitInfo`, preserving all the other data.
    pub fn transform_with_split_info(&self, split: &SplitInfo, text_run: Arc<TextRun>)
                                    -> Fragment {
        // Inline-size comes from the split; block-size is unchanged.
        let size = LogicalSize::new(self.style.writing_mode,
                                    split.inline_size,
                                    self.border_box.size.block);
        // Preserve the insertion point if it is in this fragment's range or it is at line end.
        let (flags, insertion_point) = match self.specific {
            SpecificFragmentInfo::ScannedText(ref info) => {
                match info.insertion_point {
                    Some(index) if split.range.contains(index) => (info.flags, info.insertion_point),
                    // Special case: the insertion point sits at the very end of the run and
                    // coincides with the end of this split's range.
                    Some(index) if index == ByteIndex(text_run.text.chars().count() as isize - 1) &&
                        index == split.range.end() => (info.flags, info.insertion_point),
                    _ => (info.flags, None)
                }
            },
            _ => (ScannedTextFlags::empty(), None)
        };
        let info = box ScannedTextFragmentInfo::new(
            text_run,
            split.range,
            size,
            insertion_point,
            flags);
        self.transform(size, SpecificFragmentInfo::ScannedText(info))
    }

    /// Transforms this fragment into an ellipsis fragment, preserving all the other data.
    pub fn transform_into_ellipsis(&self, layout_context: &LayoutContext) -> Fragment {
        // Build an unscanned "…" fragment, then run the text scanner over it to obtain the
        // single scanned fragment that replaces this one.
        let mut unscanned_ellipsis_fragments = LinkedList::new();
        unscanned_ellipsis_fragments.push_back(self.transform(
                self.border_box.size,
                SpecificFragmentInfo::UnscannedText(
                    box UnscannedTextFragmentInfo::new("…".to_owned(), None))));
        let ellipsis_fragments = TextRunScanner::new().scan_for_runs(&mut layout_context.font_context(),
                                                                     unscanned_ellipsis_fragments);
        debug_assert!(ellipsis_fragments.len() == 1);
        ellipsis_fragments.fragments.into_iter().next().unwrap()
    }

    /// Returns the combined restyle damage of the fragment itself and its specific info.
    pub fn restyle_damage(&self) -> RestyleDamage {
        self.restyle_damage | self.specific.restyle_damage()
    }

    /// True if this fragment corresponds to `node_address`, either directly or via one of the
    /// nodes in its inline context.
    pub fn contains_node(&self, node_address: OpaqueNode) -> bool {
        node_address == self.node ||
        self.inline_context.as_ref().map_or(false, |ctx| {
            ctx.contains_node(node_address)
        })
    }

    /// Adds a style to the inline context for this fragment. If the inline context doesn't exist
    /// yet, it will be created.
    pub fn add_inline_context_style(&mut self, node_info: InlineFragmentNodeInfo) {
        // Lazily create the inline context on first use.
        if self.inline_context.is_none() {
            self.inline_context = Some(InlineFragmentContext::new());
        }
        self.inline_context.as_mut().unwrap().nodes.push(node_info);
    }

    /// Determines which quantities (border/padding/margin/specified) should be included in the
    /// intrinsic inline size of this fragment.
    fn quantities_included_in_intrinsic_inline_size(&self)
                                                    -> QuantitiesIncludedInIntrinsicInlineSizes {
        match self.specific {
            // Replaced-ish and block-level content includes everything.
            SpecificFragmentInfo::Canvas(_) |
            SpecificFragmentInfo::Generic |
            SpecificFragmentInfo::GeneratedContent(_) |
            SpecificFragmentInfo::Iframe(_) |
            SpecificFragmentInfo::Image(_) |
            SpecificFragmentInfo::InlineAbsolute(_) |
            SpecificFragmentInfo::Multicol => {
                QuantitiesIncludedInIntrinsicInlineSizes::all()
            }
            // Tables and cells include padding and the specified size; borders only count
            // when they are not collapsed.
            SpecificFragmentInfo::Table | SpecificFragmentInfo::TableCell => {
                let base_quantities = INTRINSIC_INLINE_SIZE_INCLUDES_PADDING |
                    INTRINSIC_INLINE_SIZE_INCLUDES_SPECIFIED;
                if self.style.get_inheritedtable().border_collapse ==
                        border_collapse::T::separate {
                    base_quantities | INTRINSIC_INLINE_SIZE_INCLUDES_BORDER
                } else {
                    base_quantities
                }
            }
            // The table wrapper owns the margins (not padding).
            SpecificFragmentInfo::TableWrapper => {
                let base_quantities = INTRINSIC_INLINE_SIZE_INCLUDES_MARGINS |
                    INTRINSIC_INLINE_SIZE_INCLUDES_SPECIFIED;
                if self.style.get_inheritedtable().border_collapse ==
                        border_collapse::T::separate {
                    base_quantities | INTRINSIC_INLINE_SIZE_INCLUDES_BORDER
                } else {
                    base_quantities
                }
            }
            // Rows contribute only their specified size (plus non-collapsed borders).
            SpecificFragmentInfo::TableRow => {
                let base_quantities = INTRINSIC_INLINE_SIZE_INCLUDES_SPECIFIED;
                if self.style.get_inheritedtable().border_collapse ==
                        border_collapse::T::separate {
                    base_quantities | INTRINSIC_INLINE_SIZE_INCLUDES_BORDER
                } else {
                    base_quantities
                }
            }
            // Text, columns, and inline wrappers contribute nothing from style.
            SpecificFragmentInfo::ScannedText(_) |
            SpecificFragmentInfo::TableColumn(_) |
            SpecificFragmentInfo::UnscannedText(_) |
            SpecificFragmentInfo::InlineAbsoluteHypothetical(_) |
            SpecificFragmentInfo::InlineBlock(_) |
            SpecificFragmentInfo::MulticolColumn => {
                QuantitiesIncludedInIntrinsicInlineSizes::empty()
            }
        }
    }

    /// Returns the portion of the intrinsic inline-size that consists of borders, padding, and/or
    /// margins.
    ///
    /// FIXME(#2261, pcwalton): This won't work well for inlines: is this OK?
    pub fn surrounding_intrinsic_inline_size(&self) -> Au {
        let flags = self.quantities_included_in_intrinsic_inline_size();
        let style = self.style();

        // FIXME(pcwalton): Percentages should be relative to any definite size per CSS-SIZING.
        // This will likely need to be done by pushing down definite sizes during selector
        // cascading.
        let margin = if flags.contains(INTRINSIC_INLINE_SIZE_INCLUDES_MARGINS) {
            let margin = style.logical_margin();
            (MaybeAuto::from_style(margin.inline_start, Au(0)).specified_or_zero() +
             MaybeAuto::from_style(margin.inline_end, Au(0)).specified_or_zero())
        } else {
            Au(0)
        };

        // FIXME(pcwalton): Percentages should be relative to any definite size per CSS-SIZING.
        // This will likely need to be done by pushing down definite sizes during selector
        // cascading.
        let padding = if flags.contains(INTRINSIC_INLINE_SIZE_INCLUDES_PADDING) {
            let padding = style.logical_padding();
            (model::specified(padding.inline_start, Au(0)) +
             model::specified(padding.inline_end, Au(0)))
        } else {
            Au(0)
        };

        let border = if flags.contains(INTRINSIC_INLINE_SIZE_INCLUDES_BORDER) {
            self.border_width().inline_start_end()
        } else {
            Au(0)
        };

        margin + padding + border
    }

    /// Uses the style only to estimate the intrinsic inline-sizes. These may be modified for text
    /// or replaced elements.
fn style_specified_intrinsic_inline_size(&self) -> IntrinsicISizesContribution { let flags = self.quantities_included_in_intrinsic_inline_size(); let style = self.style(); let mut specified = Au(0); if flags.contains(INTRINSIC_INLINE_SIZE_INCLUDES_SPECIFIED) { specified = MaybeAuto::from_style(style.content_inline_size(), Au(0)).specified_or_zero(); specified = max(model::specified(style.min_inline_size(), Au(0)), specified); if let Some(max) = model::specified_or_none(style.max_inline_size(), Au(0)) { specified = min(specified, max) } } // FIXME(#2261, pcwalton): This won't work well for inlines: is this OK? let surrounding_inline_size = self.surrounding_intrinsic_inline_size(); IntrinsicISizesContribution { content_intrinsic_sizes: IntrinsicISizes { minimum_inline_size: specified, preferred_inline_size: specified, }, surrounding_size: surrounding_inline_size, } } /// Returns a guess as to the distances from the margin edge of this fragment to its content /// in the inline direction. This will generally be correct unless percentages are involved. /// /// This is used for the float placement speculation logic. 
pub fn guess_inline_content_edge_offsets(&self) -> SpeculatedInlineContentEdgeOffsets { let logical_margin = self.style.logical_margin(); let logical_padding = self.style.logical_padding(); let border_width = self.border_width(); SpeculatedInlineContentEdgeOffsets { start: MaybeAuto::from_style(logical_margin.inline_start, Au(0)).specified_or_zero() + model::specified(logical_padding.inline_start, Au(0)) + border_width.inline_start, end: MaybeAuto::from_style(logical_margin.inline_end, Au(0)).specified_or_zero() + model::specified(logical_padding.inline_end, Au(0)) + border_width.inline_end, } } pub fn calculate_line_height(&self, layout_context: &LayoutContext) -> Au { let font_style = self.style.get_font_arc(); let font_metrics = text::font_metrics_for_style(&mut layout_context.font_context(), font_style); text::line_height_from_style(&*self.style, &font_metrics) } /// Returns the sum of the inline-sizes of all the borders of this fragment. Note that this /// can be expensive to compute, so if possible use the `border_padding` field instead. #[inline] pub fn border_width(&self) -> LogicalMargin<Au> { let style_border_width = match self.specific { SpecificFragmentInfo::ScannedText(_) | SpecificFragmentInfo::InlineBlock(_) => LogicalMargin::zero(self.style.writing_mode), _ => self.style().logical_border_width(), }; match self.inline_context { None => style_border_width, Some(ref inline_fragment_context) => { inline_fragment_context.nodes.iter().fold(style_border_width, |accumulator, node| { let mut this_border_width = node.style.logical_border_width(); if !node.flags.contains(FIRST_FRAGMENT_OF_ELEMENT) { this_border_width.inline_start = Au(0) } if !node.flags.contains(LAST_FRAGMENT_OF_ELEMENT) { this_border_width.inline_end = Au(0) } accumulator + this_border_width }) } } } /// Returns the border width in given direction if this fragment has property /// 'box-sizing: border-box'. The `border_padding` field should have been initialized. 
    pub fn box_sizing_boundary(&self, direction: Direction) -> Au {
        match (self.style().get_position().box_sizing, direction) {
            (box_sizing::T::border_box, Direction::Inline) => {
                self.border_padding.inline_start_end()
            }
            (box_sizing::T::border_box, Direction::Block) => {
                self.border_padding.block_start_end()
            }
            // content-box: no border/padding is folded into the specified size.
            _ => Au(0)
        }
    }

    /// Computes the margins in the inline direction from the containing block inline-size and the
    /// style. After this call, the inline direction of the `margin` field will be correct.
    ///
    /// Do not use this method if the inline direction margins are to be computed some other way
    /// (for example, via constraint solving for blocks).
    pub fn compute_inline_direction_margins(&mut self, containing_block_inline_size: Au) {
        match self.specific {
            // Internal table boxes never carry inline margins.
            SpecificFragmentInfo::Table | SpecificFragmentInfo::TableCell |
            SpecificFragmentInfo::TableRow | SpecificFragmentInfo::TableColumn(_) => {
                self.margin.inline_start = Au(0);
                self.margin.inline_end = Au(0);
                return
            }
            SpecificFragmentInfo::InlineBlock(_) => {
                // Inline-blocks do not take self margins into account but do account for margins
                // from outer inline contexts.
                self.margin.inline_start = Au(0);
                self.margin.inline_end = Au(0);
            }
            _ => {
                let margin = self.style().logical_margin();
                self.margin.inline_start =
                    MaybeAuto::from_style(margin.inline_start,
                                          containing_block_inline_size).specified_or_zero();
                self.margin.inline_end =
                    MaybeAuto::from_style(margin.inline_end,
                                          containing_block_inline_size).specified_or_zero();
            }
        }

        // Add margins from enclosing inline nodes: only the first fragment of an element
        // carries its inline-start margin and only the last carries its inline-end margin.
        if let Some(ref inline_context) = self.inline_context {
            for node in &inline_context.nodes {
                let margin = node.style.logical_margin();
                let this_inline_start_margin = if !node.flags.contains(FIRST_FRAGMENT_OF_ELEMENT) {
                    Au(0)
                } else {
                    MaybeAuto::from_style(margin.inline_start,
                                          containing_block_inline_size).specified_or_zero()
                };
                let this_inline_end_margin = if !node.flags.contains(LAST_FRAGMENT_OF_ELEMENT) {
                    Au(0)
                } else {
                    MaybeAuto::from_style(margin.inline_end,
                                          containing_block_inline_size).specified_or_zero()
                };

                self.margin.inline_start = self.margin.inline_start + this_inline_start_margin;
                self.margin.inline_end = self.margin.inline_end + this_inline_end_margin;
            }
        }
    }

    /// Computes the margins in the block direction from the containing block inline-size and the
    /// style. After this call, the block direction of the `margin` field will be correct.
    ///
    /// Do not use this method if the block direction margins are to be computed some other way
    /// (for example, via constraint solving for absolutely-positioned flows).
    pub fn compute_block_direction_margins(&mut self, containing_block_inline_size: Au) {
        match self.specific {
            // Internal table boxes never carry block margins.
            SpecificFragmentInfo::Table | SpecificFragmentInfo::TableCell |
            SpecificFragmentInfo::TableRow | SpecificFragmentInfo::TableColumn(_) => {
                self.margin.block_start = Au(0);
                self.margin.block_end = Au(0)
            }
            _ => {
                // NB: Percentages are relative to containing block inline-size (not block-size)
                // per CSS 2.1.
                let margin = self.style().logical_margin();
                self.margin.block_start =
                    MaybeAuto::from_style(margin.block_start, containing_block_inline_size)
                    .specified_or_zero();
                self.margin.block_end =
                    MaybeAuto::from_style(margin.block_end, containing_block_inline_size)
                    .specified_or_zero();
            }
        }
    }

    /// Computes the border and padding in both inline and block directions from the containing
    /// block inline-size and the style. After this call, the `border_padding` field will be
    /// correct.
    ///
    /// TODO(pcwalton): Remove `border_collapse`; we can figure it out from our style and specific
    /// fragment info.
    pub fn compute_border_and_padding(&mut self,
                                      containing_block_inline_size: Au,
                                      border_collapse: border_collapse::T) {
        // Compute border.
        let border = match border_collapse {
            border_collapse::T::separate => self.border_width(),
            // Collapsed borders are handled by the table code, not here.
            border_collapse::T::collapse => LogicalMargin::zero(self.style.writing_mode),
        };

        // Compute padding from the fragment's style.
        //
        // This is zero in the case of `inline-block` because that padding is applied to the
        // wrapped block, not the fragment.
        let padding_from_style = match self.specific {
            SpecificFragmentInfo::TableColumn(_) |
            SpecificFragmentInfo::TableRow |
            SpecificFragmentInfo::TableWrapper |
            SpecificFragmentInfo::InlineBlock(_) => LogicalMargin::zero(self.style.writing_mode),
            _ => model::padding_from_style(self.style(), containing_block_inline_size),
        };

        // Compute padding from the inline fragment context.
        let padding_from_inline_fragment_context = match (&self.specific, &self.inline_context) {
            (_, &None) |
            (&SpecificFragmentInfo::TableColumn(_), _) |
            (&SpecificFragmentInfo::TableRow, _) |
            (&SpecificFragmentInfo::TableWrapper, _) => {
                LogicalMargin::zero(self.style.writing_mode)
            }
            (_, &Some(ref inline_fragment_context)) => {
                // Only the first fragment of an element carries its inline-start padding and
                // only the last carries its inline-end padding.
                let zero_padding = LogicalMargin::zero(self.style.writing_mode);
                inline_fragment_context.nodes.iter().fold(zero_padding, |accumulator, node| {
                    let mut padding = model::padding_from_style(&*node.style, Au(0));
                    if !node.flags.contains(FIRST_FRAGMENT_OF_ELEMENT) {
                        padding.inline_start = Au(0)
                    }
                    if !node.flags.contains(LAST_FRAGMENT_OF_ELEMENT) {
                        padding.inline_end = Au(0)
                    }
                    accumulator + padding
                })
            }
        };

        self.border_padding = border + padding_from_style + padding_from_inline_fragment_context
    }

    // Return offset from original position because of `position: relative`.
    pub fn relative_position(&self, containing_block_size: &LogicalSize<Au>) -> LogicalSize<Au> {
        // Resolves one style's relative offsets. `inline-start`/`block-start` win over the
        // corresponding `-end` offsets when both are specified.
        fn from_style(style: &ServoComputedValues, container_size: &LogicalSize<Au>)
                      -> LogicalSize<Au> {
            let offsets = style.logical_position();
            let offset_i = if offsets.inline_start != LengthOrPercentageOrAuto::Auto {
                MaybeAuto::from_style(offsets.inline_start,
                                      container_size.inline).specified_or_zero()
            } else {
                -MaybeAuto::from_style(offsets.inline_end,
                                       container_size.inline).specified_or_zero()
            };
            // NOTE(review): block-direction percentages are resolved against
            // `container_size.inline` here, not `.block` — confirm this is intentional
            // (CSS 2.1 resolves top/bottom percentages against the containing block height).
            let offset_b = if offsets.block_start != LengthOrPercentageOrAuto::Auto {
                MaybeAuto::from_style(offsets.block_start,
                                      container_size.inline).specified_or_zero()
            } else {
                -MaybeAuto::from_style(offsets.block_end,
                                       container_size.inline).specified_or_zero()
            };
            LogicalSize::new(style.writing_mode, offset_i, offset_b)
        }

        // Go over the ancestor fragments and add all relative offsets (if any).
        let mut rel_pos = if self.style().get_box().position == position::T::relative {
            from_style(self.style(), containing_block_size)
        } else {
            LogicalSize::zero(self.style.writing_mode)
        };

        if let Some(ref inline_fragment_context) = self.inline_context {
            for node in &inline_fragment_context.nodes {
                if node.style.get_box().position == position::T::relative {
                    rel_pos = rel_pos + from_style(&*node.style, containing_block_size);
                }
            }
        }

        rel_pos
    }

    /// Always inline for SCCP.
    ///
    /// FIXME(pcwalton): Just replace with the clear type from the style module for speed?
    #[inline(always)]
    pub fn clear(&self) -> Option<ClearType> {
        let style = self.style();
        match style.get_box().clear {
            clear::T::none => None,
            clear::T::left => Some(ClearType::Left),
            clear::T::right => Some(ClearType::Right),
            clear::T::both => Some(ClearType::Both),
        }
    }

    #[inline(always)]
    pub fn style(&self) -> &ServoComputedValues {
        &*self.style
    }

    #[inline(always)]
    pub fn selected_style(&self) -> &ServoComputedValues {
        &*self.selected_style
    }

    pub fn white_space(&self) -> white_space::T {
        self.style().get_inheritedtext().white_space
    }

    pub fn color(&self) -> color::T {
        self.style().get_color().color
    }

    /// Returns the text decoration of this fragment, according to the style of the nearest ancestor
    /// element.
    ///
    /// NB: This may not be the actual text decoration, because of the override rules specified in
    /// CSS 2.1 § 16.3.1. Unfortunately, computing this properly doesn't really fit into Servo's
    /// model. Therefore, this is a best lower bound approximation, but the end result may actually
    /// have the various decoration flags turned on afterward.
    pub fn text_decoration(&self) -> text_decoration::T {
        self.style().get_text().text_decoration
    }

    /// Returns the inline-start offset from margin edge to content edge.
    ///
    /// FIXME(#2262, pcwalton): I think this method is pretty bogus, because it won't work for
    /// inlines.
pub fn inline_start_offset(&self) -> Au { match self.specific { SpecificFragmentInfo::TableWrapper => self.margin.inline_start, SpecificFragmentInfo::Table | SpecificFragmentInfo::TableCell | SpecificFragmentInfo::TableRow => self.border_padding.inline_start, SpecificFragmentInfo::TableColumn(_) => Au(0), _ => self.margin.inline_start + self.border_padding.inline_start, } } /// Returns true if this element can be split. This is true for text fragments, unless /// `white-space: pre` or `white-space: nowrap` is set. pub fn can_split(&self) -> bool { self.is_scanned_text_fragment() && self.white_space().allow_wrap() } /// Returns true if and only if this fragment is a generated content fragment. pub fn is_unscanned_generated_content(&self) -> bool { match self.specific { SpecificFragmentInfo::GeneratedContent(box GeneratedContentInfo::Empty) => false, SpecificFragmentInfo::GeneratedContent(..) => true, _ => false, } } /// Returns true if and only if this is a scanned text fragment. pub fn is_scanned_text_fragment(&self) -> bool { match self.specific { SpecificFragmentInfo::ScannedText(..) => true, _ => false, } } /// Computes the intrinsic inline-sizes of this fragment. 
    pub fn compute_intrinsic_inline_sizes(&mut self) -> IntrinsicISizesContribution {
        // Start from the style-specified contribution; the match below adds the
        // content-dependent part for each fragment kind.
        let mut result = self.style_specified_intrinsic_inline_size();
        match self.specific {
            // These kinds contribute nothing beyond their style-specified sizes.
            SpecificFragmentInfo::Generic |
            SpecificFragmentInfo::GeneratedContent(_) |
            SpecificFragmentInfo::Iframe(_) |
            SpecificFragmentInfo::Table |
            SpecificFragmentInfo::TableCell |
            SpecificFragmentInfo::TableColumn(_) |
            SpecificFragmentInfo::TableRow |
            SpecificFragmentInfo::TableWrapper |
            SpecificFragmentInfo::Multicol |
            SpecificFragmentInfo::MulticolColumn |
            SpecificFragmentInfo::InlineAbsoluteHypothetical(_) => {}
            SpecificFragmentInfo::InlineBlock(ref info) => {
                // Delegate to the wrapped block flow's already-computed intrinsic sizes.
                let block_flow = info.flow_ref.as_block();
                result.union_block(&block_flow.base.intrinsic_inline_sizes)
            }
            SpecificFragmentInfo::InlineAbsolute(ref info) => {
                let block_flow = info.flow_ref.as_block();
                result.union_block(&block_flow.base.intrinsic_inline_sizes)
            }
            SpecificFragmentInfo::Image(ref mut image_fragment_info) => {
                // Replaced content: start from the used inline-size (the image's own size when
                // `auto` or percentage), then clamp to min/max inline-size.
                let mut image_inline_size = match self.style.content_inline_size() {
                    LengthOrPercentageOrAuto::Auto |
                    LengthOrPercentageOrAuto::Percentage(_) => {
                        image_fragment_info.image_inline_size()
                    }
                    LengthOrPercentageOrAuto::Length(length) => length,
                    LengthOrPercentageOrAuto::Calc(calc) => calc.length(),
                };

                image_inline_size = max(model::specified(self.style.min_inline_size(), Au(0)),
                                        image_inline_size);
                if let Some(max) = model::specified_or_none(self.style.max_inline_size(),
                                                            Au(0)) {
                    image_inline_size = min(image_inline_size, max)
                }
                result.union_block(&IntrinsicISizes {
                    minimum_inline_size: image_inline_size,
                    preferred_inline_size: image_inline_size,
                });
            }
            SpecificFragmentInfo::Canvas(ref mut canvas_fragment_info) => {
                // Same min/max clamping as the image case above, using the canvas's size.
                let mut canvas_inline_size = match self.style.content_inline_size() {
                    LengthOrPercentageOrAuto::Auto |
                    LengthOrPercentageOrAuto::Percentage(_) => {
                        canvas_fragment_info.canvas_inline_size()
                    }
                    LengthOrPercentageOrAuto::Length(length) => length,
                    LengthOrPercentageOrAuto::Calc(calc) => calc.length(),
                };

                canvas_inline_size = max(model::specified(self.style.min_inline_size(), Au(0)),
                                         canvas_inline_size);
                if let Some(max) = model::specified_or_none(self.style.max_inline_size(),
                                                            Au(0)) {
                    canvas_inline_size = min(canvas_inline_size, max)
                }
                result.union_block(&IntrinsicISizes {
                    minimum_inline_size: canvas_inline_size,
                    preferred_inline_size: canvas_inline_size,
                });
            }
            SpecificFragmentInfo::ScannedText(ref text_fragment_info) => {
                let range = &text_fragment_info.range;

                // See http://dev.w3.org/csswg/css-sizing/#max-content-inline-size.
                // TODO: Account for soft wrap opportunities.
                let max_line_inline_size = text_fragment_info.run
                                                             .metrics_for_range(range)
                                                             .advance_width;

                // Non-wrapping text can never be narrower than its full advance.
                let min_line_inline_size = if self.white_space().allow_wrap() {
                    text_fragment_info.run.min_width_for_range(range)
                } else {
                    max_line_inline_size
                };

                result.union_block(&IntrinsicISizes {
                    minimum_inline_size: min_line_inline_size,
                    preferred_inline_size: max_line_inline_size,
                })
            }
            SpecificFragmentInfo::UnscannedText(..) => {
                panic!("Unscanned text fragments should have been scanned by now!")
            }
        };

        // Take borders and padding for parent inline fragments into account, if necessary.
        if self.is_primary_fragment() {
            if let Some(ref context) = self.inline_context {
                for node in &context.nodes {
                    let mut border_width = node.style.logical_border_width();
                    let mut padding = model::padding_from_style(&*node.style, Au(0));
                    let mut margin = model::specified_margin_from_style(&*node.style);
                    // Only the element's first fragment carries its inline-start edge, and only
                    // its last fragment carries its inline-end edge.
                    if !node.flags.contains(FIRST_FRAGMENT_OF_ELEMENT) {
                        border_width.inline_start = Au(0);
                        padding.inline_start = Au(0);
                        margin.inline_start = Au(0);
                    }
                    if !node.flags.contains(LAST_FRAGMENT_OF_ELEMENT) {
                        border_width.inline_end = Au(0);
                        padding.inline_end = Au(0);
                        margin.inline_end = Au(0);
                    }

                    result.surrounding_size = result.surrounding_size +
                        border_width.inline_start_end() +
                        padding.inline_start_end() +
                        margin.inline_start_end();
                }
            }
        }

        result
    }

    /// Returns the narrowest inline-size that the first splittable part of this fragment could
    /// possibly be split to. (In most cases, this returns the inline-size of the first word in
    /// this fragment.)
    pub fn minimum_splittable_inline_size(&self) -> Au {
        match self.specific {
            SpecificFragmentInfo::ScannedText(ref text) => {
                text.run.minimum_splittable_inline_size(&text.range)
            }
            // Only text can be split, so everything else contributes zero.
            _ => Au(0),
        }
    }

    /// TODO: What exactly does this function return? Why is it Au(0) for
    /// `SpecificFragmentInfo::Generic`?
    pub fn content_inline_size(&self) -> Au {
        match self.specific {
            SpecificFragmentInfo::Generic |
            SpecificFragmentInfo::GeneratedContent(_) |
            SpecificFragmentInfo::Iframe(_) |
            SpecificFragmentInfo::Table |
            SpecificFragmentInfo::TableCell |
            SpecificFragmentInfo::TableRow |
            SpecificFragmentInfo::TableWrapper |
            SpecificFragmentInfo::Multicol |
            SpecificFragmentInfo::MulticolColumn |
            SpecificFragmentInfo::InlineBlock(_) |
            SpecificFragmentInfo::InlineAbsoluteHypothetical(_) |
            SpecificFragmentInfo::InlineAbsolute(_) => Au(0),
            SpecificFragmentInfo::Canvas(ref canvas_fragment_info) => {
                canvas_fragment_info.replaced_image_fragment_info.computed_inline_size()
            }
            SpecificFragmentInfo::Image(ref image_fragment_info) => {
                image_fragment_info.replaced_image_fragment_info.computed_inline_size()
            }
            SpecificFragmentInfo::ScannedText(ref text_fragment_info) => {
                // NOTE(review): this is the glyph run's bounding-box width, not its advance
                // width — confirm that is intentional before relying on it.
                let (range, run) = (&text_fragment_info.range, &text_fragment_info.run);
                let text_bounds = run.metrics_for_range(range).bounding_box;
                text_bounds.size.width
            }
            SpecificFragmentInfo::TableColumn(_) => {
                panic!("Table column fragments do not have inline_size")
            }
            SpecificFragmentInfo::UnscannedText(_) => {
                panic!("Unscanned text fragments should have been scanned by now!")
            }
        }
    }

    /// Returns the dimensions of the content box.
    ///
    /// This is marked `#[inline]` because it is frequently called when only one or two of the
    /// values are needed and that will save computation.
    #[inline]
    pub fn content_box(&self) -> LogicalRect<Au> {
        // Content box = border box shrunk by border and padding on all sides.
        self.border_box - self.border_padding
    }

    /// Attempts to find the split positions of a text fragment so that its inline-size is no more
    /// than `max_inline_size`.
    ///
    /// A return value of `None` indicates that the fragment could not be split. Otherwise the
    /// information pertaining to the split is returned. The inline-start and inline-end split
    /// information are both optional due to the possibility of them being whitespace.
    pub fn calculate_split_position(&self, max_inline_size: Au, starts_line: bool)
                                    -> Option<SplitResult> {
        // Only scanned text can be split.
        let text_fragment_info = match self.specific {
            SpecificFragmentInfo::ScannedText(ref text_fragment_info) => text_fragment_info,
            _ => return None,
        };

        let mut flags = SplitOptions::empty();
        if starts_line {
            flags.insert(STARTS_LINE);
            // `overflow-wrap: break-word` permits falling back to character boundaries, but
            // only at the start of a line.
            if self.style().get_inheritedtext().overflow_wrap == overflow_wrap::T::break_word {
                flags.insert(RETRY_AT_CHARACTER_BOUNDARIES)
            }
        }

        // Pick the breaking strategy mandated by `word-break`.
        match self.style().get_inheritedtext().word_break {
            word_break::T::normal => {
                // Break at normal word boundaries, allowing for soft wrap opportunities.
                let soft_wrap_breaking_strategy =
                    text_fragment_info.run
                                      .natural_word_slices_in_range(&text_fragment_info.range);
                self.calculate_split_position_using_breaking_strategy(
                    soft_wrap_breaking_strategy,
                    max_inline_size,
                    flags)
            }
            word_break::T::break_all => {
                // Break at character boundaries.
                let character_breaking_strategy =
                    text_fragment_info.run
                                      .character_slices_in_range(&text_fragment_info.range);
                // Already breaking per-character, so a character-boundary retry is pointless.
                flags.remove(RETRY_AT_CHARACTER_BOUNDARIES);
                self.calculate_split_position_using_breaking_strategy(
                    character_breaking_strategy,
                    max_inline_size,
                    flags)
            },
            word_break::T::keep_all => {
                // Break at word boundaries, and forbid soft wrap opportunities.
                let natural_word_breaking_strategy =
                    text_fragment_info.run
                                      .natural_word_slices_in_range(&text_fragment_info.range);
                self.calculate_split_position_using_breaking_strategy(
                    natural_word_breaking_strategy,
                    max_inline_size,
                    flags)
            }
        }
    }

    /// Truncates this fragment to the given `max_inline_size`, using a character-based breaking
    /// strategy. If no characters could fit, returns `None`.
    pub fn truncate_to_inline_size(&self, max_inline_size: Au) -> Option<TruncationResult> {
        // Only scanned text can be truncated.
        let text_fragment_info =
            if let SpecificFragmentInfo::ScannedText(ref text_fragment_info) = self.specific {
                text_fragment_info
            } else {
                return None
            };

        let character_breaking_strategy =
            text_fragment_info.run.character_slices_in_range(&text_fragment_info.range);
        // Only the inline-start half of a successful split survives truncation; the rest of
        // the text is discarded.
        match self.calculate_split_position_using_breaking_strategy(character_breaking_strategy,
                                                                    max_inline_size,
                                                                    SplitOptions::empty()) {
            None => None,
            Some(split_info) => {
                match split_info.inline_start {
                    None => None,
                    Some(split) => {
                        Some(TruncationResult {
                            split: split,
                            text_run: split_info.text_run.clone(),
                        })
                    }
                }
            }
        }
    }

    /// A helper method that uses the breaking strategy described by `slice_iterator` (at present,
    /// either natural word breaking or character breaking) to split this fragment.
    fn calculate_split_position_using_breaking_strategy<'a, I>(
            &self,
            slice_iterator: I,
            max_inline_size: Au,
            flags: SplitOptions)
            -> Option<SplitResult>
            where I: Iterator<Item=TextRunSlice<'a>> {
        // Only scanned text can be split.
        let text_fragment_info = match self.specific {
            SpecificFragmentInfo::ScannedText(ref text_fragment_info) => text_fragment_info,
            _ => return None,
        };

        // The budget for content is the maximum inline-size minus this fragment's own border
        // and padding.
        let mut remaining_inline_size = max_inline_size - self.border_padding.inline_start_end();
        let mut inline_start_range = Range::new(text_fragment_info.range.begin(), ByteIndex(0));
        let mut inline_end_range = None;
        let mut overflowing = false;

        debug!("calculate_split_position_using_breaking_strategy: splitting text fragment \
                (strlen={}, range={:?}, max_inline_size={:?})",
               text_fragment_info.run.text.len(),
               text_fragment_info.range,
               max_inline_size);

        for slice in slice_iterator {
            debug!("calculate_split_position_using_breaking_strategy: considering slice \
                    (offset={:?}, slice range={:?}, remaining_inline_size={:?})",
                   slice.offset,
                   slice.range,
                   remaining_inline_size);

            // Use the `remaining_inline_size` to find a split point if possible. If not, go around
            // the loop again with the next slice.
            let metrics = text_fragment_info.run.metrics_for_slice(slice.glyphs, &slice.range);
            let advance = metrics.advance_width;

            // Have we found the split point?
            if advance <= remaining_inline_size || slice.glyphs.is_whitespace() {
                // Keep going; we haven't found the split point yet.
                // Note that whitespace slices are always absorbed, even when over budget.
                debug!("calculate_split_position_using_breaking_strategy: enlarging span");
                remaining_inline_size = remaining_inline_size - advance;
                inline_start_range.extend_by(slice.range.length());
                continue
            }

            // The advance is more than the remaining inline-size, so split here. First, check to
            // see if we're going to overflow the line. If so, perform a best-effort split.
            let mut remaining_range = slice.text_run_range();
            let split_is_empty = inline_start_range.is_empty() &&
                    !(self.requires_line_break_afterward_if_wrapping_on_newlines() &&
                      !self.white_space().allow_wrap());
            if split_is_empty {
                // We're going to overflow the line.
                overflowing = true;
                inline_start_range = slice.text_run_range();
                remaining_range = Range::new(slice.text_run_range().end(), ByteIndex(0));
                remaining_range.extend_to(text_fragment_info.range.end());
            }

            // Check to see if we need to create an inline-end chunk.
            let slice_begin = remaining_range.begin();
            if slice_begin < text_fragment_info.range.end() {
                // There still some things left over at the end of the line, so create the
                // inline-end chunk.
                let mut inline_end = remaining_range;
                inline_end.extend_to(text_fragment_info.range.end());
                inline_end_range = Some(inline_end);
                debug!("calculate_split_position: splitting remainder with inline-end range={:?}",
                       inline_end);
            }

            // If we failed to find a suitable split point, we're on the verge of overflowing the
            // line.
            if split_is_empty || overflowing {
                // If we've been instructed to retry at character boundaries (probably via
                // `overflow-wrap: break-word`), do so.
                if flags.contains(RETRY_AT_CHARACTER_BOUNDARIES) {
                    let character_breaking_strategy =
                        text_fragment_info.run
                                          .character_slices_in_range(&text_fragment_info.range);
                    // Clear the retry flag so the recursion terminates.
                    let mut flags = flags;
                    flags.remove(RETRY_AT_CHARACTER_BOUNDARIES);
                    return self.calculate_split_position_using_breaking_strategy(
                        character_breaking_strategy,
                        max_inline_size,
                        flags)
                }

                // We aren't at the start of the line, so don't overflow. Let inline layout wrap to
                // the next line instead.
                if !flags.contains(STARTS_LINE) {
                    return None
                }
            }

            break
        }

        let split_is_empty = inline_start_range.is_empty() &&
                !self.requires_line_break_afterward_if_wrapping_on_newlines();
        let inline_start = if !split_is_empty {
            Some(SplitInfo::new(inline_start_range, &**text_fragment_info))
        } else {
            None
        };
        let inline_end = inline_end_range.map(|inline_end_range| {
            SplitInfo::new(inline_end_range, &**text_fragment_info)
        });

        Some(SplitResult {
            inline_start: inline_start,
            inline_end: inline_end,
            text_run: text_fragment_info.run.clone(),
        })
    }

    /// The opposite of `calculate_split_position_using_breaking_strategy`: merges this fragment
    /// with the next one.
    pub fn merge_with(&mut self, next_fragment: Fragment) {
        match (&mut self.specific, &next_fragment.specific) {
            (&mut SpecificFragmentInfo::ScannedText(ref mut this_info),
             &SpecificFragmentInfo::ScannedText(ref other_info)) => {
                // Both fragments must refer to the same text run.
                debug_assert!(arc_ptr_eq(&this_info.run, &other_info.run));
                this_info.range_end_including_stripped_whitespace =
                    other_info.range_end_including_stripped_whitespace;
                if other_info.requires_line_break_afterward_if_wrapping_on_newlines() {
                    this_info.flags.insert(REQUIRES_LINE_BREAK_AFTERWARD_IF_WRAPPING_ON_NEWLINES);
                }
                if other_info.insertion_point.is_some() {
                    this_info.insertion_point = other_info.insertion_point;
                }
                // The merged fragment adopts the following fragment's inline-end border,
                // padding, and margin.
                self.border_padding.inline_end = next_fragment.border_padding.inline_end;
                self.margin.inline_end = next_fragment.margin.inline_end;
            }
            _ => panic!("Can only merge two scanned-text fragments!"),
        }
        self.reset_text_range_and_inline_size();
        self.meld_with_next_inline_fragment(&next_fragment);
    }

    /// Restore any whitespace that was stripped from a text fragment, and recompute inline metrics
    /// if necessary.
    pub fn reset_text_range_and_inline_size(&mut self) {
        if let SpecificFragmentInfo::ScannedText(ref mut info) = self.specific {
            // Justification spacing is reset; copy-on-write via `Arc::make_mut`.
            if info.run.extra_word_spacing != Au(0) {
                Arc::make_mut(&mut info.run).extra_word_spacing = Au(0);
            }

            // FIXME (mbrubeck): Do we need to restore leading too?
            let range_end = info.range_end_including_stripped_whitespace;
            if info.range.end() == range_end {
                return
            }
            info.range.extend_to(range_end);
            info.content_size.inline = info.run.metrics_for_range(&info.range).advance_width;
            self.border_box.size.inline = info.content_size.inline +
                self.border_padding.inline_start_end();
        }
    }

    /// Assigns replaced inline-size, padding, and margins for this fragment only if it is replaced
    /// content per CSS 2.1 § 10.3.2.
    pub fn assign_replaced_inline_size_if_necessary(&mut self,
                                                    container_inline_size: Au,
                                                    container_block_size: Option<Au>) {
        // First, filter out fragment kinds this method doesn't apply to.
        match self.specific {
            SpecificFragmentInfo::Generic |
            SpecificFragmentInfo::GeneratedContent(_) |
            SpecificFragmentInfo::Table |
            SpecificFragmentInfo::TableCell |
            SpecificFragmentInfo::TableRow |
            SpecificFragmentInfo::TableWrapper |
            SpecificFragmentInfo::Multicol |
            SpecificFragmentInfo::MulticolColumn => return,
            SpecificFragmentInfo::TableColumn(_) => {
                panic!("Table column fragments do not have inline size")
            }
            SpecificFragmentInfo::UnscannedText(_) => {
                panic!("Unscanned text fragments should have been scanned by now!")
            }
            SpecificFragmentInfo::Canvas(_) |
            SpecificFragmentInfo::Image(_) |
            SpecificFragmentInfo::Iframe(_) |
            SpecificFragmentInfo::InlineBlock(_) |
            SpecificFragmentInfo::InlineAbsoluteHypothetical(_) |
            SpecificFragmentInfo::InlineAbsolute(_) |
            SpecificFragmentInfo::ScannedText(_) => {}
        };

        let style = &*self.style;
        let noncontent_inline_size = self.border_padding.inline_start_end();

        match self.specific {
            SpecificFragmentInfo::InlineAbsoluteHypothetical(ref mut info) => {
                let block_flow = flow_ref::deref_mut(&mut info.flow_ref).as_mut_block();
                block_flow.base.position.size.inline =
                    block_flow.base.intrinsic_inline_sizes.preferred_inline_size;

                // This is a hypothetical box, so it takes up no space.
                self.border_box.size.inline = Au(0);
            }
            SpecificFragmentInfo::InlineBlock(ref mut info) => {
                let block_flow = flow_ref::deref_mut(&mut info.flow_ref).as_mut_block();
                self.border_box.size.inline =
                    max(block_flow.base.intrinsic_inline_sizes.minimum_inline_size,
                        block_flow.base.intrinsic_inline_sizes.preferred_inline_size);
                // Propagate the chosen size down as the child's containing block inline-size.
                block_flow.base.block_container_inline_size = self.border_box.size.inline;
                block_flow.base.block_container_writing_mode = self.style.writing_mode;
            }
            SpecificFragmentInfo::InlineAbsolute(ref mut info) => {
                let block_flow = flow_ref::deref_mut(&mut info.flow_ref).as_mut_block();
                self.border_box.size.inline =
                    max(block_flow.base.intrinsic_inline_sizes.minimum_inline_size,
                        block_flow.base.intrinsic_inline_sizes.preferred_inline_size);
                block_flow.base.block_container_inline_size = self.border_box.size.inline;
                block_flow.base.block_container_writing_mode = self.style.writing_mode;
            }
            SpecificFragmentInfo::ScannedText(ref info) => {
                // Scanned text fragments will have already had their content inline-sizes assigned
                // by this point.
                self.border_box.size.inline = info.content_size.inline + noncontent_inline_size
            }
            SpecificFragmentInfo::Image(ref mut image_fragment_info) => {
                let fragment_inline_size = image_fragment_info.image_inline_size();
                let fragment_block_size = image_fragment_info.image_block_size();
                self.border_box.size.inline =
                    image_fragment_info.replaced_image_fragment_info
                                       .calculate_replaced_inline_size(style,
                                                                       noncontent_inline_size,
                                                                       container_inline_size,
                                                                       container_block_size,
                                                                       fragment_inline_size,
                                                                       fragment_block_size);
            }
            SpecificFragmentInfo::Canvas(ref mut canvas_fragment_info) => {
                let fragment_inline_size = canvas_fragment_info.canvas_inline_size();
                let fragment_block_size = canvas_fragment_info.canvas_block_size();
                self.border_box.size.inline =
                    canvas_fragment_info.replaced_image_fragment_info
                                        .calculate_replaced_inline_size(style,
                                                                        noncontent_inline_size,
                                                                        container_inline_size,
                                                                        container_block_size,
                                                                        fragment_inline_size,
                                                                        fragment_block_size);
            }
            SpecificFragmentInfo::Iframe(ref iframe_fragment_info) => {
                self.border_box.size.inline =
                    iframe_fragment_info.calculate_replaced_inline_size(style,
                                                                        container_inline_size) +
                    noncontent_inline_size;
            }
            _ => panic!("this case should have been handled above"),
        }
    }

    /// Assign block-size for this fragment if it is replaced content. The inline-size must have
    /// been assigned first.
    ///
    /// Ideally, this should follow CSS 2.1 § 10.6.2.
    pub fn assign_replaced_block_size_if_necessary(&mut self,
                                                   containing_block_block_size: Option<Au>) {
        // First, filter out fragment kinds this method doesn't apply to.
        match self.specific {
            SpecificFragmentInfo::Generic |
            SpecificFragmentInfo::GeneratedContent(_) |
            SpecificFragmentInfo::Table |
            SpecificFragmentInfo::TableCell |
            SpecificFragmentInfo::TableRow |
            SpecificFragmentInfo::TableWrapper |
            SpecificFragmentInfo::Multicol |
            SpecificFragmentInfo::MulticolColumn => return,
            SpecificFragmentInfo::TableColumn(_) => {
                panic!("Table column fragments do not have block size")
            }
            SpecificFragmentInfo::UnscannedText(_) => {
                panic!("Unscanned text fragments should have been scanned by now!")
            }
            SpecificFragmentInfo::Canvas(_) |
            SpecificFragmentInfo::Iframe(_) |
            SpecificFragmentInfo::Image(_) |
            SpecificFragmentInfo::InlineBlock(_) |
            SpecificFragmentInfo::InlineAbsoluteHypothetical(_) |
            SpecificFragmentInfo::InlineAbsolute(_) |
            SpecificFragmentInfo::ScannedText(_) => {}
        }

        let style = &*self.style;
        let noncontent_block_size = self.border_padding.block_start_end();

        match self.specific {
            SpecificFragmentInfo::Image(ref mut image_fragment_info) => {
                let fragment_inline_size = image_fragment_info.image_inline_size();
                let fragment_block_size = image_fragment_info.image_block_size();
                self.border_box.size.block =
                    image_fragment_info.replaced_image_fragment_info
                                       .calculate_replaced_block_size(style,
                                                                      noncontent_block_size,
                                                                      containing_block_block_size,
                                                                      fragment_inline_size,
                                                                      fragment_block_size);
            }
            SpecificFragmentInfo::Canvas(ref mut canvas_fragment_info) => {
                let fragment_inline_size = canvas_fragment_info.canvas_inline_size();
                let fragment_block_size = canvas_fragment_info.canvas_block_size();
                self.border_box.size.block =
                    canvas_fragment_info.replaced_image_fragment_info
                                        .calculate_replaced_block_size(style,
                                                                       noncontent_block_size,
                                                                       containing_block_block_size,
                                                                       fragment_inline_size,
                                                                       fragment_block_size);
            }
            SpecificFragmentInfo::ScannedText(ref info) => {
                // Scanned text fragments' content block-sizes are calculated by the text run
                // scanner during flow construction.
                self.border_box.size.block = info.content_size.block + noncontent_block_size
            }
            SpecificFragmentInfo::InlineBlock(ref mut info) => {
                // Not the primary fragment, so we do not take the noncontent size into account.
                // Note that, unlike the hypothetical case below, margins are included here.
                let block_flow = flow_ref::deref_mut(&mut info.flow_ref).as_block();
                self.border_box.size.block = block_flow.base.position.size.block +
                    block_flow.fragment.margin.block_start_end()
            }
            SpecificFragmentInfo::InlineAbsoluteHypothetical(ref mut info) => {
                // Not the primary fragment, so we do not take the noncontent size into account.
                let block_flow = flow_ref::deref_mut(&mut info.flow_ref).as_block();
                self.border_box.size.block = block_flow.base.position.size.block;
            }
            SpecificFragmentInfo::InlineAbsolute(ref mut info) => {
                // Not the primary fragment, so we do not take the noncontent size into account.
                let block_flow = flow_ref::deref_mut(&mut info.flow_ref).as_block();
                self.border_box.size.block = block_flow.base.position.size.block +
                    block_flow.fragment.margin.block_start_end()
            }
            SpecificFragmentInfo::Iframe(ref info) => {
                self.border_box.size.block =
                    info.calculate_replaced_block_size(style, containing_block_block_size) +
                    noncontent_block_size;
            }
            _ => panic!("should have been handled above"),
        }
    }

    /// Calculates block-size above baseline, depth below baseline, and ascent for this fragment
    /// when used in an inline formatting context. See CSS 2.1 § 10.8.1.
    /// `layout_context` is used to resolve `line-height` for text fragments.
    pub fn inline_metrics(&self, layout_context: &LayoutContext) -> InlineMetrics {
        match self.specific {
            SpecificFragmentInfo::Image(ref image_fragment_info) => {
                // Replaced content sits on the baseline: everything (content + block-start
                // border/padding) is above it; only block-end border/padding is below.
                let computed_block_size = image_fragment_info.replaced_image_fragment_info
                                                             .computed_block_size();
                InlineMetrics {
                    block_size_above_baseline: computed_block_size +
                                                   self.border_padding.block_start,
                    depth_below_baseline: self.border_padding.block_end,
                    ascent: computed_block_size + self.border_padding.block_start,
                }
            }
            SpecificFragmentInfo::Canvas(ref canvas_fragment_info) => {
                // Same baseline treatment as the image case above.
                let computed_block_size = canvas_fragment_info.replaced_image_fragment_info
                                                              .computed_block_size();
                InlineMetrics {
                    block_size_above_baseline: computed_block_size +
                                                   self.border_padding.block_start,
                    depth_below_baseline: self.border_padding.block_end,
                    ascent: computed_block_size + self.border_padding.block_start,
                }
            }
            SpecificFragmentInfo::ScannedText(ref info) => {
                // Fragments with no glyphs don't contribute any inline metrics.
                // TODO: Filter out these fragments during flow construction?
                if info.insertion_point.is_none() && info.content_size.inline == Au(0) {
                    return InlineMetrics::new(Au(0), Au(0), Au(0));
                }
                // See CSS 2.1 § 10.8.1.
                let line_height = self.calculate_line_height(layout_context);
                let font_derived_metrics =
                    InlineMetrics::from_font_metrics(&info.run.font_metrics, line_height);
                InlineMetrics {
                    block_size_above_baseline: font_derived_metrics.block_size_above_baseline,
                    depth_below_baseline: font_derived_metrics.depth_below_baseline,
                    ascent: font_derived_metrics.ascent + self.border_padding.block_start,
                }
            }
            SpecificFragmentInfo::InlineBlock(ref info) => {
                // See CSS 2.1 § 10.8.1.
                let flow = &info.flow_ref;
                let block_flow = flow.as_block();
                let is_auto = self.style.get_position().height == LengthOrPercentageOrAuto::Auto;
                // With `height: auto`, the baseline is the last line box's baseline; otherwise
                // (or when there is no line box) it is the block-end margin edge.
                let baseline_offset = match flow.baseline_offset_of_last_line_box_in_flow() {
                    Some(baseline_offset) if is_auto => baseline_offset,
                    _ => block_flow.fragment.border_box.size.block,
                };
                let start_margin = block_flow.fragment.margin.block_start;
                let end_margin = block_flow.fragment.margin.block_end;
                let depth_below_baseline = flow::base(&**flow).position.size.block -
                    baseline_offset + end_margin;
                InlineMetrics::new(baseline_offset + start_margin,
                                   depth_below_baseline,
                                   baseline_offset)
            }
            SpecificFragmentInfo::InlineAbsoluteHypothetical(_) |
            SpecificFragmentInfo::InlineAbsolute(_) => {
                // Hypothetical boxes take up no space.
                InlineMetrics {
                    block_size_above_baseline: Au(0),
                    depth_below_baseline: Au(0),
                    ascent: Au(0),
                }
            }
            _ => {
                // Everything else sits entirely above the baseline.
                InlineMetrics {
                    block_size_above_baseline: self.border_box.size.block,
                    depth_below_baseline: Au(0),
                    ascent: self.border_box.size.block,
                }
            }
        }
    }

    /// Returns true if this fragment is a hypothetical box. See CSS 2.1 § 10.3.7.
    pub fn is_hypothetical(&self) -> bool {
        match self.specific {
            SpecificFragmentInfo::InlineAbsoluteHypothetical(_) => true,
            _ => false,
        }
    }

    /// Returns true if this fragment can merge with another immediately-following fragment or
    /// false otherwise.
    /// Only adjacent unscanned-text fragments with compatible styles can merge; a trailing
    /// newline in this fragment also prevents merging.
    pub fn can_merge_with_fragment(&self, other: &Fragment) -> bool {
        match (&self.specific, &other.specific) {
            (&SpecificFragmentInfo::UnscannedText(ref first_unscanned_text),
             &SpecificFragmentInfo::UnscannedText(_)) => {
                // FIXME: Should probably use a whitelist of styles that can safely differ (#3165)
                if self.style().get_font() != other.style().get_font() ||
                        self.text_decoration() != other.text_decoration() ||
                        self.white_space() != other.white_space() ||
                        self.color() != other.color() {
                    return false
                }

                if first_unscanned_text.text.ends_with('\n') {
                    return false
                }

                // If this node has any styles that have border/padding/margins on the following
                // side, then we can't merge with the next fragment.
                if let Some(ref inline_context) = self.inline_context {
                    for inline_context_node in inline_context.nodes.iter() {
                        // Only the element's last fragment carries its inline-end edge.
                        if !inline_context_node.flags.contains(LAST_FRAGMENT_OF_ELEMENT) {
                            continue
                        }
                        if inline_context_node.style.logical_margin().inline_end !=
                                LengthOrPercentageOrAuto::Length(Au(0)) {
                            return false
                        }
                        if inline_context_node.style.logical_padding().inline_end !=
                                LengthOrPercentage::Length(Au(0)) {
                            return false
                        }
                        if inline_context_node.style.logical_border_width().inline_end != Au(0) {
                            return false
                        }
                    }
                }

                // If the next fragment has any styles that have border/padding/margins on the
                // preceding side, then it can't merge with us.
                if let Some(ref inline_context) = other.inline_context {
                    for inline_context_node in inline_context.nodes.iter() {
                        // Only the element's first fragment carries its inline-start edge.
                        if !inline_context_node.flags.contains(FIRST_FRAGMENT_OF_ELEMENT) {
                            continue
                        }
                        if inline_context_node.style.logical_margin().inline_start !=
                                LengthOrPercentageOrAuto::Length(Au(0)) {
                            return false
                        }
                        if inline_context_node.style.logical_padding().inline_start !=
                                LengthOrPercentage::Length(Au(0)) {
                            return false
                        }
                        if inline_context_node.style.logical_border_width().inline_start !=
                                Au(0) {
                            return false
                        }
                    }
                }

                true
            }
            _ => false,
        }
    }

    /// Returns true if and only if this is the *primary fragment* for the fragment's style object
    /// (conceptually, though style sharing makes this not really true, of course). The primary
    /// fragment is the one that draws backgrounds, borders, etc., and takes borders, padding and
    /// margins into account. Every style object has at most one primary fragment.
    ///
    /// At present, all fragments are primary fragments except for inline-block and table wrapper
    /// fragments. Inline-block fragments are not primary fragments because the corresponding block
    /// flow is the primary fragment, while table wrapper fragments are not primary fragments
    /// because the corresponding table flow is the primary fragment.
    pub fn is_primary_fragment(&self) -> bool {
        match self.specific {
            // These kinds delegate "primary fragment" duties to an associated flow's fragment.
            SpecificFragmentInfo::InlineBlock(_) |
            SpecificFragmentInfo::InlineAbsoluteHypothetical(_) |
            SpecificFragmentInfo::InlineAbsolute(_) |
            SpecificFragmentInfo::MulticolColumn |
            SpecificFragmentInfo::TableWrapper => false,
            SpecificFragmentInfo::Canvas(_) |
            SpecificFragmentInfo::Generic |
            SpecificFragmentInfo::GeneratedContent(_) |
            SpecificFragmentInfo::Iframe(_) |
            SpecificFragmentInfo::Image(_) |
            SpecificFragmentInfo::ScannedText(_) |
            SpecificFragmentInfo::Table |
            SpecificFragmentInfo::TableCell |
            SpecificFragmentInfo::TableColumn(_) |
            SpecificFragmentInfo::TableRow |
            SpecificFragmentInfo::Multicol |
            SpecificFragmentInfo::UnscannedText(_) => true,
        }
    }

    /// Determines the inline sizes of inline-block fragments. These cannot be fully computed until
    /// inline size assignment has run for the child flow: thus it is computed "late", during
    /// block size assignment.
    pub fn update_late_computed_replaced_inline_size_if_necessary(&mut self) {
        if let SpecificFragmentInfo::InlineBlock(ref mut inline_block_info) = self.specific {
            let block_flow = flow_ref::deref_mut(&mut inline_block_info.flow_ref).as_block();
            let margin = block_flow.fragment.style.logical_margin();
            // The inline-block fragment's size is the child's border box plus its margins
            // (percentage margins resolve against zero here).
            self.border_box.size.inline = block_flow.fragment.border_box.size.inline +
                MaybeAuto::from_style(margin.inline_start, Au(0)).specified_or_zero() +
                MaybeAuto::from_style(margin.inline_end, Au(0)).specified_or_zero()
        }
    }

    /// Propagates this hypothetical box's late-computed inline position down to its flow.
    pub fn update_late_computed_inline_position_if_necessary(&mut self) {
        if let SpecificFragmentInfo::InlineAbsoluteHypothetical(ref mut info) = self.specific {
            let position = self.border_box.start.i;
            flow_ref::deref_mut(&mut info.flow_ref)
                .update_late_computed_inline_position_if_necessary(position)
        }
    }

    /// Propagates this hypothetical box's late-computed block position down to its flow.
    pub fn update_late_computed_block_position_if_necessary(&mut self) {
        if let SpecificFragmentInfo::InlineAbsoluteHypothetical(ref mut info) = self.specific {
            let position = self.border_box.start.b;
            flow_ref::deref_mut(&mut info.flow_ref)
                .update_late_computed_block_position_if_necessary(position)
        }
    }

    /// Replaces this fragment's style with `new_style` (a cheap `Arc` clone).
    pub fn repair_style(&mut self, new_style: &Arc<ServoComputedValues>) {
        self.style = (*new_style).clone()
    }

    /// Given the stacking-context-relative position of the containing flow, returns the border box
    /// of this fragment relative to the parent stacking context. This takes `position: relative`
    /// into account.
    ///
    /// If `coordinate_system` is `Parent`, this returns the border box in the parent stacking
    /// context's coordinate system. Otherwise, if `coordinate_system` is `Own` and this fragment
    /// establishes a stacking context itself, this returns a border box anchored at (0, 0). (If
    /// this fragment does not establish a stacking context, then it always belongs to its parent
    /// stacking context and thus `coordinate_system` is ignored.)
    ///
    /// This is the method you should use for display list construction as well as
    /// `getBoundingClientRect()` and so forth.
    pub fn stacking_relative_border_box(&self,
                                        stacking_relative_flow_origin: &Point2D<Au>,
                                        relative_containing_block_size: &LogicalSize<Au>,
                                        relative_containing_block_mode: WritingMode,
                                        coordinate_system: CoordinateSystem)
                                        -> Rect<Au> {
        let container_size =
            relative_containing_block_size.to_physical(relative_containing_block_mode);
        let border_box = self.border_box.to_physical(self.style.writing_mode, container_size);
        if coordinate_system == CoordinateSystem::Own && self.establishes_stacking_context() {
            return Rect::new(Point2D::zero(), border_box.size)
        }

        // FIXME(pcwalton): This can double-count relative position sometimes for inlines (e.g.
        // `<div style="position:relative">x</div>`, because the `position:relative` trickles down
        // to the inline flow. Possibly we should extend the notion of "primary fragment" to fix
        // this.
        let relative_position = self.relative_position(relative_containing_block_size);
        border_box.translate_by_size(&relative_position.to_physical(self.style.writing_mode))
                  .translate(stacking_relative_flow_origin)
    }

    /// Given the stacking-context-relative border box, returns the stacking-context-relative
    /// content box.
    pub fn stacking_relative_content_box(&self, stacking_relative_border_box: &Rect<Au>)
                                         -> Rect<Au> {
        // Shrink the border box by physical border+padding on each side.
        let border_padding = self.border_padding.to_physical(self.style.writing_mode);
        Rect::new(Point2D::new(stacking_relative_border_box.origin.x + border_padding.left,
                               stacking_relative_border_box.origin.y + border_padding.top),
                  Size2D::new(stacking_relative_border_box.size.width -
                                border_padding.horizontal(),
                              stacking_relative_border_box.size.height -
                                border_padding.vertical()))
    }

    /// Returns true if this fragment establishes a new stacking context and false otherwise.
    pub fn establishes_stacking_context(&self) -> bool {
        // Text fragments shouldn't create stacking contexts.
        match self.specific {
            SpecificFragmentInfo::ScannedText(_) |
            SpecificFragmentInfo::UnscannedText(_) => return false,
            _ => {}
        }

        if self.flags.contains(HAS_LAYER) {
            return true
        }
        if self.style().get_effects().opacity != 1.0 {
            return true
        }
        if !self.style().get_effects().filter.is_empty() {
            return true
        }
        if self.style().get_effects().mix_blend_mode != mix_blend_mode::T::normal {
            return true
        }
        if self.style().get_effects().transform.0.is_some() {
            return true
        }
        match self.style().get_used_transform_style() {
            transform_style::T::flat | transform_style::T::preserve_3d => {
                return true
            }
            transform_style::T::auto => {}
        }

        // FIXME(pcwalton): Don't unconditionally form stacking contexts for `overflow_x: scroll`
        // and `overflow_y: scroll`. This needs multiple layers per stacking context.
        match (self.style().get_box().position,
               self.style().get_position().z_index,
               self.style().get_box().overflow_x,
               self.style().get_box().overflow_y.0) {
            // Positioned with auto z-index and fully-visible overflow: no stacking context.
            (position::T::absolute,
             z_index::T::Auto,
             overflow_x::T::visible,
             overflow_x::T::visible) |
            (position::T::fixed,
             z_index::T::Auto,
             overflow_x::T::visible,
             overflow_x::T::visible) |
            (position::T::relative,
             z_index::T::Auto,
             overflow_x::T::visible,
             overflow_x::T::visible) => false,
            // Positioned with explicit z-index, or any scrollable overflow: stacking context.
            (position::T::absolute, _, _, _) |
            (position::T::fixed, _, _, _) |
            (position::T::relative, _, _, _) |
            (_, _, overflow_x::T::auto, _) |
            (_, _, overflow_x::T::scroll, _) |
            (_, _, _, overflow_x::T::auto) |
            (_, _, _, overflow_x::T::scroll) => true,
            (position::T::static_, _, _, _) => false
        }
    }

    /// Gets the effective z-index of this fragment. Z-indices only apply to positioned elements
    /// per CSS 2 § 9.9.1 (http://www.w3.org/TR/CSS2/visuren.html#z-index), so this value may
    /// differ from the value specified in the style.
    pub fn effective_z_index(&self) -> i32 {
        match self.style().get_box().position {
            position::T::static_ => {},
            _ => return self.style().get_position().z_index.number_or_zero(),
        }

        // Transformed elements also honor their specified z-index.
        if self.style().get_effects().transform.0.is_some() {
            return self.style().get_position().z_index.number_or_zero();
        }

        match self.style().get_box().display {
            // Flex items honor z-index even when not positioned.
            display::T::flex => self.style().get_position().z_index.number_or_zero(),
            _ => 0,
        }
    }

    /// Computes the overflow rect of this fragment relative to the start of the flow.
    pub fn compute_overflow(&self,
                            flow_size: &Size2D<Au>,
                            relative_containing_block_size: &LogicalSize<Au>)
                            -> Overflow {
        let mut border_box = self.border_box.to_physical(self.style.writing_mode, *flow_size);

        // Relative position can cause us to draw outside our border box.
        //
        // FIXME(pcwalton): I'm not a fan of the way this makes us crawl though so many styles all
        // the time. Can't we handle relative positioning by just adjusting `border_box`?
let relative_position = self.relative_position(relative_containing_block_size); border_box = border_box.translate_by_size(&relative_position.to_physical(self.style.writing_mode)); let mut overflow = Overflow::from_rect(&border_box); // Box shadows cause us to draw outside our border box. for box_shadow in &self.style().get_effects().box_shadow.0 { let offset = Point2D::new(box_shadow.offset_x, box_shadow.offset_y); let inflation = box_shadow.spread_radius + box_shadow.blur_radius * BLUR_INFLATION_FACTOR; overflow.paint = overflow.paint.union(&border_box.translate(&offset) .inflate(inflation, inflation)) } // Outlines cause us to draw outside our border box. let outline_width = self.style.get_outline().outline_width; if outline_width != Au(0) { overflow.paint = overflow.paint.union(&border_box.inflate(outline_width, outline_width)) } // Include the overflow of the block flow, if any. match self.specific { SpecificFragmentInfo::InlineBlock(ref info) => { let block_flow = info.flow_ref.as_block(); overflow.union(&flow::base(block_flow).overflow); } SpecificFragmentInfo::InlineAbsolute(ref info) => { let block_flow = info.flow_ref.as_block(); overflow.union(&flow::base(block_flow).overflow); } _ => (), } // FIXME(pcwalton): Sometimes excessively fancy glyphs can make us draw outside our border // box too. 
overflow } pub fn requires_line_break_afterward_if_wrapping_on_newlines(&self) -> bool { match self.specific { SpecificFragmentInfo::ScannedText(ref scanned_text) => { scanned_text.requires_line_break_afterward_if_wrapping_on_newlines() } _ => false, } } pub fn strip_leading_whitespace_if_necessary(&mut self) -> WhitespaceStrippingResult { if self.white_space().preserve_spaces() { return WhitespaceStrippingResult::RetainFragment } match self.specific { SpecificFragmentInfo::ScannedText(ref mut scanned_text_fragment_info) => { let leading_whitespace_byte_count = scanned_text_fragment_info.text() .find(|c| !char_is_whitespace(c)) .unwrap_or(scanned_text_fragment_info.text().len()); let whitespace_len = ByteIndex(leading_whitespace_byte_count as isize); let whitespace_range = Range::new(scanned_text_fragment_info.range.begin(), whitespace_len); let text_bounds = scanned_text_fragment_info.run.metrics_for_range(&whitespace_range).bounding_box; self.border_box.size.inline = self.border_box.size.inline - text_bounds.size.width; scanned_text_fragment_info.content_size.inline = scanned_text_fragment_info.content_size.inline - text_bounds.size.width; scanned_text_fragment_info.range.adjust_by(whitespace_len, -whitespace_len); WhitespaceStrippingResult::RetainFragment } SpecificFragmentInfo::UnscannedText(ref mut unscanned_text_fragment_info) => { let mut new_text_string = String::new(); let mut modified = false; for (i, character) in unscanned_text_fragment_info.text.char_indices() { if gfx::text::util::is_bidi_control(character) { new_text_string.push(character); continue } if char_is_whitespace(character) { modified = true; continue } // Finished processing leading control chars and whitespace. 
if modified { new_text_string.push_str(&unscanned_text_fragment_info.text[i..]); } break } if modified { unscanned_text_fragment_info.text = new_text_string.into_boxed_str(); } WhitespaceStrippingResult::from_unscanned_text_fragment_info( &unscanned_text_fragment_info) } _ => WhitespaceStrippingResult::RetainFragment, } } /// Returns true if the entire fragment was stripped. pub fn strip_trailing_whitespace_if_necessary(&mut self) -> WhitespaceStrippingResult { if self.white_space().preserve_spaces() { return WhitespaceStrippingResult::RetainFragment } match self.specific { SpecificFragmentInfo::ScannedText(ref mut scanned_text_fragment_info) => { let mut trailing_whitespace_start_byte = 0; for (i, c) in scanned_text_fragment_info.text().char_indices().rev() { if !char_is_whitespace(c) { trailing_whitespace_start_byte = i + c.len_utf8(); break; } } let whitespace_start = ByteIndex(trailing_whitespace_start_byte as isize); let whitespace_len = scanned_text_fragment_info.range.length() - whitespace_start; let mut whitespace_range = Range::new(whitespace_start, whitespace_len); whitespace_range.shift_by(scanned_text_fragment_info.range.begin()); let text_bounds = scanned_text_fragment_info.run .metrics_for_range(&whitespace_range) .bounding_box; self.border_box.size.inline -= text_bounds.size.width; scanned_text_fragment_info.content_size.inline -= text_bounds.size.width; scanned_text_fragment_info.range.extend_by(-whitespace_len); WhitespaceStrippingResult::RetainFragment } SpecificFragmentInfo::UnscannedText(ref mut unscanned_text_fragment_info) => { let mut trailing_bidi_control_characters_to_retain = Vec::new(); let (mut modified, mut last_character_index) = (true, 0); for (i, character) in unscanned_text_fragment_info.text.char_indices().rev() { if gfx::text::util::is_bidi_control(character) { trailing_bidi_control_characters_to_retain.push(character); continue } if char_is_whitespace(character) { modified = true; continue } last_character_index = i + 
character.len_utf8(); break } if modified { let mut text = unscanned_text_fragment_info.text.to_string(); text.truncate(last_character_index); for character in trailing_bidi_control_characters_to_retain.iter().rev() { text.push(*character); } unscanned_text_fragment_info.text = text.into_boxed_str(); } WhitespaceStrippingResult::from_unscanned_text_fragment_info( &unscanned_text_fragment_info) } _ => WhitespaceStrippingResult::RetainFragment, } } pub fn inline_styles(&self) -> InlineStyleIterator { InlineStyleIterator::new(self) } /// Returns the inline-size of this fragment's margin box. pub fn margin_box_inline_size(&self) -> Au { self.border_box.size.inline + self.margin.inline_start_end() } /// Returns true if this node *or any of the nodes within its inline fragment context* have /// non-`static` `position`. pub fn is_positioned(&self) -> bool { if self.style.get_box().position != position::T::static_ { return true } if let Some(ref inline_context) = self.inline_context { for node in inline_context.nodes.iter() { if node.style.get_box().position != position::T::static_ { return true } } } false } /// Returns true if this node is absolutely positioned. pub fn is_absolutely_positioned(&self) -> bool { self.style.get_box().position == position::T::absolute } pub fn is_inline_absolute(&self) -> bool { match self.specific { SpecificFragmentInfo::InlineAbsolute(..) 
=> true, _ => false, } } pub fn meld_with_next_inline_fragment(&mut self, next_fragment: &Fragment) { if let Some(ref mut inline_context_of_this_fragment) = self.inline_context { if let Some(ref inline_context_of_next_fragment) = next_fragment.inline_context { for (inline_context_node_from_this_fragment, inline_context_node_from_next_fragment) in inline_context_of_this_fragment.nodes.iter_mut().rev() .zip(inline_context_of_next_fragment.nodes.iter().rev()) { if !inline_context_node_from_next_fragment.flags.contains( LAST_FRAGMENT_OF_ELEMENT) { continue } if inline_context_node_from_next_fragment.address != inline_context_node_from_this_fragment.address { continue } inline_context_node_from_this_fragment.flags.insert(LAST_FRAGMENT_OF_ELEMENT); } } } } pub fn meld_with_prev_inline_fragment(&mut self, prev_fragment: &Fragment) { if let Some(ref mut inline_context_of_this_fragment) = self.inline_context { if let Some(ref inline_context_of_prev_fragment) = prev_fragment.inline_context { for (inline_context_node_from_prev_fragment, inline_context_node_from_this_fragment) in inline_context_of_prev_fragment.nodes.iter().rev().zip( inline_context_of_this_fragment.nodes.iter_mut().rev()) { if !inline_context_node_from_prev_fragment.flags.contains( FIRST_FRAGMENT_OF_ELEMENT) { continue } if inline_context_node_from_prev_fragment.address != inline_context_node_from_this_fragment.address { continue } inline_context_node_from_this_fragment.flags.insert( FIRST_FRAGMENT_OF_ELEMENT); } } } } pub fn fragment_id(&self) -> usize { return self as *const Fragment as usize; } pub fn fragment_type(&self) -> FragmentType { match self.pseudo { PseudoElementType::Normal => FragmentType::FragmentBody, PseudoElementType::Before(_) => FragmentType::BeforePseudoContent, PseudoElementType::After(_) => FragmentType::AfterPseudoContent, PseudoElementType::DetailsSummary(_) => FragmentType::FragmentBody, PseudoElementType::DetailsContent(_) => FragmentType::FragmentBody, } } pub fn layer_id(&self) 
-> LayerId { let layer_type = match self.pseudo { PseudoElementType::Normal => LayerType::FragmentBody, PseudoElementType::Before(_) => LayerType::BeforePseudoContent, PseudoElementType::After(_) => LayerType::AfterPseudoContent, PseudoElementType::DetailsSummary(_) => LayerType::FragmentBody, PseudoElementType::DetailsContent(_) => LayerType::FragmentBody, }; LayerId::new_of_type(layer_type, self.node.id() as usize) } pub fn layer_id_for_overflow_scroll(&self) -> LayerId { LayerId::new_of_type(LayerType::OverflowScroll, self.node.id() as usize) } /// Returns true if any of the inline styles associated with this fragment have /// `vertical-align` set to `top` or `bottom`. pub fn is_vertically_aligned_to_top_or_bottom(&self) -> bool { match self.style.get_box().vertical_align { vertical_align::T::top | vertical_align::T::bottom => return true, _ => {} } if let Some(ref inline_context) = self.inline_context { for node in &inline_context.nodes { match node.style.get_box().vertical_align { vertical_align::T::top | vertical_align::T::bottom => return true, _ => {} } } } false } pub fn is_text_or_replaced(&self) -> bool { match self.specific { SpecificFragmentInfo::Generic | SpecificFragmentInfo::InlineAbsolute(_) | SpecificFragmentInfo::InlineAbsoluteHypothetical(_) | SpecificFragmentInfo::InlineBlock(_) | SpecificFragmentInfo::Multicol | SpecificFragmentInfo::MulticolColumn | SpecificFragmentInfo::Table | SpecificFragmentInfo::TableCell | SpecificFragmentInfo::TableColumn(_) | SpecificFragmentInfo::TableRow | SpecificFragmentInfo::TableWrapper => false, SpecificFragmentInfo::Canvas(_) | SpecificFragmentInfo::GeneratedContent(_) | SpecificFragmentInfo::Iframe(_) | SpecificFragmentInfo::Image(_) | SpecificFragmentInfo::ScannedText(_) | SpecificFragmentInfo::UnscannedText(_) => true } } } impl fmt::Debug for Fragment { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let border_padding_string = if !self.border_padding.is_zero() { format!(" border_padding={:?}", 
self.border_padding) } else { "".to_owned() }; let margin_string = if !self.margin.is_zero() { format!(" margin={:?}", self.margin) } else { "".to_owned() }; let damage_string = if self.restyle_damage != RestyleDamage::empty() { format!(" damage={:?}", self.restyle_damage) } else { "".to_owned() }; write!(f, "{}({}) [{:?}] border_box={:?}{}{}{}", self.specific.get_type(), self.debug_id, self.specific, self.border_box, border_padding_string, margin_string, damage_string) } } bitflags! { flags QuantitiesIncludedInIntrinsicInlineSizes: u8 { const INTRINSIC_INLINE_SIZE_INCLUDES_MARGINS = 0x01, const INTRINSIC_INLINE_SIZE_INCLUDES_PADDING = 0x02, const INTRINSIC_INLINE_SIZE_INCLUDES_BORDER = 0x04, const INTRINSIC_INLINE_SIZE_INCLUDES_SPECIFIED = 0x08, } } bitflags! { // Various flags we can use when splitting fragments. See // `calculate_split_position_using_breaking_strategy()`. flags SplitOptions: u8 { #[doc = "True if this is the first fragment on the line."] const STARTS_LINE = 0x01, #[doc = "True if we should attempt to split at character boundaries if this split fails. \ This is used to implement `overflow-wrap: break-word`."] const RETRY_AT_CHARACTER_BOUNDARIES = 0x02, } } /// A top-down fragment border box iteration handler. pub trait FragmentBorderBoxIterator { /// The operation to perform. fn process(&mut self, fragment: &Fragment, level: i32, overflow: &Rect<Au>); /// Returns true if this fragment must be processed in-order. If this returns false, /// we skip the operation for this fragment, but continue processing siblings. fn should_process(&mut self, fragment: &Fragment) -> bool; } /// The coordinate system used in `stacking_relative_border_box()`. See the documentation of that /// method for details. #[derive(Clone, PartialEq, Debug)] pub enum CoordinateSystem { /// The border box returned is relative to the fragment's parent stacking context. Parent, /// The border box returned is relative to the fragment's own stacking context, if applicable. 
Own, } pub struct InlineStyleIterator<'a> { fragment: &'a Fragment, inline_style_index: usize, primary_style_yielded: bool, } impl<'a> Iterator for InlineStyleIterator<'a> { type Item = &'a ServoComputedValues; fn next(&mut self) -> Option<&'a ServoComputedValues> { if !self.primary_style_yielded { self.primary_style_yielded = true; return Some(&*self.fragment.style) } let inline_context = match self.fragment.inline_context { None => return None, Some(ref inline_context) => inline_context, }; let inline_style_index = self.inline_style_index; if inline_style_index == inline_context.nodes.len() { return None } self.inline_style_index += 1; Some(&*inline_context.nodes[inline_style_index].style) } } impl<'a> InlineStyleIterator<'a> { fn new(fragment: &Fragment) -> InlineStyleIterator { InlineStyleIterator { fragment: fragment, inline_style_index: 0, primary_style_yielded: false, } } } #[derive(Copy, Clone, Debug, PartialEq)] pub enum WhitespaceStrippingResult { RetainFragment, FragmentContainedOnlyBidiControlCharacters, FragmentContainedOnlyWhitespace, } impl WhitespaceStrippingResult { fn from_unscanned_text_fragment_info(info: &UnscannedTextFragmentInfo) -> WhitespaceStrippingResult { if info.text.is_empty() { WhitespaceStrippingResult::FragmentContainedOnlyWhitespace } else if info.text.chars().all(gfx::text::util::is_bidi_control) { WhitespaceStrippingResult::FragmentContainedOnlyBidiControlCharacters } else { WhitespaceStrippingResult::RetainFragment } } } /// The overflow area. We need two different notions of overflow: paint overflow and scrollable /// overflow. 
#[derive(Copy, Clone, Debug)] pub struct Overflow { pub scroll: Rect<Au>, pub paint: Rect<Au>, } impl Overflow { pub fn new() -> Overflow { Overflow { scroll: Rect::zero(), paint: Rect::zero(), } } pub fn from_rect(border_box: &Rect<Au>) -> Overflow { Overflow { scroll: *border_box, paint: *border_box, } } pub fn union(&mut self, other: &Overflow) { self.scroll = self.scroll.union(&other.scroll); self.paint = self.paint.union(&other.paint); } pub fn translate(&mut self, point: &Point2D<Au>) { self.scroll = self.scroll.translate(point); self.paint = self.paint.translate(point); } } bitflags! { pub flags FragmentFlags: u8 { /// Whether this fragment has a layer. const HAS_LAYER = 0x01, } } /// Specified distances from the margin edge of a block to its content in the inline direction. /// These are returned by `guess_inline_content_edge_offsets()` and are used in the float placement /// speculation logic. #[derive(Copy, Clone, Debug)] pub struct SpeculatedInlineContentEdgeOffsets { pub start: Au, pub end: Au, } #[cfg(not(debug_assertions))] #[derive(Clone)] struct DebugId; #[cfg(debug_assertions)] #[derive(Clone)] struct DebugId(u16); #[cfg(not(debug_assertions))] impl DebugId { pub fn new() -> DebugId { DebugId } } #[cfg(debug_assertions)] impl DebugId { pub fn new() -> DebugId { DebugId(layout_debug::generate_unique_debug_id()) } } #[cfg(not(debug_assertions))] impl fmt::Display for DebugId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:p}", &self) } } #[cfg(debug_assertions)] impl fmt::Display for DebugId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.0) } } #[cfg(not(debug_assertions))] impl Encodable for DebugId { fn encode<S: Encoder>(&self, e: &mut S) -> Result<(), S::Error> { e.emit_str(&format!("{:p}", &self)) } } #[cfg(debug_assertions)] impl Encodable for DebugId { fn encode<S: Encoder>(&self, e: &mut S) -> Result<(), S::Error> { e.emit_u16(self.0) } } combine normal and keep-all into a single strategy 
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! The `Fragment` type, which represents the leaves of the layout tree. #![deny(unsafe_code)] use app_units::Au; use canvas_traits::CanvasMsg; use context::{LayoutContext, SharedLayoutContext}; use euclid::{Point2D, Rect, Size2D}; use floats::ClearType; use flow::{self, ImmutableFlowUtils}; use flow_ref::{self, FlowRef}; use gfx; use gfx::display_list::{BLUR_INFLATION_FACTOR, OpaqueNode}; use gfx::text::glyph::ByteIndex; use gfx::text::text_run::{TextRun, TextRunSlice}; use gfx_traits::{FragmentType, LayerId, LayerType, StackingContextId}; use inline::{FIRST_FRAGMENT_OF_ELEMENT, InlineFragmentContext, InlineFragmentNodeInfo}; use inline::{InlineMetrics, LAST_FRAGMENT_OF_ELEMENT}; use ipc_channel::ipc::IpcSender; #[cfg(debug_assertions)] use layout_debug; use model::{self, Direction, IntrinsicISizes, IntrinsicISizesContribution, MaybeAuto}; use msg::constellation_msg::PipelineId; use net_traits::image::base::{Image, ImageMetadata}; use net_traits::image_cache_thread::{ImageOrMetadataAvailable, UsePlaceholder}; use range::*; use rustc_serialize::{Encodable, Encoder}; use script_layout_interface::HTMLCanvasData; use script_layout_interface::restyle_damage::{RECONSTRUCT_FLOW, RestyleDamage}; use script_layout_interface::wrapper_traits::{PseudoElementType, ThreadSafeLayoutElement, ThreadSafeLayoutNode}; use std::borrow::ToOwned; use std::cmp::{max, min}; use std::collections::LinkedList; use std::fmt; use std::sync::{Arc, Mutex}; use style::arc_ptr_eq; use style::computed_values::{border_collapse, box_sizing, clear, color, display, mix_blend_mode}; use style::computed_values::{overflow_wrap, overflow_x, position, text_decoration}; use style::computed_values::{transform_style, vertical_align, white_space, word_break, z_index}; use 
style::computed_values::content::ContentItem; use style::context::SharedStyleContext; use style::dom::TRestyleDamage; use style::logical_geometry::{LogicalMargin, LogicalRect, LogicalSize, WritingMode}; use style::properties::ServoComputedValues; use style::str::char_is_whitespace; use style::values::computed::{LengthOrPercentage, LengthOrPercentageOrAuto}; use style::values::computed::LengthOrPercentageOrNone; use text; use text::TextRunScanner; use url::Url; /// Fragments (`struct Fragment`) are the leaves of the layout tree. They cannot position /// themselves. In general, fragments do not have a simple correspondence with CSS fragments in the /// specification: /// /// * Several fragments may correspond to the same CSS box or DOM node. For example, a CSS text box /// broken across two lines is represented by two fragments. /// /// * Some CSS fragments are not created at all, such as some anonymous block fragments induced by /// inline fragments with block-level sibling fragments. In that case, Servo uses an `InlineFlow` /// with `BlockFlow` siblings; the `InlineFlow` is block-level, but not a block container. It is /// positioned as if it were a block fragment, but its children are positioned according to /// inline flow. /// /// A `SpecificFragmentInfo::Generic` is an empty fragment that contributes only borders, margins, /// padding, and backgrounds. It is analogous to a CSS nonreplaced content box. /// /// A fragment's type influences how its styles are interpreted during layout. For example, /// replaced content such as images are resized differently from tables, text, or other content. /// Different types of fragments may also contain custom data; for example, text fragments contain /// text. /// /// Do not add fields to this structure unless they're really really mega necessary! Fragments get /// moved around a lot and thus their size impacts performance of layout quite a bit. 
/// /// FIXME(#2260, pcwalton): This can be slimmed down some by (at least) moving `inline_context` /// to be on `InlineFlow` only. #[derive(Clone)] pub struct Fragment { /// An opaque reference to the DOM node that this `Fragment` originates from. pub node: OpaqueNode, /// The CSS style of this fragment. pub style: Arc<ServoComputedValues>, /// The CSS style of this fragment when it's selected pub selected_style: Arc<ServoComputedValues>, /// The position of this fragment relative to its owning flow. The size includes padding and /// border, but not margin. /// /// NB: This does not account for relative positioning. /// NB: Collapsed borders are not included in this. pub border_box: LogicalRect<Au>, /// The sum of border and padding; i.e. the distance from the edge of the border box to the /// content edge of the fragment. pub border_padding: LogicalMargin<Au>, /// The margin of the content box. pub margin: LogicalMargin<Au>, /// Info specific to the kind of fragment. Keep this enum small. pub specific: SpecificFragmentInfo, /// Holds the style context information for fragments that are part of an inline formatting /// context. pub inline_context: Option<InlineFragmentContext>, /// How damaged this fragment is since last reflow. pub restyle_damage: RestyleDamage, /// The pseudo-element that this fragment represents. pub pseudo: PseudoElementType<()>, /// Various flags for this fragment. pub flags: FragmentFlags, /// A debug ID that is consistent for the life of this fragment (via transform etc). /// This ID should not be considered stable across multiple layouts or fragment /// manipulations. debug_id: DebugId, /// The ID of the StackingContext that contains this fragment. This is initialized /// to 0, but it assigned during the collect_stacking_contexts phase of display /// list construction. 
pub stacking_context_id: StackingContextId, } impl Encodable for Fragment { fn encode<S: Encoder>(&self, e: &mut S) -> Result<(), S::Error> { e.emit_struct("fragment", 3, |e| { try!(e.emit_struct_field("id", 0, |e| self.debug_id.encode(e))); try!(e.emit_struct_field("border_box", 1, |e| self.border_box.encode(e))); e.emit_struct_field("margin", 2, |e| self.margin.encode(e)) }) } } /// Info specific to the kind of fragment. /// /// Keep this enum small. As in, no more than one word. Or pcwalton will yell at you. #[derive(Clone)] pub enum SpecificFragmentInfo { Generic, /// A piece of generated content that cannot be resolved into `ScannedText` until the generated /// content resolution phase (e.g. an ordered list item marker). GeneratedContent(Box<GeneratedContentInfo>), Iframe(IframeFragmentInfo), Image(Box<ImageFragmentInfo>), Canvas(Box<CanvasFragmentInfo>), /// A hypothetical box (see CSS 2.1 § 10.3.7) for an absolutely-positioned block that was /// declared with `display: inline;`. InlineAbsoluteHypothetical(InlineAbsoluteHypotheticalFragmentInfo), InlineBlock(InlineBlockFragmentInfo), /// An inline fragment that establishes an absolute containing block for its descendants (i.e. /// a positioned inline fragment). 
InlineAbsolute(InlineAbsoluteFragmentInfo), ScannedText(Box<ScannedTextFragmentInfo>), Table, TableCell, TableColumn(TableColumnFragmentInfo), TableRow, TableWrapper, Multicol, MulticolColumn, UnscannedText(Box<UnscannedTextFragmentInfo>), } impl SpecificFragmentInfo { fn restyle_damage(&self) -> RestyleDamage { let flow = match *self { SpecificFragmentInfo::Canvas(_) | SpecificFragmentInfo::GeneratedContent(_) | SpecificFragmentInfo::Iframe(_) | SpecificFragmentInfo::Image(_) | SpecificFragmentInfo::ScannedText(_) | SpecificFragmentInfo::Table | SpecificFragmentInfo::TableCell | SpecificFragmentInfo::TableColumn(_) | SpecificFragmentInfo::TableRow | SpecificFragmentInfo::TableWrapper | SpecificFragmentInfo::Multicol | SpecificFragmentInfo::MulticolColumn | SpecificFragmentInfo::UnscannedText(_) | SpecificFragmentInfo::Generic => return RestyleDamage::empty(), SpecificFragmentInfo::InlineAbsoluteHypothetical(ref info) => &info.flow_ref, SpecificFragmentInfo::InlineAbsolute(ref info) => &info.flow_ref, SpecificFragmentInfo::InlineBlock(ref info) => &info.flow_ref, }; flow::base(&**flow).restyle_damage } pub fn get_type(&self) -> &'static str { match *self { SpecificFragmentInfo::Canvas(_) => "SpecificFragmentInfo::Canvas", SpecificFragmentInfo::Generic => "SpecificFragmentInfo::Generic", SpecificFragmentInfo::GeneratedContent(_) => "SpecificFragmentInfo::GeneratedContent", SpecificFragmentInfo::Iframe(_) => "SpecificFragmentInfo::Iframe", SpecificFragmentInfo::Image(_) => "SpecificFragmentInfo::Image", SpecificFragmentInfo::InlineAbsolute(_) => "SpecificFragmentInfo::InlineAbsolute", SpecificFragmentInfo::InlineAbsoluteHypothetical(_) => { "SpecificFragmentInfo::InlineAbsoluteHypothetical" } SpecificFragmentInfo::InlineBlock(_) => "SpecificFragmentInfo::InlineBlock", SpecificFragmentInfo::ScannedText(_) => "SpecificFragmentInfo::ScannedText", SpecificFragmentInfo::Table => "SpecificFragmentInfo::Table", SpecificFragmentInfo::TableCell => 
"SpecificFragmentInfo::TableCell", SpecificFragmentInfo::TableColumn(_) => "SpecificFragmentInfo::TableColumn", SpecificFragmentInfo::TableRow => "SpecificFragmentInfo::TableRow", SpecificFragmentInfo::TableWrapper => "SpecificFragmentInfo::TableWrapper", SpecificFragmentInfo::Multicol => "SpecificFragmentInfo::Multicol", SpecificFragmentInfo::MulticolColumn => "SpecificFragmentInfo::MulticolColumn", SpecificFragmentInfo::UnscannedText(_) => "SpecificFragmentInfo::UnscannedText", } } } impl fmt::Debug for SpecificFragmentInfo { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { SpecificFragmentInfo::ScannedText(ref info) => write!(f, "{:?}", info.text()), SpecificFragmentInfo::UnscannedText(ref info) => write!(f, "{:?}", info.text), _ => Ok(()) } } } /// Clamp a value obtained from style_length, based on min / max lengths. fn clamp_size(size: Au, min_size: LengthOrPercentage, max_size: LengthOrPercentageOrNone, container_size: Au) -> Au { let min_size = model::specified(min_size, container_size); let max_size = model::specified_or_none(max_size, container_size); max(min_size, match max_size { None => size, Some(max_size) => min(size, max_size), }) } /// Information for generated content. #[derive(Clone)] pub enum GeneratedContentInfo { ListItem, ContentItem(ContentItem), /// Placeholder for elements with generated content that did not generate any fragments. Empty, } /// A hypothetical box (see CSS 2.1 § 10.3.7) for an absolutely-positioned block that was declared /// with `display: inline;`. /// /// FIXME(pcwalton): Stop leaking this `FlowRef` to layout; that is not memory safe because layout /// can clone it. #[derive(Clone)] pub struct InlineAbsoluteHypotheticalFragmentInfo { pub flow_ref: FlowRef, } impl InlineAbsoluteHypotheticalFragmentInfo { pub fn new(flow_ref: FlowRef) -> InlineAbsoluteHypotheticalFragmentInfo { InlineAbsoluteHypotheticalFragmentInfo { flow_ref: flow_ref, } } } /// A fragment that represents an inline-block element. 
/// /// FIXME(pcwalton): Stop leaking this `FlowRef` to layout; that is not memory safe because layout /// can clone it. #[derive(Clone)] pub struct InlineBlockFragmentInfo { pub flow_ref: FlowRef, } impl InlineBlockFragmentInfo { pub fn new(flow_ref: FlowRef) -> InlineBlockFragmentInfo { InlineBlockFragmentInfo { flow_ref: flow_ref, } } } /// An inline fragment that establishes an absolute containing block for its descendants (i.e. /// a positioned inline fragment). /// /// FIXME(pcwalton): Stop leaking this `FlowRef` to layout; that is not memory safe because layout /// can clone it. #[derive(Clone)] pub struct InlineAbsoluteFragmentInfo { pub flow_ref: FlowRef, } impl InlineAbsoluteFragmentInfo { pub fn new(flow_ref: FlowRef) -> InlineAbsoluteFragmentInfo { InlineAbsoluteFragmentInfo { flow_ref: flow_ref, } } } #[derive(Clone)] pub struct CanvasFragmentInfo { pub replaced_image_fragment_info: ReplacedImageFragmentInfo, pub ipc_renderer: Option<Arc<Mutex<IpcSender<CanvasMsg>>>>, pub dom_width: Au, pub dom_height: Au, } impl CanvasFragmentInfo { pub fn new<N: ThreadSafeLayoutNode>(node: &N, data: HTMLCanvasData, ctx: &SharedStyleContext) -> CanvasFragmentInfo { CanvasFragmentInfo { replaced_image_fragment_info: ReplacedImageFragmentInfo::new(node, ctx), ipc_renderer: data.ipc_renderer .map(|renderer| Arc::new(Mutex::new(renderer))), dom_width: Au::from_px(data.width as i32), dom_height: Au::from_px(data.height as i32), } } /// Returns the original inline-size of the canvas. pub fn canvas_inline_size(&self) -> Au { if self.replaced_image_fragment_info.writing_mode_is_vertical { self.dom_height } else { self.dom_width } } /// Returns the original block-size of the canvas. pub fn canvas_block_size(&self) -> Au { if self.replaced_image_fragment_info.writing_mode_is_vertical { self.dom_width } else { self.dom_height } } } /// A fragment that represents a replaced content image and its accompanying borders, shadows, etc. 
#[derive(Clone)] pub struct ImageFragmentInfo { /// The image held within this fragment. pub replaced_image_fragment_info: ReplacedImageFragmentInfo, pub image: Option<Arc<Image>>, pub metadata: Option<ImageMetadata>, } impl ImageFragmentInfo { /// Creates a new image fragment from the given URL and local image cache. /// /// FIXME(pcwalton): The fact that image fragments store the cache in the fragment makes little /// sense to me. pub fn new<N: ThreadSafeLayoutNode>(node: &N, url: Option<Url>, shared_layout_context: &SharedLayoutContext) -> ImageFragmentInfo { let image_or_metadata = url.and_then(|url| { shared_layout_context.get_or_request_image_or_meta(url, UsePlaceholder::Yes) }); let (image, metadata) = match image_or_metadata { Some(ImageOrMetadataAvailable::ImageAvailable(i)) => { (Some(i.clone()), Some(ImageMetadata { height: i.height, width: i.width } )) } Some(ImageOrMetadataAvailable::MetadataAvailable(m)) => { (None, Some(m)) } None => { (None, None) } }; ImageFragmentInfo { replaced_image_fragment_info: ReplacedImageFragmentInfo::new(node, &shared_layout_context.style_context), image: image, metadata: metadata, } } /// Returns the original inline-size of the image. pub fn image_inline_size(&mut self) -> Au { match self.metadata { Some(ref metadata) => { Au::from_px(if self.replaced_image_fragment_info.writing_mode_is_vertical { metadata.height } else { metadata.width } as i32) } None => Au(0) } } /// Returns the original block-size of the image. 
pub fn image_block_size(&mut self) -> Au { match self.metadata { Some(ref metadata) => { Au::from_px(if self.replaced_image_fragment_info.writing_mode_is_vertical { metadata.width } else { metadata.height } as i32) } None => Au(0) } } pub fn tile_image_round(position: &mut Au, size: &mut Au, absolute_anchor_origin: Au, image_size: &mut Au) { if *size == Au(0) || *image_size == Au(0) { *position = Au(0); *size =Au(0); return; } let number_of_tiles = (size.to_f32_px() / image_size.to_f32_px()).round().max(1.0); *image_size = *size / (number_of_tiles as i32); ImageFragmentInfo::tile_image(position, size, absolute_anchor_origin, *image_size); } pub fn tile_image_spaced(position: &mut Au, size: &mut Au, tile_spacing: &mut Au, absolute_anchor_origin: Au, image_size: Au) { if *size == Au(0) || image_size == Au(0) { *position = Au(0); *size = Au(0); *tile_spacing = Au(0); return; } // Per the spec, if the space available is not enough for two images, just tile as // normal but only display a single tile. if image_size * 2 >= *size { ImageFragmentInfo::tile_image(position, size, absolute_anchor_origin, image_size); *tile_spacing = Au(0); *size = image_size;; return; } // Take the box size, remove room for two tiles on the edges, and then calculate how many // other tiles fit in between them. let size_remaining = *size - (image_size * 2); let num_middle_tiles = (size_remaining.to_f32_px() / image_size.to_f32_px()).floor() as i32; // Allocate the remaining space as padding between tiles. background-position is ignored // as per the spec, so the position is just the box origin. We are also ignoring // background-attachment here, which seems unspecced when combined with // background-repeat: space. 
let space_for_middle_tiles = image_size * num_middle_tiles; *tile_spacing = (size_remaining - space_for_middle_tiles) / (num_middle_tiles + 1); } /// Tile an image pub fn tile_image(position: &mut Au, size: &mut Au, absolute_anchor_origin: Au, image_size: Au) { // Avoid division by zero below! if image_size == Au(0) { return } let delta_pixels = absolute_anchor_origin - *position; let image_size_px = image_size.to_f32_px(); let tile_count = ((delta_pixels.to_f32_px() + image_size_px - 1.0) / image_size_px).floor(); let offset = image_size * (tile_count as i32); let new_position = absolute_anchor_origin - offset; *size = *position - new_position + *size; *position = new_position; } } #[derive(Clone)] pub struct ReplacedImageFragmentInfo { pub computed_inline_size: Option<Au>, pub computed_block_size: Option<Au>, pub writing_mode_is_vertical: bool, } impl ReplacedImageFragmentInfo { pub fn new<N>(node: &N, ctx: &SharedStyleContext) -> ReplacedImageFragmentInfo where N: ThreadSafeLayoutNode { let is_vertical = node.style(ctx).writing_mode.is_vertical(); ReplacedImageFragmentInfo { computed_inline_size: None, computed_block_size: None, writing_mode_is_vertical: is_vertical, } } /// Returns the calculated inline-size of the image, accounting for the inline-size attribute. pub fn computed_inline_size(&self) -> Au { self.computed_inline_size.expect("image inline_size is not computed yet!") } /// Returns the calculated block-size of the image, accounting for the block-size attribute. pub fn computed_block_size(&self) -> Au { self.computed_block_size.expect("image block_size is not computed yet!") } // Return used value for inline-size or block-size. // // `dom_length`: inline-size or block-size as specified in the `img` tag. 
    // `style_length`: inline-size as given in the CSS
    pub fn style_length(style_length: LengthOrPercentageOrAuto,
                        container_size: Option<Au>) -> MaybeAuto {
        // Percentages and calc() need a definite container size; without one they fall
        // back to `Auto`.
        match (style_length, container_size) {
            (LengthOrPercentageOrAuto::Length(length), _) => MaybeAuto::Specified(length),
            (LengthOrPercentageOrAuto::Percentage(pc), Some(container_size)) => {
                MaybeAuto::Specified(container_size.scale_by(pc))
            }
            (LengthOrPercentageOrAuto::Percentage(_), None) => MaybeAuto::Auto,
            (LengthOrPercentageOrAuto::Calc(calc), Some(container_size)) => {
                MaybeAuto::Specified(calc.length() + container_size.scale_by(calc.percentage()))
            }
            (LengthOrPercentageOrAuto::Calc(_), None) => MaybeAuto::Auto,
            (LengthOrPercentageOrAuto::Auto, _) => MaybeAuto::Auto,
        }
    }

    /// Computes the used inline-size of the replaced content (stored in
    /// `computed_inline_size`) and returns it plus `noncontent_inline_size`.
    /// When the style is `auto`, the size is derived from the intrinsic aspect
    /// ratio and the (possibly specified) block-size.
    pub fn calculate_replaced_inline_size(&mut self,
                                          style: &ServoComputedValues,
                                          noncontent_inline_size: Au,
                                          container_inline_size: Au,
                                          container_block_size: Option<Au>,
                                          fragment_inline_size: Au,
                                          fragment_block_size: Au)
                                          -> Au {
        let style_inline_size = style.content_inline_size();
        let style_block_size = style.content_block_size();
        let style_min_inline_size = style.min_inline_size();
        let style_max_inline_size = style.max_inline_size();
        let style_min_block_size = style.min_block_size();
        let style_max_block_size = style.max_block_size();

        // TODO(ksh8281): compute border,margin
        let inline_size = ReplacedImageFragmentInfo::style_length(
            style_inline_size,
            Some(container_inline_size));

        let inline_size = match inline_size {
            MaybeAuto::Auto => {
                let intrinsic_width = fragment_inline_size;
                let intrinsic_height = fragment_block_size;
                if intrinsic_height == Au(0) {
                    // No intrinsic height: cannot use the aspect ratio.
                    intrinsic_width
                } else {
                    let ratio = intrinsic_width.to_f32_px() / intrinsic_height.to_f32_px();

                    let specified_height = ReplacedImageFragmentInfo::style_length(
                        style_block_size,
                        container_block_size);
                    let specified_height = match specified_height {
                        MaybeAuto::Auto => intrinsic_height,
                        MaybeAuto::Specified(h) => h,
                    };
                    let specified_height = clamp_size(specified_height,
                                                      style_min_block_size,
                                                      style_max_block_size,
                                                      Au(0));
                    // inline = block * (intrinsic aspect ratio)
                    Au::from_f32_px(specified_height.to_f32_px() * ratio)
                }
            },
            MaybeAuto::Specified(w) => w,
        };

        let inline_size = clamp_size(inline_size,
                                     style_min_inline_size,
                                     style_max_inline_size,
                                     container_inline_size);

        self.computed_inline_size = Some(inline_size);
        inline_size + noncontent_inline_size
    }

    /// Computes the used block-size of the replaced content (stored in
    /// `computed_block_size`) and returns it plus `noncontent_block_size`.
    /// Must run after `calculate_replaced_inline_size` (reads `computed_inline_size`).
    pub fn calculate_replaced_block_size(&mut self,
                                         style: &ServoComputedValues,
                                         noncontent_block_size: Au,
                                         containing_block_block_size: Option<Au>,
                                         fragment_inline_size: Au,
                                         fragment_block_size: Au)
                                         -> Au {
        // TODO(ksh8281): compute border,margin,padding
        let style_block_size = style.content_block_size();
        let style_min_block_size = style.min_block_size();
        let style_max_block_size = style.max_block_size();

        let inline_size = self.computed_inline_size();
        let block_size = ReplacedImageFragmentInfo::style_length(
            style_block_size,
            containing_block_block_size);

        let block_size = match block_size {
            MaybeAuto::Auto => {
                // Preserve the intrinsic aspect ratio relative to the used inline-size.
                let intrinsic_width = fragment_inline_size;
                let intrinsic_height = fragment_block_size;
                let scale = intrinsic_width.to_f32_px() / inline_size.to_f32_px();
                Au::from_f32_px(intrinsic_height.to_f32_px() / scale)
            },
            MaybeAuto::Specified(h) => {
                h
            }
        };

        let block_size = clamp_size(block_size,
                                    style_min_block_size,
                                    style_max_block_size,
                                    Au(0));

        self.computed_block_size = Some(block_size);
        block_size + noncontent_block_size
    }
}

/// A fragment that represents an inline frame (iframe). This stores the pipeline ID so that the
/// size of this iframe can be communicated via the constellation to the iframe's own layout thread.
#[derive(Clone)]
pub struct IframeFragmentInfo {
    /// The pipeline ID of this iframe.
    pub pipeline_id: PipelineId,
}

impl IframeFragmentInfo {
    /// Creates the information specific to an iframe fragment.
    pub fn new<N: ThreadSafeLayoutNode>(node: &N) -> IframeFragmentInfo {
        let pipeline_id = node.iframe_pipeline_id();
        IframeFragmentInfo {
            pipeline_id: pipeline_id,
        }
    }

    /// Returns the used inline-size of the iframe, defaulting to 300px per CSS 2.1 § 10.3.2.
    #[inline]
    pub fn calculate_replaced_inline_size(&self, style: &ServoComputedValues, containing_size: Au)
                                          -> Au {
        // Calculate the replaced inline size (or default) as per CSS 2.1 § 10.3.2
        IframeFragmentInfo::calculate_replaced_size(style.content_inline_size(),
                                                    style.min_inline_size(),
                                                    style.max_inline_size(),
                                                    Some(containing_size),
                                                    Au::from_px(300))
    }

    /// Returns the used block-size of the iframe, defaulting to 150px per CSS 2.1 § 10.3.2.
    #[inline]
    pub fn calculate_replaced_block_size(&self, style: &ServoComputedValues,
                                         containing_size: Option<Au>) -> Au {
        // Calculate the replaced block size (or default) as per CSS 2.1 § 10.3.2
        IframeFragmentInfo::calculate_replaced_size(style.content_block_size(),
                                                    style.min_block_size(),
                                                    style.max_block_size(),
                                                    containing_size,
                                                    Au::from_px(150))
    }

    /// Shared helper: resolves a content size against an optional containing size,
    /// falling back to `default_size`, then clamps against min/max.
    fn calculate_replaced_size(content_size: LengthOrPercentageOrAuto,
                               style_min_size: LengthOrPercentage,
                               style_max_size: LengthOrPercentageOrNone,
                               containing_size: Option<Au>,
                               default_size: Au) -> Au {
        let computed_size = match (content_size, containing_size) {
            (LengthOrPercentageOrAuto::Length(length), _) => length,
            (LengthOrPercentageOrAuto::Percentage(pc), Some(container_size)) => container_size.scale_by(pc),
            (LengthOrPercentageOrAuto::Calc(calc), Some(container_size)) => {
                container_size.scale_by(calc.percentage()) + calc.length()
            },
            (LengthOrPercentageOrAuto::Calc(calc), None) => calc.length(),
            (LengthOrPercentageOrAuto::Percentage(_), None) => default_size,
            (LengthOrPercentageOrAuto::Auto, _) => default_size,
        };

        let containing_size = containing_size.unwrap_or(Au(0));
        clamp_size(computed_size,
                   style_min_size,
                   style_max_size,
                   containing_size)
    }
}

/// A scanned text fragment represents a single run of text with a distinct style. A `TextFragment`
/// may be split into two or more fragments across line breaks. Several `TextFragment`s may
/// correspond to a single DOM text node. Split text fragments are implemented by referring to
/// subsets of a single `TextRun` object.
#[derive(Clone)]
pub struct ScannedTextFragmentInfo {
    /// The text run that this represents.
    pub run: Arc<TextRun>,

    /// The intrinsic size of the text fragment.
    pub content_size: LogicalSize<Au>,

    /// The byte offset of the insertion point, if any.
    pub insertion_point: Option<ByteIndex>,

    /// The range within the above text run that this represents.
    pub range: Range<ByteIndex>,

    /// The endpoint of the above range, including whitespace that was stripped out. This exists
    /// so that we can restore the range to its original value (before line breaking occurred) when
    /// performing incremental reflow.
    pub range_end_including_stripped_whitespace: ByteIndex,

    pub flags: ScannedTextFlags,
}

bitflags! {
    pub flags ScannedTextFlags: u8 {
        /// Whether a line break is required after this fragment if wrapping on newlines (e.g. if
        /// `white-space: pre` is in effect).
        const REQUIRES_LINE_BREAK_AFTERWARD_IF_WRAPPING_ON_NEWLINES = 0x01,

        /// Is this fragment selected?
        const SELECTED = 0x02,
    }
}

impl ScannedTextFragmentInfo {
    /// Creates the information specific to a scanned text fragment from a range and a text run.
    pub fn new(run: Arc<TextRun>,
               range: Range<ByteIndex>,
               content_size: LogicalSize<Au>,
               insertion_point: Option<ByteIndex>,
               flags: ScannedTextFlags)
               -> ScannedTextFragmentInfo {
        ScannedTextFragmentInfo {
            run: run,
            range: range,
            insertion_point: insertion_point,
            content_size: content_size,
            // Before line breaking, the stripped-whitespace end equals the range end.
            range_end_including_stripped_whitespace: range.end(),
            flags: flags,
        }
    }

    /// Returns the slice of the run's text covered by this fragment's byte range.
    pub fn text(&self) -> &str {
        &self.run.text[self.range.begin().to_usize() .. self.range.end().to_usize()]
    }

    /// Whether a forced line break must follow this fragment when wrapping on newlines.
    pub fn requires_line_break_afterward_if_wrapping_on_newlines(&self) -> bool {
        self.flags.contains(REQUIRES_LINE_BREAK_AFTERWARD_IF_WRAPPING_ON_NEWLINES)
    }

    /// Whether this fragment is part of the current selection.
    pub fn selected(&self) -> bool {
        self.flags.contains(SELECTED)
    }
}

/// Describes how to split a fragment. This is used during line breaking as part of the return
/// value of `find_split_info_for_inline_size()`.
#[derive(Debug, Clone)]
pub struct SplitInfo {
    // TODO(bjz): this should only need to be a single character index, but both values are
    // currently needed for splitting in the `inline::try_append_*` functions.
    pub range: Range<ByteIndex>,
    pub inline_size: Au,
}

impl SplitInfo {
    /// Builds a `SplitInfo` for `range`, measuring its advance in the fragment's run.
    fn new(range: Range<ByteIndex>, info: &ScannedTextFragmentInfo) -> SplitInfo {
        let inline_size = info.run.advance_for_range(&range);
        SplitInfo {
            range: range,
            inline_size: inline_size,
        }
    }
}

/// Describes how to split a fragment into two. This contains up to two `SplitInfo`s.
pub struct SplitResult {
    /// The part of the fragment that goes on the first line.
    pub inline_start: Option<SplitInfo>,
    /// The part of the fragment that goes on the second line.
    pub inline_end: Option<SplitInfo>,
    /// The text run which is being split.
    pub text_run: Arc<TextRun>,
}

/// Describes how a fragment should be truncated.
pub struct TruncationResult {
    /// The part of the fragment remaining after truncation.
    pub split: SplitInfo,
    /// The text run which is being truncated.
    pub text_run: Arc<TextRun>,
}

/// Data for an unscanned text fragment. Unscanned text fragments are the results of flow
/// construction that have not yet had their inline-size determined.
#[derive(Clone)]
pub struct UnscannedTextFragmentInfo {
    /// The text inside the fragment.
    pub text: Box<str>,

    /// The selected text range. An empty range represents the insertion point.
    pub selection: Option<Range<ByteIndex>>,
}

impl UnscannedTextFragmentInfo {
    /// Creates a new instance of `UnscannedTextFragmentInfo` from the given text.
    #[inline]
    pub fn new(text: String, selection: Option<Range<ByteIndex>>) -> UnscannedTextFragmentInfo {
        UnscannedTextFragmentInfo {
            text: text.into_boxed_str(),
            selection: selection,
        }
    }
}

/// A fragment that represents a table column.
#[derive(Copy, Clone)] pub struct TableColumnFragmentInfo { /// the number of columns a <col> element should span pub span: u32, } impl TableColumnFragmentInfo { /// Create the information specific to an table column fragment. pub fn new<N: ThreadSafeLayoutNode>(node: &N) -> TableColumnFragmentInfo { let element = node.as_element(); let span = element.get_attr(&ns!(), &atom!("span")) .and_then(|string| string.parse().ok()) .unwrap_or(0); TableColumnFragmentInfo { span: span, } } } impl Fragment { /// Constructs a new `Fragment` instance. pub fn new<N: ThreadSafeLayoutNode>(node: &N, specific: SpecificFragmentInfo, ctx: &LayoutContext) -> Fragment { let style_context = ctx.style_context(); let style = node.style(style_context).clone(); let writing_mode = style.writing_mode; let mut restyle_damage = node.restyle_damage(); restyle_damage.remove(RECONSTRUCT_FLOW); Fragment { node: node.opaque(), style: style, selected_style: node.selected_style(style_context).clone(), restyle_damage: restyle_damage, border_box: LogicalRect::zero(writing_mode), border_padding: LogicalMargin::zero(writing_mode), margin: LogicalMargin::zero(writing_mode), specific: specific, inline_context: None, pseudo: node.get_pseudo_element_type().strip(), flags: FragmentFlags::empty(), debug_id: DebugId::new(), stacking_context_id: StackingContextId::new(0), } } /// Constructs a new `Fragment` instance from an opaque node. 
    pub fn from_opaque_node_and_style(node: OpaqueNode,
                                      pseudo: PseudoElementType<()>,
                                      style: Arc<ServoComputedValues>,
                                      selected_style: Arc<ServoComputedValues>,
                                      mut restyle_damage: RestyleDamage,
                                      specific: SpecificFragmentInfo)
                                      -> Fragment {
        let writing_mode = style.writing_mode;

        // Flow reconstruction already happened by the time a fragment is built.
        restyle_damage.remove(RECONSTRUCT_FLOW);

        Fragment {
            node: node,
            style: style,
            selected_style: selected_style,
            restyle_damage: restyle_damage,
            border_box: LogicalRect::zero(writing_mode),
            border_padding: LogicalMargin::zero(writing_mode),
            margin: LogicalMargin::zero(writing_mode),
            specific: specific,
            inline_context: None,
            pseudo: pseudo,
            flags: FragmentFlags::empty(),
            debug_id: DebugId::new(),
            stacking_context_id: StackingContextId::new(0),
        }
    }

    /// Transforms this fragment into another fragment of the given type, with the given size,
    /// preserving all the other data.
    pub fn transform(&self, size: LogicalSize<Au>, info: SpecificFragmentInfo)
                     -> Fragment {
        let new_border_box = LogicalRect::from_point_size(self.style.writing_mode,
                                                          self.border_box.start,
                                                          size);

        // The new fragment needs a full rebuild/reflow, minus flow reconstruction.
        let mut restyle_damage = RestyleDamage::rebuild_and_reflow();
        restyle_damage.remove(RECONSTRUCT_FLOW);

        Fragment {
            node: self.node,
            style: self.style.clone(),
            selected_style: self.selected_style.clone(),
            restyle_damage: restyle_damage,
            border_box: new_border_box,
            border_padding: self.border_padding,
            margin: self.margin,
            specific: info,
            inline_context: self.inline_context.clone(),
            pseudo: self.pseudo.clone(),
            flags: FragmentFlags::empty(),
            debug_id: self.debug_id.clone(),
            stacking_context_id: StackingContextId::new(0),
        }
    }

    /// Transforms this fragment using the given `SplitInfo`, preserving all the other data.
    pub fn transform_with_split_info(&self, split: &SplitInfo, text_run: Arc<TextRun>)
                                    -> Fragment {
        let size = LogicalSize::new(self.style.writing_mode,
                                    split.inline_size,
                                    self.border_box.size.block);
        // Preserve the insertion point if it is in this fragment's range or it is at line end.
        let (flags, insertion_point) = match self.specific {
            SpecificFragmentInfo::ScannedText(ref info) => {
                match info.insertion_point {
                    Some(index) if split.range.contains(index) => (info.flags, info.insertion_point),
                    // Insertion point at the very end of the run, coinciding with this
                    // split's end (line end): keep it on this fragment.
                    Some(index) if index == ByteIndex(text_run.text.chars().count() as isize - 1) &&
                        index == split.range.end() => (info.flags, info.insertion_point),
                    _ => (info.flags, None)
                }
            },
            // Non-text fragments carry no text flags or insertion point.
            _ => (ScannedTextFlags::empty(), None)
        };
        let info = box ScannedTextFragmentInfo::new(
            text_run,
            split.range,
            size,
            insertion_point,
            flags);
        self.transform(size, SpecificFragmentInfo::ScannedText(info))
    }

    /// Transforms this fragment into an ellipsis fragment, preserving all the other data.
    pub fn transform_into_ellipsis(&self, layout_context: &LayoutContext) -> Fragment {
        let mut unscanned_ellipsis_fragments = LinkedList::new();
        unscanned_ellipsis_fragments.push_back(self.transform(
                self.border_box.size,
                SpecificFragmentInfo::UnscannedText(
                    box UnscannedTextFragmentInfo::new("…".to_owned(), None))));
        // Run the text scanner over the single "…" fragment to shape it into a text run.
        let ellipsis_fragments = TextRunScanner::new().scan_for_runs(&mut layout_context.font_context(),
                                                                     unscanned_ellipsis_fragments);
        debug_assert!(ellipsis_fragments.len() == 1);
        ellipsis_fragments.fragments.into_iter().next().unwrap()
    }

    /// Returns this fragment's own restyle damage combined with that of any nested flow.
    pub fn restyle_damage(&self) -> RestyleDamage {
        self.restyle_damage | self.specific.restyle_damage()
    }

    /// Whether this fragment (or any node in its inline context) corresponds to
    /// `node_address`.
    pub fn contains_node(&self, node_address: OpaqueNode) -> bool {
        node_address == self.node ||
        self.inline_context.as_ref().map_or(false, |ctx| {
            ctx.contains_node(node_address)
        })
    }

    /// Adds a style to the inline context for this fragment. If the inline context doesn't exist
    /// yet, it will be created.
    pub fn add_inline_context_style(&mut self, node_info: InlineFragmentNodeInfo) {
        // Lazily create the inline context on first use.
        if self.inline_context.is_none() {
            self.inline_context = Some(InlineFragmentContext::new());
        }
        self.inline_context.as_mut().unwrap().nodes.push(node_info);
    }

    /// Determines which quantities (border/padding/margin/specified) should be included in the
    /// intrinsic inline size of this fragment.
    fn quantities_included_in_intrinsic_inline_size(&self)
                                                    -> QuantitiesIncludedInIntrinsicInlineSizes {
        match self.specific {
            SpecificFragmentInfo::Canvas(_) |
            SpecificFragmentInfo::Generic |
            SpecificFragmentInfo::GeneratedContent(_) |
            SpecificFragmentInfo::Iframe(_) |
            SpecificFragmentInfo::Image(_) |
            SpecificFragmentInfo::InlineAbsolute(_) |
            SpecificFragmentInfo::Multicol => {
                QuantitiesIncludedInIntrinsicInlineSizes::all()
            }
            SpecificFragmentInfo::Table | SpecificFragmentInfo::TableCell => {
                let base_quantities = INTRINSIC_INLINE_SIZE_INCLUDES_PADDING |
                    INTRINSIC_INLINE_SIZE_INCLUDES_SPECIFIED;
                // Borders only count in the separated-borders model.
                if self.style.get_inheritedtable().border_collapse ==
                        border_collapse::T::separate {
                    base_quantities | INTRINSIC_INLINE_SIZE_INCLUDES_BORDER
                } else {
                    base_quantities
                }
            }
            SpecificFragmentInfo::TableWrapper => {
                let base_quantities = INTRINSIC_INLINE_SIZE_INCLUDES_MARGINS |
                    INTRINSIC_INLINE_SIZE_INCLUDES_SPECIFIED;
                if self.style.get_inheritedtable().border_collapse ==
                        border_collapse::T::separate {
                    base_quantities | INTRINSIC_INLINE_SIZE_INCLUDES_BORDER
                } else {
                    base_quantities
                }
            }
            SpecificFragmentInfo::TableRow => {
                let base_quantities = INTRINSIC_INLINE_SIZE_INCLUDES_SPECIFIED;
                if self.style.get_inheritedtable().border_collapse ==
                        border_collapse::T::separate {
                    base_quantities | INTRINSIC_INLINE_SIZE_INCLUDES_BORDER
                } else {
                    base_quantities
                }
            }
            SpecificFragmentInfo::ScannedText(_) |
            SpecificFragmentInfo::TableColumn(_) |
            SpecificFragmentInfo::UnscannedText(_) |
            SpecificFragmentInfo::InlineAbsoluteHypothetical(_) |
            SpecificFragmentInfo::InlineBlock(_) |
            SpecificFragmentInfo::MulticolColumn => {
                QuantitiesIncludedInIntrinsicInlineSizes::empty()
            }
        }
    }

    /// Returns the portion of the intrinsic inline-size that consists of borders, padding, and/or
    /// margins.
    ///
    /// FIXME(#2261, pcwalton): This won't work well for inlines: is this OK?
    pub fn surrounding_intrinsic_inline_size(&self) -> Au {
        let flags = self.quantities_included_in_intrinsic_inline_size();
        let style = self.style();

        // FIXME(pcwalton): Percentages should be relative to any definite size per CSS-SIZING.
        // This will likely need to be done by pushing down definite sizes during selector
        // cascading.
        let margin = if flags.contains(INTRINSIC_INLINE_SIZE_INCLUDES_MARGINS) {
            let margin = style.logical_margin();
            (MaybeAuto::from_style(margin.inline_start, Au(0)).specified_or_zero() +
             MaybeAuto::from_style(margin.inline_end, Au(0)).specified_or_zero())
        } else {
            Au(0)
        };

        // FIXME(pcwalton): Percentages should be relative to any definite size per CSS-SIZING.
        // This will likely need to be done by pushing down definite sizes during selector
        // cascading.
        let padding = if flags.contains(INTRINSIC_INLINE_SIZE_INCLUDES_PADDING) {
            let padding = style.logical_padding();
            (model::specified(padding.inline_start, Au(0)) +
             model::specified(padding.inline_end, Au(0)))
        } else {
            Au(0)
        };

        let border = if flags.contains(INTRINSIC_INLINE_SIZE_INCLUDES_BORDER) {
            self.border_width().inline_start_end()
        } else {
            Au(0)
        };

        margin + padding + border
    }

    /// Uses the style only to estimate the intrinsic inline-sizes. These may be modified for text
    /// or replaced elements.
    fn style_specified_intrinsic_inline_size(&self) -> IntrinsicISizesContribution {
        let flags = self.quantities_included_in_intrinsic_inline_size();
        let style = self.style();

        let mut specified = Au(0);

        if flags.contains(INTRINSIC_INLINE_SIZE_INCLUDES_SPECIFIED) {
            // Start from the specified inline-size, then clamp by min/max.
            specified = MaybeAuto::from_style(style.content_inline_size(),
                                              Au(0)).specified_or_zero();
            specified = max(model::specified(style.min_inline_size(), Au(0)), specified);
            if let Some(max) = model::specified_or_none(style.max_inline_size(), Au(0)) {
                specified = min(specified, max)
            }
        }

        // FIXME(#2261, pcwalton): This won't work well for inlines: is this OK?
        let surrounding_inline_size = self.surrounding_intrinsic_inline_size();

        IntrinsicISizesContribution {
            content_intrinsic_sizes: IntrinsicISizes {
                minimum_inline_size: specified,
                preferred_inline_size: specified,
            },
            surrounding_size: surrounding_inline_size,
        }
    }

    /// Returns a guess as to the distances from the margin edge of this fragment to its content
    /// in the inline direction. This will generally be correct unless percentages are involved.
    ///
    /// This is used for the float placement speculation logic.
    pub fn guess_inline_content_edge_offsets(&self) -> SpeculatedInlineContentEdgeOffsets {
        let logical_margin = self.style.logical_margin();
        let logical_padding = self.style.logical_padding();
        let border_width = self.border_width();
        // Percentages are resolved against zero here, which is why this is only a guess.
        SpeculatedInlineContentEdgeOffsets {
            start: MaybeAuto::from_style(logical_margin.inline_start, Au(0)).specified_or_zero() +
                model::specified(logical_padding.inline_start, Au(0)) +
                border_width.inline_start,
            end: MaybeAuto::from_style(logical_margin.inline_end, Au(0)).specified_or_zero() +
                model::specified(logical_padding.inline_end, Au(0)) +
                border_width.inline_end,
        }
    }

    /// Computes the line height for this fragment from its font style and metrics.
    pub fn calculate_line_height(&self, layout_context: &LayoutContext) -> Au {
        let font_style = self.style.get_font_arc();
        let font_metrics = text::font_metrics_for_style(&mut layout_context.font_context(),
                                                        font_style);
        text::line_height_from_style(&*self.style, &font_metrics)
    }

    /// Returns the sum of the inline-sizes of all the borders of this fragment. Note that this
    /// can be expensive to compute, so if possible use the `border_padding` field instead.
    #[inline]
    pub fn border_width(&self) -> LogicalMargin<Au> {
        // Scanned-text and inline-block fragments carry no borders of their own.
        let style_border_width = match self.specific {
            SpecificFragmentInfo::ScannedText(_) |
            SpecificFragmentInfo::InlineBlock(_) => LogicalMargin::zero(self.style.writing_mode),
            _ => self.style().logical_border_width(),
        };

        match self.inline_context {
            None => style_border_width,
            Some(ref inline_fragment_context) => {
                // Accumulate borders from the enclosing inline elements; only the first/last
                // fragment of each element contributes its start/end border.
                inline_fragment_context.nodes.iter().fold(style_border_width, |accumulator, node| {
                    let mut this_border_width = node.style.logical_border_width();
                    if !node.flags.contains(FIRST_FRAGMENT_OF_ELEMENT) {
                        this_border_width.inline_start = Au(0)
                    }
                    if !node.flags.contains(LAST_FRAGMENT_OF_ELEMENT) {
                        this_border_width.inline_end = Au(0)
                    }
                    accumulator + this_border_width
                })
            }
        }
    }

    /// Returns the border width in given direction if this fragment has property
    /// 'box-sizing: border-box'. The `border_padding` field should have been initialized.
    pub fn box_sizing_boundary(&self, direction: Direction) -> Au {
        match (self.style().get_position().box_sizing, direction) {
            (box_sizing::T::border_box, Direction::Inline) => {
                self.border_padding.inline_start_end()
            }
            (box_sizing::T::border_box, Direction::Block) => {
                self.border_padding.block_start_end()
            }
            // content-box: sizes exclude border and padding.
            _ => Au(0)
        }
    }

    /// Computes the margins in the inline direction from the containing block inline-size and the
    /// style. After this call, the inline direction of the `margin` field will be correct.
    ///
    /// Do not use this method if the inline direction margins are to be computed some other way
    /// (for example, via constraint solving for blocks).
    pub fn compute_inline_direction_margins(&mut self, containing_block_inline_size: Au) {
        match self.specific {
            // Table internals never carry inline margins.
            SpecificFragmentInfo::Table |
            SpecificFragmentInfo::TableCell |
            SpecificFragmentInfo::TableRow |
            SpecificFragmentInfo::TableColumn(_) => {
                self.margin.inline_start = Au(0);
                self.margin.inline_end = Au(0);
                return
            }
            SpecificFragmentInfo::InlineBlock(_) => {
                // Inline-blocks do not take self margins into account but do account for margins
                // from outer inline contexts.
                self.margin.inline_start = Au(0);
                self.margin.inline_end = Au(0);
            }
            _ => {
                let margin = self.style().logical_margin();
                self.margin.inline_start =
                    MaybeAuto::from_style(margin.inline_start,
                                          containing_block_inline_size).specified_or_zero();
                self.margin.inline_end =
                    MaybeAuto::from_style(margin.inline_end,
                                          containing_block_inline_size).specified_or_zero();
            }
        }

        if let Some(ref inline_context) = self.inline_context {
            // Add margins contributed by enclosing inline elements; only the first/last
            // fragment of each element contributes its start/end margin.
            for node in &inline_context.nodes {
                let margin = node.style.logical_margin();
                let this_inline_start_margin = if !node.flags.contains(FIRST_FRAGMENT_OF_ELEMENT) {
                    Au(0)
                } else {
                    MaybeAuto::from_style(margin.inline_start,
                                          containing_block_inline_size).specified_or_zero()
                };
                let this_inline_end_margin = if !node.flags.contains(LAST_FRAGMENT_OF_ELEMENT) {
                    Au(0)
                } else {
                    MaybeAuto::from_style(margin.inline_end,
                                          containing_block_inline_size).specified_or_zero()
                };

                self.margin.inline_start = self.margin.inline_start + this_inline_start_margin;
                self.margin.inline_end = self.margin.inline_end + this_inline_end_margin;
            }
        }
    }

    /// Computes the margins in the block direction from the containing block inline-size and the
    /// style. After this call, the block direction of the `margin` field will be correct.
    ///
    /// Do not use this method if the block direction margins are to be computed some other way
    /// (for example, via constraint solving for absolutely-positioned flows).
    pub fn compute_block_direction_margins(&mut self, containing_block_inline_size: Au) {
        match self.specific {
            SpecificFragmentInfo::Table |
            SpecificFragmentInfo::TableCell |
            SpecificFragmentInfo::TableRow |
            SpecificFragmentInfo::TableColumn(_) => {
                self.margin.block_start = Au(0);
                self.margin.block_end = Au(0)
            }
            _ => {
                // NB: Percentages are relative to containing block inline-size (not block-size)
                // per CSS 2.1.
                let margin = self.style().logical_margin();
                self.margin.block_start =
                    MaybeAuto::from_style(margin.block_start, containing_block_inline_size)
                    .specified_or_zero();
                self.margin.block_end =
                    MaybeAuto::from_style(margin.block_end, containing_block_inline_size)
                    .specified_or_zero();
            }
        }
    }

    /// Computes the border and padding in both inline and block directions from the containing
    /// block inline-size and the style. After this call, the `border_padding` field will be
    /// correct.
    ///
    /// TODO(pcwalton): Remove `border_collapse`; we can figure it out from our style and specific
    /// fragment info.
    pub fn compute_border_and_padding(&mut self,
                                      containing_block_inline_size: Au,
                                      border_collapse: border_collapse::T) {
        // Compute border.
        let border = match border_collapse {
            border_collapse::T::separate => self.border_width(),
            // Collapsed borders are handled elsewhere; contribute nothing here.
            border_collapse::T::collapse => LogicalMargin::zero(self.style.writing_mode),
        };

        // Compute padding from the fragment's style.
        //
        // This is zero in the case of `inline-block` because that padding is applied to the
        // wrapped block, not the fragment.
        let padding_from_style = match self.specific {
            SpecificFragmentInfo::TableColumn(_) |
            SpecificFragmentInfo::TableRow |
            SpecificFragmentInfo::TableWrapper |
            SpecificFragmentInfo::InlineBlock(_) => LogicalMargin::zero(self.style.writing_mode),
            _ => model::padding_from_style(self.style(), containing_block_inline_size),
        };

        // Compute padding from the inline fragment context.
        let padding_from_inline_fragment_context = match (&self.specific, &self.inline_context) {
            (_, &None) |
            (&SpecificFragmentInfo::TableColumn(_), _) |
            (&SpecificFragmentInfo::TableRow, _) |
            (&SpecificFragmentInfo::TableWrapper, _) => {
                LogicalMargin::zero(self.style.writing_mode)
            }
            (_, &Some(ref inline_fragment_context)) => {
                // Only the first/last fragment of each enclosing inline element contributes
                // its start/end padding.
                let zero_padding = LogicalMargin::zero(self.style.writing_mode);
                inline_fragment_context.nodes.iter().fold(zero_padding, |accumulator, node| {
                    let mut padding = model::padding_from_style(&*node.style, Au(0));
                    if !node.flags.contains(FIRST_FRAGMENT_OF_ELEMENT) {
                        padding.inline_start = Au(0)
                    }
                    if !node.flags.contains(LAST_FRAGMENT_OF_ELEMENT) {
                        padding.inline_end = Au(0)
                    }
                    accumulator + padding
                })
            }
        };

        self.border_padding = border + padding_from_style + padding_from_inline_fragment_context
    }

    // Return offset from original position because of `position: relative`.
    pub fn relative_position(&self, containing_block_size: &LogicalSize<Au>) -> LogicalSize<Au> {
        // Resolves one style's relative offsets; the start side wins over the end side.
        fn from_style(style: &ServoComputedValues, container_size: &LogicalSize<Au>)
                      -> LogicalSize<Au> {
            let offsets = style.logical_position();
            let offset_i = if offsets.inline_start != LengthOrPercentageOrAuto::Auto {
                MaybeAuto::from_style(offsets.inline_start,
                                      container_size.inline).specified_or_zero()
            } else {
                -MaybeAuto::from_style(offsets.inline_end,
                                       container_size.inline).specified_or_zero()
            };
            // NOTE(review): block offsets are resolved against `container_size.inline` here,
            // while CSS 2.1 resolves top/bottom percentages against the containing block's
            // height — confirm whether this is intentional.
            let offset_b = if offsets.block_start != LengthOrPercentageOrAuto::Auto {
                MaybeAuto::from_style(offsets.block_start,
                                      container_size.inline).specified_or_zero()
            } else {
                -MaybeAuto::from_style(offsets.block_end,
                                       container_size.inline).specified_or_zero()
            };
            LogicalSize::new(style.writing_mode, offset_i, offset_b)
        }

        // Go over the ancestor fragments and add all relative offsets (if any).
        let mut rel_pos = if self.style().get_box().position == position::T::relative {
            from_style(self.style(), containing_block_size)
        } else {
            LogicalSize::zero(self.style.writing_mode)
        };

        if let Some(ref inline_fragment_context) = self.inline_context {
            for node in &inline_fragment_context.nodes {
                if node.style.get_box().position == position::T::relative {
                    rel_pos = rel_pos + from_style(&*node.style, containing_block_size);
                }
            }
        }

        rel_pos
    }

    /// Always inline for SCCP.
    ///
    /// FIXME(pcwalton): Just replace with the clear type from the style module for speed?
    #[inline(always)]
    pub fn clear(&self) -> Option<ClearType> {
        let style = self.style();
        match style.get_box().clear {
            clear::T::none => None,
            clear::T::left => Some(ClearType::Left),
            clear::T::right => Some(ClearType::Right),
            clear::T::both => Some(ClearType::Both),
        }
    }

    /// Returns this fragment's computed style.
    #[inline(always)]
    pub fn style(&self) -> &ServoComputedValues {
        &*self.style
    }

    /// Returns the style used when this fragment's content is selected.
    #[inline(always)]
    pub fn selected_style(&self) -> &ServoComputedValues {
        &*self.selected_style
    }

    pub fn white_space(&self) -> white_space::T {
        self.style().get_inheritedtext().white_space
    }

    pub fn color(&self) -> color::T {
        self.style().get_color().color
    }

    /// Returns the text decoration of this fragment, according to the style of the nearest ancestor
    /// element.
    ///
    /// NB: This may not be the actual text decoration, because of the override rules specified in
    /// CSS 2.1 § 16.3.1. Unfortunately, computing this properly doesn't really fit into Servo's
    /// model. Therefore, this is a best lower bound approximation, but the end result may actually
    /// have the various decoration flags turned on afterward.
    pub fn text_decoration(&self) -> text_decoration::T {
        self.style().get_text().text_decoration
    }

    /// Returns the inline-start offset from margin edge to content edge.
    ///
    /// FIXME(#2262, pcwalton): I think this method is pretty bogus, because it won't work for
    /// inlines.
pub fn inline_start_offset(&self) -> Au { match self.specific { SpecificFragmentInfo::TableWrapper => self.margin.inline_start, SpecificFragmentInfo::Table | SpecificFragmentInfo::TableCell | SpecificFragmentInfo::TableRow => self.border_padding.inline_start, SpecificFragmentInfo::TableColumn(_) => Au(0), _ => self.margin.inline_start + self.border_padding.inline_start, } } /// Returns true if this element can be split. This is true for text fragments, unless /// `white-space: pre` or `white-space: nowrap` is set. pub fn can_split(&self) -> bool { self.is_scanned_text_fragment() && self.white_space().allow_wrap() } /// Returns true if and only if this fragment is a generated content fragment. pub fn is_unscanned_generated_content(&self) -> bool { match self.specific { SpecificFragmentInfo::GeneratedContent(box GeneratedContentInfo::Empty) => false, SpecificFragmentInfo::GeneratedContent(..) => true, _ => false, } } /// Returns true if and only if this is a scanned text fragment. pub fn is_scanned_text_fragment(&self) -> bool { match self.specific { SpecificFragmentInfo::ScannedText(..) => true, _ => false, } } /// Computes the intrinsic inline-sizes of this fragment. 
    pub fn compute_intrinsic_inline_sizes(&mut self) -> IntrinsicISizesContribution {
        // Start from the inline-size the style itself specifies, then refine per fragment kind.
        let mut result = self.style_specified_intrinsic_inline_size();
        match self.specific {
            SpecificFragmentInfo::Generic |
            SpecificFragmentInfo::GeneratedContent(_) |
            SpecificFragmentInfo::Iframe(_) |
            SpecificFragmentInfo::Table |
            SpecificFragmentInfo::TableCell |
            SpecificFragmentInfo::TableColumn(_) |
            SpecificFragmentInfo::TableRow |
            SpecificFragmentInfo::TableWrapper |
            SpecificFragmentInfo::Multicol |
            SpecificFragmentInfo::MulticolColumn |
            SpecificFragmentInfo::InlineAbsoluteHypothetical(_) => {}
            SpecificFragmentInfo::InlineBlock(ref info) => {
                // Wrapped block flows contribute their already-computed intrinsic sizes.
                let block_flow = info.flow_ref.as_block();
                result.union_block(&block_flow.base.intrinsic_inline_sizes)
            }
            SpecificFragmentInfo::InlineAbsolute(ref info) => {
                let block_flow = info.flow_ref.as_block();
                result.union_block(&block_flow.base.intrinsic_inline_sizes)
            }
            SpecificFragmentInfo::Image(ref mut image_fragment_info) => {
                // Replaced content: use the specified length if given, otherwise the image's
                // own inline-size; then clamp by min/max-inline-size.
                let mut image_inline_size = match self.style.content_inline_size() {
                    LengthOrPercentageOrAuto::Auto |
                    LengthOrPercentageOrAuto::Percentage(_) => {
                        image_fragment_info.image_inline_size()
                    }
                    LengthOrPercentageOrAuto::Length(length) => length,
                    LengthOrPercentageOrAuto::Calc(calc) => calc.length(),
                };

                image_inline_size = max(model::specified(self.style.min_inline_size(), Au(0)),
                                        image_inline_size);
                if let Some(max) = model::specified_or_none(self.style.max_inline_size(), Au(0)) {
                    image_inline_size = min(image_inline_size, max)
                }
                result.union_block(&IntrinsicISizes {
                    minimum_inline_size: image_inline_size,
                    preferred_inline_size: image_inline_size,
                });
            }
            SpecificFragmentInfo::Canvas(ref mut canvas_fragment_info) => {
                // Same clamping logic as for images, using the canvas's natural inline-size.
                let mut canvas_inline_size = match self.style.content_inline_size() {
                    LengthOrPercentageOrAuto::Auto |
                    LengthOrPercentageOrAuto::Percentage(_) => {
                        canvas_fragment_info.canvas_inline_size()
                    }
                    LengthOrPercentageOrAuto::Length(length) => length,
                    LengthOrPercentageOrAuto::Calc(calc) => calc.length(),
                };

                canvas_inline_size =
                    max(model::specified(self.style.min_inline_size(), Au(0)),
                        canvas_inline_size);
                if let Some(max) = model::specified_or_none(self.style.max_inline_size(), Au(0)) {
                    canvas_inline_size = min(canvas_inline_size, max)
                }

                result.union_block(&IntrinsicISizes {
                    minimum_inline_size: canvas_inline_size,
                    preferred_inline_size: canvas_inline_size,
                });
            }
            SpecificFragmentInfo::ScannedText(ref text_fragment_info) => {
                let range = &text_fragment_info.range;

                // See http://dev.w3.org/csswg/css-sizing/#max-content-inline-size.
                // TODO: Account for soft wrap opportunities.
                let max_line_inline_size = text_fragment_info.run
                                                             .metrics_for_range(range)
                                                             .advance_width;

                // If wrapping is forbidden, the minimum is the whole unbroken run.
                let min_line_inline_size = if self.white_space().allow_wrap() {
                    text_fragment_info.run.min_width_for_range(range)
                } else {
                    max_line_inline_size
                };

                result.union_block(&IntrinsicISizes {
                    minimum_inline_size: min_line_inline_size,
                    preferred_inline_size: max_line_inline_size,
                })
            }
            SpecificFragmentInfo::UnscannedText(..) => {
                panic!("Unscanned text fragments should have been scanned by now!")
            }
        };

        // Take borders and padding for parent inline fragments into account, if necessary.
        if self.is_primary_fragment() {
            if let Some(ref context) = self.inline_context {
                for node in &context.nodes {
                    let mut border_width = node.style.logical_border_width();
                    let mut padding = model::padding_from_style(&*node.style, Au(0));
                    let mut margin = model::specified_margin_from_style(&*node.style);
                    // Only the first/last fragment of an element gets that element's
                    // inline-start/inline-end surrounding size.
                    if !node.flags.contains(FIRST_FRAGMENT_OF_ELEMENT) {
                        border_width.inline_start = Au(0);
                        padding.inline_start = Au(0);
                        margin.inline_start = Au(0);
                    }
                    if !node.flags.contains(LAST_FRAGMENT_OF_ELEMENT) {
                        border_width.inline_end = Au(0);
                        padding.inline_end = Au(0);
                        margin.inline_end = Au(0);
                    }

                    result.surrounding_size = result.surrounding_size +
                        border_width.inline_start_end() +
                        padding.inline_start_end() +
                        margin.inline_start_end();
                }
            }
        }

        result
    }

    /// Returns the narrowest inline-size that the first splittable part of this fragment could
    /// possibly be split to. (In most cases, this returns the inline-size of the first word in
    /// this fragment.)
    pub fn minimum_splittable_inline_size(&self) -> Au {
        match self.specific {
            SpecificFragmentInfo::ScannedText(ref text) => {
                text.run.minimum_splittable_inline_size(&text.range)
            }
            _ => Au(0),
        }
    }

    /// TODO: What exactly does this function return? Why is it Au(0) for
    /// `SpecificFragmentInfo::Generic`?
    pub fn content_inline_size(&self) -> Au {
        match self.specific {
            SpecificFragmentInfo::Generic |
            SpecificFragmentInfo::GeneratedContent(_) |
            SpecificFragmentInfo::Iframe(_) |
            SpecificFragmentInfo::Table |
            SpecificFragmentInfo::TableCell |
            SpecificFragmentInfo::TableRow |
            SpecificFragmentInfo::TableWrapper |
            SpecificFragmentInfo::Multicol |
            SpecificFragmentInfo::MulticolColumn |
            SpecificFragmentInfo::InlineBlock(_) |
            SpecificFragmentInfo::InlineAbsoluteHypothetical(_) |
            SpecificFragmentInfo::InlineAbsolute(_) => Au(0),
            SpecificFragmentInfo::Canvas(ref canvas_fragment_info) => {
                canvas_fragment_info.replaced_image_fragment_info.computed_inline_size()
            }
            SpecificFragmentInfo::Image(ref image_fragment_info) => {
                image_fragment_info.replaced_image_fragment_info.computed_inline_size()
            }
            SpecificFragmentInfo::ScannedText(ref text_fragment_info) => {
                let (range, run) = (&text_fragment_info.range, &text_fragment_info.run);
                let text_bounds = run.metrics_for_range(range).bounding_box;
                text_bounds.size.width
            }
            SpecificFragmentInfo::TableColumn(_) => {
                panic!("Table column fragments do not have inline_size")
            }
            SpecificFragmentInfo::UnscannedText(_) => {
                panic!("Unscanned text fragments should have been scanned by now!")
            }
        }
    }

    /// Returns the dimensions of the content box.
    ///
    /// This is marked `#[inline]` because it is frequently called when only one or two of the
    /// values are needed and that will save computation.
    #[inline]
    pub fn content_box(&self) -> LogicalRect<Au> {
        self.border_box - self.border_padding
    }

    /// Attempts to find the split positions of a text fragment so that its inline-size is no more
    /// than `max_inline_size`.
    ///
    /// A return value of `None` indicates that the fragment could not be split. Otherwise the
    /// information pertaining to the split is returned. The inline-start and inline-end split
    /// information are both optional due to the possibility of them being whitespace.
    pub fn calculate_split_position(&self, max_inline_size: Au, starts_line: bool)
                                    -> Option<SplitResult> {
        // Only scanned text fragments can be split.
        let text_fragment_info = match self.specific {
            SpecificFragmentInfo::ScannedText(ref text_fragment_info) => text_fragment_info,
            _ => return None,
        };

        let mut flags = SplitOptions::empty();
        if starts_line {
            flags.insert(STARTS_LINE);
            // `overflow-wrap: break-word` only takes effect at the start of a line.
            if self.style().get_inheritedtext().overflow_wrap == overflow_wrap::T::break_word {
                flags.insert(RETRY_AT_CHARACTER_BOUNDARIES)
            }
        }

        match self.style().get_inheritedtext().word_break {
            word_break::T::normal | word_break::T::keep_all => {
                // Break at normal word boundaries. keep-all forbids soft wrap opportunities.
                let natural_word_breaking_strategy =
                    text_fragment_info.run.natural_word_slices_in_range(&text_fragment_info.range);
                self.calculate_split_position_using_breaking_strategy(
                    natural_word_breaking_strategy,
                    max_inline_size,
                    flags)
            }
            word_break::T::break_all => {
                // Break at character boundaries.
                let character_breaking_strategy =
                    text_fragment_info.run.character_slices_in_range(&text_fragment_info.range);
                // Already breaking at characters; a character-boundary retry is redundant.
                flags.remove(RETRY_AT_CHARACTER_BOUNDARIES);
                self.calculate_split_position_using_breaking_strategy(
                    character_breaking_strategy,
                    max_inline_size,
                    flags)
            }
        }
    }

    /// Truncates this fragment to the given `max_inline_size`, using a character-based breaking
    /// strategy. If no characters could fit, returns `None`.
    pub fn truncate_to_inline_size(&self, max_inline_size: Au) -> Option<TruncationResult> {
        // Only scanned text fragments can be truncated.
        let text_fragment_info =
            if let SpecificFragmentInfo::ScannedText(ref text_fragment_info) = self.specific {
                text_fragment_info
            } else {
                return None
            };

        let character_breaking_strategy =
            text_fragment_info.run.character_slices_in_range(&text_fragment_info.range);
        // Only the inline-start half of a split is kept; the rest is discarded.
        match self.calculate_split_position_using_breaking_strategy(character_breaking_strategy,
                                                                    max_inline_size,
                                                                    SplitOptions::empty()) {
            None => None,
            Some(split_info) => {
                match split_info.inline_start {
                    None => None,
                    Some(split) => {
                        Some(TruncationResult {
                            split: split,
                            text_run: split_info.text_run.clone(),
                        })
                    }
                }
            }
        }
    }

    /// A helper method that uses the breaking strategy described by `slice_iterator` (at present,
    /// either natural word breaking or character breaking) to split this fragment.
    fn calculate_split_position_using_breaking_strategy<'a, I>(
            &self,
            slice_iterator: I,
            max_inline_size: Au,
            flags: SplitOptions)
            -> Option<SplitResult>
            where I: Iterator<Item=TextRunSlice<'a>> {
        let text_fragment_info = match self.specific {
            SpecificFragmentInfo::ScannedText(ref text_fragment_info) => text_fragment_info,
            _ => return None,
        };

        // Border and padding eat into the space available for text.
        let mut remaining_inline_size = max_inline_size - self.border_padding.inline_start_end();
        let mut inline_start_range = Range::new(text_fragment_info.range.begin(), ByteIndex(0));
        let mut inline_end_range = None;
        let mut overflowing = false;

        debug!("calculate_split_position_using_breaking_strategy: splitting text fragment \
                (strlen={}, range={:?}, max_inline_size={:?})",
               text_fragment_info.run.text.len(),
               text_fragment_info.range,
               max_inline_size);

        for slice in slice_iterator {
            debug!("calculate_split_position_using_breaking_strategy: considering slice \
                    (offset={:?}, slice range={:?}, remaining_inline_size={:?})",
                   slice.offset,
                   slice.range,
                   remaining_inline_size);

            // Use the `remaining_inline_size` to find a split point if possible. If not, go around
            // the loop again with the next slice.
            let metrics = text_fragment_info.run.metrics_for_slice(slice.glyphs, &slice.range);
            let advance = metrics.advance_width;

            // Have we found the split point?
            if advance <= remaining_inline_size || slice.glyphs.is_whitespace() {
                // Keep going; we haven't found the split point yet.
                debug!("calculate_split_position_using_breaking_strategy: enlarging span");
                remaining_inline_size = remaining_inline_size - advance;
                inline_start_range.extend_by(slice.range.length());
                continue
            }

            // The advance is more than the remaining inline-size, so split here. First, check to
            // see if we're going to overflow the line. If so, perform a best-effort split.
            let mut remaining_range = slice.text_run_range();
            let split_is_empty = inline_start_range.is_empty() &&
                    !(self.requires_line_break_afterward_if_wrapping_on_newlines() &&
                      !self.white_space().allow_wrap());
            if split_is_empty {
                // We're going to overflow the line.
                overflowing = true;
                inline_start_range = slice.text_run_range();
                remaining_range = Range::new(slice.text_run_range().end(), ByteIndex(0));
                remaining_range.extend_to(text_fragment_info.range.end());
            }

            // Check to see if we need to create an inline-end chunk.
            let slice_begin = remaining_range.begin();
            if slice_begin < text_fragment_info.range.end() {
                // There still some things left over at the end of the line, so create the
                // inline-end chunk.
                let mut inline_end = remaining_range;
                inline_end.extend_to(text_fragment_info.range.end());
                inline_end_range = Some(inline_end);
                debug!("calculate_split_position: splitting remainder with inline-end range={:?}",
                       inline_end);
            }

            // If we failed to find a suitable split point, we're on the verge of overflowing the
            // line.
            if split_is_empty || overflowing {
                // If we've been instructed to retry at character boundaries (probably via
                // `overflow-wrap: break-word`), do so.
                if flags.contains(RETRY_AT_CHARACTER_BOUNDARIES) {
                    let character_breaking_strategy =
                        text_fragment_info.run
                                          .character_slices_in_range(&text_fragment_info.range);
                    // Drop the retry flag so the recursive call cannot loop forever.
                    let mut flags = flags;
                    flags.remove(RETRY_AT_CHARACTER_BOUNDARIES);
                    return self.calculate_split_position_using_breaking_strategy(
                        character_breaking_strategy,
                        max_inline_size,
                        flags)
                }

                // We aren't at the start of the line, so don't overflow. Let inline layout wrap to
                // the next line instead.
                if !flags.contains(STARTS_LINE) {
                    return None
                }
            }

            break
        }

        // Package the two halves (either may be absent) into the split result.
        let split_is_empty = inline_start_range.is_empty() &&
                !self.requires_line_break_afterward_if_wrapping_on_newlines();
        let inline_start = if !split_is_empty {
            Some(SplitInfo::new(inline_start_range, &**text_fragment_info))
        } else {
            None
        };
        let inline_end = inline_end_range.map(|inline_end_range| {
            SplitInfo::new(inline_end_range, &**text_fragment_info)
        });

        Some(SplitResult {
            inline_start: inline_start,
            inline_end: inline_end,
            text_run: text_fragment_info.run.clone(),
        })
    }

    /// The opposite of `calculate_split_position_using_breaking_strategy`: merges this fragment
    /// with the next one.
    pub fn merge_with(&mut self, next_fragment: Fragment) {
        match (&mut self.specific, &next_fragment.specific) {
            (&mut SpecificFragmentInfo::ScannedText(ref mut this_info),
             &SpecificFragmentInfo::ScannedText(ref other_info)) => {
                // Both fragments must reference the same underlying text run.
                debug_assert!(arc_ptr_eq(&this_info.run, &other_info.run));
                this_info.range_end_including_stripped_whitespace =
                    other_info.range_end_including_stripped_whitespace;
                if other_info.requires_line_break_afterward_if_wrapping_on_newlines() {
                    this_info.flags.insert(REQUIRES_LINE_BREAK_AFTERWARD_IF_WRAPPING_ON_NEWLINES);
                }
                if other_info.insertion_point.is_some() {
                    this_info.insertion_point = other_info.insertion_point;
                }
                // The merged fragment takes the inline-end edges of the second fragment.
                self.border_padding.inline_end = next_fragment.border_padding.inline_end;
                self.margin.inline_end = next_fragment.margin.inline_end;
            }
            _ => panic!("Can only merge two scanned-text fragments!"),
        }
        self.reset_text_range_and_inline_size();
        self.meld_with_next_inline_fragment(&next_fragment);
    }

    /// Restore any whitespace that was stripped from a text fragment, and recompute inline metrics
    /// if necessary.
    pub fn reset_text_range_and_inline_size(&mut self) {
        if let SpecificFragmentInfo::ScannedText(ref mut info) = self.specific {
            // Undo any justification-induced extra word spacing.
            if info.run.extra_word_spacing != Au(0) {
                Arc::make_mut(&mut info.run).extra_word_spacing = Au(0);
            }

            // FIXME (mbrubeck): Do we need to restore leading too?
            let range_end = info.range_end_including_stripped_whitespace;
            if info.range.end() == range_end {
                return
            }
            info.range.extend_to(range_end);
            info.content_size.inline = info.run.metrics_for_range(&info.range).advance_width;
            self.border_box.size.inline = info.content_size.inline +
                self.border_padding.inline_start_end();
        }
    }

    /// Assigns replaced inline-size, padding, and margins for this fragment only if it is replaced
    /// content per CSS 2.1 § 10.3.2.
    pub fn assign_replaced_inline_size_if_necessary(&mut self,
                                                    container_inline_size: Au,
                                                    container_block_size: Option<Au>) {
        // First match filters out the kinds this method does not apply to; the kinds that
        // fall through to `{}` are handled by the second match below.
        match self.specific {
            SpecificFragmentInfo::Generic |
            SpecificFragmentInfo::GeneratedContent(_) |
            SpecificFragmentInfo::Table |
            SpecificFragmentInfo::TableCell |
            SpecificFragmentInfo::TableRow |
            SpecificFragmentInfo::TableWrapper |
            SpecificFragmentInfo::Multicol |
            SpecificFragmentInfo::MulticolColumn => return,
            SpecificFragmentInfo::TableColumn(_) => {
                panic!("Table column fragments do not have inline size")
            }
            SpecificFragmentInfo::UnscannedText(_) => {
                panic!("Unscanned text fragments should have been scanned by now!")
            }
            SpecificFragmentInfo::Canvas(_) |
            SpecificFragmentInfo::Image(_) |
            SpecificFragmentInfo::Iframe(_) |
            SpecificFragmentInfo::InlineBlock(_) |
            SpecificFragmentInfo::InlineAbsoluteHypothetical(_) |
            SpecificFragmentInfo::InlineAbsolute(_) |
            SpecificFragmentInfo::ScannedText(_) => {}
        };

        let style = &*self.style;
        let noncontent_inline_size = self.border_padding.inline_start_end();

        match self.specific {
            SpecificFragmentInfo::InlineAbsoluteHypothetical(ref mut info) => {
                let block_flow = flow_ref::deref_mut(&mut info.flow_ref).as_mut_block();
                block_flow.base.position.size.inline =
                    block_flow.base.intrinsic_inline_sizes.preferred_inline_size;

                // This is a hypothetical box, so it takes up no space.
                self.border_box.size.inline = Au(0);
            }
            SpecificFragmentInfo::InlineBlock(ref mut info) => {
                let block_flow = flow_ref::deref_mut(&mut info.flow_ref).as_mut_block();
                self.border_box.size.inline =
                    max(block_flow.base.intrinsic_inline_sizes.minimum_inline_size,
                        block_flow.base.intrinsic_inline_sizes.preferred_inline_size);
                block_flow.base.block_container_inline_size = self.border_box.size.inline;
                block_flow.base.block_container_writing_mode = self.style.writing_mode;
            }
            SpecificFragmentInfo::InlineAbsolute(ref mut info) => {
                let block_flow = flow_ref::deref_mut(&mut info.flow_ref).as_mut_block();
                self.border_box.size.inline =
                    max(block_flow.base.intrinsic_inline_sizes.minimum_inline_size,
                        block_flow.base.intrinsic_inline_sizes.preferred_inline_size);
                block_flow.base.block_container_inline_size = self.border_box.size.inline;
                block_flow.base.block_container_writing_mode = self.style.writing_mode;
            }
            SpecificFragmentInfo::ScannedText(ref info) => {
                // Scanned text fragments will have already had their content inline-sizes assigned
                // by this point.
                self.border_box.size.inline = info.content_size.inline + noncontent_inline_size
            }
            SpecificFragmentInfo::Image(ref mut image_fragment_info) => {
                let fragment_inline_size = image_fragment_info.image_inline_size();
                let fragment_block_size = image_fragment_info.image_block_size();
                self.border_box.size.inline =
                    image_fragment_info.replaced_image_fragment_info
                                       .calculate_replaced_inline_size(style,
                                                                       noncontent_inline_size,
                                                                       container_inline_size,
                                                                       container_block_size,
                                                                       fragment_inline_size,
                                                                       fragment_block_size);
            }
            SpecificFragmentInfo::Canvas(ref mut canvas_fragment_info) => {
                let fragment_inline_size = canvas_fragment_info.canvas_inline_size();
                let fragment_block_size = canvas_fragment_info.canvas_block_size();
                self.border_box.size.inline =
                    canvas_fragment_info.replaced_image_fragment_info
                                        .calculate_replaced_inline_size(style,
                                                                        noncontent_inline_size,
                                                                        container_inline_size,
                                                                        container_block_size,
                                                                        fragment_inline_size,
                                                                        fragment_block_size);
            }
            SpecificFragmentInfo::Iframe(ref iframe_fragment_info) => {
                self.border_box.size.inline =
                    iframe_fragment_info.calculate_replaced_inline_size(style,
                                                                        container_inline_size) +
                    noncontent_inline_size;
            }
            _ => panic!("this case should have been handled above"),
        }
    }

    /// Assign block-size for this fragment if it is replaced content. The inline-size must have
    /// been assigned first.
    ///
    /// Ideally, this should follow CSS 2.1 § 10.6.2.
    pub fn assign_replaced_block_size_if_necessary(&mut self,
                                                   containing_block_block_size: Option<Au>) {
        // First match filters out kinds this method does not apply to; kinds falling through
        // to `{}` are handled by the second match below.
        match self.specific {
            SpecificFragmentInfo::Generic |
            SpecificFragmentInfo::GeneratedContent(_) |
            SpecificFragmentInfo::Table |
            SpecificFragmentInfo::TableCell |
            SpecificFragmentInfo::TableRow |
            SpecificFragmentInfo::TableWrapper |
            SpecificFragmentInfo::Multicol |
            SpecificFragmentInfo::MulticolColumn => return,
            SpecificFragmentInfo::TableColumn(_) => {
                panic!("Table column fragments do not have block size")
            }
            SpecificFragmentInfo::UnscannedText(_) => {
                panic!("Unscanned text fragments should have been scanned by now!")
            }
            SpecificFragmentInfo::Canvas(_) |
            SpecificFragmentInfo::Iframe(_) |
            SpecificFragmentInfo::Image(_) |
            SpecificFragmentInfo::InlineBlock(_) |
            SpecificFragmentInfo::InlineAbsoluteHypothetical(_) |
            SpecificFragmentInfo::InlineAbsolute(_) |
            SpecificFragmentInfo::ScannedText(_) => {}
        }

        let style = &*self.style;
        let noncontent_block_size = self.border_padding.block_start_end();

        match self.specific {
            SpecificFragmentInfo::Image(ref mut image_fragment_info) => {
                let fragment_inline_size = image_fragment_info.image_inline_size();
                let fragment_block_size = image_fragment_info.image_block_size();
                self.border_box.size.block =
                    image_fragment_info.replaced_image_fragment_info
                                       .calculate_replaced_block_size(style,
                                                                      noncontent_block_size,
                                                                      containing_block_block_size,
                                                                      fragment_inline_size,
                                                                      fragment_block_size);
            }
            SpecificFragmentInfo::Canvas(ref mut canvas_fragment_info) => {
                let fragment_inline_size = canvas_fragment_info.canvas_inline_size();
                let fragment_block_size = canvas_fragment_info.canvas_block_size();
                self.border_box.size.block =
                    canvas_fragment_info.replaced_image_fragment_info
                                        .calculate_replaced_block_size(style,
                                                                       noncontent_block_size,
                                                                       containing_block_block_size,
                                                                       fragment_inline_size,
                                                                       fragment_block_size);
            }
            SpecificFragmentInfo::ScannedText(ref info) => {
                // Scanned text fragments' content block-sizes are calculated by the text run
                // scanner during flow construction.
                self.border_box.size.block = info.content_size.block + noncontent_block_size
            }
            SpecificFragmentInfo::InlineBlock(ref mut info) => {
                // Not the primary fragment, so we do not take the noncontent size into account.
                let block_flow = flow_ref::deref_mut(&mut info.flow_ref).as_block();
                self.border_box.size.block = block_flow.base.position.size.block +
                    block_flow.fragment.margin.block_start_end()
            }
            SpecificFragmentInfo::InlineAbsoluteHypothetical(ref mut info) => {
                // Not the primary fragment, so we do not take the noncontent size into account.
                let block_flow = flow_ref::deref_mut(&mut info.flow_ref).as_block();
                self.border_box.size.block = block_flow.base.position.size.block;
            }
            SpecificFragmentInfo::InlineAbsolute(ref mut info) => {
                // Not the primary fragment, so we do not take the noncontent size into account.
                let block_flow = flow_ref::deref_mut(&mut info.flow_ref).as_block();
                self.border_box.size.block = block_flow.base.position.size.block +
                    block_flow.fragment.margin.block_start_end()
            }
            SpecificFragmentInfo::Iframe(ref info) => {
                self.border_box.size.block =
                    info.calculate_replaced_block_size(style, containing_block_block_size) +
                    noncontent_block_size;
            }
            _ => panic!("should have been handled above"),
        }
    }

    /// Calculates block-size above baseline, depth below baseline, and ascent for this fragment
    /// when used in an inline formatting context. See CSS 2.1 § 10.8.1.
    pub fn inline_metrics(&self, layout_context: &LayoutContext) -> InlineMetrics {
        match self.specific {
            SpecificFragmentInfo::Image(ref image_fragment_info) => {
                // Replaced content sits on the baseline: all of its block-size (plus
                // block-start border/padding) is above the baseline.
                let computed_block_size = image_fragment_info.replaced_image_fragment_info
                                                             .computed_block_size();
                InlineMetrics {
                    block_size_above_baseline: computed_block_size +
                                                   self.border_padding.block_start,
                    depth_below_baseline: self.border_padding.block_end,
                    ascent: computed_block_size + self.border_padding.block_start,
                }
            }
            SpecificFragmentInfo::Canvas(ref canvas_fragment_info) => {
                // Same treatment as images.
                let computed_block_size = canvas_fragment_info.replaced_image_fragment_info
                                                              .computed_block_size();
                InlineMetrics {
                    block_size_above_baseline: computed_block_size +
                                                   self.border_padding.block_start,
                    depth_below_baseline: self.border_padding.block_end,
                    ascent: computed_block_size + self.border_padding.block_start,
                }
            }
            SpecificFragmentInfo::ScannedText(ref info) => {
                // Fragments with no glyphs don't contribute any inline metrics.
                // TODO: Filter out these fragments during flow construction?
                if info.insertion_point.is_none() && info.content_size.inline == Au(0) {
                    return InlineMetrics::new(Au(0), Au(0), Au(0));
                }
                // See CSS 2.1 § 10.8.1.
                let line_height = self.calculate_line_height(layout_context);
                let font_derived_metrics =
                    InlineMetrics::from_font_metrics(&info.run.font_metrics, line_height);
                InlineMetrics {
                    block_size_above_baseline: font_derived_metrics.block_size_above_baseline,
                    depth_below_baseline: font_derived_metrics.depth_below_baseline,
                    ascent: font_derived_metrics.ascent + self.border_padding.block_start,
                }
            }
            SpecificFragmentInfo::InlineBlock(ref info) => {
                // See CSS 2.1 § 10.8.1.
                let flow = &info.flow_ref;
                let block_flow = flow.as_block();
                let is_auto = self.style.get_position().height == LengthOrPercentageOrAuto::Auto;
                // With a fixed height, or when there is no last line box, the baseline is
                // the block-end margin edge of the wrapped block.
                let baseline_offset = match flow.baseline_offset_of_last_line_box_in_flow() {
                    Some(baseline_offset) if is_auto => baseline_offset,
                    _ => block_flow.fragment.border_box.size.block,
                };
                let start_margin = block_flow.fragment.margin.block_start;
                let end_margin = block_flow.fragment.margin.block_end;
                let depth_below_baseline = flow::base(&**flow).position.size.block -
                    baseline_offset + end_margin;
                InlineMetrics::new(baseline_offset + start_margin,
                                   depth_below_baseline,
                                   baseline_offset)
            }
            SpecificFragmentInfo::InlineAbsoluteHypothetical(_) |
            SpecificFragmentInfo::InlineAbsolute(_) => {
                // Hypothetical boxes take up no space.
                InlineMetrics {
                    block_size_above_baseline: Au(0),
                    depth_below_baseline: Au(0),
                    ascent: Au(0),
                }
            }
            _ => {
                // Everything else sits entirely above the baseline.
                InlineMetrics {
                    block_size_above_baseline: self.border_box.size.block,
                    depth_below_baseline: Au(0),
                    ascent: self.border_box.size.block,
                }
            }
        }
    }

    /// Returns true if this fragment is a hypothetical box. See CSS 2.1 § 10.3.7.
    pub fn is_hypothetical(&self) -> bool {
        match self.specific {
            SpecificFragmentInfo::InlineAbsoluteHypothetical(_) => true,
            _ => false,
        }
    }

    /// Returns true if this fragment can merge with another immediately-following fragment or
    /// false otherwise.
    pub fn can_merge_with_fragment(&self, other: &Fragment) -> bool {
        match (&self.specific, &other.specific) {
            // Only two unscanned text fragments can merge.
            (&SpecificFragmentInfo::UnscannedText(ref first_unscanned_text),
             &SpecificFragmentInfo::UnscannedText(_)) => {
                // FIXME: Should probably use a whitelist of styles that can safely differ (#3165)
                if self.style().get_font() != other.style().get_font() ||
                        self.text_decoration() != other.text_decoration() ||
                        self.white_space() != other.white_space() ||
                        self.color() != other.color() {
                    return false
                }

                // A trailing hard newline forces a fragment boundary.
                if first_unscanned_text.text.ends_with('\n') {
                    return false
                }

                // If this node has any styles that have border/padding/margins on the following
                // side, then we can't merge with the next fragment.
                if let Some(ref inline_context) = self.inline_context {
                    for inline_context_node in inline_context.nodes.iter() {
                        if !inline_context_node.flags.contains(LAST_FRAGMENT_OF_ELEMENT) {
                            continue
                        }
                        if inline_context_node.style.logical_margin().inline_end !=
                                LengthOrPercentageOrAuto::Length(Au(0)) {
                            return false
                        }
                        if inline_context_node.style.logical_padding().inline_end !=
                                LengthOrPercentage::Length(Au(0)) {
                            return false
                        }
                        if inline_context_node.style.logical_border_width().inline_end != Au(0) {
                            return false
                        }
                    }
                }

                // If the next fragment has any styles that have border/padding/margins on the
                // preceding side, then it can't merge with us.
                if let Some(ref inline_context) = other.inline_context {
                    for inline_context_node in inline_context.nodes.iter() {
                        if !inline_context_node.flags.contains(FIRST_FRAGMENT_OF_ELEMENT) {
                            continue
                        }
                        if inline_context_node.style.logical_margin().inline_start !=
                                LengthOrPercentageOrAuto::Length(Au(0)) {
                            return false
                        }
                        if inline_context_node.style.logical_padding().inline_start !=
                                LengthOrPercentage::Length(Au(0)) {
                            return false
                        }
                        if inline_context_node.style.logical_border_width().inline_start !=
                                Au(0) {
                            return false
                        }
                    }
                }

                true
            }
            _ => false,
        }
    }

    /// Returns true if and only if this is the *primary fragment* for the fragment's style object
    /// (conceptually, though style sharing makes this not really true, of course). The primary
    /// fragment is the one that draws backgrounds, borders, etc., and takes borders, padding and
    /// margins into account. Every style object has at most one primary fragment.
    ///
    /// At present, all fragments are primary fragments except for inline-block and table wrapper
    /// fragments. Inline-block fragments are not primary fragments because the corresponding block
    /// flow is the primary fragment, while table wrapper fragments are not primary fragments
    /// because the corresponding table flow is the primary fragment.
pub fn is_primary_fragment(&self) -> bool { match self.specific { SpecificFragmentInfo::InlineBlock(_) | SpecificFragmentInfo::InlineAbsoluteHypothetical(_) | SpecificFragmentInfo::InlineAbsolute(_) | SpecificFragmentInfo::MulticolColumn | SpecificFragmentInfo::TableWrapper => false, SpecificFragmentInfo::Canvas(_) | SpecificFragmentInfo::Generic | SpecificFragmentInfo::GeneratedContent(_) | SpecificFragmentInfo::Iframe(_) | SpecificFragmentInfo::Image(_) | SpecificFragmentInfo::ScannedText(_) | SpecificFragmentInfo::Table | SpecificFragmentInfo::TableCell | SpecificFragmentInfo::TableColumn(_) | SpecificFragmentInfo::TableRow | SpecificFragmentInfo::Multicol | SpecificFragmentInfo::UnscannedText(_) => true, } } /// Determines the inline sizes of inline-block fragments. These cannot be fully computed until /// inline size assignment has run for the child flow: thus it is computed "late", during /// block size assignment. pub fn update_late_computed_replaced_inline_size_if_necessary(&mut self) { if let SpecificFragmentInfo::InlineBlock(ref mut inline_block_info) = self.specific { let block_flow = flow_ref::deref_mut(&mut inline_block_info.flow_ref).as_block(); let margin = block_flow.fragment.style.logical_margin(); self.border_box.size.inline = block_flow.fragment.border_box.size.inline + MaybeAuto::from_style(margin.inline_start, Au(0)).specified_or_zero() + MaybeAuto::from_style(margin.inline_end, Au(0)).specified_or_zero() } } pub fn update_late_computed_inline_position_if_necessary(&mut self) { if let SpecificFragmentInfo::InlineAbsoluteHypothetical(ref mut info) = self.specific { let position = self.border_box.start.i; flow_ref::deref_mut(&mut info.flow_ref) .update_late_computed_inline_position_if_necessary(position) } } pub fn update_late_computed_block_position_if_necessary(&mut self) { if let SpecificFragmentInfo::InlineAbsoluteHypothetical(ref mut info) = self.specific { let position = self.border_box.start.b; flow_ref::deref_mut(&mut info.flow_ref) 
.update_late_computed_block_position_if_necessary(position) } } pub fn repair_style(&mut self, new_style: &Arc<ServoComputedValues>) { self.style = (*new_style).clone() } /// Given the stacking-context-relative position of the containing flow, returns the border box /// of this fragment relative to the parent stacking context. This takes `position: relative` /// into account. /// /// If `coordinate_system` is `Parent`, this returns the border box in the parent stacking /// context's coordinate system. Otherwise, if `coordinate_system` is `Own` and this fragment /// establishes a stacking context itself, this returns a border box anchored at (0, 0). (If /// this fragment does not establish a stacking context, then it always belongs to its parent /// stacking context and thus `coordinate_system` is ignored.) /// /// This is the method you should use for display list construction as well as /// `getBoundingClientRect()` and so forth. pub fn stacking_relative_border_box(&self, stacking_relative_flow_origin: &Point2D<Au>, relative_containing_block_size: &LogicalSize<Au>, relative_containing_block_mode: WritingMode, coordinate_system: CoordinateSystem) -> Rect<Au> { let container_size = relative_containing_block_size.to_physical(relative_containing_block_mode); let border_box = self.border_box.to_physical(self.style.writing_mode, container_size); if coordinate_system == CoordinateSystem::Own && self.establishes_stacking_context() { return Rect::new(Point2D::zero(), border_box.size) } // FIXME(pcwalton): This can double-count relative position sometimes for inlines (e.g. // `<div style="position:relative">x</div>`, because the `position:relative` trickles down // to the inline flow. Possibly we should extend the notion of "primary fragment" to fix // this. 
        // Apply `position: relative` offsets, then shift into the stacking
        // context's coordinate space.
        let relative_position = self.relative_position(relative_containing_block_size);
        border_box.translate_by_size(&relative_position.to_physical(self.style.writing_mode))
                  .translate(stacking_relative_flow_origin)
    }

    /// Given the stacking-context-relative border box, returns the stacking-context-relative
    /// content box.
    pub fn stacking_relative_content_box(&self, stacking_relative_border_box: &Rect<Au>)
                                         -> Rect<Au> {
        // Shrink the border box inward by the border + padding on each
        // physical side.
        let border_padding = self.border_padding.to_physical(self.style.writing_mode);
        Rect::new(Point2D::new(stacking_relative_border_box.origin.x + border_padding.left,
                               stacking_relative_border_box.origin.y + border_padding.top),
                  Size2D::new(stacking_relative_border_box.size.width -
                                  border_padding.horizontal(),
                              stacking_relative_border_box.size.height -
                                  border_padding.vertical()))
    }

    /// Returns true if this fragment establishes a new stacking context and false otherwise.
    pub fn establishes_stacking_context(&self) -> bool {
        // Text fragments shouldn't create stacking contexts.
        match self.specific {
            SpecificFragmentInfo::ScannedText(_) |
            SpecificFragmentInfo::UnscannedText(_) => return false,
            _ => {}
        }

        // A fragment with its own layer always gets a stacking context.
        if self.flags.contains(HAS_LAYER) {
            return true
        }

        // Non-default opacity, filters, blend modes, transforms, and non-auto
        // transform styles all force a stacking context.
        if self.style().get_effects().opacity != 1.0 {
            return true
        }
        if !self.style().get_effects().filter.is_empty() {
            return true
        }
        if self.style().get_effects().mix_blend_mode != mix_blend_mode::T::normal {
            return true
        }
        if self.style().get_effects().transform.0.is_some() {
            return true
        }
        match self.style().get_used_transform_style() {
            transform_style::T::flat | transform_style::T::preserve_3d => {
                return true
            }
            transform_style::T::auto => {}
        }

        // FIXME(pcwalton): Don't unconditionally form stacking contexts for `overflow_x: scroll`
        // and `overflow_y: scroll`. This needs multiple layers per stacking context.
        // Positioned fragments establish stacking contexts unless they are the
        // plain `z-index: auto` + fully-visible-overflow case; any non-visible
        // overflow (auto/scroll in either axis) also forces one; statically
        // positioned, visible-overflow fragments do not.
        match (self.style().get_box().position,
               self.style().get_position().z_index,
               self.style().get_box().overflow_x,
               self.style().get_box().overflow_y.0) {
            (position::T::absolute,
             z_index::T::Auto,
             overflow_x::T::visible,
             overflow_x::T::visible) |
            (position::T::fixed,
             z_index::T::Auto,
             overflow_x::T::visible,
             overflow_x::T::visible) |
            (position::T::relative,
             z_index::T::Auto,
             overflow_x::T::visible,
             overflow_x::T::visible) => false,
            (position::T::absolute, _, _, _) |
            (position::T::fixed, _, _, _) |
            (position::T::relative, _, _, _) |
            (_, _, overflow_x::T::auto, _) |
            (_, _, overflow_x::T::scroll, _) |
            (_, _, _, overflow_x::T::auto) |
            (_, _, _, overflow_x::T::scroll) => true,
            (position::T::static_, _, _, _) => false
        }
    }

    // Get the effective z-index of this fragment. Z-indices only apply to positioned element
    // per CSS 2 9.9.1 (http://www.w3.org/TR/CSS2/visuren.html#z-index), so this value may differ
    // from the value specified in the style.
    pub fn effective_z_index(&self) -> i32 {
        // Positioned fragments use their specified z-index directly.
        match self.style().get_box().position {
            position::T::static_ => {},
            _ => return self.style().get_position().z_index.number_or_zero(),
        }

        // Transformed fragments also use their specified z-index.
        if self.style().get_effects().transform.0.is_some() {
            return self.style().get_position().z_index.number_or_zero();
        }

        match self.style().get_box().display {
            // Flex containers honor z-index even without positioning.
            display::T::flex => self.style().get_position().z_index.number_or_zero(),
            _ => 0,
        }
    }

    /// Computes the overflow rect of this fragment relative to the start of the flow.
    pub fn compute_overflow(&self,
                            flow_size: &Size2D<Au>,
                            relative_containing_block_size: &LogicalSize<Au>)
                            -> Overflow {
        let mut border_box = self.border_box.to_physical(self.style.writing_mode, *flow_size);

        // Relative position can cause us to draw outside our border box.
        //
        // FIXME(pcwalton): I'm not a fan of the way this makes us crawl though so many styles all
        // the time. Can't we handle relative positioning by just adjusting `border_box`?
        let relative_position = self.relative_position(relative_containing_block_size);
        border_box =
            border_box.translate_by_size(&relative_position.to_physical(self.style.writing_mode));
        let mut overflow = Overflow::from_rect(&border_box);

        // Box shadows cause us to draw outside our border box.
        for box_shadow in &self.style().get_effects().box_shadow.0 {
            let offset = Point2D::new(box_shadow.offset_x, box_shadow.offset_y);
            // Each shadow inflates paint overflow by its spread radius plus a
            // blur-dependent inflation factor.
            let inflation = box_shadow.spread_radius + box_shadow.blur_radius *
                BLUR_INFLATION_FACTOR;
            overflow.paint = overflow.paint.union(&border_box.translate(&offset)
                                                             .inflate(inflation, inflation))
        }

        // Outlines cause us to draw outside our border box.
        let outline_width = self.style.get_outline().outline_width;
        if outline_width != Au(0) {
            overflow.paint = overflow.paint.union(&border_box.inflate(outline_width,
                                                                      outline_width))
        }

        // Include the overflow of the block flow, if any.
        match self.specific {
            SpecificFragmentInfo::InlineBlock(ref info) => {
                let block_flow = info.flow_ref.as_block();
                overflow.union(&flow::base(block_flow).overflow);
            }
            SpecificFragmentInfo::InlineAbsolute(ref info) => {
                let block_flow = info.flow_ref.as_block();
                overflow.union(&flow::base(block_flow).overflow);
            }
            _ => (),
        }

        // FIXME(pcwalton): Sometimes excessively fancy glyphs can make us draw outside our border
        // box too.
        overflow
    }

    /// Whether a hard line break must follow this fragment when wrapping on
    /// newlines. Only scanned text can require this.
    pub fn requires_line_break_afterward_if_wrapping_on_newlines(&self) -> bool {
        match self.specific {
            SpecificFragmentInfo::ScannedText(ref scanned_text) => {
                scanned_text.requires_line_break_afterward_if_wrapping_on_newlines()
            }
            _ => false,
        }
    }

    /// Strips leading whitespace from this fragment (unless `white-space`
    /// preserves spaces), shrinking its inline size accordingly. Returns
    /// whether the fragment should be retained or can be dropped.
    pub fn strip_leading_whitespace_if_necessary(&mut self) -> WhitespaceStrippingResult {
        if self.white_space().preserve_spaces() {
            return WhitespaceStrippingResult::RetainFragment
        }

        match self.specific {
            SpecificFragmentInfo::ScannedText(ref mut scanned_text_fragment_info) => {
                // Byte offset of the first non-whitespace character; the run
                // before it is the leading whitespace to remove.
                let leading_whitespace_byte_count = scanned_text_fragment_info.text()
                    .find(|c| !char_is_whitespace(c))
                    .unwrap_or(scanned_text_fragment_info.text().len());

                let whitespace_len = ByteIndex(leading_whitespace_byte_count as isize);
                let whitespace_range = Range::new(scanned_text_fragment_info.range.begin(),
                                                  whitespace_len);
                // Measure the stripped run and deduct its advance from our
                // inline size and content size.
                let text_bounds =
                    scanned_text_fragment_info.run.metrics_for_range(&whitespace_range)
                                                  .bounding_box;
                self.border_box.size.inline = self.border_box.size.inline - text_bounds.size.width;
                scanned_text_fragment_info.content_size.inline =
                    scanned_text_fragment_info.content_size.inline - text_bounds.size.width;

                // Move the range's start past the stripped whitespace.
                scanned_text_fragment_info.range.adjust_by(whitespace_len, -whitespace_len);

                WhitespaceStrippingResult::RetainFragment
            }
            SpecificFragmentInfo::UnscannedText(ref mut unscanned_text_fragment_info) => {
                let mut new_text_string = String::new();
                let mut modified = false;
                for (i, character) in unscanned_text_fragment_info.text.char_indices() {
                    // Bidi control characters are kept even amid leading
                    // whitespace.
                    if gfx::text::util::is_bidi_control(character) {
                        new_text_string.push(character);
                        continue
                    }
                    if char_is_whitespace(character) {
                        modified = true;
                        continue
                    }
                    // Finished processing leading control chars and whitespace.
                    if modified {
                        // Append the rest of the text after the stripped run.
                        new_text_string.push_str(&unscanned_text_fragment_info.text[i..]);
                    }
                    break
                }
                if modified {
                    unscanned_text_fragment_info.text = new_text_string.into_boxed_str();
                }

                WhitespaceStrippingResult::from_unscanned_text_fragment_info(
                    &unscanned_text_fragment_info)
            }
            _ => WhitespaceStrippingResult::RetainFragment,
        }
    }

    /// Returns true if the entire fragment was stripped.
    pub fn strip_trailing_whitespace_if_necessary(&mut self) -> WhitespaceStrippingResult {
        if self.white_space().preserve_spaces() {
            return WhitespaceStrippingResult::RetainFragment
        }

        match self.specific {
            SpecificFragmentInfo::ScannedText(ref mut scanned_text_fragment_info) => {
                // Scan backwards for the last non-whitespace character; the
                // trailing whitespace run starts just after it.
                let mut trailing_whitespace_start_byte = 0;
                for (i, c) in scanned_text_fragment_info.text().char_indices().rev() {
                    if !char_is_whitespace(c) {
                        trailing_whitespace_start_byte = i + c.len_utf8();
                        break;
                    }
                }
                let whitespace_start = ByteIndex(trailing_whitespace_start_byte as isize);
                let whitespace_len = scanned_text_fragment_info.range.length() - whitespace_start;
                let mut whitespace_range = Range::new(whitespace_start, whitespace_len);
                whitespace_range.shift_by(scanned_text_fragment_info.range.begin());

                // Deduct the stripped run's advance from our inline size and
                // content size.
                let text_bounds = scanned_text_fragment_info.run
                                                            .metrics_for_range(&whitespace_range)
                                                            .bounding_box;
                self.border_box.size.inline -= text_bounds.size.width;
                scanned_text_fragment_info.content_size.inline -= text_bounds.size.width;

                scanned_text_fragment_info.range.extend_by(-whitespace_len);
                WhitespaceStrippingResult::RetainFragment
            }
            SpecificFragmentInfo::UnscannedText(ref mut unscanned_text_fragment_info) => {
                let mut trailing_bidi_control_characters_to_retain = Vec::new();
                // `modified` starts true so that a fragment containing only
                // whitespace (and possibly bidi controls) is truncated to just
                // the retained control characters.
                let (mut modified, mut last_character_index) = (true, 0);
                for (i, character) in unscanned_text_fragment_info.text.char_indices().rev() {
                    if gfx::text::util::is_bidi_control(character) {
                        // Keep trailing bidi control characters.
                        trailing_bidi_control_characters_to_retain.push(character);
                        continue
                    }
                    if char_is_whitespace(character) {
                        modified = true;
                        continue
                    }
                    // Found the last non-whitespace character; cut just after it.
                    last_character_index = i +
                                           character.len_utf8();
                    break
                }
                if modified {
                    // Truncate after the last non-whitespace character, then
                    // re-append the retained bidi controls in original order.
                    let mut text = unscanned_text_fragment_info.text.to_string();
                    text.truncate(last_character_index);
                    for character in trailing_bidi_control_characters_to_retain.iter().rev() {
                        text.push(*character);
                    }
                    unscanned_text_fragment_info.text = text.into_boxed_str();
                }

                WhitespaceStrippingResult::from_unscanned_text_fragment_info(
                    &unscanned_text_fragment_info)
            }
            _ => WhitespaceStrippingResult::RetainFragment,
        }
    }

    /// Returns an iterator over this fragment's primary style followed by the
    /// styles of its enclosing inline context nodes, if any.
    pub fn inline_styles(&self) -> InlineStyleIterator {
        InlineStyleIterator::new(self)
    }

    /// Returns the inline-size of this fragment's margin box.
    pub fn margin_box_inline_size(&self) -> Au {
        self.border_box.size.inline + self.margin.inline_start_end()
    }

    /// Returns true if this node *or any of the nodes within its inline fragment context* have
    /// non-`static` `position`.
    pub fn is_positioned(&self) -> bool {
        if self.style.get_box().position != position::T::static_ {
            return true
        }
        if let Some(ref inline_context) = self.inline_context {
            for node in inline_context.nodes.iter() {
                if node.style.get_box().position != position::T::static_ {
                    return true
                }
            }
        }
        false
    }

    /// Returns true if this node is absolutely positioned.
    pub fn is_absolutely_positioned(&self) -> bool {
        self.style.get_box().position == position::T::absolute
    }

    /// Whether this is an inline-absolute fragment.
    pub fn is_inline_absolute(&self) -> bool {
        match self.specific {
            SpecificFragmentInfo::InlineAbsolute(..)
                => true,
            _ => false,
        }
    }

    /// Copies `LAST_FRAGMENT_OF_ELEMENT` flags from `next_fragment`'s inline
    /// context onto the matching nodes (same element address) of this
    /// fragment's inline context.
    pub fn meld_with_next_inline_fragment(&mut self, next_fragment: &Fragment) {
        if let Some(ref mut inline_context_of_this_fragment) = self.inline_context {
            if let Some(ref inline_context_of_next_fragment) = next_fragment.inline_context {
                for (inline_context_node_from_this_fragment,
                     inline_context_node_from_next_fragment)
                    in inline_context_of_this_fragment.nodes.iter_mut().rev()
                        .zip(inline_context_of_next_fragment.nodes.iter().rev()) {
                    if !inline_context_node_from_next_fragment.flags.contains(
                            LAST_FRAGMENT_OF_ELEMENT) {
                        continue
                    }
                    // Only meld nodes that refer to the same element.
                    if inline_context_node_from_next_fragment.address !=
                            inline_context_node_from_this_fragment.address {
                        continue
                    }
                    inline_context_node_from_this_fragment.flags.insert(
                        LAST_FRAGMENT_OF_ELEMENT);
                }
            }
        }
    }

    /// Mirror image of `meld_with_next_inline_fragment`: copies
    /// `FIRST_FRAGMENT_OF_ELEMENT` flags from `prev_fragment`'s inline context.
    pub fn meld_with_prev_inline_fragment(&mut self, prev_fragment: &Fragment) {
        if let Some(ref mut inline_context_of_this_fragment) = self.inline_context {
            if let Some(ref inline_context_of_prev_fragment) = prev_fragment.inline_context {
                for (inline_context_node_from_prev_fragment,
                     inline_context_node_from_this_fragment)
                    in inline_context_of_prev_fragment.nodes.iter().rev().zip(
                        inline_context_of_this_fragment.nodes.iter_mut().rev()) {
                    if !inline_context_node_from_prev_fragment.flags.contains(
                            FIRST_FRAGMENT_OF_ELEMENT) {
                        continue
                    }
                    // Only meld nodes that refer to the same element.
                    if inline_context_node_from_prev_fragment.address !=
                            inline_context_node_from_this_fragment.address {
                        continue
                    }
                    inline_context_node_from_this_fragment.flags.insert(
                        FIRST_FRAGMENT_OF_ELEMENT);
                }
            }
        }
    }

    /// A stable identifier for this fragment: its own address.
    pub fn fragment_id(&self) -> usize {
        return self as *const Fragment as usize;
    }

    /// Classifies this fragment by the pseudo-element (if any) that generated it.
    pub fn fragment_type(&self) -> FragmentType {
        match self.pseudo {
            PseudoElementType::Normal => FragmentType::FragmentBody,
            PseudoElementType::Before(_) => FragmentType::BeforePseudoContent,
            PseudoElementType::After(_) => FragmentType::AfterPseudoContent,
            PseudoElementType::DetailsSummary(_) => FragmentType::FragmentBody,
            PseudoElementType::DetailsContent(_) => FragmentType::FragmentBody,
        }
    }

    /// Returns the layer id for this fragment, derived from its node id and
    /// pseudo-element kind.
    pub fn layer_id(&self)
                    -> LayerId {
        let layer_type = match self.pseudo {
            PseudoElementType::Normal => LayerType::FragmentBody,
            PseudoElementType::Before(_) => LayerType::BeforePseudoContent,
            PseudoElementType::After(_) => LayerType::AfterPseudoContent,
            PseudoElementType::DetailsSummary(_) => LayerType::FragmentBody,
            PseudoElementType::DetailsContent(_) => LayerType::FragmentBody,
        };
        LayerId::new_of_type(layer_type, self.node.id() as usize)
    }

    /// Returns the layer id used when this fragment scrolls its own overflow.
    pub fn layer_id_for_overflow_scroll(&self) -> LayerId {
        LayerId::new_of_type(LayerType::OverflowScroll, self.node.id() as usize)
    }

    /// Returns true if any of the inline styles associated with this fragment have
    /// `vertical-align` set to `top` or `bottom`.
    pub fn is_vertically_aligned_to_top_or_bottom(&self) -> bool {
        match self.style.get_box().vertical_align {
            vertical_align::T::top | vertical_align::T::bottom => return true,
            _ => {}
        }
        // Also consult the styles of any enclosing inline elements.
        if let Some(ref inline_context) = self.inline_context {
            for node in &inline_context.nodes {
                match node.style.get_box().vertical_align {
                    vertical_align::T::top | vertical_align::T::bottom => return true,
                    _ => {}
                }
            }
        }
        false
    }

    /// Whether this fragment is text or replaced content, as opposed to a
    /// container kind (tables, multicol, inline-block wrappers, and so on).
    pub fn is_text_or_replaced(&self) -> bool {
        match self.specific {
            SpecificFragmentInfo::Generic |
            SpecificFragmentInfo::InlineAbsolute(_) |
            SpecificFragmentInfo::InlineAbsoluteHypothetical(_) |
            SpecificFragmentInfo::InlineBlock(_) |
            SpecificFragmentInfo::Multicol |
            SpecificFragmentInfo::MulticolColumn |
            SpecificFragmentInfo::Table |
            SpecificFragmentInfo::TableCell |
            SpecificFragmentInfo::TableColumn(_) |
            SpecificFragmentInfo::TableRow |
            SpecificFragmentInfo::TableWrapper => false,
            SpecificFragmentInfo::Canvas(_) |
            SpecificFragmentInfo::GeneratedContent(_) |
            SpecificFragmentInfo::Iframe(_) |
            SpecificFragmentInfo::Image(_) |
            SpecificFragmentInfo::ScannedText(_) |
            SpecificFragmentInfo::UnscannedText(_) => true
        }
    }
}

impl fmt::Debug for Fragment {
    /// Formats a compact one-line summary of this fragment for debugging.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Sections that are empty/zero are omitted to keep the output short.
        let border_padding_string = if !self.border_padding.is_zero() {
            format!(" border_padding={:?}",
                    self.border_padding)
        } else {
            "".to_owned()
        };

        let margin_string = if !self.margin.is_zero() {
            format!(" margin={:?}", self.margin)
        } else {
            "".to_owned()
        };

        let damage_string = if self.restyle_damage != RestyleDamage::empty() {
            format!(" damage={:?}", self.restyle_damage)
        } else {
            "".to_owned()
        };

        write!(f,
               "{}({}) [{:?}] border_box={:?}{}{}{}",
               self.specific.get_type(),
               self.debug_id,
               self.specific,
               self.border_box,
               border_padding_string,
               margin_string,
               damage_string)
    }
}

bitflags! {
    // Which quantities (margins/padding/border/specified size) an intrinsic
    // inline-size computation has already folded in.
    flags QuantitiesIncludedInIntrinsicInlineSizes: u8 {
        const INTRINSIC_INLINE_SIZE_INCLUDES_MARGINS = 0x01,
        const INTRINSIC_INLINE_SIZE_INCLUDES_PADDING = 0x02,
        const INTRINSIC_INLINE_SIZE_INCLUDES_BORDER = 0x04,
        const INTRINSIC_INLINE_SIZE_INCLUDES_SPECIFIED = 0x08,
    }
}

bitflags! {
    // Various flags we can use when splitting fragments. See
    // `calculate_split_position_using_breaking_strategy()`.
    flags SplitOptions: u8 {
        #[doc = "True if this is the first fragment on the line."]
        const STARTS_LINE = 0x01,
        #[doc = "True if we should attempt to split at character boundaries if this split fails. \
                 This is used to implement `overflow-wrap: break-word`."]
        const RETRY_AT_CHARACTER_BOUNDARIES = 0x02,
    }
}

/// A top-down fragment border box iteration handler.
pub trait FragmentBorderBoxIterator {
    /// The operation to perform.
    fn process(&mut self, fragment: &Fragment, level: i32, overflow: &Rect<Au>);

    /// Returns true if this fragment must be processed in-order. If this returns false,
    /// we skip the operation for this fragment, but continue processing siblings.
    fn should_process(&mut self, fragment: &Fragment) -> bool;
}

/// The coordinate system used in `stacking_relative_border_box()`. See the documentation of that
/// method for details.
#[derive(Clone, PartialEq, Debug)]
pub enum CoordinateSystem {
    /// The border box returned is relative to the fragment's parent stacking context.
    Parent,
    /// The border box returned is relative to the fragment's own stacking context, if applicable.
    Own,
}

/// Iterates over a fragment's primary style and then the styles of its
/// enclosing inline context nodes, in order.
pub struct InlineStyleIterator<'a> {
    // The fragment whose styles are being walked.
    fragment: &'a Fragment,
    // Index of the next inline context node style to yield.
    inline_style_index: usize,
    // Whether the fragment's own style has been yielded yet.
    primary_style_yielded: bool,
}

impl<'a> Iterator for InlineStyleIterator<'a> {
    type Item = &'a ServoComputedValues;

    fn next(&mut self) -> Option<&'a ServoComputedValues> {
        // The fragment's own style comes first...
        if !self.primary_style_yielded {
            self.primary_style_yielded = true;
            return Some(&*self.fragment.style)
        }
        // ...followed by each inline context node's style.
        let inline_context = match self.fragment.inline_context {
            None => return None,
            Some(ref inline_context) => inline_context,
        };
        let inline_style_index = self.inline_style_index;
        if inline_style_index == inline_context.nodes.len() {
            return None
        }
        self.inline_style_index += 1;
        Some(&*inline_context.nodes[inline_style_index].style)
    }
}

impl<'a> InlineStyleIterator<'a> {
    /// Creates an iterator positioned before the fragment's primary style.
    fn new(fragment: &Fragment) -> InlineStyleIterator {
        InlineStyleIterator {
            fragment: fragment,
            inline_style_index: 0,
            primary_style_yielded: false,
        }
    }
}

/// Outcome of stripping whitespace from a fragment.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum WhitespaceStrippingResult {
    RetainFragment,
    FragmentContainedOnlyBidiControlCharacters,
    FragmentContainedOnlyWhitespace,
}

impl WhitespaceStrippingResult {
    /// Classifies an unscanned text fragment after stripping: empty text is
    /// whitespace-only, all-bidi-control text is control-only, anything else
    /// should be retained.
    fn from_unscanned_text_fragment_info(info: &UnscannedTextFragmentInfo)
                                         -> WhitespaceStrippingResult {
        if info.text.is_empty() {
            WhitespaceStrippingResult::FragmentContainedOnlyWhitespace
        } else if info.text.chars().all(gfx::text::util::is_bidi_control) {
            WhitespaceStrippingResult::FragmentContainedOnlyBidiControlCharacters
        } else {
            WhitespaceStrippingResult::RetainFragment
        }
    }
}

/// The overflow area. We need two different notions of overflow: paint overflow and scrollable
/// overflow.
#[derive(Copy, Clone, Debug)] pub struct Overflow { pub scroll: Rect<Au>, pub paint: Rect<Au>, } impl Overflow { pub fn new() -> Overflow { Overflow { scroll: Rect::zero(), paint: Rect::zero(), } } pub fn from_rect(border_box: &Rect<Au>) -> Overflow { Overflow { scroll: *border_box, paint: *border_box, } } pub fn union(&mut self, other: &Overflow) { self.scroll = self.scroll.union(&other.scroll); self.paint = self.paint.union(&other.paint); } pub fn translate(&mut self, point: &Point2D<Au>) { self.scroll = self.scroll.translate(point); self.paint = self.paint.translate(point); } } bitflags! { pub flags FragmentFlags: u8 { /// Whether this fragment has a layer. const HAS_LAYER = 0x01, } } /// Specified distances from the margin edge of a block to its content in the inline direction. /// These are returned by `guess_inline_content_edge_offsets()` and are used in the float placement /// speculation logic. #[derive(Copy, Clone, Debug)] pub struct SpeculatedInlineContentEdgeOffsets { pub start: Au, pub end: Au, } #[cfg(not(debug_assertions))] #[derive(Clone)] struct DebugId; #[cfg(debug_assertions)] #[derive(Clone)] struct DebugId(u16); #[cfg(not(debug_assertions))] impl DebugId { pub fn new() -> DebugId { DebugId } } #[cfg(debug_assertions)] impl DebugId { pub fn new() -> DebugId { DebugId(layout_debug::generate_unique_debug_id()) } } #[cfg(not(debug_assertions))] impl fmt::Display for DebugId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:p}", &self) } } #[cfg(debug_assertions)] impl fmt::Display for DebugId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.0) } } #[cfg(not(debug_assertions))] impl Encodable for DebugId { fn encode<S: Encoder>(&self, e: &mut S) -> Result<(), S::Error> { e.emit_str(&format!("{:p}", &self)) } } #[cfg(debug_assertions)] impl Encodable for DebugId { fn encode<S: Encoder>(&self, e: &mut S) -> Result<(), S::Error> { e.emit_u16(self.0) } }
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Implements parallel traversals over the DOM and flow trees. //! //! This code is highly unsafe. Keep this file small and easy to audit. #![allow(unsafe_code)] use context::{LayoutContext, SharedLayoutContextWrapper, SharedLayoutContext}; use flow::{Flow, MutableFlowUtils, PreorderFlowTraversal, PostorderFlowTraversal}; use flow; use flow_ref::FlowRef; use data::{LayoutDataAccess, LayoutDataWrapper}; use traversal::{BubbleISizes, AssignISizes, AssignBSizesAndStoreOverflow}; use traversal::{ComputeAbsolutePositions, BuildDisplayList}; use traversal::{RecalcStyleForNode, ConstructFlows}; use wrapper::{layout_node_to_unsafe_layout_node, layout_node_from_unsafe_layout_node, LayoutNode}; use wrapper::{PostorderNodeMutTraversal, UnsafeLayoutNode}; use wrapper::{PreorderDomTraversal, PostorderDomTraversal}; use profile_traits::time::{self, ProfilerMetadata, profile}; use std::mem; use std::ptr; use std::sync::atomic::{AtomicIsize, Ordering}; use util::opts; use util::workqueue::{WorkQueue, WorkUnit, WorkerProxy}; #[allow(dead_code)] fn static_assertion(node: UnsafeLayoutNode) { unsafe { let _: UnsafeFlow = ::std::intrinsics::transmute(node); } } /// Vtable + pointer representation of a Flow trait object. 
pub type UnsafeFlow = (usize, usize);

/// The sentinel `UnsafeFlow` used to mark "no parent".
fn null_unsafe_flow() -> UnsafeFlow {
    (0, 0)
}

/// Converts an owned `FlowRef` (via raw pointer) to its two-word raw form.
pub fn owned_flow_to_unsafe_flow(flow: *const FlowRef) -> UnsafeFlow {
    unsafe {
        mem::transmute_copy(&*flow)
    }
}

/// Mutable-pointer variant of `owned_flow_to_unsafe_flow`.
pub fn mut_owned_flow_to_unsafe_flow(flow: *mut FlowRef) -> UnsafeFlow {
    unsafe {
        mem::transmute_copy(&*flow)
    }
}

/// Converts a borrowed flow trait object to its two-word raw form.
pub fn borrowed_flow_to_unsafe_flow(flow: &Flow) -> UnsafeFlow {
    unsafe {
        mem::transmute_copy(&flow)
    }
}

/// Mutable-borrow variant of `borrowed_flow_to_unsafe_flow`.
pub fn mut_borrowed_flow_to_unsafe_flow(flow: &mut Flow) -> UnsafeFlow {
    unsafe {
        mem::transmute_copy(&flow)
    }
}

/// Information that we need stored in each DOM node.
pub struct DomParallelInfo {
    /// The number of children that still need work done.
    pub children_count: AtomicIsize,
}

impl DomParallelInfo {
    /// Creates a new info block with a zero pending-children count.
    pub fn new() -> DomParallelInfo {
        DomParallelInfo {
            children_count: AtomicIsize::new(0),
        }
    }
}

/// The signature of the worker functions enqueued on the work queue by the
/// parallel traversals below.
pub type FlowTraversalFunction =
    extern "Rust" fn(UnsafeFlow, &mut WorkerProxy<SharedLayoutContextWrapper,UnsafeLayoutNode>);

/// A parallel top-down DOM traversal.
pub trait ParallelPreorderDomTraversal : PreorderDomTraversal {
    fn run_parallel(&self,
                    node: UnsafeLayoutNode,
                    proxy: &mut WorkerProxy<SharedLayoutContextWrapper,UnsafeLayoutNode>);

    /// Processes one node, then either enqueues its children (continuing the
    /// top-down phase) or, at a leaf, kicks off the bottom-up phase.
    #[inline(always)]
    fn run_parallel_helper(&self,
                           unsafe_node: UnsafeLayoutNode,
                           proxy: &mut WorkerProxy<SharedLayoutContextWrapper,UnsafeLayoutNode>,
                           top_down_func: FlowTraversalFunction,
                           bottom_up_func: FlowTraversalFunction) {
        // Get a real layout node.
        let node: LayoutNode = unsafe {
            layout_node_from_unsafe_layout_node(&unsafe_node)
        };

        // Perform the appropriate traversal.
        self.process(node);

        // NB: O(n).
        let child_count = node.children().count();

        // Reset the count of children.
        {
            let mut layout_data_ref = node.mutate_layout_data();
            let layout_data = layout_data_ref.as_mut().expect("no layout data");
            layout_data.data.parallel.children_count.store(child_count as isize,
                                                           Ordering::Relaxed);
        }

        // Possibly enqueue the children.
        if child_count != 0 {
            for kid in node.children() {
                proxy.push(WorkUnit {
                    fun: top_down_func,
                    data: layout_node_to_unsafe_layout_node(&kid),
                });
            }
        } else {
            // If there were no more children, start walking back up.
            bottom_up_func(unsafe_node, proxy)
        }
    }
}

/// A parallel bottom-up DOM traversal.
trait ParallelPostorderDomTraversal : PostorderDomTraversal {
    /// Process current node and potentially traverse its ancestors.
    ///
    /// If we are the last child that finished processing, recursively process
    /// our parent. Else, stop. Also, stop at the root.
    ///
    /// Thus, if we start with all the leaves of a tree, we end up traversing
    /// the whole tree bottom-up because each parent will be processed exactly
    /// once (by the last child that finishes processing).
    ///
    /// The only communication between siblings is that they both
    /// fetch-and-subtract the parent's children count.
    fn run_parallel(&self,
                    mut unsafe_node: UnsafeLayoutNode,
                    proxy: &mut WorkerProxy<SharedLayoutContextWrapper,UnsafeLayoutNode>) {
        loop {
            // Get a real layout node.
            let node: LayoutNode = unsafe {
                layout_node_from_unsafe_layout_node(&unsafe_node)
            };

            // Perform the appropriate traversal.
            self.process(node);

            let shared_layout_context = unsafe { &*(proxy.user_data().0) };
            let layout_context = LayoutContext::new(shared_layout_context);

            let parent = match node.layout_parent_node(layout_context.shared) {
                None => break,
                Some(parent) => parent,
            };

            unsafe {
                let parent_layout_data = (*parent.borrow_layout_data_unchecked())
                                             .as_ref()
                                             .expect("no layout data");
                unsafe_node = layout_node_to_unsafe_layout_node(&parent);

                let parent_layout_data: &LayoutDataWrapper = mem::transmute(parent_layout_data);
                // Atomically decrement the parent's pending-children count;
                // whoever brings it to zero continues up the tree.
                if parent_layout_data
                       .data
                       .parallel
                       .children_count
                       .fetch_sub(1, Ordering::SeqCst) == 1 {
                    // We were the last child of our parent. Construct flows for our parent.
                } else {
                    // Get out of here and find another node to work on.
                    break
                }
            }
        }
    }
}

/// Information that we need stored in each flow.
pub struct FlowParallelInfo {
    /// The number of children that still need work done.
    pub children_count: AtomicIsize,
    /// The address of the parent flow.
    pub parent: UnsafeFlow,
}

impl FlowParallelInfo {
    /// Creates a new info block with no pending children and a null parent.
    pub fn new() -> FlowParallelInfo {
        FlowParallelInfo {
            children_count: AtomicIsize::new(0),
            parent: null_unsafe_flow(),
        }
    }
}

/// A parallel bottom-up flow traversal.
trait ParallelPostorderFlowTraversal : PostorderFlowTraversal {
    /// Process current flow and potentially traverse its ancestors.
    ///
    /// If we are the last child that finished processing, recursively process
    /// our parent. Else, stop. Also, stop at the root.
    ///
    /// Thus, if we start with all the leaves of a tree, we end up traversing
    /// the whole tree bottom-up because each parent will be processed exactly
    /// once (by the last child that finishes processing).
    ///
    /// The only communication between siblings is that they both
    /// fetch-and-subtract the parent's children count.
    fn run_parallel(&self,
                    mut unsafe_flow: UnsafeFlow,
                    _: &mut WorkerProxy<SharedLayoutContextWrapper,UnsafeFlow>) {
        loop {
            unsafe {
                // Get a real flow.
                let flow: &mut FlowRef = mem::transmute(&mut unsafe_flow);

                // Perform the appropriate traversal.
                if self.should_process(&mut **flow) {
                    self.process(&mut **flow);
                }

                let base = flow::mut_base(&mut **flow);

                // Reset the count of children for the next layout traversal.
                base.parallel.children_count.store(base.children.len() as isize,
                                                   Ordering::Relaxed);

                // Possibly enqueue the parent.
                let mut unsafe_parent = base.parallel.parent;
                if unsafe_parent == null_unsafe_flow() {
                    // We're done!
                    break
                }

                // No, we're not at the root yet. Then are we the last child
                // of our parent to finish processing? If so, we can continue
                // on with our parent; otherwise, we've gotta wait.
let parent: &mut FlowRef = mem::transmute(&mut unsafe_parent); let parent_base = flow::mut_base(&mut **parent); if parent_base.parallel.children_count.fetch_sub(1, Ordering::SeqCst) == 1 { // We were the last child of our parent. Reflow our parent. unsafe_flow = unsafe_parent } else { // Stop. break } } } } } /// A parallel top-down flow traversal. trait ParallelPreorderFlowTraversal : PreorderFlowTraversal { fn run_parallel(&self, unsafe_flow: UnsafeFlow, proxy: &mut WorkerProxy<SharedLayoutContextWrapper,UnsafeFlow>); fn should_record_thread_ids(&self) -> bool; #[inline(always)] fn run_parallel_helper(&self, mut unsafe_flow: UnsafeFlow, proxy: &mut WorkerProxy<SharedLayoutContextWrapper,UnsafeFlow>, top_down_func: FlowTraversalFunction, bottom_up_func: FlowTraversalFunction) { let mut had_children = false; unsafe { // Get a real flow. let flow: &mut FlowRef = mem::transmute(&mut unsafe_flow); if self.should_record_thread_ids() { flow::mut_base(&mut **flow).thread_id = proxy.worker_index(); } if self.should_process(&mut **flow) { // Perform the appropriate traversal. self.process(&mut **flow); } // Possibly enqueue the children. for kid in flow::child_iter(&mut **flow) { had_children = true; proxy.push(WorkUnit { fun: top_down_func, data: borrowed_flow_to_unsafe_flow(kid), }); } } // If there were no more children, start assigning block-sizes. 
if !had_children { bottom_up_func(unsafe_flow, proxy) } } } impl<'a> ParallelPostorderFlowTraversal for BubbleISizes<'a> {} impl<'a> ParallelPreorderFlowTraversal for AssignISizes<'a> { fn run_parallel(&self, unsafe_flow: UnsafeFlow, proxy: &mut WorkerProxy<SharedLayoutContextWrapper,UnsafeFlow>) { self.run_parallel_helper(unsafe_flow, proxy, assign_inline_sizes, assign_block_sizes_and_store_overflow) } fn should_record_thread_ids(&self) -> bool { true } } impl<'a> ParallelPostorderFlowTraversal for AssignBSizesAndStoreOverflow<'a> {} impl<'a> ParallelPreorderFlowTraversal for ComputeAbsolutePositions<'a> { fn run_parallel(&self, unsafe_flow: UnsafeFlow, proxy: &mut WorkerProxy<SharedLayoutContextWrapper, UnsafeFlow>) { self.run_parallel_helper(unsafe_flow, proxy, compute_absolute_positions, build_display_list) } fn should_record_thread_ids(&self) -> bool { false } } impl<'a> ParallelPostorderFlowTraversal for BuildDisplayList<'a> {} impl<'a> ParallelPostorderDomTraversal for ConstructFlows<'a> {} impl <'a> ParallelPreorderDomTraversal for RecalcStyleForNode<'a> { fn run_parallel(&self, unsafe_node: UnsafeLayoutNode, proxy: &mut WorkerProxy<SharedLayoutContextWrapper, UnsafeLayoutNode>) { self.run_parallel_helper(unsafe_node, proxy, recalc_style, construct_flows) } } fn recalc_style(unsafe_node: UnsafeLayoutNode, proxy: &mut WorkerProxy<SharedLayoutContextWrapper, UnsafeLayoutNode>) { let shared_layout_context = unsafe { &*(proxy.user_data().0) }; let layout_context = LayoutContext::new(shared_layout_context); let recalc_style_for_node_traversal = RecalcStyleForNode { layout_context: &layout_context, }; recalc_style_for_node_traversal.run_parallel(unsafe_node, proxy) } fn construct_flows(unsafe_node: UnsafeLayoutNode, proxy: &mut WorkerProxy<SharedLayoutContextWrapper, UnsafeLayoutNode>) { let shared_layout_context = unsafe { &*(proxy.user_data().0) }; let layout_context = LayoutContext::new(shared_layout_context); let construct_flows_traversal = ConstructFlows { 
layout_context: &layout_context, }; construct_flows_traversal.run_parallel(unsafe_node, proxy) } fn assign_inline_sizes(unsafe_flow: UnsafeFlow, proxy: &mut WorkerProxy<SharedLayoutContextWrapper,UnsafeFlow>) { let shared_layout_context = unsafe { &*(proxy.user_data().0) }; let layout_context = LayoutContext::new(shared_layout_context); let assign_inline_sizes_traversal = AssignISizes { layout_context: &layout_context, }; assign_inline_sizes_traversal.run_parallel(unsafe_flow, proxy) } fn assign_block_sizes_and_store_overflow(unsafe_flow: UnsafeFlow, proxy: &mut WorkerProxy<SharedLayoutContextWrapper,UnsafeFlow>) { let shared_layout_context = unsafe { &*(proxy.user_data().0) }; let layout_context = LayoutContext::new(shared_layout_context); let assign_block_sizes_traversal = AssignBSizesAndStoreOverflow { layout_context: &layout_context, }; assign_block_sizes_traversal.run_parallel(unsafe_flow, proxy) } fn compute_absolute_positions(unsafe_flow: UnsafeFlow, proxy: &mut WorkerProxy<SharedLayoutContextWrapper, UnsafeFlow>) { let shared_layout_context = unsafe { &*(proxy.user_data().0) }; let layout_context = LayoutContext::new(shared_layout_context); let compute_absolute_positions_traversal = ComputeAbsolutePositions { layout_context: &layout_context, }; compute_absolute_positions_traversal.run_parallel(unsafe_flow, proxy); } fn build_display_list(unsafe_flow: UnsafeFlow, proxy: &mut WorkerProxy<SharedLayoutContextWrapper, UnsafeFlow>) { let shared_layout_context = unsafe { &*(proxy.user_data().0) }; let layout_context = LayoutContext::new(shared_layout_context); let build_display_list_traversal = BuildDisplayList { layout_context: &layout_context, }; build_display_list_traversal.run_parallel(unsafe_flow, proxy); } pub fn traverse_dom_preorder(root: LayoutNode, shared_layout_context: &SharedLayoutContext, queue: &mut WorkQueue<SharedLayoutContextWrapper, UnsafeLayoutNode>) { queue.data = SharedLayoutContextWrapper(shared_layout_context as *const _); 
queue.push(WorkUnit { fun: recalc_style, data: layout_node_to_unsafe_layout_node(&root), }); queue.run(); queue.data = SharedLayoutContextWrapper(ptr::null()); } pub fn traverse_flow_tree_preorder(root: &mut FlowRef, profiler_metadata: ProfilerMetadata, time_profiler_chan: time::ProfilerChan, shared_layout_context: &SharedLayoutContext, queue: &mut WorkQueue<SharedLayoutContextWrapper,UnsafeFlow>) { if opts::get().bubble_inline_sizes_separately { let layout_context = LayoutContext::new(shared_layout_context); let bubble_inline_sizes = BubbleISizes { layout_context: &layout_context }; root.traverse_postorder(&bubble_inline_sizes); } queue.data = SharedLayoutContextWrapper(shared_layout_context as *const _); profile(time::ProfilerCategory::LayoutParallelWarmup, profiler_metadata, time_profiler_chan, || { queue.push(WorkUnit { fun: assign_inline_sizes, data: mut_owned_flow_to_unsafe_flow(root), }) }); queue.run(); queue.data = SharedLayoutContextWrapper(ptr::null()) } pub fn build_display_list_for_subtree(root: &mut FlowRef, profiler_metadata: ProfilerMetadata, time_profiler_chan: time::ProfilerChan, shared_layout_context: &SharedLayoutContext, queue: &mut WorkQueue<SharedLayoutContextWrapper,UnsafeFlow>) { queue.data = SharedLayoutContextWrapper(shared_layout_context as *const _); profile(time::ProfilerCategory::LayoutParallelWarmup, profiler_metadata, time_profiler_chan, || { queue.push(WorkUnit { fun: compute_absolute_positions, data: mut_owned_flow_to_unsafe_flow(root), }) }); queue.run(); queue.data = SharedLayoutContextWrapper(ptr::null()) } layout: Divide DOM nodes and flows into chunks, and perform work stealing over those instead of working on nodes one-by-one. This reduces the overhead of the work-stealing traversal function significantly. It's especially important on ARM, where memory barriers are expensive. /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. 
If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Implements parallel traversals over the DOM and flow trees. //! //! This code is highly unsafe. Keep this file small and easy to audit. #![allow(unsafe_code)] use context::{LayoutContext, SharedLayoutContextWrapper, SharedLayoutContext}; use flow::{Flow, MutableFlowUtils, PreorderFlowTraversal, PostorderFlowTraversal}; use flow; use flow_ref::FlowRef; use data::{LayoutDataAccess, LayoutDataWrapper}; use traversal::{BubbleISizes, AssignISizes, AssignBSizesAndStoreOverflow}; use traversal::{ComputeAbsolutePositions, BuildDisplayList}; use traversal::{RecalcStyleForNode, ConstructFlows}; use wrapper::{layout_node_to_unsafe_layout_node, layout_node_from_unsafe_layout_node, LayoutNode}; use wrapper::{PostorderNodeMutTraversal, UnsafeLayoutNode}; use wrapper::{PreorderDomTraversal, PostorderDomTraversal}; use profile_traits::time::{self, ProfilerMetadata, profile}; use std::mem; use std::ptr; use std::sync::atomic::{AtomicIsize, Ordering}; use util::opts; use util::workqueue::{WorkQueue, WorkUnit, WorkerProxy}; const CHUNK_SIZE: usize = 64; #[allow(dead_code)] fn static_assertion(node: UnsafeLayoutNode) { unsafe { let _: UnsafeFlow = ::std::intrinsics::transmute(node); let _: UnsafeLayoutNodeList = ::std::intrinsics::transmute(node); } } /// Vtable + pointer representation of a Flow trait object. 
// An `UnsafeFlow` is the raw two-word representation of a `&Flow` trait object
// (data pointer + vtable pointer), erased to plain integers so it can be sent
// through the work queue without carrying a lifetime.
pub type UnsafeFlow = (usize, usize);

// Sentinel meaning "no parent flow"; compared against with `==` in the
// bottom-up traversals to detect the root.
fn null_unsafe_flow() -> UnsafeFlow {
    (0, 0)
}

pub fn owned_flow_to_unsafe_flow(flow: *const FlowRef) -> UnsafeFlow {
    unsafe {
        // SAFETY: caller must pass a valid pointer to a live `FlowRef`.
        // `transmute_copy` reinterprets the `FlowRef`'s bits as two words
        // without taking ownership (no refcount change). Assumes `FlowRef`
        // is exactly two words — TODO confirm against `flow_ref`.
        mem::transmute_copy(&*flow)
    }
}

pub fn mut_owned_flow_to_unsafe_flow(flow: *mut FlowRef) -> UnsafeFlow {
    unsafe {
        // SAFETY: same contract as `owned_flow_to_unsafe_flow`.
        mem::transmute_copy(&*flow)
    }
}

pub fn borrowed_flow_to_unsafe_flow(flow: &Flow) -> UnsafeFlow {
    unsafe {
        // Copies the fat pointer `flow` itself (note `&flow`, a reference to
        // the reference), yielding the (data, vtable) word pair.
        mem::transmute_copy(&flow)
    }
}

pub fn mut_borrowed_flow_to_unsafe_flow(flow: &mut Flow) -> UnsafeFlow {
    unsafe {
        // Same trick as `borrowed_flow_to_unsafe_flow`, for a mutable borrow.
        mem::transmute_copy(&flow)
    }
}

/// Information that we need stored in each DOM node.
pub struct DomParallelInfo {
    /// The number of children that still need work done.
    pub children_count: AtomicIsize,
}

impl DomParallelInfo {
    pub fn new() -> DomParallelInfo {
        DomParallelInfo {
            children_count: AtomicIsize::new(0),
        }
    }
}

// A chunk of up to CHUNK_SIZE nodes plus an offset; boxed so the work-queue
// payload stays two words (see `static_assertion`).
pub type UnsafeLayoutNodeList = (Box<Vec<UnsafeLayoutNode>>, usize);

// NOTE(review): deliberately stores `UnsafeLayoutNode` entries even for flows —
// this relies on `UnsafeFlow` and `UnsafeLayoutNode` having identical
// representation, which `static_assertion` above guards.
pub type UnsafeFlowList = (Box<Vec<UnsafeLayoutNode>>, usize);

pub type ChunkedDomTraversalFunction =
    extern "Rust" fn(UnsafeLayoutNodeList,
                     &mut WorkerProxy<SharedLayoutContextWrapper,UnsafeLayoutNodeList>);

pub type DomTraversalFunction =
    extern "Rust" fn(UnsafeLayoutNode,
                     &mut WorkerProxy<SharedLayoutContextWrapper,UnsafeLayoutNodeList>);

pub type ChunkedFlowTraversalFunction =
    extern "Rust" fn(UnsafeFlowList,
                     &mut WorkerProxy<SharedLayoutContextWrapper,UnsafeFlowList>);

pub type FlowTraversalFunction =
    extern "Rust" fn(UnsafeFlow,
                     &mut WorkerProxy<SharedLayoutContextWrapper,UnsafeFlowList>);

/// A parallel top-down DOM traversal.
pub trait ParallelPreorderDomTraversal : PreorderDomTraversal {
    fn run_parallel(&self,
                    nodes: UnsafeLayoutNodeList,
                    proxy: &mut WorkerProxy<SharedLayoutContextWrapper,UnsafeLayoutNodeList>);

    /// Processes a whole chunk of nodes, collecting all of their children and
    /// re-enqueueing them in CHUNK_SIZE batches. Batching amortizes the
    /// work-stealing overhead per node (important on ARM, where the queue's
    /// memory barriers are expensive).
    #[inline(always)]
    fn run_parallel_helper(
            &self,
            unsafe_nodes: UnsafeLayoutNodeList,
            proxy: &mut WorkerProxy<SharedLayoutContextWrapper,UnsafeLayoutNodeList>,
            top_down_func: ChunkedDomTraversalFunction,
            bottom_up_func: DomTraversalFunction) {
        let mut discovered_child_nodes = Vec::new();
        for unsafe_node in unsafe_nodes.0.into_iter() {
            // Get a real layout node.
            let node: LayoutNode = unsafe {
                layout_node_from_unsafe_layout_node(&unsafe_node)
            };

            // Perform the appropriate traversal.
            self.process(node);

            // NB: O(n).
            let child_count = node.children().count();

            // Reset the count of children. The matching fetch_sub happens in
            // the bottom-up traversal.
            {
                let mut layout_data_ref = node.mutate_layout_data();
                let layout_data = layout_data_ref.as_mut().expect("no layout data");
                layout_data.data.parallel.children_count.store(child_count as isize,
                                                               Ordering::Relaxed);
            }

            // Possibly enqueue the children.
            if child_count != 0 {
                for kid in node.children() {
                    discovered_child_nodes.push(layout_node_to_unsafe_layout_node(&kid))
                }
            } else {
                // If there were no more children, start walking back up.
                bottom_up_func(unsafe_node, proxy)
            }
        }

        // Enqueue the discovered children in fixed-size batches rather than
        // one work unit per node.
        for chunk in discovered_child_nodes.chunks(CHUNK_SIZE) {
            proxy.push(WorkUnit {
                fun: top_down_func,
                data: (box chunk.iter().cloned().collect(), 0),
            });
        }
    }
}

/// A parallel bottom-up DOM traversal.
trait ParallelPostorderDomTraversal : PostorderDomTraversal {
    /// Process current node and potentially traverse its ancestors.
    ///
    /// If we are the last child that finished processing, recursively process
    /// our parent. Else, stop. Also, stop at the root.
    ///
    /// Thus, if we start with all the leaves of a tree, we end up traversing
    /// the whole tree bottom-up because each parent will be processed exactly
    /// once (by the last child that finishes processing).
    ///
    /// The only communication between siblings is that they both
    /// fetch-and-subtract the parent's children count.
    fn run_parallel(&self,
                    mut unsafe_node: UnsafeLayoutNode,
                    proxy: &mut WorkerProxy<SharedLayoutContextWrapper,UnsafeLayoutNodeList>) {
        loop {
            // Get a real layout node.
            let node: LayoutNode = unsafe {
                layout_node_from_unsafe_layout_node(&unsafe_node)
            };

            // Perform the appropriate operation.
            self.process(node);

            let shared_layout_context = unsafe { &*(proxy.user_data().0) };
            let layout_context = LayoutContext::new(shared_layout_context);

            let parent = match node.layout_parent_node(layout_context.shared) {
                None => break,
                Some(parent) => parent,
            };

            unsafe {
                let parent_layout_data =
                    (*parent.borrow_layout_data_unchecked()).as_ref().expect("no layout data");

                unsafe_node = layout_node_to_unsafe_layout_node(&parent);

                let parent_layout_data: &LayoutDataWrapper = mem::transmute(parent_layout_data);
                // NOTE(review): Relaxed ordering here presumably relies on the
                // work queue's own synchronization for visibility of sibling
                // writes — confirm against the workqueue implementation.
                if parent_layout_data
                       .data
                       .parallel
                       .children_count
                       .fetch_sub(1, Ordering::Relaxed) == 1 {
                    // We were the last child of our parent. Construct flows for our parent.
                    // (Empty branch: falling through to the next loop iteration
                    // processes the parent, since `unsafe_node` now points at it.)
                } else {
                    // Get out of here and find another node to work on.
                    break
                }
            }
        }
    }
}

/// Information that we need stored in each flow.
pub struct FlowParallelInfo {
    /// The number of children that still need work done.
    pub children_count: AtomicIsize,
    /// The address of the parent flow.
    pub parent: UnsafeFlow,
}

impl FlowParallelInfo {
    pub fn new() -> FlowParallelInfo {
        FlowParallelInfo {
            children_count: AtomicIsize::new(0),
            parent: null_unsafe_flow(),
        }
    }
}

/// A parallel bottom-up flow traversal.
trait ParallelPostorderFlowTraversal : PostorderFlowTraversal {
    /// Process current flow and potentially traverse its ancestors.
    ///
    /// If we are the last child that finished processing, recursively process
    /// our parent. Else, stop. Also, stop at the root.
    ///
    /// Thus, if we start with all the leaves of a tree, we end up traversing
    /// the whole tree bottom-up because each parent will be processed exactly
    /// once (by the last child that finishes processing).
    ///
    /// The only communication between siblings is that they both
    /// fetch-and-subtract the parent's children count.
    fn run_parallel(&self,
                    mut unsafe_flow: UnsafeFlow,
                    _: &mut WorkerProxy<SharedLayoutContextWrapper,UnsafeFlowList>) {
        loop {
            unsafe {
                // Get a real flow.
                let flow: &mut FlowRef = mem::transmute(&mut unsafe_flow);

                // Perform the appropriate traversal.
                if self.should_process(&mut **flow) {
                    self.process(&mut **flow);
                }

                let base = flow::mut_base(&mut **flow);

                // Reset the count of children for the next layout traversal.
                base.parallel.children_count.store(base.children.len() as isize,
                                                   Ordering::Relaxed);

                // Possibly enqueue the parent.
                let mut unsafe_parent = base.parallel.parent;
                if unsafe_parent == null_unsafe_flow() {
                    // We're done!
                    break
                }

                // No, we're not at the root yet. Then are we the last child
                // of our parent to finish processing? If so, we can continue
                // on with our parent; otherwise, we've gotta wait.
                let parent: &mut FlowRef = mem::transmute(&mut unsafe_parent);
                let parent_base = flow::mut_base(&mut **parent);
                if parent_base.parallel.children_count.fetch_sub(1, Ordering::Relaxed) == 1 {
                    // We were the last child of our parent. Reflow our parent.
                    unsafe_flow = unsafe_parent
                } else {
                    // Stop.
                    break
                }
            }
        }
    }
}

/// A parallel top-down flow traversal.
trait ParallelPreorderFlowTraversal : PreorderFlowTraversal {
    fn run_parallel(&self,
                    unsafe_flows: UnsafeFlowList,
                    proxy: &mut WorkerProxy<SharedLayoutContextWrapper,UnsafeFlowList>);

    fn should_record_thread_ids(&self) -> bool;

    /// Processes a whole chunk of flows, collecting all of their children and
    /// re-enqueueing them in CHUNK_SIZE batches to amortize per-unit
    /// work-stealing overhead.
    #[inline(always)]
    fn run_parallel_helper(&self,
                           unsafe_flows: UnsafeFlowList,
                           proxy: &mut WorkerProxy<SharedLayoutContextWrapper,UnsafeFlowList>,
                           top_down_func: ChunkedFlowTraversalFunction,
                           bottom_up_func: FlowTraversalFunction) {
        let mut discovered_child_flows = Vec::new();
        for mut unsafe_flow in unsafe_flows.0.into_iter() {
            let mut had_children = false;
            unsafe {
                // Get a real flow.
                let flow: &mut FlowRef = mem::transmute(&mut unsafe_flow);

                if self.should_record_thread_ids() {
                    flow::mut_base(&mut **flow).thread_id = proxy.worker_index();
                }

                if self.should_process(&mut **flow) {
                    // Perform the appropriate traversal.
                    self.process(&mut **flow);
                }

                // Possibly enqueue the children.
                for kid in flow::child_iter(&mut **flow) {
                    had_children = true;
                    discovered_child_flows.push(borrowed_flow_to_unsafe_flow(kid));
                }
            }

            // If there were no more children, start assigning block-sizes.
            if !had_children {
                bottom_up_func(unsafe_flow, proxy)
            }
        }

        // Batch the discovered children into fixed-size work units.
        for chunk in discovered_child_flows.chunks(CHUNK_SIZE) {
            proxy.push(WorkUnit {
                fun: top_down_func,
                data: (box chunk.iter().cloned().collect(), 0),
            });
        }
    }
}

impl<'a> ParallelPostorderFlowTraversal for BubbleISizes<'a> {}

impl<'a> ParallelPreorderFlowTraversal for AssignISizes<'a> {
    fn run_parallel(&self,
                    unsafe_flows: UnsafeFlowList,
                    proxy: &mut WorkerProxy<SharedLayoutContextWrapper,UnsafeFlowList>) {
        self.run_parallel_helper(unsafe_flows,
                                 proxy,
                                 assign_inline_sizes,
                                 assign_block_sizes_and_store_overflow)
    }

    fn should_record_thread_ids(&self) -> bool {
        true
    }
}

impl<'a> ParallelPostorderFlowTraversal for AssignBSizesAndStoreOverflow<'a> {}

impl<'a> ParallelPreorderFlowTraversal for ComputeAbsolutePositions<'a> {
    fn run_parallel(&self,
                    unsafe_flows: UnsafeFlowList,
                    proxy: &mut WorkerProxy<SharedLayoutContextWrapper, UnsafeFlowList>) {
        self.run_parallel_helper(unsafe_flows,
                                 proxy,
                                 compute_absolute_positions,
                                 build_display_list)
    }

    fn should_record_thread_ids(&self) -> bool {
        false
    }
}

impl<'a> ParallelPostorderFlowTraversal for BuildDisplayList<'a> {}

impl<'a> ParallelPostorderDomTraversal for ConstructFlows<'a> {}

impl <'a> ParallelPreorderDomTraversal for RecalcStyleForNode<'a> {
    fn run_parallel(&self,
                    unsafe_nodes: UnsafeLayoutNodeList,
                    proxy: &mut WorkerProxy<SharedLayoutContextWrapper, UnsafeLayoutNodeList>) {
        self.run_parallel_helper(unsafe_nodes, proxy, recalc_style, construct_flows)
    }
}

// The free functions below are the `extern "Rust" fn` trampolines stored in
// `WorkUnit`s: each one rebuilds a `LayoutContext` from the queue's shared
// user data and dispatches to the corresponding traversal.

fn recalc_style(unsafe_nodes: UnsafeLayoutNodeList,
                proxy: &mut WorkerProxy<SharedLayoutContextWrapper, UnsafeLayoutNodeList>) {
    let shared_layout_context = unsafe { &*(proxy.user_data().0) };
    let layout_context = LayoutContext::new(shared_layout_context);
    let recalc_style_for_node_traversal = RecalcStyleForNode {
        layout_context: &layout_context,
    };
    recalc_style_for_node_traversal.run_parallel(unsafe_nodes, proxy)
}

fn construct_flows(unsafe_node: UnsafeLayoutNode,
                   proxy: &mut WorkerProxy<SharedLayoutContextWrapper, UnsafeLayoutNodeList>) {
    let shared_layout_context = unsafe { &*(proxy.user_data().0) };
    let layout_context = LayoutContext::new(shared_layout_context);
    let construct_flows_traversal = ConstructFlows {
        layout_context: &layout_context,
    };
    construct_flows_traversal.run_parallel(unsafe_node, proxy)
}

fn assign_inline_sizes(unsafe_flows: UnsafeFlowList,
                       proxy: &mut WorkerProxy<SharedLayoutContextWrapper,UnsafeFlowList>) {
    let shared_layout_context = unsafe { &*(proxy.user_data().0) };
    let layout_context = LayoutContext::new(shared_layout_context);
    let assign_inline_sizes_traversal = AssignISizes {
        layout_context: &layout_context,
    };
    assign_inline_sizes_traversal.run_parallel(unsafe_flows, proxy)
}

fn assign_block_sizes_and_store_overflow(
        unsafe_flow: UnsafeFlow,
        proxy: &mut WorkerProxy<SharedLayoutContextWrapper,UnsafeFlowList>) {
    let shared_layout_context = unsafe { &*(proxy.user_data().0) };
    let layout_context = LayoutContext::new(shared_layout_context);
    let assign_block_sizes_traversal = AssignBSizesAndStoreOverflow {
        layout_context: &layout_context,
    };
    assign_block_sizes_traversal.run_parallel(unsafe_flow, proxy)
}

fn compute_absolute_positions(
        unsafe_flows: UnsafeFlowList,
        proxy: &mut WorkerProxy<SharedLayoutContextWrapper, UnsafeFlowList>) {
    let shared_layout_context = unsafe { &*(proxy.user_data().0) };
    let layout_context = LayoutContext::new(shared_layout_context);
    let compute_absolute_positions_traversal = ComputeAbsolutePositions {
        layout_context: &layout_context,
    };
    compute_absolute_positions_traversal.run_parallel(unsafe_flows, proxy);
}

fn build_display_list(unsafe_flow: UnsafeFlow,
                      proxy: &mut WorkerProxy<SharedLayoutContextWrapper, UnsafeFlowList>) {
    let shared_layout_context = unsafe { &*(proxy.user_data().0) };
    let layout_context = LayoutContext::new(shared_layout_context);
    let build_display_list_traversal = BuildDisplayList {
        layout_context: &layout_context,
    };
    build_display_list_traversal.run_parallel(unsafe_flow, proxy);
}

/// Reinterprets the queue's work-unit payload type as `To` for the duration of
/// `callback`, then runs the queue.
///
/// SAFETY: sound only because every payload type used here is two words (see
/// `static_assertion`), so the queue's storage layout is unchanged.
fn run_queue_with_custom_work_data_type<To,F>(
        queue: &mut WorkQueue<SharedLayoutContextWrapper,UnsafeLayoutNode>,
        callback: F)
        where To: 'static + Send, F: FnOnce(&mut WorkQueue<SharedLayoutContextWrapper,To>) {
    unsafe {
        let queue: &mut WorkQueue<SharedLayoutContextWrapper,To> = mem::transmute(queue);
        callback(queue);
        queue.run();
    }
}

/// Entry point: runs the style-recalc + flow-construction traversal over the
/// DOM subtree rooted at `root`, seeding the queue with a one-element chunk.
pub fn traverse_dom_preorder(root: LayoutNode,
                             shared_layout_context: &SharedLayoutContext,
                             queue: &mut WorkQueue<SharedLayoutContextWrapper, UnsafeLayoutNode>) {
    queue.data = SharedLayoutContextWrapper(shared_layout_context as *const _);

    run_queue_with_custom_work_data_type(queue, |queue| {
        queue.push(WorkUnit {
            fun: recalc_style,
            data: (box vec![layout_node_to_unsafe_layout_node(&root)], 0),
        });
    });

    // Clear the raw context pointer so the queue never holds a dangling one.
    queue.data = SharedLayoutContextWrapper(ptr::null());
}

/// Entry point: runs inline-size assignment (top-down) followed by block-size
/// assignment (bottom-up) over the flow tree. Bubbles intrinsic inline-sizes
/// sequentially first if the option requests it.
pub fn traverse_flow_tree_preorder(
        root: &mut FlowRef,
        profiler_metadata: ProfilerMetadata,
        time_profiler_chan: time::ProfilerChan,
        shared_layout_context: &SharedLayoutContext,
        queue: &mut WorkQueue<SharedLayoutContextWrapper,UnsafeLayoutNode>) {
    if opts::get().bubble_inline_sizes_separately {
        let layout_context = LayoutContext::new(shared_layout_context);
        let bubble_inline_sizes = BubbleISizes { layout_context: &layout_context };
        root.traverse_postorder(&bubble_inline_sizes);
    }

    queue.data = SharedLayoutContextWrapper(shared_layout_context as *const _);

    run_queue_with_custom_work_data_type(queue, |queue| {
        profile(time::ProfilerCategory::LayoutParallelWarmup, profiler_metadata,
                time_profiler_chan, || {
            queue.push(WorkUnit {
                fun: assign_inline_sizes,
                data: (box vec![mut_owned_flow_to_unsafe_flow(root)], 0),
            })
        });
    });

    queue.data = SharedLayoutContextWrapper(ptr::null())
}

/// Entry point: computes absolute positions (top-down) and builds display
/// lists (bottom-up) for the flow subtree rooted at `root`.
pub fn build_display_list_for_subtree(
        root: &mut FlowRef,
        profiler_metadata: ProfilerMetadata,
        time_profiler_chan: time::ProfilerChan,
        shared_layout_context: &SharedLayoutContext,
        queue: &mut WorkQueue<SharedLayoutContextWrapper,UnsafeLayoutNode>) {
    queue.data = SharedLayoutContextWrapper(shared_layout_context as *const _);

    run_queue_with_custom_work_data_type(queue, |queue| {
        profile(time::ProfilerCategory::LayoutParallelWarmup, profiler_metadata,
                time_profiler_chan, || {
            queue.push(WorkUnit {
                fun: compute_absolute_positions,
                data: (box vec![mut_owned_flow_to_unsafe_flow(root)], 0),
            })
        });
    });

    queue.data = SharedLayoutContextWrapper(ptr::null())
}
//! Interpreter and minimal REPL #[macro_use] extern crate clap; extern crate env_logger; extern crate rustc_serialize; extern crate term; extern crate lea_compiler as compiler; extern crate lea_parser as parser; extern crate lea_vm as vm; extern crate lea; use parser::span::DummyTerm; use compiler::{CompileConfig, FnData}; use vm::function::FunctionProto; use vm::{Value, VM}; use std::io::{self, stdin, stderr, Write, BufRead}; /// Opens a terminal that writes to stderr. If stderr couldn't be opened as a terminal, creates a /// `DummyTerm` that writes to stderr instead. fn stderr_term() -> Box<term::StderrTerminal> { term::stderr().unwrap_or_else(|| Box::new(DummyTerm(io::stderr()))) } /// Compiles a piece of code (expression or block of statements). Prints all errors / warnings to /// stderr, using color if stderr is a terminal. fn compile(code: &str, filename: &str) -> io::Result<Option<FnData>> { let mut fmt_target = stderr_term(); let fmt_target = &mut *fmt_target; // try to compile an expression first let mut result = compiler::compile_expr(code, filename, &CompileConfig::default()); if result.is_err() { result = compiler::compile_str(code, filename, &CompileConfig::default()); } match result { Err(e) => { try!(e.format(code, filename, fmt_target)); Ok(None) }, Ok(output) => { let warns = output.warns; if !warns.is_empty() { for w in warns { try!(w.format(code, filename, fmt_target)); } } Ok(Some(output.mainproto)) } } } /// Executes the given `FnData` object using the given VM. Prints the returned value or the error /// thrown. /// /// Returns `true` if the code executed successfully and `false` if the VM returned with an error. 
fn run_fndata(main: FnData, vm: &mut VM, env: Value) -> bool { // XXX This really needs to be easier use std::rc::Rc; use std::cell::Cell; use vm::function::{Function, Upval}; use vm::mem::GcStrategy; let proto = FunctionProto::from_fndata(main, &mut vm.gc); let mut first = true; let f = Function::new(&vm.gc, proto, |_| if first { first = false; Rc::new(Cell::new(Upval::Closed(env))) } else { Rc::new(Cell::new(Upval::Closed(Value::Nil))) }); let f = vm.gc.register_obj(f); match vm.start(f, |error| { println!("runtime error: {}", error); }) { None => false, Some(vals) => { if !vals.is_empty() { for (i, val) in vals.iter().enumerate() { if i != 0 { print!("\t"); } unsafe { val.fmt(io::stdout(), &vm.gc) }.unwrap(); } println!(""); } true } } } fn run_code(code: &str, file: &str, vm: &mut VM, env: Value) -> io::Result<bool> { if let Some(fndata) = try!(compile(code, file)) { Ok(run_fndata(fndata, vm, env)) } else { Ok(false) // compile error } } fn run_file(filename: &str, vm: &mut VM, env: Value) -> io::Result<bool> { use std::fs::File; use std::io::Read; let mut file = try!(File::open(filename)); let mut code = String::new(); try!(file.read_to_string(&mut code)); run_code(&code, filename, vm, env) } fn print_prompt() -> io::Result<()> { let mut stdout = io::stdout(); try!(write!(stdout, "> ")); try!(stdout.flush()); Ok(()) } fn repl(vm: &mut VM, env: Value) -> io::Result<()> { let stdin = io::stdin(); let stdin = io::BufReader::new(stdin); try!(print_prompt()); for input in stdin.lines() { let input = try!(input); try!(run_code(&input, "<repl>", vm, env)); try!(print_prompt()); } // EOF: Print newline so that the shell's prompt appears on the next line println!(""); Ok(()) } fn main() { env_logger::init().unwrap(); let args = clap_app!(lea => (version: lea::version_str()) (about: "Lea interpreter and interactive REPL") (@arg FILE: "The file to execute") (@arg exec: -e --exec +takes_value ... 
"Execute code") (@arg interactive: -i --interactive "Enter interactive mode after processing all arguments") ).get_matches(); let arg_file = args.value_of("FILE"); let flag_exec = args.values_of("exec").unwrap_or(Vec::new()); let flag_interactive = args.value_of("interactive").is_some(); let enter_repl = flag_interactive || (flag_exec.is_empty() && arg_file.is_none()); let mut vm = VM::new(); let env = lea::build_stdlib(&mut vm.gc); for code in flag_exec { match run_code(&code, "<cmdline>", &mut vm, env) { Ok(true) => {} Ok(false) => {return} Err(e) => { println!("{}", e); return } } } // TODO Pass the arguments to the program if let Some(file) = arg_file { match run_file(&file, &mut vm, env) { Ok(true) => {} Ok(false) => {return} Err(e) => { println!("{}", e); return } } } if enter_repl { match repl(&mut vm, env) { Err(e) => println!("{}", e), Ok(_) => {} } } } Remove unused import //! Minimal REPL #[macro_use] extern crate clap; extern crate env_logger; extern crate rustc_serialize; extern crate term; extern crate lea_compiler as compiler; extern crate lea_parser as parser; extern crate lea_vm as vm; extern crate lea; use parser::span::DummyTerm; use compiler::{CompileConfig, FnData}; use vm::function::FunctionProto; use vm::{Value, VM}; use std::io::{self, stdin, Write, BufRead}; /// Opens a terminal that writes to stderr. If stderr couldn't be opened as a terminal, creates a /// `DummyTerm` that writes to stderr instead. fn stderr_term() -> Box<term::StderrTerminal> { term::stderr().unwrap_or_else(|| Box::new(DummyTerm(io::stderr()))) } /// Compiles a piece of code (expression or block of statements). Prints all errors / warnings to /// stderr, using color if stderr is a terminal. 
fn compile(code: &str, filename: &str) -> io::Result<Option<FnData>> { let mut fmt_target = stderr_term(); let fmt_target = &mut *fmt_target; // try to compile an expression first let mut result = compiler::compile_expr(code, filename, &CompileConfig::default()); if result.is_err() { result = compiler::compile_str(code, filename, &CompileConfig::default()); } match result { Err(e) => { try!(e.format(code, filename, fmt_target)); Ok(None) }, Ok(output) => { let warns = output.warns; if !warns.is_empty() { for w in warns { try!(w.format(code, filename, fmt_target)); } } Ok(Some(output.mainproto)) } } } /// Executes the given `FnData` object using the given VM. Prints the returned value or the error /// thrown. /// /// Returns `true` if the code executed successfully and `false` if the VM returned with an error. fn run_fndata(main: FnData, vm: &mut VM, env: Value) -> bool { // XXX This really needs to be easier use std::rc::Rc; use std::cell::Cell; use vm::function::{Function, Upval}; use vm::mem::GcStrategy; let proto = FunctionProto::from_fndata(main, &mut vm.gc); let mut first = true; let f = Function::new(&vm.gc, proto, |_| if first { first = false; Rc::new(Cell::new(Upval::Closed(env))) } else { Rc::new(Cell::new(Upval::Closed(Value::Nil))) }); let f = vm.gc.register_obj(f); match vm.start(f, |error| { println!("runtime error: {}", error); }) { None => false, Some(vals) => { if !vals.is_empty() { for (i, val) in vals.iter().enumerate() { if i != 0 { print!("\t"); } unsafe { val.fmt(io::stdout(), &vm.gc) }.unwrap(); } println!(""); } true } } } fn run_code(code: &str, file: &str, vm: &mut VM, env: Value) -> io::Result<bool> { if let Some(fndata) = try!(compile(code, file)) { Ok(run_fndata(fndata, vm, env)) } else { Ok(false) // compile error } } fn run_file(filename: &str, vm: &mut VM, env: Value) -> io::Result<bool> { use std::fs::File; use std::io::Read; let mut file = try!(File::open(filename)); let mut code = String::new(); try!(file.read_to_string(&mut 
code)); run_code(&code, filename, vm, env) } fn print_prompt() -> io::Result<()> { let mut stdout = io::stdout(); try!(write!(stdout, "> ")); try!(stdout.flush()); Ok(()) } fn repl(vm: &mut VM, env: Value) -> io::Result<()> { let stdin = io::stdin(); let stdin = io::BufReader::new(stdin); try!(print_prompt()); for input in stdin.lines() { let input = try!(input); try!(run_code(&input, "<repl>", vm, env)); try!(print_prompt()); } // EOF: Print newline so that the shell's prompt appears on the next line println!(""); Ok(()) } fn main() { env_logger::init().unwrap(); let args = clap_app!(lea => (version: lea::version_str()) (about: "Lea interpreter and interactive REPL") (@arg FILE: "The file to execute") (@arg exec: -e --exec +takes_value ... "Execute code") (@arg interactive: -i --interactive "Enter interactive mode after processing all arguments") ).get_matches(); let arg_file = args.value_of("FILE"); let flag_exec = args.values_of("exec").unwrap_or(Vec::new()); let flag_interactive = args.value_of("interactive").is_some(); let enter_repl = flag_interactive || (flag_exec.is_empty() && arg_file.is_none()); let mut vm = VM::new(); let env = lea::build_stdlib(&mut vm.gc); for code in flag_exec { match run_code(&code, "<cmdline>", &mut vm, env) { Ok(true) => {} Ok(false) => {return} Err(e) => { println!("{}", e); return } } } // TODO Pass the arguments to the program if let Some(file) = arg_file { match run_file(&file, &mut vm, env) { Ok(true) => {} Ok(false) => {return} Err(e) => { println!("{}", e); return } } } if enter_repl { match repl(&mut vm, env) { Err(e) => println!("{}", e), Ok(_) => {} } } }
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use std::iter::repeat; use cryptoutil::{copy_memory, read_u64v_le, write_u64v_le}; use digest::Digest; use mac::{Mac, MacResult}; use util::secure_memset; static IV : [u64; 8] = [ 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, ]; static SIGMA : [[usize; 16]; 12] = [ [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ], [ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 ], [ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 ], [ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 ], [ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 ], [ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 ], [ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 ], [ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 ], [ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 ], [ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0 ], [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ], [ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 ], ]; const BLAKE2B_BLOCKBYTES : usize = 128; const BLAKE2B_OUTBYTES : usize = 64; const BLAKE2B_KEYBYTES : usize = 64; const BLAKE2B_SALTBYTES : usize = 16; const BLAKE2B_PERSONALBYTES : usize = 16; #[derive(Copy)] pub struct Blake2b { h: [u64; 8], t: [u64; 2], f: [u64; 2], buf: [u8; 2*BLAKE2B_BLOCKBYTES], buflen: usize, key: [u8; BLAKE2B_KEYBYTES], key_length: u8, last_node: u8, digest_length: u8, computed: bool, // whether the final digest has been computed } impl Clone for Blake2b { fn clone(&self) -> Blake2b { *self } } struct Blake2bParam { digest_length: u8, key_length: u8, fanout: u8, depth: u8, leaf_length: 
u32, node_offset: u64, node_depth: u8, inner_length: u8, reserved: [u8; 14], salt: [u8; BLAKE2B_SALTBYTES], personal: [u8; BLAKE2B_PERSONALBYTES], } macro_rules! G( ($r:expr, $i:expr, $a:expr, $b:expr, $c:expr, $d:expr, $m:expr) => ({ $a = $a.wrapping_add($b).wrapping_add($m[SIGMA[$r][2*$i+0]]); $d = ($d ^ $a).rotate_right(32); $c = $c.wrapping_add($d); $b = ($b ^ $c).rotate_right(24); $a = $a.wrapping_add($b).wrapping_add($m[SIGMA[$r][2*$i+1]]); $d = ($d ^ $a).rotate_right(16); $c = $c .wrapping_add($d); $b = ($b ^ $c).rotate_right(63); })); macro_rules! round( ($r:expr, $v:expr, $m:expr) => ( { G!($r,0,$v[ 0],$v[ 4],$v[ 8],$v[12], $m); G!($r,1,$v[ 1],$v[ 5],$v[ 9],$v[13], $m); G!($r,2,$v[ 2],$v[ 6],$v[10],$v[14], $m); G!($r,3,$v[ 3],$v[ 7],$v[11],$v[15], $m); G!($r,4,$v[ 0],$v[ 5],$v[10],$v[15], $m); G!($r,5,$v[ 1],$v[ 6],$v[11],$v[12], $m); G!($r,6,$v[ 2],$v[ 7],$v[ 8],$v[13], $m); G!($r,7,$v[ 3],$v[ 4],$v[ 9],$v[14], $m); } )); impl Blake2b { fn set_lastnode(&mut self) { self.f[1] = 0xFFFFFFFFFFFFFFFF; } fn set_lastblock(&mut self) { if self.last_node!=0 { self.set_lastnode(); } self.f[0] = 0xFFFFFFFFFFFFFFFF; } fn increment_counter(&mut self, inc : u64) { self.t[0] += inc; self.t[1] += if self.t[0] < inc { 1 } else { 0 }; } fn init0(digest_length: u8, key: &[u8]) -> Blake2b { assert!(key.len() <= BLAKE2B_KEYBYTES); let mut b = Blake2b { h: IV, t: [0,0], f: [0,0], buf: [0; 2*BLAKE2B_BLOCKBYTES], buflen: 0, last_node: 0, digest_length: digest_length, computed: false, key: [0; BLAKE2B_KEYBYTES], key_length: key.len() as u8 }; copy_memory(key, &mut b.key); b } fn apply_param(&mut self, p: &Blake2bParam) { use std::io::Write; use cryptoutil::WriteExt; let mut param_bytes : [u8; 64] = [0; 64]; { let mut writer: &mut [u8] = &mut param_bytes; writer.write_u8(p.digest_length).unwrap(); writer.write_u8(p.key_length).unwrap(); writer.write_u8(p.fanout).unwrap(); writer.write_u8(p.depth).unwrap(); writer.write_u32_le(p.leaf_length).unwrap(); 
writer.write_u64_le(p.node_offset).unwrap(); writer.write_u8(p.node_depth).unwrap(); writer.write_u8(p.inner_length).unwrap(); writer.write_all(&p.reserved).unwrap(); writer.write_all(&p.salt).unwrap(); writer.write_all(&p.personal).unwrap(); } let mut param_words : [u64; 8] = [0; 8]; read_u64v_le(&mut param_words, &param_bytes); for (h, param_word) in self.h.iter_mut().zip(param_words.iter()) { *h = *h ^ *param_word; } } // init xors IV with input parameter block fn init_param( p: &Blake2bParam, key: &[u8] ) -> Blake2b { let mut b = Blake2b::init0(p.digest_length, key); b.apply_param(p); b } fn default_param(outlen: u8) -> Blake2bParam { Blake2bParam { digest_length: outlen, key_length: 0, fanout: 1, depth: 1, leaf_length: 0, node_offset: 0, node_depth: 0, inner_length: 0, reserved: [0; 14], salt: [0; BLAKE2B_SALTBYTES], personal: [0; BLAKE2B_PERSONALBYTES], } } pub fn new(outlen: usize) -> Blake2b { assert!(outlen > 0 && outlen <= BLAKE2B_OUTBYTES); Blake2b::init_param(&Blake2b::default_param(outlen as u8), &[]) } fn apply_key(&mut self) { let mut block : [u8; BLAKE2B_BLOCKBYTES] = [0; BLAKE2B_BLOCKBYTES]; copy_memory(&self.key[..self.key_length as usize], &mut block); self.update(&block); secure_memset(&mut block[..], 0); } pub fn new_keyed(outlen: usize, key: &[u8] ) -> Blake2b { assert!(outlen > 0 && outlen <= BLAKE2B_OUTBYTES); assert!(key.len() > 0 && key.len() <= BLAKE2B_KEYBYTES); let param = Blake2bParam { digest_length: outlen as u8, key_length: key.len() as u8, fanout: 1, depth: 1, leaf_length: 0, node_offset: 0, node_depth: 0, inner_length: 0, reserved: [0; 14], salt: [0; BLAKE2B_SALTBYTES], personal: [0; BLAKE2B_PERSONALBYTES], }; let mut b = Blake2b::init_param(&param, key); b.apply_key(); b } fn compress(&mut self) { let mut ms: [u64; 16] = [0; 16]; let mut vs: [u64; 16] = [0; 16]; read_u64v_le(&mut ms, &self.buf[0..BLAKE2B_BLOCKBYTES]); for (v, h) in vs.iter_mut().zip(self.h.iter()) { *v = *h; } vs[ 8] = IV[0]; vs[ 9] = IV[1]; vs[10] = IV[2]; 
vs[11] = IV[3]; vs[12] = self.t[0] ^ IV[4]; vs[13] = self.t[1] ^ IV[5]; vs[14] = self.f[0] ^ IV[6]; vs[15] = self.f[1] ^ IV[7]; round!( 0, vs, ms ); round!( 1, vs, ms ); round!( 2, vs, ms ); round!( 3, vs, ms ); round!( 4, vs, ms ); round!( 5, vs, ms ); round!( 6, vs, ms ); round!( 7, vs, ms ); round!( 8, vs, ms ); round!( 9, vs, ms ); round!( 10, vs, ms ); round!( 11, vs, ms ); for (h_elem, (v_low, v_high)) in self.h.iter_mut().zip( vs[0..8].iter().zip(vs[8..16].iter()) ) { *h_elem = *h_elem ^ *v_low ^ *v_high; } } fn update( &mut self, mut input: &[u8] ) { while input.len() > 0 { let left = self.buflen; let fill = 2 * BLAKE2B_BLOCKBYTES - left; if input.len() > fill { copy_memory(&input[0..fill], &mut self.buf[left..]); // Fill buffer self.buflen += fill; self.increment_counter( BLAKE2B_BLOCKBYTES as u64); self.compress(); let mut halves = self.buf.chunks_mut(BLAKE2B_BLOCKBYTES); let first_half = halves.next().unwrap(); let second_half = halves.next().unwrap(); copy_memory(second_half, first_half); self.buflen -= BLAKE2B_BLOCKBYTES; input = &input[fill..input.len()]; } else { // inlen <= fill copy_memory(input, &mut self.buf[left..]); self.buflen += input.len(); break; } } } fn finalize( &mut self, out: &mut [u8] ) { assert!(out.len() == self.digest_length as usize); if !self.computed { if self.buflen > BLAKE2B_BLOCKBYTES { self.increment_counter(BLAKE2B_BLOCKBYTES as u64); self.compress(); self.buflen -= BLAKE2B_BLOCKBYTES; let mut halves = self.buf.chunks_mut(BLAKE2B_BLOCKBYTES); let first_half = halves.next().unwrap(); let second_half = halves.next().unwrap(); copy_memory(second_half, first_half); } let incby = self.buflen as u64; self.increment_counter(incby); self.set_lastblock(); for b in self.buf[self.buflen..].iter_mut() { *b = 0; } self.compress(); write_u64v_le(&mut self.buf[0..64], &self.h); self.computed = true; } let outlen = out.len(); copy_memory(&self.buf[0..outlen], out); } pub fn blake2b(out: &mut[u8], input: &[u8], key: &[u8]) { let mut hasher 
: Blake2b = if key.len() > 0 { Blake2b::new_keyed(out.len(), key) } else { Blake2b::new(out.len()) }; hasher.update(input); hasher.finalize(out); } } impl Digest for Blake2b { fn reset(&mut self) { for (h_elem, iv_elem) in self.h.iter_mut().zip(IV.iter()) { *h_elem = *iv_elem; } for t_elem in self.t.iter_mut() { *t_elem = 0; } for f_elem in self.f.iter_mut() { *f_elem = 0; } for b in self.buf.iter_mut() { *b = 0; } self.buflen = 0; self.last_node = 0; self.computed = false; let len = self.digest_length; self.apply_param(&Blake2b::default_param(len)); } fn input(&mut self, msg: &[u8]) { self.update(msg); } fn result(&mut self, out: &mut [u8]) { self.finalize(out); } fn output_bits(&self) -> usize { 8 * (self.digest_length as usize) } fn block_size(&self) -> usize { 8 * BLAKE2B_BLOCKBYTES } } impl Mac for Blake2b { /** * Process input data. * * # Arguments * * data - The input data to process. * */ fn input(&mut self, data: &[u8]) { self.update(data); } /** * Reset the Mac state to begin processing another input stream. */ fn reset(&mut self) { for (h_elem, iv_elem) in self.h.iter_mut().zip(IV.iter()) { *h_elem = *iv_elem; } for t_elem in self.t.iter_mut() { *t_elem = 0; } for f_elem in self.f.iter_mut() { *f_elem = 0; } for b in self.buf.iter_mut() { *b = 0; } self.buflen = 0; self.last_node = 0; self.computed = false; let len = self.digest_length; self.apply_param(&Blake2b::default_param(len)); self.apply_key(); } /** * Obtain the result of a Mac computation as a MacResult. */ fn result(&mut self) -> MacResult { let mut mac: Vec<u8> = repeat(0).take(self.digest_length as usize).collect(); self.raw_result(&mut mac); MacResult::new_from_owned(mac) } /** * Obtain the result of a Mac computation as [u8]. This method should be used very carefully * since incorrect use of the Mac code could result in permitting a timing attack which defeats * the security provided by a Mac function. 
*/ fn raw_result(&mut self, output: &mut [u8]) { self.finalize(output); } /** * Get the size of the Mac code, in bytes. */ fn output_bytes(&self) -> usize { self.digest_length as usize } } #[cfg(test)] mod digest_tests { //use cryptoutil::test::test_digest_1million_random; use blake2b::Blake2b; use digest::Digest; struct Test { input: &'static str, output_str: &'static str, } fn test_hash<D: Digest>(sh: &mut D, tests: &[Test]) { // Test that it works when accepting the message all at once for t in tests.iter() { sh.input_str(t.input); let out_str = sh.result_str(); assert!(&out_str[..] == t.output_str); sh.reset(); } // Test that it works when accepting the message in pieces for t in tests.iter() { let len = t.input.len(); let mut left = len; while left > 0 { let take = (left + 1) / 2; sh.input_str(&t.input[len - left..take + len - left]); left = left - take; } let out_str = sh.result_str(); assert!(&out_str[..] == t.output_str); sh.reset(); } } #[test] fn test_blake2b_digest() { // Examples from wikipedia let wikipedia_tests = vec![ Test { input: "", output_str: "786a02f742015903c6c6fd852552d272912f4740e15847618a86e217f71f5419\ d25e1031afee585313896444934eb04b903a685b1448b755d56f701afe9be2ce" }, Test { input: "The quick brown fox jumps over the lazy dog", output_str: "a8add4bdddfd93e4877d2746e62817b116364a1fa7bc148d95090bc7333b3673\ f82401cf7aa2e4cb1ecd90296e3f14cb5413f8ed77be73045b13914cdcd6a918" }, ]; let tests = wikipedia_tests; let mut sh = Blake2b::new(64); test_hash(&mut sh, &tests[..]); } } #[cfg(test)] mod mac_tests { use blake2b::Blake2b; use mac::Mac; #[test] fn test_blake2b_mac() { let key: Vec<u8> = (0..64).map(|i| i).collect(); let mut m = Blake2b::new_keyed(64, &key[..]); m.input(&[1,2,4,8]); let expected = [ 0x8e, 0xc6, 0xcb, 0x71, 0xc4, 0x5c, 0x3c, 0x90, 0x91, 0xd0, 0x8a, 0x37, 0x1e, 0xa8, 0x5d, 0xc1, 0x22, 0xb5, 0xc8, 0xe2, 0xd9, 0xe5, 0x71, 0x42, 0xbf, 0xef, 0xce, 0x42, 0xd7, 0xbc, 0xf8, 0x8b, 0xb0, 0x31, 0x27, 0x88, 0x2e, 0x51, 0xa9, 0x21, 0x44, 
0x62, 0x08, 0xf6, 0xa3, 0x58, 0xa9, 0xe0, 0x7d, 0x35, 0x3b, 0xd3, 0x1c, 0x41, 0x70, 0x15, 0x62, 0xac, 0xd5, 0x39, 0x4e, 0xee, 0x73, 0xae, ]; assert_eq!(m.result().code().to_vec(), expected.to_vec()); } } #[cfg(all(test, feature = "with-bench"))] mod bench { use test::Bencher; use digest::Digest; use blake2b::Blake2b; #[bench] pub fn blake2b_10(bh: & mut Bencher) { let mut sh = Blake2b::new(64); let bytes = [1u8; 10]; bh.iter( || { sh.input(&bytes); }); bh.bytes = bytes.len() as u64; } #[bench] pub fn blake2b_1k(bh: & mut Bencher) { let mut sh = Blake2b::new(64); let bytes = [1u8; 1024]; bh.iter( || { sh.input(&bytes); }); bh.bytes = bytes.len() as u64; } #[bench] pub fn blake2b_64k(bh: & mut Bencher) { let mut sh = Blake2b::new(64); let bytes = [1u8; 65536]; bh.iter( || { sh.input(&bytes); }); bh.bytes = bytes.len() as u64; } } Fix the implementation of reset() for Blake2b * Nothing stops you from createing a keyed Blake2b Digest and then calling the reset method on the Digest. However, the reset() implementation would then fail to reset the key - this is surprising behavior. * Both the Digest and Mac reset() implementations would always use the default_params() regardless of the parameters originally used to create the Blake2b instance. So, the Blake2b structure is extended to save a copy of the params used to create the Blake2b instance. The Mac and Digest reset() methods are replaced with a reset() method on Blake2b itself that resets the instance using the stored params and re-applies the key if necessary. // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. 
use std::iter::repeat; use cryptoutil::{copy_memory, read_u64v_le, write_u64v_le}; use digest::Digest; use mac::{Mac, MacResult}; use util::secure_memset; static IV : [u64; 8] = [ 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, ]; static SIGMA : [[usize; 16]; 12] = [ [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ], [ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 ], [ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 ], [ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 ], [ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 ], [ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 ], [ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 ], [ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 ], [ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 ], [ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0 ], [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ], [ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 ], ]; const BLAKE2B_BLOCKBYTES : usize = 128; const BLAKE2B_OUTBYTES : usize = 64; const BLAKE2B_KEYBYTES : usize = 64; const BLAKE2B_SALTBYTES : usize = 16; const BLAKE2B_PERSONALBYTES : usize = 16; #[derive(Copy)] pub struct Blake2b { h: [u64; 8], t: [u64; 2], f: [u64; 2], buf: [u8; 2*BLAKE2B_BLOCKBYTES], buflen: usize, key: [u8; BLAKE2B_KEYBYTES], key_length: u8, last_node: u8, digest_length: u8, computed: bool, // whether the final digest has been computed param: Blake2bParam } impl Clone for Blake2b { fn clone(&self) -> Blake2b { *self } } #[derive(Copy, Clone)] struct Blake2bParam { digest_length: u8, key_length: u8, fanout: u8, depth: u8, leaf_length: u32, node_offset: u64, node_depth: u8, inner_length: u8, reserved: [u8; 14], salt: [u8; BLAKE2B_SALTBYTES], personal: [u8; BLAKE2B_PERSONALBYTES], } macro_rules! 
G( ($r:expr, $i:expr, $a:expr, $b:expr, $c:expr, $d:expr, $m:expr) => ({ $a = $a.wrapping_add($b).wrapping_add($m[SIGMA[$r][2*$i+0]]); $d = ($d ^ $a).rotate_right(32); $c = $c.wrapping_add($d); $b = ($b ^ $c).rotate_right(24); $a = $a.wrapping_add($b).wrapping_add($m[SIGMA[$r][2*$i+1]]); $d = ($d ^ $a).rotate_right(16); $c = $c .wrapping_add($d); $b = ($b ^ $c).rotate_right(63); })); macro_rules! round( ($r:expr, $v:expr, $m:expr) => ( { G!($r,0,$v[ 0],$v[ 4],$v[ 8],$v[12], $m); G!($r,1,$v[ 1],$v[ 5],$v[ 9],$v[13], $m); G!($r,2,$v[ 2],$v[ 6],$v[10],$v[14], $m); G!($r,3,$v[ 3],$v[ 7],$v[11],$v[15], $m); G!($r,4,$v[ 0],$v[ 5],$v[10],$v[15], $m); G!($r,5,$v[ 1],$v[ 6],$v[11],$v[12], $m); G!($r,6,$v[ 2],$v[ 7],$v[ 8],$v[13], $m); G!($r,7,$v[ 3],$v[ 4],$v[ 9],$v[14], $m); } )); impl Blake2b { fn set_lastnode(&mut self) { self.f[1] = 0xFFFFFFFFFFFFFFFF; } fn set_lastblock(&mut self) { if self.last_node!=0 { self.set_lastnode(); } self.f[0] = 0xFFFFFFFFFFFFFFFF; } fn increment_counter(&mut self, inc : u64) { self.t[0] += inc; self.t[1] += if self.t[0] < inc { 1 } else { 0 }; } fn init0(param: Blake2bParam, digest_length: u8, key: &[u8]) -> Blake2b { assert!(key.len() <= BLAKE2B_KEYBYTES); let mut b = Blake2b { h: IV, t: [0,0], f: [0,0], buf: [0; 2*BLAKE2B_BLOCKBYTES], buflen: 0, last_node: 0, digest_length: digest_length, computed: false, key: [0; BLAKE2B_KEYBYTES], key_length: key.len() as u8, param: param }; copy_memory(key, &mut b.key); b } fn apply_param(&mut self) { use std::io::Write; use cryptoutil::WriteExt; let mut param_bytes : [u8; 64] = [0; 64]; { let mut writer: &mut [u8] = &mut param_bytes; writer.write_u8(self.param.digest_length).unwrap(); writer.write_u8(self.param.key_length).unwrap(); writer.write_u8(self.param.fanout).unwrap(); writer.write_u8(self.param.depth).unwrap(); writer.write_u32_le(self.param.leaf_length).unwrap(); writer.write_u64_le(self.param.node_offset).unwrap(); writer.write_u8(self.param.node_depth).unwrap(); 
writer.write_u8(self.param.inner_length).unwrap(); writer.write_all(&self.param.reserved).unwrap(); writer.write_all(&self.param.salt).unwrap(); writer.write_all(&self.param.personal).unwrap(); } let mut param_words : [u64; 8] = [0; 8]; read_u64v_le(&mut param_words, &param_bytes); for (h, param_word) in self.h.iter_mut().zip(param_words.iter()) { *h = *h ^ *param_word; } } // init xors IV with input parameter block fn init_param( p: Blake2bParam, key: &[u8] ) -> Blake2b { let mut b = Blake2b::init0(p, p.digest_length, key); b.apply_param(); b } fn default_param(outlen: u8) -> Blake2bParam { Blake2bParam { digest_length: outlen, key_length: 0, fanout: 1, depth: 1, leaf_length: 0, node_offset: 0, node_depth: 0, inner_length: 0, reserved: [0; 14], salt: [0; BLAKE2B_SALTBYTES], personal: [0; BLAKE2B_PERSONALBYTES], } } pub fn new(outlen: usize) -> Blake2b { assert!(outlen > 0 && outlen <= BLAKE2B_OUTBYTES); Blake2b::init_param(Blake2b::default_param(outlen as u8), &[]) } fn apply_key(&mut self) { let mut block : [u8; BLAKE2B_BLOCKBYTES] = [0; BLAKE2B_BLOCKBYTES]; copy_memory(&self.key[..self.key_length as usize], &mut block); self.update(&block); secure_memset(&mut block[..], 0); } pub fn new_keyed(outlen: usize, key: &[u8] ) -> Blake2b { assert!(outlen > 0 && outlen <= BLAKE2B_OUTBYTES); assert!(key.len() > 0 && key.len() <= BLAKE2B_KEYBYTES); let param = Blake2bParam { digest_length: outlen as u8, key_length: key.len() as u8, fanout: 1, depth: 1, leaf_length: 0, node_offset: 0, node_depth: 0, inner_length: 0, reserved: [0; 14], salt: [0; BLAKE2B_SALTBYTES], personal: [0; BLAKE2B_PERSONALBYTES], }; let mut b = Blake2b::init_param(param, key); b.apply_key(); b } fn compress(&mut self) { let mut ms: [u64; 16] = [0; 16]; let mut vs: [u64; 16] = [0; 16]; read_u64v_le(&mut ms, &self.buf[0..BLAKE2B_BLOCKBYTES]); for (v, h) in vs.iter_mut().zip(self.h.iter()) { *v = *h; } vs[ 8] = IV[0]; vs[ 9] = IV[1]; vs[10] = IV[2]; vs[11] = IV[3]; vs[12] = self.t[0] ^ IV[4]; vs[13] = 
self.t[1] ^ IV[5]; vs[14] = self.f[0] ^ IV[6]; vs[15] = self.f[1] ^ IV[7]; round!( 0, vs, ms ); round!( 1, vs, ms ); round!( 2, vs, ms ); round!( 3, vs, ms ); round!( 4, vs, ms ); round!( 5, vs, ms ); round!( 6, vs, ms ); round!( 7, vs, ms ); round!( 8, vs, ms ); round!( 9, vs, ms ); round!( 10, vs, ms ); round!( 11, vs, ms ); for (h_elem, (v_low, v_high)) in self.h.iter_mut().zip( vs[0..8].iter().zip(vs[8..16].iter()) ) { *h_elem = *h_elem ^ *v_low ^ *v_high; } } fn update( &mut self, mut input: &[u8] ) { while input.len() > 0 { let left = self.buflen; let fill = 2 * BLAKE2B_BLOCKBYTES - left; if input.len() > fill { copy_memory(&input[0..fill], &mut self.buf[left..]); // Fill buffer self.buflen += fill; self.increment_counter( BLAKE2B_BLOCKBYTES as u64); self.compress(); let mut halves = self.buf.chunks_mut(BLAKE2B_BLOCKBYTES); let first_half = halves.next().unwrap(); let second_half = halves.next().unwrap(); copy_memory(second_half, first_half); self.buflen -= BLAKE2B_BLOCKBYTES; input = &input[fill..input.len()]; } else { // inlen <= fill copy_memory(input, &mut self.buf[left..]); self.buflen += input.len(); break; } } } fn finalize( &mut self, out: &mut [u8] ) { assert!(out.len() == self.digest_length as usize); if !self.computed { if self.buflen > BLAKE2B_BLOCKBYTES { self.increment_counter(BLAKE2B_BLOCKBYTES as u64); self.compress(); self.buflen -= BLAKE2B_BLOCKBYTES; let mut halves = self.buf.chunks_mut(BLAKE2B_BLOCKBYTES); let first_half = halves.next().unwrap(); let second_half = halves.next().unwrap(); copy_memory(second_half, first_half); } let incby = self.buflen as u64; self.increment_counter(incby); self.set_lastblock(); for b in self.buf[self.buflen..].iter_mut() { *b = 0; } self.compress(); write_u64v_le(&mut self.buf[0..64], &self.h); self.computed = true; } let outlen = out.len(); copy_memory(&self.buf[0..outlen], out); } pub fn reset(&mut self) { for (h_elem, iv_elem) in self.h.iter_mut().zip(IV.iter()) { *h_elem = *iv_elem; } for t_elem in 
self.t.iter_mut() { *t_elem = 0; } for f_elem in self.f.iter_mut() { *f_elem = 0; } for b in self.buf.iter_mut() { *b = 0; } self.buflen = 0; self.last_node = 0; self.computed = false; self.apply_param(); if self.key_length > 0 { self.apply_key(); } } pub fn blake2b(out: &mut[u8], input: &[u8], key: &[u8]) { let mut hasher : Blake2b = if key.len() > 0 { Blake2b::new_keyed(out.len(), key) } else { Blake2b::new(out.len()) }; hasher.update(input); hasher.finalize(out); } } impl Digest for Blake2b { fn reset(&mut self) { Blake2b::reset(self); } fn input(&mut self, msg: &[u8]) { self.update(msg); } fn result(&mut self, out: &mut [u8]) { self.finalize(out); } fn output_bits(&self) -> usize { 8 * (self.digest_length as usize) } fn block_size(&self) -> usize { 8 * BLAKE2B_BLOCKBYTES } } impl Mac for Blake2b { /** * Process input data. * * # Arguments * * data - The input data to process. * */ fn input(&mut self, data: &[u8]) { self.update(data); } /** * Reset the Mac state to begin processing another input stream. */ fn reset(&mut self) { Blake2b::reset(self); } /** * Obtain the result of a Mac computation as a MacResult. */ fn result(&mut self) -> MacResult { let mut mac: Vec<u8> = repeat(0).take(self.digest_length as usize).collect(); self.raw_result(&mut mac); MacResult::new_from_owned(mac) } /** * Obtain the result of a Mac computation as [u8]. This method should be used very carefully * since incorrect use of the Mac code could result in permitting a timing attack which defeats * the security provided by a Mac function. */ fn raw_result(&mut self, output: &mut [u8]) { self.finalize(output); } /** * Get the size of the Mac code, in bytes. 
*/ fn output_bytes(&self) -> usize { self.digest_length as usize } } #[cfg(test)] mod digest_tests { //use cryptoutil::test::test_digest_1million_random; use blake2b::Blake2b; use digest::Digest; struct Test { input: &'static str, output_str: &'static str, } fn test_hash<D: Digest>(sh: &mut D, tests: &[Test]) { // Test that it works when accepting the message all at once for t in tests.iter() { sh.input_str(t.input); let out_str = sh.result_str(); assert!(&out_str[..] == t.output_str); sh.reset(); } // Test that it works when accepting the message in pieces for t in tests.iter() { let len = t.input.len(); let mut left = len; while left > 0 { let take = (left + 1) / 2; sh.input_str(&t.input[len - left..take + len - left]); left = left - take; } let out_str = sh.result_str(); assert!(&out_str[..] == t.output_str); sh.reset(); } } #[test] fn test_blake2b_digest() { // Examples from wikipedia let wikipedia_tests = vec![ Test { input: "", output_str: "786a02f742015903c6c6fd852552d272912f4740e15847618a86e217f71f5419\ d25e1031afee585313896444934eb04b903a685b1448b755d56f701afe9be2ce" }, Test { input: "The quick brown fox jumps over the lazy dog", output_str: "a8add4bdddfd93e4877d2746e62817b116364a1fa7bc148d95090bc7333b3673\ f82401cf7aa2e4cb1ecd90296e3f14cb5413f8ed77be73045b13914cdcd6a918" }, ]; let tests = wikipedia_tests; let mut sh = Blake2b::new(64); test_hash(&mut sh, &tests[..]); } } #[cfg(test)] mod mac_tests { use blake2b::Blake2b; use mac::Mac; #[test] fn test_blake2b_mac() { let key: Vec<u8> = (0..64).map(|i| i).collect(); let mut m = Blake2b::new_keyed(64, &key[..]); m.input(&[1,2,4,8]); let expected = [ 0x8e, 0xc6, 0xcb, 0x71, 0xc4, 0x5c, 0x3c, 0x90, 0x91, 0xd0, 0x8a, 0x37, 0x1e, 0xa8, 0x5d, 0xc1, 0x22, 0xb5, 0xc8, 0xe2, 0xd9, 0xe5, 0x71, 0x42, 0xbf, 0xef, 0xce, 0x42, 0xd7, 0xbc, 0xf8, 0x8b, 0xb0, 0x31, 0x27, 0x88, 0x2e, 0x51, 0xa9, 0x21, 0x44, 0x62, 0x08, 0xf6, 0xa3, 0x58, 0xa9, 0xe0, 0x7d, 0x35, 0x3b, 0xd3, 0x1c, 0x41, 0x70, 0x15, 0x62, 0xac, 0xd5, 0x39, 0x4e, 
0xee, 0x73, 0xae, ]; assert_eq!(m.result().code().to_vec(), expected.to_vec()); } } #[cfg(all(test, feature = "with-bench"))] mod bench { use test::Bencher; use digest::Digest; use blake2b::Blake2b; #[bench] pub fn blake2b_10(bh: & mut Bencher) { let mut sh = Blake2b::new(64); let bytes = [1u8; 10]; bh.iter( || { sh.input(&bytes); }); bh.bytes = bytes.len() as u64; } #[bench] pub fn blake2b_1k(bh: & mut Bencher) { let mut sh = Blake2b::new(64); let bytes = [1u8; 1024]; bh.iter( || { sh.input(&bytes); }); bh.bytes = bytes.len() as u64; } #[bench] pub fn blake2b_64k(bh: & mut Bencher) { let mut sh = Blake2b::new(64); let bytes = [1u8; 65536]; bh.iter( || { sh.input(&bytes); }); bh.bytes = bytes.len() as u64; } }
pub use self::parse_mode::ParseMode; mod parse_mode; use reqwest::Client; use serde_json; use serde_json::Value; use bot::parse_mode::{get_parse_mode}; use error::Error::{JsonNotFound, RequestFailed}; use error::{Result, check_for_error}; use objects::{Update, Message}; use value_extension::ValueExtension; /// A `Bot` which will do all the API calls. /// /// The `Bot` will be given access to in a `Command` with which you can do all /// the API interactions in your `Command`s. #[derive(Debug)] pub struct Bot { pub id: i64, pub first_name: String, pub last_name: Option<String>, pub username: String, client: Client, pub bot_url: String, } impl Bot { /// Constructs a new `Bot`. pub fn new(bot_url: String) -> Result<Self> { let client = Client::new()?; let rjson = Bot::get_me(&client, &bot_url)?; let id = rjson.as_required_i64("id")?; let first_name = rjson.as_required_string("first_name")?; let last_name = rjson.as_optional_string("last_name"); let username = rjson.as_required_string("username")?; Ok(Bot { id: id, first_name: first_name, last_name: last_name, username: username, client: client, bot_url: bot_url, }) } /// API call which gets the information about your bot. pub fn get_me(client: &Client, bot_url: &str) -> Result<Value> { let path = ["getMe"]; let url = ::construct_api_url(bot_url, &path); let mut resp = client.get(&url).send()?; if resp.status().is_success() { let rjson: Value = resp.json()?; rjson.get("result").cloned().ok_or(JsonNotFound) } else { Err(RequestFailed(*resp.status())) } } /// API call which will get called to get the updates for your bot. 
pub fn get_updates(&self, offset: i32, limit: Option<i32>, timeout: Option<i32>, network_delay: Option<i32>) -> Result<Option<Vec<Update>>> { let limit = limit.unwrap_or(100); let timeout = timeout.unwrap_or(0); // Use network_delay when it gets implemented let network_delay = network_delay.unwrap_or(5); let path = ["getUpdates"]; let path_url = ::construct_api_url(&self.bot_url, &path); let url = format!("{}?offset={}&limit={}&timeout={}", path_url, offset, limit, timeout); let mut data = self.client.get(&url).send()?; let rjson: Value = check_for_error(data.json()?)?; let updates_json = rjson.get("result"); if let Some(result) = updates_json { let updates: Vec<Update> = serde_json::from_value(result.clone())?; Ok(Some(updates)) } else { Ok(None) } } /// API call which will send a message to a chat which your bot participates in. pub fn send_message(&self, chat_id: &i64, text: &str, parse_mode: Option<&ParseMode>, disable_web_page_preview: Option<&bool>, disable_notification: Option<&bool>, reply_to_message_id: Option<&i64>) -> Result<Message> { let chat_id: &str = &chat_id.to_string(); let parse_mode = &get_parse_mode(parse_mode.unwrap_or(&ParseMode::Text)); let disable_web_page_preview: &str = &disable_web_page_preview.unwrap_or(&false) .to_string(); let disable_notification: &str = &disable_notification.unwrap_or(&false).to_string(); let reply_to_message_id: &str = &reply_to_message_id.map(|i| i.to_string()) .unwrap_or("None".to_string()); let path = ["sendMessage"]; let params = [("chat_id", chat_id), ("text", text), ("parse_mode", parse_mode), ("disable_web_page_preview", disable_web_page_preview), ("disable_notification", disable_notification), ("reply_to_message_id", reply_to_message_id)]; self.post_message(&path, &params) } /// API call which will reply to a message directed to your bot. 
pub fn reply_to_message(&self, update: &Update, text: &str) -> Result<Message> { let message = update.clone().message.unwrap(); let message_id = message.message_id; let chat_id = message.chat.id; self.send_message(&chat_id, text, None, None, None, Some(&message_id)) } /// API call which will forward a message. pub fn forward_messge(&self, update: &Update, chat_id: &i32, disable_notification: Option<&bool>) -> Result<Message> { let message = update.clone().message.unwrap(); let chat_id: &str = &chat_id.to_string(); let from_chat_id: &str = &message.chat.id.to_string(); let message_id: &str = &message.message_id.to_string(); let disable_notification: &str = &disable_notification.unwrap_or(&false).to_string(); let path = ["forwardMessage"]; let params = [("chat_id", chat_id), ("from_chat_id", from_chat_id), ("disable_notification", disable_notification), ("message_id", message_id)]; self.post_message(&path, &params) } /// The actual networking done for sending messages. fn post_message(&self, path: &[&str], params: &[(&str, &str)]) -> Result<Message> { let url = ::construct_api_url(&self.bot_url, path); let mut data = self.client.post(&url).form(&params).send()?; let rjson: Value = check_for_error(data.json()?)?; let message_json = rjson.get("result").ok_or(JsonNotFound)?; let message: Message = serde_json::from_value(message_json.clone())?; Ok(message) } } Fixing typo pub use self::parse_mode::ParseMode; mod parse_mode; use reqwest::Client; use serde_json; use serde_json::Value; use bot::parse_mode::{get_parse_mode}; use error::Error::{JsonNotFound, RequestFailed}; use error::{Result, check_for_error}; use objects::{Update, Message}; use value_extension::ValueExtension; /// A `Bot` which will do all the API calls. /// /// The `Bot` will be given access to in a `Command` with which you can do all /// the API interactions in your `Command`s. 
// Telegram Bot API client: holds the bot's identity plus the HTTP client
// and base URL used for every API call.
#[derive(Debug)]
pub struct Bot {
    pub id: i64,
    pub first_name: String,
    pub last_name: Option<String>,
    pub username: String,
    // HTTP client reused across calls; deliberately not public.
    client: Client,
    pub bot_url: String,
}

impl Bot {
    /// Constructs a new `Bot`.
    ///
    /// Issues a `getMe` call immediately and fills the identity fields from
    /// the response; fails if the HTTP client cannot be built, the request
    /// fails, or a required field is missing from the returned JSON.
    pub fn new(bot_url: String) -> Result<Self> {
        let client = Client::new()?;
        let rjson = Bot::get_me(&client, &bot_url)?;
        let id = rjson.as_required_i64("id")?;
        let first_name = rjson.as_required_string("first_name")?;
        let last_name = rjson.as_optional_string("last_name");
        let username = rjson.as_required_string("username")?;
        Ok(Bot {
            id: id,
            first_name: first_name,
            last_name: last_name,
            username: username,
            client: client,
            bot_url: bot_url,
        })
    }

    /// API call which gets the information about your bot.
    ///
    /// Returns the `"result"` object of the `getMe` response, or
    /// `JsonNotFound` when that key is absent; non-success HTTP statuses
    /// map to `RequestFailed`.
    pub fn get_me(client: &Client, bot_url: &str) -> Result<Value> {
        let path = ["getMe"];
        let url = ::construct_api_url(bot_url, &path);
        let mut resp = client.get(&url).send()?;
        if resp.status().is_success() {
            let rjson: Value = resp.json()?;
            rjson.get("result").cloned().ok_or(JsonNotFound)
        } else {
            Err(RequestFailed(*resp.status()))
        }
    }

    /// API call which will get the updates for your bot.
    ///
    /// `limit` defaults to 100 and `timeout` to 0 when `None` is given.
    /// Returns `Ok(None)` when the response carries no `"result"` array.
    pub fn get_updates(&self,
                       offset: i32,
                       limit: Option<i32>,
                       timeout: Option<i32>,
                       network_delay: Option<i32>)
                       -> Result<Option<Vec<Update>>> {
        let limit = limit.unwrap_or(100);
        let timeout = timeout.unwrap_or(0);
        // Use network_delay when it gets implemented
        let network_delay = network_delay.unwrap_or(5);
        let path = ["getUpdates"];
        let path_url = ::construct_api_url(&self.bot_url, &path);
        let url = format!("{}?offset={}&limit={}&timeout={}",
                          path_url,
                          offset,
                          limit,
                          timeout);
        let mut data = self.client.get(&url).send()?;
        // check_for_error surfaces Telegram's `ok: false` payloads as Err.
        let rjson: Value = check_for_error(data.json()?)?;
        let updates_json = rjson.get("result");
        if let Some(result) = updates_json {
            let updates: Vec<Update> = serde_json::from_value(result.clone())?;
            Ok(Some(updates))
        } else {
            Ok(None)
        }
    }

    /// API call which will send a message to a chat which your bot participates in.
    ///
    /// Optional arguments fall back to defaults: plain-text parse mode,
    /// previews and notifications enabled, and no reply target (encoded as
    /// the literal string "None" in the form data).
    pub fn send_message(&self,
                        chat_id: &i64,
                        text: &str,
                        parse_mode: Option<&ParseMode>,
                        disable_web_page_preview: Option<&bool>,
                        disable_notification: Option<&bool>,
                        reply_to_message_id: Option<&i64>)
                        -> Result<Message> {
        // Everything is stringified because it is posted as a
        // form-urlencoded body by post_message.
        let chat_id: &str = &chat_id.to_string();
        let parse_mode = &get_parse_mode(parse_mode.unwrap_or(&ParseMode::Text));
        let disable_web_page_preview: &str = &disable_web_page_preview.unwrap_or(&false)
            .to_string();
        let disable_notification: &str = &disable_notification.unwrap_or(&false).to_string();
        let reply_to_message_id: &str = &reply_to_message_id.map(|i| i.to_string())
            .unwrap_or("None".to_string());
        let path = ["sendMessage"];
        let params = [("chat_id", chat_id),
                      ("text", text),
                      ("parse_mode", parse_mode),
                      ("disable_web_page_preview", disable_web_page_preview),
                      ("disable_notification", disable_notification),
                      ("reply_to_message_id", reply_to_message_id)];
        self.post_message(&path, &params)
    }

    /// API call which will reply to a message directed to your bot.
    ///
    /// NOTE(review): panics if the update carries no `message` — callers
    /// must only pass message updates; confirm at call sites.
    pub fn reply_to_message(&self, update: &Update, text: &str) -> Result<Message> {
        let message = update.clone().message.unwrap();
        let message_id = message.message_id;
        let chat_id = message.chat.id;
        self.send_message(&chat_id, text, None, None, None, Some(&message_id))
    }

    /// API call which will forward a message.
    ///
    /// Source chat and message id are taken from `update`.
    /// NOTE(review): also panics if the update carries no `message`.
    pub fn forward_message(&self,
                           update: &Update,
                           chat_id: &i32,
                           disable_notification: Option<&bool>)
                           -> Result<Message> {
        let message = update.clone().message.unwrap();
        let chat_id: &str = &chat_id.to_string();
        let from_chat_id: &str = &message.chat.id.to_string();
        let message_id: &str = &message.message_id.to_string();
        let disable_notification: &str = &disable_notification.unwrap_or(&false).to_string();
        let path = ["forwardMessage"];
        let params = [("chat_id", chat_id),
                      ("from_chat_id", from_chat_id),
                      ("disable_notification", disable_notification),
                      ("message_id", message_id)];
        self.post_message(&path, &params)
    }

    /// The actual networking done for sending messages.
    ///
    /// POSTs `params` as a form to `bot_url` + `path`, checks the response
    /// body for an API-level error, and deserializes the `"result"` object
    /// into a `Message`.
    fn post_message(&self, path: &[&str], params: &[(&str, &str)]) -> Result<Message> {
        let url = ::construct_api_url(&self.bot_url, path);
        let mut data = self.client.post(&url).form(&params).send()?;
        let rjson: Value = check_for_error(data.json()?)?;
        let message_json = rjson.get("result").ok_or(JsonNotFound)?;
        let message: Message = serde_json::from_value(message_json.clone())?;
        Ok(message)
    }
}
//! Interfaces for building various structures use crate::{errors::Error, Result}; use serde::Serialize; use serde_json::{self, json, map::Map, Value}; use std::{ cmp::Eq, collections::{BTreeMap, HashMap}, hash::Hash, iter::{IntoIterator, Peekable}, }; use url::form_urlencoded; #[derive(Clone, Serialize, Debug)] #[serde(untagged)] pub enum RegistryAuth { Password { username: String, password: String, #[serde(skip_serializing_if = "Option::is_none")] email: Option<String>, #[serde(rename = "serveraddress")] #[serde(skip_serializing_if = "Option::is_none")] server_address: Option<String>, }, Token { #[serde(rename = "identitytoken")] identity_token: String, }, } impl RegistryAuth { /// return a new instance with token authentication pub fn token<S>(token: S) -> RegistryAuth where S: Into<String>, { RegistryAuth::Token { identity_token: token.into(), } } /// return a new instance of a builder for authentication pub fn builder() -> RegistryAuthBuilder { RegistryAuthBuilder::default() } /// serialize authentication as JSON in base64 pub fn serialize(&self) -> String { serde_json::to_string(self) .map(|c| base64::encode(&c)) .unwrap() } } #[derive(Default)] pub struct RegistryAuthBuilder { username: Option<String>, password: Option<String>, email: Option<String>, server_address: Option<String>, } impl RegistryAuthBuilder { pub fn username<I>( &mut self, username: I, ) -> &mut Self where I: Into<String>, { self.username = Some(username.into()); self } pub fn password<I>( &mut self, password: I, ) -> &mut Self where I: Into<String>, { self.password = Some(password.into()); self } pub fn email<I>( &mut self, email: I, ) -> &mut Self where I: Into<String>, { self.email = Some(email.into()); self } pub fn server_address<I>( &mut self, server_address: I, ) -> &mut Self where I: Into<String>, { self.server_address = Some(server_address.into()); self } pub fn build(&self) -> RegistryAuth { RegistryAuth::Password { username: self.username.clone().unwrap_or_else(String::new), 
password: self.password.clone().unwrap_or_else(String::new), email: self.email.clone(), server_address: self.server_address.clone(), } } } #[derive(Default, Debug)] pub struct PullOptions { auth: Option<RegistryAuth>, params: HashMap<&'static str, String>, } impl PullOptions { /// return a new instance of a builder for options pub fn builder() -> PullOptionsBuilder { PullOptionsBuilder::default() } /// serialize options as a string. returns None if no options are defined pub fn serialize(&self) -> Option<String> { if self.params.is_empty() { None } else { Some( form_urlencoded::Serializer::new(String::new()) .extend_pairs(&self.params) .finish(), ) } } pub(crate) fn auth_header(&self) -> Option<String> { self.auth.clone().map(|a| a.serialize()) } } #[derive(Default)] pub struct PullOptionsBuilder { auth: Option<RegistryAuth>, params: HashMap<&'static str, String>, } impl PullOptionsBuilder { /// Name of the image to pull. The name may include a tag or digest. /// This parameter may only be used when pulling an image. /// If an untagged value is provided and no `tag` is provided, _all_ /// tags will be pulled /// The pull is cancelled if the HTTP connection is closed. pub fn image<I>( &mut self, img: I, ) -> &mut Self where I: Into<String>, { self.params.insert("fromImage", img.into()); self } pub fn src<S>( &mut self, s: S, ) -> &mut Self where S: Into<String>, { self.params.insert("fromSrc", s.into()); self } /// Repository name given to an image when it is imported. The repo may include a tag. /// This parameter may only be used when importing an image. pub fn repo<R>( &mut self, r: R, ) -> &mut Self where R: Into<String>, { self.params.insert("repo", r.into()); self } /// Tag or digest. If empty when pulling an image, /// this causes all tags for the given image to be pulled. 
pub fn tag<T>( &mut self, t: T, ) -> &mut Self where T: Into<String>, { self.params.insert("tag", t.into()); self } pub fn auth( &mut self, auth: RegistryAuth, ) -> &mut Self { self.auth = Some(auth); self } pub fn build(&mut self) -> PullOptions { PullOptions { auth: self.auth.take(), params: self.params.clone(), } } } #[derive(Default, Debug)] pub struct BuildOptions { pub path: String, params: HashMap<&'static str, String>, } impl BuildOptions { /// return a new instance of a builder for options /// path is expected to be a file path to a directory containing a Dockerfile /// describing how to build a Docker image pub fn builder<S>(path: S) -> BuildOptionsBuilder where S: Into<String>, { BuildOptionsBuilder::new(path) } /// serialize options as a string. returns None if no options are defined pub fn serialize(&self) -> Option<String> { if self.params.is_empty() { None } else { Some( form_urlencoded::Serializer::new(String::new()) .extend_pairs(&self.params) .finish(), ) } } } #[derive(Default)] pub struct BuildOptionsBuilder { path: String, params: HashMap<&'static str, String>, } impl BuildOptionsBuilder { /// path is expected to be a file path to a directory containing a Dockerfile /// describing how to build a Docker image pub(crate) fn new<S>(path: S) -> Self where S: Into<String>, { BuildOptionsBuilder { path: path.into(), ..Default::default() } } /// set the name of the docker file. 
defaults to "DockerFile" pub fn dockerfile<P>( &mut self, path: P, ) -> &mut Self where P: Into<String>, { self.params.insert("dockerfile", path.into()); self } /// tag this image with a name after building it pub fn tag<T>( &mut self, t: T, ) -> &mut Self where T: Into<String>, { self.params.insert("t", t.into()); self } pub fn remote<R>( &mut self, r: R, ) -> &mut Self where R: Into<String>, { self.params.insert("remote", r.into()); self } /// don't use the image cache when building image pub fn nocache( &mut self, nc: bool, ) -> &mut Self { self.params.insert("nocache", nc.to_string()); self } pub fn rm( &mut self, r: bool, ) -> &mut Self { self.params.insert("rm", r.to_string()); self } pub fn forcerm( &mut self, fr: bool, ) -> &mut Self { self.params.insert("forcerm", fr.to_string()); self } /// `bridge`, `host`, `none`, `container:<name|id>`, or a custom network name. pub fn network_mode<T>( &mut self, t: T, ) -> &mut Self where T: Into<String>, { self.params.insert("networkmode", t.into()); self } pub fn memory( &mut self, memory: u64, ) -> &mut Self { self.params.insert("memory", memory.to_string()); self } pub fn cpu_shares( &mut self, cpu_shares: u32, ) -> &mut Self { self.params.insert("cpushares", cpu_shares.to_string()); self } // todo: memswap // todo: cpusetcpus // todo: cpuperiod // todo: cpuquota // todo: buildargs pub fn build(&self) -> BuildOptions { BuildOptions { path: self.path.clone(), params: self.params.clone(), } } } /// Options for filtering container list results #[derive(Default, Debug)] pub struct ContainerListOptions { params: HashMap<&'static str, String>, } impl ContainerListOptions { /// return a new instance of a builder for options pub fn builder() -> ContainerListOptionsBuilder { ContainerListOptionsBuilder::default() } /// serialize options as a string. 
/// returns None if no options are defined
    pub fn serialize(&self) -> Option<String> {
        if self.params.is_empty() {
            None
        } else {
            Some(
                form_urlencoded::Serializer::new(String::new())
                    .extend_pairs(&self.params)
                    .finish(),
            )
        }
    }
}

/// Filter options for container listings
pub enum ContainerFilter {
    ExitCode(u64),
    Status(String),
    LabelName(String),
    Label(String, String),
}

/// Builder interface for `ContainerListOptions`
#[derive(Default)]
pub struct ContainerListOptionsBuilder {
    params: HashMap<&'static str, String>,
}

impl ContainerListOptionsBuilder {
    /// Restrict the listing with the given filters.
    ///
    /// Filters that share a Docker filter key (e.g. several `Label`s) are
    /// accumulated into one list; previously a later filter with the same
    /// key silently overwrote an earlier one.
    pub fn filter(
        &mut self,
        filters: Vec<ContainerFilter>,
    ) -> &mut Self {
        let mut param: HashMap<&str, Vec<String>> = HashMap::new();
        for f in filters {
            match f {
                // Docker's filter key for exit codes is "exited", not "exit".
                ContainerFilter::ExitCode(c) => param
                    .entry("exited")
                    .or_insert_with(Vec::new)
                    .push(c.to_string()),
                ContainerFilter::Status(s) => {
                    param.entry("status").or_insert_with(Vec::new).push(s)
                }
                ContainerFilter::LabelName(n) => {
                    param.entry("label").or_insert_with(Vec::new).push(n)
                }
                ContainerFilter::Label(n, v) => param
                    .entry("label")
                    .or_insert_with(Vec::new)
                    .push(format!("{}={}", n, v)),
            };
        }
        // structure is a json encoded object mapping string keys to a list
        // of string values
        self.params
            .insert("filters", serde_json::to_string(&param).unwrap());
        self
    }

    /// include stopped containers as well as running ones
    pub fn all(&mut self) -> &mut Self {
        self.params.insert("all", "true".to_owned());
        self
    }

    pub fn since(
        &mut self,
        since: &str,
    ) -> &mut Self {
        self.params.insert("since", since.to_owned());
        self
    }

    pub fn before(
        &mut self,
        before: &str,
    ) -> &mut Self {
        self.params.insert("before", before.to_owned());
        self
    }

    /// include container size information in the results
    pub fn sized(&mut self) -> &mut Self {
        self.params.insert("size", "true".to_owned());
        self
    }

    pub fn build(&self) -> ContainerListOptions {
        ContainerListOptions {
            params: self.params.clone(),
        }
    }
}

/// Interface for building a new docker container from an existing image
#[derive(Serialize, Debug)]
pub struct ContainerOptions {
    pub name: Option<String>,
    params: HashMap<&'static str, Value>,
}

/// Function to insert a JSON value into a tree where the desired
/// location of the value is given as a path of JSON keys.
// Recursive worker: walks `key_path`, creating intermediate JSON objects
// on demand, and writes the serialized `value` at the final key.
fn insert<'a, I, V>(
    key_path: &mut Peekable<I>,
    value: &V,
    parent_node: &mut Value,
) where
    V: Serialize,
    I: Iterator<Item = &'a str>,
{
    // Callers always pass at least one path segment; an empty path would
    // panic here.
    let local_key = key_path.next().unwrap();
    if key_path.peek().is_some() {
        // More segments follow: descend, materializing `{}` nodes as needed.
        let node = parent_node
            .as_object_mut()
            .unwrap()
            .entry(local_key.to_string())
            .or_insert(Value::Object(Map::new()));
        insert(key_path, value, node);
    } else {
        // Last segment: write the value itself.
        parent_node
            .as_object_mut()
            .unwrap()
            .insert(local_key.to_string(), serde_json::to_value(value).unwrap());
    }
}

impl ContainerOptions {
    /// return a new instance of a builder for options
    pub fn builder(name: &str) -> ContainerOptionsBuilder {
        ContainerOptionsBuilder::new(name)
    }

    /// serialize options as a JSON string
    pub fn serialize(&self) -> Result<String> {
        serde_json::to_string(&self.to_json()).map_err(Error::from)
    }

    // Build the request body, expanding the dotted param keys into a
    // nested JSON object.
    fn to_json(&self) -> Value {
        let mut body_members = Map::new();
        // The HostConfig element gets initialized to an empty object,
        // for backward compatibility.
        body_members.insert("HostConfig".to_string(), Value::Object(Map::new()));
        let mut body = Value::Object(body_members);
        self.parse_from(&self.params, &mut body);
        body
    }

    // Splice every (dotted-key, value) pair of `params` into `body` via
    // `insert` (e.g. "HostConfig.Memory" becomes {"HostConfig":{"Memory":..}}).
    pub fn parse_from<'a, K, V>(
        &self,
        params: &'a HashMap<K, V>,
        body: &mut Value,
    ) where
        &'a HashMap<K, V>: IntoIterator,
        K: ToString + Eq + Hash,
        V: Serialize,
    {
        for (k, v) in params.iter() {
            let key_string = k.to_string();
            insert(&mut key_string.split('.').peekable(), v, body)
        }
    }
}

#[derive(Default)]
pub struct ContainerOptionsBuilder {
    name: Option<String>,
    // Keys are dotted JSON paths, expanded at serialization time.
    params: HashMap<&'static str, Value>,
}

impl ContainerOptionsBuilder {
    pub(crate) fn new(image: &str) -> Self {
        let mut params = HashMap::new();
        params.insert("Image", Value::String(image.to_owned()));
        ContainerOptionsBuilder { name: None, params }
    }

    pub fn name(
        &mut self,
        name: &str,
    ) -> &mut Self {
        self.name = Some(name.to_owned());
        self
    }

    pub fn volumes(
        &mut self,
        volumes: Vec<&str>,
    ) -> &mut Self {
        self.params.insert("HostConfig.Binds", json!(volumes));
        self
    }

    pub fn
expose( &mut self, srcport: u32, protocol: &str, hostport: u32, ) -> &mut Self { let mut exposedport: HashMap<String, String> = HashMap::new(); exposedport.insert("HostPort".to_string(), hostport.to_string()); /* The idea here is to go thought the 'old' port binds * and to apply them to the local 'port_bindings' variable, * add the bind we want and replace the 'old' value */ let mut port_bindings: HashMap<String, Value> = HashMap::new(); for (key, val) in self .params .get("HostConfig.PortBindings") .unwrap_or(&json!(null)) .as_object() .unwrap_or(&Map::new()) .iter() { port_bindings.insert(key.to_string(), json!(val)); } port_bindings.insert( format!("{}/{}", srcport, protocol), json!(vec![exposedport]), ); self.params .insert("HostConfig.PortBindings", json!(port_bindings)); // Replicate the port bindings over to the exposed ports config let mut exposed_ports: HashMap<String, Value> = HashMap::new(); let empty_config: HashMap<String, Value> = HashMap::new(); for (key, _) in &port_bindings { exposed_ports.insert(key.to_string(), json!(empty_config)); } self.params.insert("ExposedPorts", json!(exposed_ports)); self } pub fn links( &mut self, links: Vec<&str>, ) -> &mut Self { self.params.insert("HostConfig.Links", json!(links)); self } pub fn memory( &mut self, memory: u64, ) -> &mut Self { self.params.insert("HostConfig.Memory", json!(memory)); self } /// Sets an integer value representing the container's /// relative CPU weight versus other containers. pub fn cpu_shares( &mut self, cpu_shares: u32, ) -> &mut Self { self.params .insert("HostConfig.CpuShares", json!(cpu_shares)); self } pub fn labels( &mut self, labels: &HashMap<&str, &str>, ) -> &mut Self { self.params.insert("Labels", json!(labels)); self } /// Whether to attach to `stdin`. pub fn attach_stdin( &mut self, attach: bool, ) -> &mut Self { self.params.insert("AttachStdin", json!(attach)); self.params.insert("OpenStdin", json!(attach)); self } /// Whether to attach to `stdout`. 
pub fn attach_stdout( &mut self, attach: bool, ) -> &mut Self { self.params.insert("AttachStdout", json!(attach)); self } /// Whether to attach to `stderr`. pub fn attach_stderr( &mut self, attach: bool, ) -> &mut Self { self.params.insert("AttachStderr", json!(attach)); self } /// Whether standard streams should be attached to a TTY. pub fn tty( &mut self, tty: bool, ) -> &mut Self { self.params.insert("Tty", json!(tty)); self } pub fn extra_hosts( &mut self, hosts: Vec<&str>, ) -> &mut Self { self.params.insert("HostConfig.ExtraHosts", json!(hosts)); self } pub fn volumes_from( &mut self, volumes: Vec<&str>, ) -> &mut Self { self.params.insert("HostConfig.VolumesFrom", json!(volumes)); self } pub fn network_mode( &mut self, network: &str, ) -> &mut Self { self.params.insert("HostConfig.NetworkMode", json!(network)); self } pub fn env( &mut self, envs: Vec<&str>, ) -> &mut Self { self.params.insert("Env", json!(envs)); self } pub fn cmd( &mut self, cmds: Vec<&str>, ) -> &mut Self { self.params.insert("Cmd", json!(cmds)); self } pub fn entrypoint( &mut self, entrypoint: &str, ) -> &mut Self { self.params.insert("Entrypoint", json!(entrypoint)); self } pub fn capabilities( &mut self, capabilities: Vec<&str>, ) -> &mut Self { self.params.insert("HostConfig.CapAdd", json!(capabilities)); self } pub fn devices( &mut self, devices: Vec<HashMap<String, String>>, ) -> &mut Self { self.params.insert("HostConfig.Devices", json!(devices)); self } pub fn log_driver( &mut self, log_driver: &str, ) -> &mut Self { self.params .insert("HostConfig.LogConfig.Type", json!(log_driver)); self } pub fn restart_policy( &mut self, name: &str, maximum_retry_count: u64, ) -> &mut Self { self.params .insert("HostConfig.RestartPolicy.Name", json!(name)); if name == "on-failure" { self.params.insert( "HostConfig.RestartPolicy.MaximumRetryCount", json!(maximum_retry_count), ); } self } pub fn auto_remove( &mut self, set: bool, ) -> &mut Self { self.params.insert("HostConfig.AutoRemove", 
json!(set)); self } pub fn userns_mode( &mut self, mode: &str, ) -> &mut Self { self.params.insert("HostConfig.UsernsMode", json!(mode)); self } pub fn privileged( &mut self, set: bool, ) -> &mut Self { self.params.insert("HostConfig.Privileged", json!(set)); self } pub fn build(&self) -> ContainerOptions { ContainerOptions { name: self.name.clone(), params: self.params.clone(), } } } #[derive(Serialize, Debug)] pub struct ExecContainerOptions { params: HashMap<&'static str, Vec<String>>, params_bool: HashMap<&'static str, bool>, } impl ExecContainerOptions { /// return a new instance of a builder for options pub fn builder() -> ExecContainerOptionsBuilder { ExecContainerOptionsBuilder::default() } /// serialize options as a string. returns None if no options are defined pub fn serialize(&self) -> Result<String> { let mut body = serde_json::Map::new(); for (k, v) in &self.params { body.insert( k.to_string(), serde_json::to_value(v).map_err(Error::SerdeJsonError)?, ); } for (k, v) in &self.params_bool { body.insert( k.to_string(), serde_json::to_value(v).map_err(Error::SerdeJsonError)?, ); } serde_json::to_string(&body).map_err(Error::from) } } #[derive(Default)] pub struct ExecContainerOptionsBuilder { params: HashMap<&'static str, Vec<String>>, params_bool: HashMap<&'static str, bool>, } impl ExecContainerOptionsBuilder { /// Command to run, as an array of strings pub fn cmd( &mut self, cmds: Vec<&str>, ) -> &mut Self { for cmd in cmds { self.params .entry("Cmd") .or_insert_with(Vec::new) .push(cmd.to_owned()); } self } /// A list of environment variables in the form "VAR=value" pub fn env( &mut self, envs: Vec<&str>, ) -> &mut Self { for env in envs { self.params .entry("Env") .or_insert_with(Vec::new) .push(env.to_owned()); } self } /// Attach to stdout of the exec command pub fn attach_stdout( &mut self, stdout: bool, ) -> &mut Self { self.params_bool.insert("AttachStdout", stdout); self } /// Attach to stderr of the exec command pub fn attach_stderr( &mut 
self, stderr: bool, ) -> &mut Self { self.params_bool.insert("AttachStderr", stderr); self } pub fn build(&self) -> ExecContainerOptions { ExecContainerOptions { params: self.params.clone(), params_bool: self.params_bool.clone(), } } } /// Options for filtering streams of Docker events #[derive(Default, Debug)] pub struct EventsOptions { params: HashMap<&'static str, String>, } impl EventsOptions { pub fn builder() -> EventsOptionsBuilder { EventsOptionsBuilder::default() } /// serialize options as a string. returns None if no options are defined pub fn serialize(&self) -> Option<String> { if self.params.is_empty() { None } else { Some( form_urlencoded::Serializer::new(String::new()) .extend_pairs(&self.params) .finish(), ) } } } #[derive(Copy, Clone)] pub enum EventFilterType { Container, Image, Volume, Network, Daemon, } fn event_filter_type_to_string(filter: EventFilterType) -> &'static str { match filter { EventFilterType::Container => "container", EventFilterType::Image => "image", EventFilterType::Volume => "volume", EventFilterType::Network => "network", EventFilterType::Daemon => "daemon", } } /// Filter options for image listings pub enum EventFilter { Container(String), Event(String), Image(String), Label(String), Type(EventFilterType), Volume(String), Network(String), Daemon(String), } /// Builder interface for `EventOptions` #[derive(Default)] pub struct EventsOptionsBuilder { params: HashMap<&'static str, String>, events: Vec<String>, containers: Vec<String>, images: Vec<String>, labels: Vec<String>, volumes: Vec<String>, networks: Vec<String>, daemons: Vec<String>, types: Vec<String>, } impl EventsOptionsBuilder { /// Filter events since a given timestamp pub fn since( &mut self, ts: &u64, ) -> &mut Self { self.params.insert("since", ts.to_string()); self } /// Filter events until a given timestamp pub fn until( &mut self, ts: &u64, ) -> &mut Self { self.params.insert("until", ts.to_string()); self } pub fn filter( &mut self, filters: 
Vec<EventFilter>, ) -> &mut Self { let mut params = HashMap::new(); for f in filters { match f { EventFilter::Container(n) => { self.containers.push(n); params.insert("container", self.containers.clone()) } EventFilter::Event(n) => { self.events.push(n); params.insert("event", self.events.clone()) } EventFilter::Image(n) => { self.images.push(n); params.insert("image", self.images.clone()) } EventFilter::Label(n) => { self.labels.push(n); params.insert("label", self.labels.clone()) } EventFilter::Volume(n) => { self.volumes.push(n); params.insert("volume", self.volumes.clone()) } EventFilter::Network(n) => { self.networks.push(n); params.insert("network", self.networks.clone()) } EventFilter::Daemon(n) => { self.daemons.push(n); params.insert("daemon", self.daemons.clone()) } EventFilter::Type(n) => { let event_type = event_filter_type_to_string(n).to_string(); self.types.push(event_type); params.insert("type", self.types.clone()) } }; } self.params .insert("filters", serde_json::to_string(&params).unwrap()); self } pub fn build(&self) -> EventsOptions { EventsOptions { params: self.params.clone(), } } } /// Options for controlling log request results #[derive(Default, Debug)] pub struct LogsOptions { params: HashMap<&'static str, String>, } impl LogsOptions { /// return a new instance of a builder for options pub fn builder() -> LogsOptionsBuilder { LogsOptionsBuilder::default() } /// serialize options as a string. 
returns None if no options are defined pub fn serialize(&self) -> Option<String> { if self.params.is_empty() { None } else { Some( form_urlencoded::Serializer::new(String::new()) .extend_pairs(&self.params) .finish(), ) } } } /// Builder interface for `LogsOptions` #[derive(Default)] pub struct LogsOptionsBuilder { params: HashMap<&'static str, String>, } impl LogsOptionsBuilder { pub fn follow( &mut self, f: bool, ) -> &mut Self { self.params.insert("follow", f.to_string()); self } pub fn stdout( &mut self, s: bool, ) -> &mut Self { self.params.insert("stdout", s.to_string()); self } pub fn stderr( &mut self, s: bool, ) -> &mut Self { self.params.insert("stderr", s.to_string()); self } pub fn timestamps( &mut self, t: bool, ) -> &mut Self { self.params.insert("timestamps", t.to_string()); self } /// how_many can either be "all" or a to_string() of the number pub fn tail( &mut self, how_many: &str, ) -> &mut Self { self.params.insert("tail", how_many.to_owned()); self } pub fn build(&self) -> LogsOptions { LogsOptions { params: self.params.clone(), } } } /// Filter options for image listings pub enum ImageFilter { Dangling, LabelName(String), Label(String, String), } /// Options for filtering image list results #[derive(Default, Debug)] pub struct ImageListOptions { params: HashMap<&'static str, String>, } impl ImageListOptions { pub fn builder() -> ImageListOptionsBuilder { ImageListOptionsBuilder::default() } pub fn serialize(&self) -> Option<String> { if self.params.is_empty() { None } else { Some( form_urlencoded::Serializer::new(String::new()) .extend_pairs(&self.params) .finish(), ) } } } /// Builder interface for `ImageListOptions` #[derive(Default)] pub struct ImageListOptionsBuilder { params: HashMap<&'static str, String>, } impl ImageListOptionsBuilder { pub fn digests( &mut self, d: bool, ) -> &mut Self { self.params.insert("digests", d.to_string()); self } pub fn all( &mut self, a: bool, ) -> &mut Self { self.params.insert("all", a.to_string()); self } 
pub fn filter_name( &mut self, name: &str, ) -> &mut Self { self.params.insert("filter", name.to_owned()); self } pub fn filter( &mut self, filters: Vec<ImageFilter>, ) -> &mut Self { let mut param = HashMap::new(); for f in filters { match f { ImageFilter::Dangling => param.insert("dangling", vec![true.to_string()]), ImageFilter::LabelName(n) => param.insert("label", vec![n]), ImageFilter::Label(n, v) => param.insert("label", vec![format!("{}={}", n, v)]), }; } // structure is a a json encoded object mapping string keys to a list // of string values self.params .insert("filters", serde_json::to_string(&param).unwrap()); self } pub fn build(&self) -> ImageListOptions { ImageListOptions { params: self.params.clone(), } } } /// Options for controlling log request results #[derive(Default, Debug)] pub struct RmContainerOptions { params: HashMap<&'static str, String>, } impl RmContainerOptions { /// return a new instance of a builder for options pub fn builder() -> RmContainerOptionsBuilder { RmContainerOptionsBuilder::default() } /// serialize options as a string. returns None if no options are defined pub fn serialize(&self) -> Option<String> { if self.params.is_empty() { None } else { Some( form_urlencoded::Serializer::new(String::new()) .extend_pairs(&self.params) .finish(), ) } } } /// Builder interface for `LogsOptions` #[derive(Default)] pub struct RmContainerOptionsBuilder { params: HashMap<&'static str, String>, } impl RmContainerOptionsBuilder { pub fn force( &mut self, f: bool, ) -> &mut Self { self.params.insert("force", f.to_string()); self } pub fn volumes( &mut self, s: bool, ) -> &mut Self { self.params.insert("v", s.to_string()); self } pub fn build(&self) -> RmContainerOptions { RmContainerOptions { params: self.params.clone(), } } } /// Options for filtering networks list results #[derive(Default, Debug)] pub struct NetworkListOptions { params: HashMap<&'static str, String>, } impl NetworkListOptions { /// serialize options as a string. 
returns None if no options are defined pub fn serialize(&self) -> Option<String> { if self.params.is_empty() { None } else { Some( form_urlencoded::Serializer::new(String::new()) .extend_pairs(&self.params) .finish(), ) } } } /// Interface for creating new docker network #[derive(Serialize, Debug)] pub struct NetworkCreateOptions { params: HashMap<&'static str, Value>, } impl NetworkCreateOptions { /// return a new instance of a builder for options pub fn builder(name: &str) -> NetworkCreateOptionsBuilder { NetworkCreateOptionsBuilder::new(name) } /// serialize options as a string. returns None if no options are defined pub fn serialize(&self) -> Result<String> { serde_json::to_string(&self.params).map_err(Error::from) } pub fn parse_from<'a, K, V>( &self, params: &'a HashMap<K, V>, body: &mut serde_json::Map<String, Value>, ) where &'a HashMap<K, V>: IntoIterator, K: ToString + Eq + Hash, V: Serialize, { for (k, v) in params.iter() { let key = k.to_string(); let value = serde_json::to_value(v).unwrap(); body.insert(key, value); } } } #[derive(Default)] pub struct NetworkCreateOptionsBuilder { params: HashMap<&'static str, Value>, } impl NetworkCreateOptionsBuilder { pub(crate) fn new(name: &str) -> Self { let mut params = HashMap::new(); params.insert("Name", json!(name)); NetworkCreateOptionsBuilder { params } } pub fn driver( &mut self, name: &str, ) -> &mut Self { if !name.is_empty() { self.params.insert("Driver", json!(name)); } self } pub fn label( &mut self, labels: HashMap<String, String>, ) -> &mut Self { self.params.insert("Labels", json!(labels)); self } pub fn build(&self) -> NetworkCreateOptions { NetworkCreateOptions { params: self.params.clone(), } } } /// Interface for connect container to network #[derive(Serialize, Debug)] pub struct ContainerConnectionOptions { params: HashMap<&'static str, Value>, } impl ContainerConnectionOptions { /// serialize options as a string. 
returns None if no options are defined pub fn serialize(&self) -> Result<String> { serde_json::to_string(&self.params).map_err(Error::from) } pub fn parse_from<'a, K, V>( &self, params: &'a HashMap<K, V>, body: &mut BTreeMap<String, Value>, ) where &'a HashMap<K, V>: IntoIterator, K: ToString + Eq + Hash, V: Serialize, { for (k, v) in params.iter() { let key = k.to_string(); let value = serde_json::to_value(v).unwrap(); body.insert(key, value); } } /// return a new instance of a builder for options pub fn builder(container_id: &str) -> ContainerConnectionOptionsBuilder { ContainerConnectionOptionsBuilder::new(container_id) } } #[derive(Default)] pub struct ContainerConnectionOptionsBuilder { params: HashMap<&'static str, Value>, } impl ContainerConnectionOptionsBuilder { pub(crate) fn new(container_id: &str) -> Self { let mut params = HashMap::new(); params.insert("Container", json!(container_id)); ContainerConnectionOptionsBuilder { params } } pub fn aliases( &mut self, aliases: Vec<&str>, ) -> &mut Self { self.params .insert("EndpointConfig", json!({ "Aliases": json!(aliases) })); self } pub fn force(&mut self) -> &mut Self { self.params.insert("Force", json!(true)); self } pub fn build(&self) -> ContainerConnectionOptions { ContainerConnectionOptions { params: self.params.clone(), } } } /// Interface for creating volumes #[derive(Serialize, Debug)] pub struct VolumeCreateOptions { params: HashMap<&'static str, Value>, } impl VolumeCreateOptions { /// serialize options as a string. 
returns None if no options are defined pub fn serialize(&self) -> Result<String> { serde_json::to_string(&self.params).map_err(Error::from) } pub fn parse_from<'a, K, V>( &self, params: &'a HashMap<K, V>, body: &mut BTreeMap<String, Value>, ) where &'a HashMap<K, V>: IntoIterator, K: ToString + Eq + Hash, V: Serialize, { for (k, v) in params.iter() { let key = k.to_string(); let value = serde_json::to_value(v).unwrap(); body.insert(key, value); } } /// return a new instance of a builder for options pub fn builder() -> VolumeCreateOptionsBuilder { VolumeCreateOptionsBuilder::new() } } #[derive(Default)] pub struct VolumeCreateOptionsBuilder { params: HashMap<&'static str, Value>, } impl VolumeCreateOptionsBuilder { pub(crate) fn new() -> Self { let params = HashMap::new(); VolumeCreateOptionsBuilder { params } } pub fn name( &mut self, name: &str, ) -> &mut Self { self.params.insert("Name", json!(name)); self } pub fn labels( &mut self, labels: &HashMap<&str, &str>, ) -> &mut Self { self.params.insert("Labels", json!(labels)); self } pub fn build(&self) -> VolumeCreateOptions { VolumeCreateOptions { params: self.params.clone(), } } } #[cfg(test)] mod tests { use super::{ContainerOptionsBuilder, RegistryAuth}; #[test] fn container_options_simple() { let builder = ContainerOptionsBuilder::new("test_image"); let options = builder.build(); assert_eq!( r#"{"HostConfig":{},"Image":"test_image"}"#, options.serialize().unwrap() ); } #[test] fn container_options_env() { let options = ContainerOptionsBuilder::new("test_image") .env(vec!["foo", "bar"]) .build(); assert_eq!( r#"{"Env":["foo","bar"],"HostConfig":{},"Image":"test_image"}"#, options.serialize().unwrap() ); } #[test] fn container_options_host_config() { let options = ContainerOptionsBuilder::new("test_image") .network_mode("host") .auto_remove(true) .privileged(true) .build(); assert_eq!( r#"{"HostConfig":{"AutoRemove":true,"NetworkMode":"host","Privileged":true},"Image":"test_image"}"#, 
options.serialize().unwrap() ); } #[test] fn container_options_expose() { let options = ContainerOptionsBuilder::new("test_image") .expose(80, "tcp", 8080) .build(); assert_eq!( r#"{"ExposedPorts":{"80/tcp":{}},"HostConfig":{"PortBindings":{"80/tcp":[{"HostPort":"8080"}]}},"Image":"test_image"}"#, options.serialize().unwrap() ); // try exposing two let options = ContainerOptionsBuilder::new("test_image") .expose(80, "tcp", 8080) .expose(81, "tcp", 8081) .build(); assert_eq!( r#"{"ExposedPorts":{"80/tcp":{},"81/tcp":{}},"HostConfig":{"PortBindings":{"80/tcp":[{"HostPort":"8080"}],"81/tcp":[{"HostPort":"8081"}]}},"Image":"test_image"}"#, options.serialize().unwrap() ); } /// Test container options that are nested 3 levels deep. #[test] fn container_options_nested() { let options = ContainerOptionsBuilder::new("test_image") .log_driver("fluentd") .build(); assert_eq!( r#"{"HostConfig":{"LogConfig":{"Type":"fluentd"}},"Image":"test_image"}"#, options.serialize().unwrap() ); } /// Test the restart policy settings #[test] fn container_options_restart_policy() { let mut options = ContainerOptionsBuilder::new("test_image") .restart_policy("on-failure", 10) .build(); assert_eq!( r#"{"HostConfig":{"RestartPolicy":{"MaximumRetryCount":10,"Name":"on-failure"}},"Image":"test_image"}"#, options.serialize().unwrap() ); options = ContainerOptionsBuilder::new("test_image") .restart_policy("always", 0) .build(); assert_eq!( r#"{"HostConfig":{"RestartPolicy":{"Name":"always"}},"Image":"test_image"}"#, options.serialize().unwrap() ); } /// Test registry auth with token #[test] fn registry_auth_token() { let options = RegistryAuth::token("abc"); assert_eq!( base64::encode(r#"{"identitytoken":"abc"}"#), options.serialize() ); } /// Test registry auth with username and password #[test] fn registry_auth_password_simple() { let options = RegistryAuth::builder() .username("user_abc") .password("password_abc") .build(); assert_eq!( 
base64::encode(r#"{"username":"user_abc","password":"password_abc"}"#), options.serialize() ); } /// Test registry auth with all fields #[test] fn registry_auth_password_all() { let options = RegistryAuth::builder() .username("user_abc") .password("password_abc") .email("email_abc") .server_address("https://example.org") .build(); assert_eq!( base64::encode(r#"{"username":"user_abc","password":"password_abc","email":"email_abc","serveraddress":"https://example.org"}"#), options.serialize() ); } } Add explanative sentence for volumes option (#164) //! Interfaces for building various structures use crate::{errors::Error, Result}; use serde::Serialize; use serde_json::{self, json, map::Map, Value}; use std::{ cmp::Eq, collections::{BTreeMap, HashMap}, hash::Hash, iter::{IntoIterator, Peekable}, }; use url::form_urlencoded; #[derive(Clone, Serialize, Debug)] #[serde(untagged)] pub enum RegistryAuth { Password { username: String, password: String, #[serde(skip_serializing_if = "Option::is_none")] email: Option<String>, #[serde(rename = "serveraddress")] #[serde(skip_serializing_if = "Option::is_none")] server_address: Option<String>, }, Token { #[serde(rename = "identitytoken")] identity_token: String, }, } impl RegistryAuth { /// return a new instance with token authentication pub fn token<S>(token: S) -> RegistryAuth where S: Into<String>, { RegistryAuth::Token { identity_token: token.into(), } } /// return a new instance of a builder for authentication pub fn builder() -> RegistryAuthBuilder { RegistryAuthBuilder::default() } /// serialize authentication as JSON in base64 pub fn serialize(&self) -> String { serde_json::to_string(self) .map(|c| base64::encode(&c)) .unwrap() } } #[derive(Default)] pub struct RegistryAuthBuilder { username: Option<String>, password: Option<String>, email: Option<String>, server_address: Option<String>, } impl RegistryAuthBuilder { pub fn username<I>( &mut self, username: I, ) -> &mut Self where I: Into<String>, { self.username = 
Some(username.into()); self } pub fn password<I>( &mut self, password: I, ) -> &mut Self where I: Into<String>, { self.password = Some(password.into()); self } pub fn email<I>( &mut self, email: I, ) -> &mut Self where I: Into<String>, { self.email = Some(email.into()); self } pub fn server_address<I>( &mut self, server_address: I, ) -> &mut Self where I: Into<String>, { self.server_address = Some(server_address.into()); self } pub fn build(&self) -> RegistryAuth { RegistryAuth::Password { username: self.username.clone().unwrap_or_else(String::new), password: self.password.clone().unwrap_or_else(String::new), email: self.email.clone(), server_address: self.server_address.clone(), } } } #[derive(Default, Debug)] pub struct PullOptions { auth: Option<RegistryAuth>, params: HashMap<&'static str, String>, } impl PullOptions { /// return a new instance of a builder for options pub fn builder() -> PullOptionsBuilder { PullOptionsBuilder::default() } /// serialize options as a string. returns None if no options are defined pub fn serialize(&self) -> Option<String> { if self.params.is_empty() { None } else { Some( form_urlencoded::Serializer::new(String::new()) .extend_pairs(&self.params) .finish(), ) } } pub(crate) fn auth_header(&self) -> Option<String> { self.auth.clone().map(|a| a.serialize()) } } #[derive(Default)] pub struct PullOptionsBuilder { auth: Option<RegistryAuth>, params: HashMap<&'static str, String>, } impl PullOptionsBuilder { /// Name of the image to pull. The name may include a tag or digest. /// This parameter may only be used when pulling an image. /// If an untagged value is provided and no `tag` is provided, _all_ /// tags will be pulled /// The pull is cancelled if the HTTP connection is closed. 
pub fn image<I>( &mut self, img: I, ) -> &mut Self where I: Into<String>, { self.params.insert("fromImage", img.into()); self } pub fn src<S>( &mut self, s: S, ) -> &mut Self where S: Into<String>, { self.params.insert("fromSrc", s.into()); self } /// Repository name given to an image when it is imported. The repo may include a tag. /// This parameter may only be used when importing an image. pub fn repo<R>( &mut self, r: R, ) -> &mut Self where R: Into<String>, { self.params.insert("repo", r.into()); self } /// Tag or digest. If empty when pulling an image, /// this causes all tags for the given image to be pulled. pub fn tag<T>( &mut self, t: T, ) -> &mut Self where T: Into<String>, { self.params.insert("tag", t.into()); self } pub fn auth( &mut self, auth: RegistryAuth, ) -> &mut Self { self.auth = Some(auth); self } pub fn build(&mut self) -> PullOptions { PullOptions { auth: self.auth.take(), params: self.params.clone(), } } } #[derive(Default, Debug)] pub struct BuildOptions { pub path: String, params: HashMap<&'static str, String>, } impl BuildOptions { /// return a new instance of a builder for options /// path is expected to be a file path to a directory containing a Dockerfile /// describing how to build a Docker image pub fn builder<S>(path: S) -> BuildOptionsBuilder where S: Into<String>, { BuildOptionsBuilder::new(path) } /// serialize options as a string. 
returns None if no options are defined pub fn serialize(&self) -> Option<String> { if self.params.is_empty() { None } else { Some( form_urlencoded::Serializer::new(String::new()) .extend_pairs(&self.params) .finish(), ) } } } #[derive(Default)] pub struct BuildOptionsBuilder { path: String, params: HashMap<&'static str, String>, } impl BuildOptionsBuilder { /// path is expected to be a file path to a directory containing a Dockerfile /// describing how to build a Docker image pub(crate) fn new<S>(path: S) -> Self where S: Into<String>, { BuildOptionsBuilder { path: path.into(), ..Default::default() } } /// set the name of the docker file. defaults to "DockerFile" pub fn dockerfile<P>( &mut self, path: P, ) -> &mut Self where P: Into<String>, { self.params.insert("dockerfile", path.into()); self } /// tag this image with a name after building it pub fn tag<T>( &mut self, t: T, ) -> &mut Self where T: Into<String>, { self.params.insert("t", t.into()); self } pub fn remote<R>( &mut self, r: R, ) -> &mut Self where R: Into<String>, { self.params.insert("remote", r.into()); self } /// don't use the image cache when building image pub fn nocache( &mut self, nc: bool, ) -> &mut Self { self.params.insert("nocache", nc.to_string()); self } pub fn rm( &mut self, r: bool, ) -> &mut Self { self.params.insert("rm", r.to_string()); self } pub fn forcerm( &mut self, fr: bool, ) -> &mut Self { self.params.insert("forcerm", fr.to_string()); self } /// `bridge`, `host`, `none`, `container:<name|id>`, or a custom network name. 
pub fn network_mode<T>( &mut self, t: T, ) -> &mut Self where T: Into<String>, { self.params.insert("networkmode", t.into()); self } pub fn memory( &mut self, memory: u64, ) -> &mut Self { self.params.insert("memory", memory.to_string()); self } pub fn cpu_shares( &mut self, cpu_shares: u32, ) -> &mut Self { self.params.insert("cpushares", cpu_shares.to_string()); self } // todo: memswap // todo: cpusetcpus // todo: cpuperiod // todo: cpuquota // todo: buildargs pub fn build(&self) -> BuildOptions { BuildOptions { path: self.path.clone(), params: self.params.clone(), } } } /// Options for filtering container list results #[derive(Default, Debug)] pub struct ContainerListOptions { params: HashMap<&'static str, String>, } impl ContainerListOptions { /// return a new instance of a builder for options pub fn builder() -> ContainerListOptionsBuilder { ContainerListOptionsBuilder::default() } /// serialize options as a string. returns None if no options are defined pub fn serialize(&self) -> Option<String> { if self.params.is_empty() { None } else { Some( form_urlencoded::Serializer::new(String::new()) .extend_pairs(&self.params) .finish(), ) } } } /// Filter options for container listings pub enum ContainerFilter { ExitCode(u64), Status(String), LabelName(String), Label(String, String), } /// Builder interface for `ContainerListOptions` #[derive(Default)] pub struct ContainerListOptionsBuilder { params: HashMap<&'static str, String>, } impl ContainerListOptionsBuilder { pub fn filter( &mut self, filters: Vec<ContainerFilter>, ) -> &mut Self { let mut param = HashMap::new(); for f in filters { match f { ContainerFilter::ExitCode(c) => param.insert("exit", vec![c.to_string()]), ContainerFilter::Status(s) => param.insert("status", vec![s]), ContainerFilter::LabelName(n) => param.insert("label", vec![n]), ContainerFilter::Label(n, v) => param.insert("label", vec![format!("{}={}", n, v)]), }; } // structure is a a json encoded object mapping string keys to a list // of 
string values self.params .insert("filters", serde_json::to_string(&param).unwrap()); self } pub fn all(&mut self) -> &mut Self { self.params.insert("all", "true".to_owned()); self } pub fn since( &mut self, since: &str, ) -> &mut Self { self.params.insert("since", since.to_owned()); self } pub fn before( &mut self, before: &str, ) -> &mut Self { self.params.insert("before", before.to_owned()); self } pub fn sized(&mut self) -> &mut Self { self.params.insert("size", "true".to_owned()); self } pub fn build(&self) -> ContainerListOptions { ContainerListOptions { params: self.params.clone(), } } } /// Interface for building a new docker container from an existing image #[derive(Serialize, Debug)] pub struct ContainerOptions { pub name: Option<String>, params: HashMap<&'static str, Value>, } /// Function to insert a JSON value into a tree where the desired /// location of the value is given as a path of JSON keys. fn insert<'a, I, V>( key_path: &mut Peekable<I>, value: &V, parent_node: &mut Value, ) where V: Serialize, I: Iterator<Item = &'a str>, { let local_key = key_path.next().unwrap(); if key_path.peek().is_some() { let node = parent_node .as_object_mut() .unwrap() .entry(local_key.to_string()) .or_insert(Value::Object(Map::new())); insert(key_path, value, node); } else { parent_node .as_object_mut() .unwrap() .insert(local_key.to_string(), serde_json::to_value(value).unwrap()); } } impl ContainerOptions { /// return a new instance of a builder for options pub fn builder(name: &str) -> ContainerOptionsBuilder { ContainerOptionsBuilder::new(name) } /// serialize options as a string. returns None if no options are defined pub fn serialize(&self) -> Result<String> { serde_json::to_string(&self.to_json()).map_err(Error::from) } fn to_json(&self) -> Value { let mut body_members = Map::new(); // The HostConfig element gets initialized to an empty object, // for backward compatibility. 
body_members.insert("HostConfig".to_string(), Value::Object(Map::new())); let mut body = Value::Object(body_members); self.parse_from(&self.params, &mut body); body } pub fn parse_from<'a, K, V>( &self, params: &'a HashMap<K, V>, body: &mut Value, ) where &'a HashMap<K, V>: IntoIterator, K: ToString + Eq + Hash, V: Serialize, { for (k, v) in params.iter() { let key_string = k.to_string(); insert(&mut key_string.split('.').peekable(), v, body) } } } #[derive(Default)] pub struct ContainerOptionsBuilder { name: Option<String>, params: HashMap<&'static str, Value>, } impl ContainerOptionsBuilder { pub(crate) fn new(image: &str) -> Self { let mut params = HashMap::new(); params.insert("Image", Value::String(image.to_owned())); ContainerOptionsBuilder { name: None, params } } pub fn name( &mut self, name: &str, ) -> &mut Self { self.name = Some(name.to_owned()); self } /// Specify any bind mounts, taking the form of `/some/host/path:/some/container/path` pub fn volumes( &mut self, volumes: Vec<&str>, ) -> &mut Self { self.params.insert("HostConfig.Binds", json!(volumes)); self } pub fn expose( &mut self, srcport: u32, protocol: &str, hostport: u32, ) -> &mut Self { let mut exposedport: HashMap<String, String> = HashMap::new(); exposedport.insert("HostPort".to_string(), hostport.to_string()); /* The idea here is to go thought the 'old' port binds * and to apply them to the local 'port_bindings' variable, * add the bind we want and replace the 'old' value */ let mut port_bindings: HashMap<String, Value> = HashMap::new(); for (key, val) in self .params .get("HostConfig.PortBindings") .unwrap_or(&json!(null)) .as_object() .unwrap_or(&Map::new()) .iter() { port_bindings.insert(key.to_string(), json!(val)); } port_bindings.insert( format!("{}/{}", srcport, protocol), json!(vec![exposedport]), ); self.params .insert("HostConfig.PortBindings", json!(port_bindings)); // Replicate the port bindings over to the exposed ports config let mut exposed_ports: HashMap<String, Value> = 
HashMap::new(); let empty_config: HashMap<String, Value> = HashMap::new(); for (key, _) in &port_bindings { exposed_ports.insert(key.to_string(), json!(empty_config)); } self.params.insert("ExposedPorts", json!(exposed_ports)); self } pub fn links( &mut self, links: Vec<&str>, ) -> &mut Self { self.params.insert("HostConfig.Links", json!(links)); self } pub fn memory( &mut self, memory: u64, ) -> &mut Self { self.params.insert("HostConfig.Memory", json!(memory)); self } /// Sets an integer value representing the container's /// relative CPU weight versus other containers. pub fn cpu_shares( &mut self, cpu_shares: u32, ) -> &mut Self { self.params .insert("HostConfig.CpuShares", json!(cpu_shares)); self } pub fn labels( &mut self, labels: &HashMap<&str, &str>, ) -> &mut Self { self.params.insert("Labels", json!(labels)); self } /// Whether to attach to `stdin`. pub fn attach_stdin( &mut self, attach: bool, ) -> &mut Self { self.params.insert("AttachStdin", json!(attach)); self.params.insert("OpenStdin", json!(attach)); self } /// Whether to attach to `stdout`. pub fn attach_stdout( &mut self, attach: bool, ) -> &mut Self { self.params.insert("AttachStdout", json!(attach)); self } /// Whether to attach to `stderr`. pub fn attach_stderr( &mut self, attach: bool, ) -> &mut Self { self.params.insert("AttachStderr", json!(attach)); self } /// Whether standard streams should be attached to a TTY. 
pub fn tty( &mut self, tty: bool, ) -> &mut Self { self.params.insert("Tty", json!(tty)); self } pub fn extra_hosts( &mut self, hosts: Vec<&str>, ) -> &mut Self { self.params.insert("HostConfig.ExtraHosts", json!(hosts)); self } pub fn volumes_from( &mut self, volumes: Vec<&str>, ) -> &mut Self { self.params.insert("HostConfig.VolumesFrom", json!(volumes)); self } pub fn network_mode( &mut self, network: &str, ) -> &mut Self { self.params.insert("HostConfig.NetworkMode", json!(network)); self } pub fn env( &mut self, envs: Vec<&str>, ) -> &mut Self { self.params.insert("Env", json!(envs)); self } pub fn cmd( &mut self, cmds: Vec<&str>, ) -> &mut Self { self.params.insert("Cmd", json!(cmds)); self } pub fn entrypoint( &mut self, entrypoint: &str, ) -> &mut Self { self.params.insert("Entrypoint", json!(entrypoint)); self } pub fn capabilities( &mut self, capabilities: Vec<&str>, ) -> &mut Self { self.params.insert("HostConfig.CapAdd", json!(capabilities)); self } pub fn devices( &mut self, devices: Vec<HashMap<String, String>>, ) -> &mut Self { self.params.insert("HostConfig.Devices", json!(devices)); self } pub fn log_driver( &mut self, log_driver: &str, ) -> &mut Self { self.params .insert("HostConfig.LogConfig.Type", json!(log_driver)); self } pub fn restart_policy( &mut self, name: &str, maximum_retry_count: u64, ) -> &mut Self { self.params .insert("HostConfig.RestartPolicy.Name", json!(name)); if name == "on-failure" { self.params.insert( "HostConfig.RestartPolicy.MaximumRetryCount", json!(maximum_retry_count), ); } self } pub fn auto_remove( &mut self, set: bool, ) -> &mut Self { self.params.insert("HostConfig.AutoRemove", json!(set)); self } pub fn userns_mode( &mut self, mode: &str, ) -> &mut Self { self.params.insert("HostConfig.UsernsMode", json!(mode)); self } pub fn privileged( &mut self, set: bool, ) -> &mut Self { self.params.insert("HostConfig.Privileged", json!(set)); self } pub fn build(&self) -> ContainerOptions { ContainerOptions { name: 
self.name.clone(), params: self.params.clone(), } } } #[derive(Serialize, Debug)] pub struct ExecContainerOptions { params: HashMap<&'static str, Vec<String>>, params_bool: HashMap<&'static str, bool>, } impl ExecContainerOptions { /// return a new instance of a builder for options pub fn builder() -> ExecContainerOptionsBuilder { ExecContainerOptionsBuilder::default() } /// serialize options as a string. returns None if no options are defined pub fn serialize(&self) -> Result<String> { let mut body = serde_json::Map::new(); for (k, v) in &self.params { body.insert( k.to_string(), serde_json::to_value(v).map_err(Error::SerdeJsonError)?, ); } for (k, v) in &self.params_bool { body.insert( k.to_string(), serde_json::to_value(v).map_err(Error::SerdeJsonError)?, ); } serde_json::to_string(&body).map_err(Error::from) } } #[derive(Default)] pub struct ExecContainerOptionsBuilder { params: HashMap<&'static str, Vec<String>>, params_bool: HashMap<&'static str, bool>, } impl ExecContainerOptionsBuilder { /// Command to run, as an array of strings pub fn cmd( &mut self, cmds: Vec<&str>, ) -> &mut Self { for cmd in cmds { self.params .entry("Cmd") .or_insert_with(Vec::new) .push(cmd.to_owned()); } self } /// A list of environment variables in the form "VAR=value" pub fn env( &mut self, envs: Vec<&str>, ) -> &mut Self { for env in envs { self.params .entry("Env") .or_insert_with(Vec::new) .push(env.to_owned()); } self } /// Attach to stdout of the exec command pub fn attach_stdout( &mut self, stdout: bool, ) -> &mut Self { self.params_bool.insert("AttachStdout", stdout); self } /// Attach to stderr of the exec command pub fn attach_stderr( &mut self, stderr: bool, ) -> &mut Self { self.params_bool.insert("AttachStderr", stderr); self } pub fn build(&self) -> ExecContainerOptions { ExecContainerOptions { params: self.params.clone(), params_bool: self.params_bool.clone(), } } } /// Options for filtering streams of Docker events #[derive(Default, Debug)] pub struct EventsOptions 
{ params: HashMap<&'static str, String>, } impl EventsOptions { pub fn builder() -> EventsOptionsBuilder { EventsOptionsBuilder::default() } /// serialize options as a string. returns None if no options are defined pub fn serialize(&self) -> Option<String> { if self.params.is_empty() { None } else { Some( form_urlencoded::Serializer::new(String::new()) .extend_pairs(&self.params) .finish(), ) } } } #[derive(Copy, Clone)] pub enum EventFilterType { Container, Image, Volume, Network, Daemon, } fn event_filter_type_to_string(filter: EventFilterType) -> &'static str { match filter { EventFilterType::Container => "container", EventFilterType::Image => "image", EventFilterType::Volume => "volume", EventFilterType::Network => "network", EventFilterType::Daemon => "daemon", } } /// Filter options for image listings pub enum EventFilter { Container(String), Event(String), Image(String), Label(String), Type(EventFilterType), Volume(String), Network(String), Daemon(String), } /// Builder interface for `EventOptions` #[derive(Default)] pub struct EventsOptionsBuilder { params: HashMap<&'static str, String>, events: Vec<String>, containers: Vec<String>, images: Vec<String>, labels: Vec<String>, volumes: Vec<String>, networks: Vec<String>, daemons: Vec<String>, types: Vec<String>, } impl EventsOptionsBuilder { /// Filter events since a given timestamp pub fn since( &mut self, ts: &u64, ) -> &mut Self { self.params.insert("since", ts.to_string()); self } /// Filter events until a given timestamp pub fn until( &mut self, ts: &u64, ) -> &mut Self { self.params.insert("until", ts.to_string()); self } pub fn filter( &mut self, filters: Vec<EventFilter>, ) -> &mut Self { let mut params = HashMap::new(); for f in filters { match f { EventFilter::Container(n) => { self.containers.push(n); params.insert("container", self.containers.clone()) } EventFilter::Event(n) => { self.events.push(n); params.insert("event", self.events.clone()) } EventFilter::Image(n) => { self.images.push(n); 
params.insert("image", self.images.clone()) } EventFilter::Label(n) => { self.labels.push(n); params.insert("label", self.labels.clone()) } EventFilter::Volume(n) => { self.volumes.push(n); params.insert("volume", self.volumes.clone()) } EventFilter::Network(n) => { self.networks.push(n); params.insert("network", self.networks.clone()) } EventFilter::Daemon(n) => { self.daemons.push(n); params.insert("daemon", self.daemons.clone()) } EventFilter::Type(n) => { let event_type = event_filter_type_to_string(n).to_string(); self.types.push(event_type); params.insert("type", self.types.clone()) } }; } self.params .insert("filters", serde_json::to_string(&params).unwrap()); self } pub fn build(&self) -> EventsOptions { EventsOptions { params: self.params.clone(), } } } /// Options for controlling log request results #[derive(Default, Debug)] pub struct LogsOptions { params: HashMap<&'static str, String>, } impl LogsOptions { /// return a new instance of a builder for options pub fn builder() -> LogsOptionsBuilder { LogsOptionsBuilder::default() } /// serialize options as a string. 
returns None if no options are defined pub fn serialize(&self) -> Option<String> { if self.params.is_empty() { None } else { Some( form_urlencoded::Serializer::new(String::new()) .extend_pairs(&self.params) .finish(), ) } } } /// Builder interface for `LogsOptions` #[derive(Default)] pub struct LogsOptionsBuilder { params: HashMap<&'static str, String>, } impl LogsOptionsBuilder { pub fn follow( &mut self, f: bool, ) -> &mut Self { self.params.insert("follow", f.to_string()); self } pub fn stdout( &mut self, s: bool, ) -> &mut Self { self.params.insert("stdout", s.to_string()); self } pub fn stderr( &mut self, s: bool, ) -> &mut Self { self.params.insert("stderr", s.to_string()); self } pub fn timestamps( &mut self, t: bool, ) -> &mut Self { self.params.insert("timestamps", t.to_string()); self } /// how_many can either be "all" or a to_string() of the number pub fn tail( &mut self, how_many: &str, ) -> &mut Self { self.params.insert("tail", how_many.to_owned()); self } pub fn build(&self) -> LogsOptions { LogsOptions { params: self.params.clone(), } } } /// Filter options for image listings pub enum ImageFilter { Dangling, LabelName(String), Label(String, String), } /// Options for filtering image list results #[derive(Default, Debug)] pub struct ImageListOptions { params: HashMap<&'static str, String>, } impl ImageListOptions { pub fn builder() -> ImageListOptionsBuilder { ImageListOptionsBuilder::default() } pub fn serialize(&self) -> Option<String> { if self.params.is_empty() { None } else { Some( form_urlencoded::Serializer::new(String::new()) .extend_pairs(&self.params) .finish(), ) } } } /// Builder interface for `ImageListOptions` #[derive(Default)] pub struct ImageListOptionsBuilder { params: HashMap<&'static str, String>, } impl ImageListOptionsBuilder { pub fn digests( &mut self, d: bool, ) -> &mut Self { self.params.insert("digests", d.to_string()); self } pub fn all( &mut self, a: bool, ) -> &mut Self { self.params.insert("all", a.to_string()); self } 
pub fn filter_name( &mut self, name: &str, ) -> &mut Self { self.params.insert("filter", name.to_owned()); self } pub fn filter( &mut self, filters: Vec<ImageFilter>, ) -> &mut Self { let mut param = HashMap::new(); for f in filters { match f { ImageFilter::Dangling => param.insert("dangling", vec![true.to_string()]), ImageFilter::LabelName(n) => param.insert("label", vec![n]), ImageFilter::Label(n, v) => param.insert("label", vec![format!("{}={}", n, v)]), }; } // structure is a a json encoded object mapping string keys to a list // of string values self.params .insert("filters", serde_json::to_string(&param).unwrap()); self } pub fn build(&self) -> ImageListOptions { ImageListOptions { params: self.params.clone(), } } } /// Options for controlling log request results #[derive(Default, Debug)] pub struct RmContainerOptions { params: HashMap<&'static str, String>, } impl RmContainerOptions { /// return a new instance of a builder for options pub fn builder() -> RmContainerOptionsBuilder { RmContainerOptionsBuilder::default() } /// serialize options as a string. returns None if no options are defined pub fn serialize(&self) -> Option<String> { if self.params.is_empty() { None } else { Some( form_urlencoded::Serializer::new(String::new()) .extend_pairs(&self.params) .finish(), ) } } } /// Builder interface for `LogsOptions` #[derive(Default)] pub struct RmContainerOptionsBuilder { params: HashMap<&'static str, String>, } impl RmContainerOptionsBuilder { pub fn force( &mut self, f: bool, ) -> &mut Self { self.params.insert("force", f.to_string()); self } pub fn volumes( &mut self, s: bool, ) -> &mut Self { self.params.insert("v", s.to_string()); self } pub fn build(&self) -> RmContainerOptions { RmContainerOptions { params: self.params.clone(), } } } /// Options for filtering networks list results #[derive(Default, Debug)] pub struct NetworkListOptions { params: HashMap<&'static str, String>, } impl NetworkListOptions { /// serialize options as a string. 
returns None if no options are defined pub fn serialize(&self) -> Option<String> { if self.params.is_empty() { None } else { Some( form_urlencoded::Serializer::new(String::new()) .extend_pairs(&self.params) .finish(), ) } } } /// Interface for creating new docker network #[derive(Serialize, Debug)] pub struct NetworkCreateOptions { params: HashMap<&'static str, Value>, } impl NetworkCreateOptions { /// return a new instance of a builder for options pub fn builder(name: &str) -> NetworkCreateOptionsBuilder { NetworkCreateOptionsBuilder::new(name) } /// serialize options as a string. returns None if no options are defined pub fn serialize(&self) -> Result<String> { serde_json::to_string(&self.params).map_err(Error::from) } pub fn parse_from<'a, K, V>( &self, params: &'a HashMap<K, V>, body: &mut serde_json::Map<String, Value>, ) where &'a HashMap<K, V>: IntoIterator, K: ToString + Eq + Hash, V: Serialize, { for (k, v) in params.iter() { let key = k.to_string(); let value = serde_json::to_value(v).unwrap(); body.insert(key, value); } } } #[derive(Default)] pub struct NetworkCreateOptionsBuilder { params: HashMap<&'static str, Value>, } impl NetworkCreateOptionsBuilder { pub(crate) fn new(name: &str) -> Self { let mut params = HashMap::new(); params.insert("Name", json!(name)); NetworkCreateOptionsBuilder { params } } pub fn driver( &mut self, name: &str, ) -> &mut Self { if !name.is_empty() { self.params.insert("Driver", json!(name)); } self } pub fn label( &mut self, labels: HashMap<String, String>, ) -> &mut Self { self.params.insert("Labels", json!(labels)); self } pub fn build(&self) -> NetworkCreateOptions { NetworkCreateOptions { params: self.params.clone(), } } } /// Interface for connect container to network #[derive(Serialize, Debug)] pub struct ContainerConnectionOptions { params: HashMap<&'static str, Value>, } impl ContainerConnectionOptions { /// serialize options as a string. 
returns None if no options are defined pub fn serialize(&self) -> Result<String> { serde_json::to_string(&self.params).map_err(Error::from) } pub fn parse_from<'a, K, V>( &self, params: &'a HashMap<K, V>, body: &mut BTreeMap<String, Value>, ) where &'a HashMap<K, V>: IntoIterator, K: ToString + Eq + Hash, V: Serialize, { for (k, v) in params.iter() { let key = k.to_string(); let value = serde_json::to_value(v).unwrap(); body.insert(key, value); } } /// return a new instance of a builder for options pub fn builder(container_id: &str) -> ContainerConnectionOptionsBuilder { ContainerConnectionOptionsBuilder::new(container_id) } } #[derive(Default)] pub struct ContainerConnectionOptionsBuilder { params: HashMap<&'static str, Value>, } impl ContainerConnectionOptionsBuilder { pub(crate) fn new(container_id: &str) -> Self { let mut params = HashMap::new(); params.insert("Container", json!(container_id)); ContainerConnectionOptionsBuilder { params } } pub fn aliases( &mut self, aliases: Vec<&str>, ) -> &mut Self { self.params .insert("EndpointConfig", json!({ "Aliases": json!(aliases) })); self } pub fn force(&mut self) -> &mut Self { self.params.insert("Force", json!(true)); self } pub fn build(&self) -> ContainerConnectionOptions { ContainerConnectionOptions { params: self.params.clone(), } } } /// Interface for creating volumes #[derive(Serialize, Debug)] pub struct VolumeCreateOptions { params: HashMap<&'static str, Value>, } impl VolumeCreateOptions { /// serialize options as a string. 
returns None if no options are defined pub fn serialize(&self) -> Result<String> { serde_json::to_string(&self.params).map_err(Error::from) } pub fn parse_from<'a, K, V>( &self, params: &'a HashMap<K, V>, body: &mut BTreeMap<String, Value>, ) where &'a HashMap<K, V>: IntoIterator, K: ToString + Eq + Hash, V: Serialize, { for (k, v) in params.iter() { let key = k.to_string(); let value = serde_json::to_value(v).unwrap(); body.insert(key, value); } } /// return a new instance of a builder for options pub fn builder() -> VolumeCreateOptionsBuilder { VolumeCreateOptionsBuilder::new() } } #[derive(Default)] pub struct VolumeCreateOptionsBuilder { params: HashMap<&'static str, Value>, } impl VolumeCreateOptionsBuilder { pub(crate) fn new() -> Self { let params = HashMap::new(); VolumeCreateOptionsBuilder { params } } pub fn name( &mut self, name: &str, ) -> &mut Self { self.params.insert("Name", json!(name)); self } pub fn labels( &mut self, labels: &HashMap<&str, &str>, ) -> &mut Self { self.params.insert("Labels", json!(labels)); self } pub fn build(&self) -> VolumeCreateOptions { VolumeCreateOptions { params: self.params.clone(), } } } #[cfg(test)] mod tests { use super::{ContainerOptionsBuilder, RegistryAuth}; #[test] fn container_options_simple() { let builder = ContainerOptionsBuilder::new("test_image"); let options = builder.build(); assert_eq!( r#"{"HostConfig":{},"Image":"test_image"}"#, options.serialize().unwrap() ); } #[test] fn container_options_env() { let options = ContainerOptionsBuilder::new("test_image") .env(vec!["foo", "bar"]) .build(); assert_eq!( r#"{"Env":["foo","bar"],"HostConfig":{},"Image":"test_image"}"#, options.serialize().unwrap() ); } #[test] fn container_options_host_config() { let options = ContainerOptionsBuilder::new("test_image") .network_mode("host") .auto_remove(true) .privileged(true) .build(); assert_eq!( r#"{"HostConfig":{"AutoRemove":true,"NetworkMode":"host","Privileged":true},"Image":"test_image"}"#, 
options.serialize().unwrap() ); } #[test] fn container_options_expose() { let options = ContainerOptionsBuilder::new("test_image") .expose(80, "tcp", 8080) .build(); assert_eq!( r#"{"ExposedPorts":{"80/tcp":{}},"HostConfig":{"PortBindings":{"80/tcp":[{"HostPort":"8080"}]}},"Image":"test_image"}"#, options.serialize().unwrap() ); // try exposing two let options = ContainerOptionsBuilder::new("test_image") .expose(80, "tcp", 8080) .expose(81, "tcp", 8081) .build(); assert_eq!( r#"{"ExposedPorts":{"80/tcp":{},"81/tcp":{}},"HostConfig":{"PortBindings":{"80/tcp":[{"HostPort":"8080"}],"81/tcp":[{"HostPort":"8081"}]}},"Image":"test_image"}"#, options.serialize().unwrap() ); } /// Test container options that are nested 3 levels deep. #[test] fn container_options_nested() { let options = ContainerOptionsBuilder::new("test_image") .log_driver("fluentd") .build(); assert_eq!( r#"{"HostConfig":{"LogConfig":{"Type":"fluentd"}},"Image":"test_image"}"#, options.serialize().unwrap() ); } /// Test the restart policy settings #[test] fn container_options_restart_policy() { let mut options = ContainerOptionsBuilder::new("test_image") .restart_policy("on-failure", 10) .build(); assert_eq!( r#"{"HostConfig":{"RestartPolicy":{"MaximumRetryCount":10,"Name":"on-failure"}},"Image":"test_image"}"#, options.serialize().unwrap() ); options = ContainerOptionsBuilder::new("test_image") .restart_policy("always", 0) .build(); assert_eq!( r#"{"HostConfig":{"RestartPolicy":{"Name":"always"}},"Image":"test_image"}"#, options.serialize().unwrap() ); } /// Test registry auth with token #[test] fn registry_auth_token() { let options = RegistryAuth::token("abc"); assert_eq!( base64::encode(r#"{"identitytoken":"abc"}"#), options.serialize() ); } /// Test registry auth with username and password #[test] fn registry_auth_password_simple() { let options = RegistryAuth::builder() .username("user_abc") .password("password_abc") .build(); assert_eq!( 
base64::encode(r#"{"username":"user_abc","password":"password_abc"}"#), options.serialize() ); } /// Test registry auth with all fields #[test] fn registry_auth_password_all() { let options = RegistryAuth::builder() .username("user_abc") .password("password_abc") .email("email_abc") .server_address("https://example.org") .build(); assert_eq!( base64::encode(r#"{"username":"user_abc","password":"password_abc","email":"email_abc","serveraddress":"https://example.org"}"#), options.serialize() ); } }
//! //! Raw communication channel to the FUSE kernel driver. //! use std::io; use std::ffi::{CString, CStr, OsStr, AsOsStr}; use std::os::unix::ffi::OsStrExt; use std::path::{PathBuf, Path}; use libc::{c_char, c_int, c_void, size_t}; use fuse::{fuse_args, fuse_mount_compat25}; // Libc provides iovec based I/O using readv and writev functions #[allow(dead_code, non_camel_case_types)] mod libc { use libc::{c_char, c_int, c_void, size_t, ssize_t}; /// Iovec data structure for readv and writev calls. #[repr(C)] pub struct iovec { pub iov_base: *const c_void, pub iov_len: size_t, } extern "system" { /// Read data from fd into multiple buffers pub fn readv (fd: c_int, iov: *mut iovec, iovcnt: c_int) -> ssize_t; /// Write data from multiple buffers to fd pub fn writev (fd: c_int, iov: *const iovec, iovcnt: c_int) -> ssize_t; pub fn realpath (file_name: *const c_char, resolved_name: *mut c_char) -> *const c_char; #[cfg(target_os = "macos")] pub fn unmount(dir: *const c_char, flags: c_int) -> c_int; #[cfg(not(target_os = "macos"))] pub fn umount(dir: *const c_char) -> c_int; } /// Max length for path names. 4096 should be reasonable safe (OS X uses 1024, Linux uses 4096) pub const PATH_MAX: usize = 4096; } /// Wrapper around libc's realpath. Returns the errno value if the real path cannot be obtained. 
/// FIXME: Use Rust's realpath method once available in std (see also https://github.com/mozilla/rust/issues/11857) fn real_path (path: &CStr) -> io::Result<CString> { let mut resolved: Vec<c_char> = Vec::with_capacity(libc::PATH_MAX); unsafe { if libc::realpath(path.as_ptr(), resolved.as_mut_ptr()).is_null() { Err(io::Error::last_os_error()) } else { // Using CStr::from_ptr gets the correct string length via strlen() let cresolved = CStr::from_ptr(resolved.as_ptr()); Ok(CString::new(cresolved.to_bytes()).unwrap()) } } } /// Helper function to provide options as a fuse_args struct /// (which contains an argc count and an argv pointer) fn with_fuse_args<T, F: FnOnce(&fuse_args) -> T> (options: &[&OsStr], f: F) -> T { let mut args: Vec<CString> = vec![CString::new("rust-fuse").unwrap()]; args.extend(options.iter().map(|s| s.to_cstring().unwrap() )); let argptrs: Vec<*const i8> = args.iter().map(|s| s.as_ptr()).collect(); f(&fuse_args { argc: argptrs.len() as i32, argv: argptrs.as_ptr(), allocated: 0 }) } /// A raw communication channel to the FUSE kernel driver pub struct Channel { mountpoint: PathBuf, fd: c_int, } impl Channel { /// Create a new communication channel to the kernel driver by mounting the /// given path. The kernel driver will delegate filesystem operations of /// the given path to the channel. If the channel is dropped, the path is /// unmounted. pub fn new (mountpoint: &Path, options: &[&OsStr]) -> io::Result<Channel> { let mnt = try!(mountpoint.as_os_str().to_cstring()); real_path(&mnt).and_then(|mnt| { with_fuse_args(options, |args| { let fd = unsafe { fuse_mount_compat25(mnt.as_ptr(), args) }; if fd < 0 { Err(io::Error::last_os_error()) } else { let mountpoint = PathBuf::new(<OsStr as OsStrExt>::from_bytes(mnt.as_bytes())); Ok(Channel { mountpoint: mountpoint, fd: fd }) } }) }) } /// Return path of the mounted filesystem pub fn mountpoint (&self) -> &Path { &self.mountpoint } /// Receives data up to the capacity of the given buffer (can block). 
pub fn receive (&self, buffer: &mut Vec<u8>) -> io::Result<()> { let rc = unsafe { ::libc::read(self.fd, buffer.as_ptr() as *mut c_void, buffer.capacity() as size_t) }; if rc < 0 { Err(io::Error::last_os_error()) } else { unsafe { buffer.set_len(rc as usize); } Ok(()) } } /// Returns a sender object for this channel. The sender object can be /// used to send to the channel. Multiple sender objects can be used /// and they can safely be sent to other threads. pub fn sender (&self) -> ChannelSender { // Since write/writev syscalls are threadsafe, we can simply create // a sender by using the same fd and use it in other threads. Only // the channel closes the fd when dropped. If any sender is used after // dropping the channel, it'll return an EBADF error. ChannelSender { fd: self.fd } } } impl Drop for Channel { fn drop (&mut self) { // TODO: send ioctl FUSEDEVIOCSETDAEMONDEAD on OS X before closing the fd // Close the communication channel to the kernel driver // (closing it before unnmount prevents sync unmount deadlock) unsafe { ::libc::close(self.fd); } // Unmount this channel's mount point let _ = unmount(&self.mountpoint); } } #[derive(Copy)] pub struct ChannelSender { fd: c_int, } impl ChannelSender { /// Send all data in the slice of slice of bytes in a single write (can block). pub fn send (&self, buffer: &[&[u8]]) -> io::Result<()> { let iovecs: Vec<libc::iovec> = buffer.iter().map(|d| { libc::iovec { iov_base: d.as_ptr() as *const c_void, iov_len: d.len() as size_t } }).collect(); let rc = unsafe { libc::writev(self.fd, iovecs.as_ptr(), iovecs.len() as c_int) }; if rc < 0 { Err(io::Error::last_os_error()) } else { Ok(()) } } } /// Unmount an arbitrary mount point pub fn unmount (mountpoint: &Path) -> io::Result<()> { // fuse_unmount_compat22 unfortunately doesn't return a status. Additionally, // it attempts to call realpath, which in turn calls into the filesystem. 
So // if the filesystem returns an error, the unmount does not take place, with // no indication of the error available to the caller. So we call unmount // directly, which is what osxfuse does anyway, since we already converted // to the real path when we first mounted. #[cfg(target_os = "macos")] #[inline] fn libc_umount (mnt: &CStr) -> c_int { unsafe { libc::unmount(mnt.as_ptr(), 0) } } #[cfg(not(target_os = "macos"))] #[inline] fn libc_umount (mnt: &CStr) -> c_int { unsafe { libc::umount(mnt.as_ptr()) } } let mnt = try!(mountpoint.as_os_str().to_cstring()); let rc = libc_umount(&mnt); if rc < 0 { Err(io::Error::last_os_error()) } else { Ok(()) } } #[cfg(test)] mod test { use super::with_fuse_args; use std::ffi::{CStr, OsStr}; #[test] fn fuse_args () { with_fuse_args(&[OsStr::from_str("foo"), OsStr::from_str("bar")], |args| { assert_eq!(args.argc, 3); assert_eq!(unsafe { CStr::from_ptr(*args.argv.offset(0)).to_bytes() }, b"rust-fuse"); assert_eq!(unsafe { CStr::from_ptr(*args.argv.offset(1)).to_bytes() }, b"foo"); assert_eq!(unsafe { CStr::from_ptr(*args.argv.offset(2)).to_bytes() }, b"bar"); }); } } use PathBuf::from instead of PathBuf::new //! //! Raw communication channel to the FUSE kernel driver. //! use std::io; use std::ffi::{CString, CStr, OsStr, AsOsStr}; use std::os::unix::ffi::OsStrExt; use std::path::{PathBuf, Path}; use libc::{c_char, c_int, c_void, size_t}; use fuse::{fuse_args, fuse_mount_compat25}; // Libc provides iovec based I/O using readv and writev functions #[allow(dead_code, non_camel_case_types)] mod libc { use libc::{c_char, c_int, c_void, size_t, ssize_t}; /// Iovec data structure for readv and writev calls. 
#[repr(C)] pub struct iovec { pub iov_base: *const c_void, pub iov_len: size_t, } extern "system" { /// Read data from fd into multiple buffers pub fn readv (fd: c_int, iov: *mut iovec, iovcnt: c_int) -> ssize_t; /// Write data from multiple buffers to fd pub fn writev (fd: c_int, iov: *const iovec, iovcnt: c_int) -> ssize_t; pub fn realpath (file_name: *const c_char, resolved_name: *mut c_char) -> *const c_char; #[cfg(target_os = "macos")] pub fn unmount(dir: *const c_char, flags: c_int) -> c_int; #[cfg(not(target_os = "macos"))] pub fn umount(dir: *const c_char) -> c_int; } /// Max length for path names. 4096 should be reasonable safe (OS X uses 1024, Linux uses 4096) pub const PATH_MAX: usize = 4096; } /// Wrapper around libc's realpath. Returns the errno value if the real path cannot be obtained. /// FIXME: Use Rust's realpath method once available in std (see also https://github.com/mozilla/rust/issues/11857) fn real_path (path: &CStr) -> io::Result<CString> { let mut resolved: Vec<c_char> = Vec::with_capacity(libc::PATH_MAX); unsafe { if libc::realpath(path.as_ptr(), resolved.as_mut_ptr()).is_null() { Err(io::Error::last_os_error()) } else { // Using CStr::from_ptr gets the correct string length via strlen() let cresolved = CStr::from_ptr(resolved.as_ptr()); Ok(CString::new(cresolved.to_bytes()).unwrap()) } } } /// Helper function to provide options as a fuse_args struct /// (which contains an argc count and an argv pointer) fn with_fuse_args<T, F: FnOnce(&fuse_args) -> T> (options: &[&OsStr], f: F) -> T { let mut args: Vec<CString> = vec![CString::new("rust-fuse").unwrap()]; args.extend(options.iter().map(|s| s.to_cstring().unwrap() )); let argptrs: Vec<*const i8> = args.iter().map(|s| s.as_ptr()).collect(); f(&fuse_args { argc: argptrs.len() as i32, argv: argptrs.as_ptr(), allocated: 0 }) } /// A raw communication channel to the FUSE kernel driver pub struct Channel { mountpoint: PathBuf, fd: c_int, } impl Channel { /// Create a new communication channel to 
the kernel driver by mounting the /// given path. The kernel driver will delegate filesystem operations of /// the given path to the channel. If the channel is dropped, the path is /// unmounted. pub fn new (mountpoint: &Path, options: &[&OsStr]) -> io::Result<Channel> { let mnt = try!(mountpoint.as_os_str().to_cstring()); real_path(&mnt).and_then(|mnt| { with_fuse_args(options, |args| { let fd = unsafe { fuse_mount_compat25(mnt.as_ptr(), args) }; if fd < 0 { Err(io::Error::last_os_error()) } else { let mountpoint = PathBuf::from(<OsStr as OsStrExt>::from_bytes(mnt.as_bytes())); Ok(Channel { mountpoint: mountpoint, fd: fd }) } }) }) } /// Return path of the mounted filesystem pub fn mountpoint (&self) -> &Path { &self.mountpoint } /// Receives data up to the capacity of the given buffer (can block). pub fn receive (&self, buffer: &mut Vec<u8>) -> io::Result<()> { let rc = unsafe { ::libc::read(self.fd, buffer.as_ptr() as *mut c_void, buffer.capacity() as size_t) }; if rc < 0 { Err(io::Error::last_os_error()) } else { unsafe { buffer.set_len(rc as usize); } Ok(()) } } /// Returns a sender object for this channel. The sender object can be /// used to send to the channel. Multiple sender objects can be used /// and they can safely be sent to other threads. pub fn sender (&self) -> ChannelSender { // Since write/writev syscalls are threadsafe, we can simply create // a sender by using the same fd and use it in other threads. Only // the channel closes the fd when dropped. If any sender is used after // dropping the channel, it'll return an EBADF error. 
ChannelSender { fd: self.fd } } } impl Drop for Channel { fn drop (&mut self) { // TODO: send ioctl FUSEDEVIOCSETDAEMONDEAD on OS X before closing the fd // Close the communication channel to the kernel driver // (closing it before unnmount prevents sync unmount deadlock) unsafe { ::libc::close(self.fd); } // Unmount this channel's mount point let _ = unmount(&self.mountpoint); } } #[derive(Copy)] pub struct ChannelSender { fd: c_int, } impl ChannelSender { /// Send all data in the slice of slice of bytes in a single write (can block). pub fn send (&self, buffer: &[&[u8]]) -> io::Result<()> { let iovecs: Vec<libc::iovec> = buffer.iter().map(|d| { libc::iovec { iov_base: d.as_ptr() as *const c_void, iov_len: d.len() as size_t } }).collect(); let rc = unsafe { libc::writev(self.fd, iovecs.as_ptr(), iovecs.len() as c_int) }; if rc < 0 { Err(io::Error::last_os_error()) } else { Ok(()) } } } /// Unmount an arbitrary mount point pub fn unmount (mountpoint: &Path) -> io::Result<()> { // fuse_unmount_compat22 unfortunately doesn't return a status. Additionally, // it attempts to call realpath, which in turn calls into the filesystem. So // if the filesystem returns an error, the unmount does not take place, with // no indication of the error available to the caller. So we call unmount // directly, which is what osxfuse does anyway, since we already converted // to the real path when we first mounted. 
#[cfg(target_os = "macos")] #[inline] fn libc_umount (mnt: &CStr) -> c_int { unsafe { libc::unmount(mnt.as_ptr(), 0) } } #[cfg(not(target_os = "macos"))] #[inline] fn libc_umount (mnt: &CStr) -> c_int { unsafe { libc::umount(mnt.as_ptr()) } } let mnt = try!(mountpoint.as_os_str().to_cstring()); let rc = libc_umount(&mnt); if rc < 0 { Err(io::Error::last_os_error()) } else { Ok(()) } } #[cfg(test)] mod test { use super::with_fuse_args; use std::ffi::{CStr, OsStr}; #[test] fn fuse_args () { with_fuse_args(&[OsStr::from_str("foo"), OsStr::from_str("bar")], |args| { assert_eq!(args.argc, 3); assert_eq!(unsafe { CStr::from_ptr(*args.argv.offset(0)).to_bytes() }, b"rust-fuse"); assert_eq!(unsafe { CStr::from_ptr(*args.argv.offset(1)).to_bytes() }, b"foo"); assert_eq!(unsafe { CStr::from_ptr(*args.argv.offset(2)).to_bytes() }, b"bar"); }); } }
use std::{thread, time}; /// Chipate Module /// Rust emulation of the Chip-8 /// http://www.multigesturearticles/how-to-write-an-emulator-chip-8-interpreter/ #[allow(dead_code)] pub struct Chipate { // Opcodes opcode: u16, // 4K Memory memory: [u8; 4096], // Registers v: [u8; 16], // Index i: u8, // Program Counter pc: u8, // The systems memory map: // 0x000-0x1FF - Chip 8 interpreter (contains font set in emu) // 0x050-0x0A0 - Used for the built in 4x5 pixel font set (0-F) // 0x200-0xFFF - Program ROM and work RAM gfx: [u8; 64 * 32], // Timers delay_timer: u8, sound_timer: u8, // Stack stack: [u8; 16], sp: u8, // Keypad key: [u8; 16], program: &'static str, } impl Chipate { pub fn init(&mut self) { debug!("Initialize Chip"); } pub fn load_program(&mut self, program: &str) { debug!("Loading program {}", program); } pub fn emulate_cycle(&mut self) { debug!("Cycle Begin"); self.fetch_opcode(); let one_second = time::Duration::from_secs(1); thread::sleep(one_second); debug!("Cycle End"); } pub fn draw_screen(&mut self) { debug!("Drawing to Screen") } pub fn set_keys(&mut self) { debug!("Saving Key State") } pub fn setup_testing_memory(&mut self) { debug!("Setting test memory"); self.memory[self.pc as usize] = 0xA2; self.memory[(self.pc + 1) as usize] = 0xF0; debug!("location: 0x{:x} data: 0x{:x}", self.pc, self.memory[self.pc as usize]); debug!("location: 0x{:x} data: 0x{:x}", self.pc + 1, self.memory[(self.pc + 1) as usize]); } pub fn fetch_opcode(&mut self) { } } pub fn new_chipate() -> Chipate { debug!("Creating New Chip"); let chip = Chipate { opcode: 0, memory: [0; 4096], v: [0; 16], i: 0, pc: 0, gfx: [0; 64 * 32], delay_timer: 0, sound_timer: 0, stack: [0; 16], sp: 0, key: [0; 16], program: "" }; return chip; } Fetching Opcode from memory use std::{thread, time}; /// Chipate Module /// Rust emulation of the Chip-8 /// http://www.multigesturearticles/how-to-write-an-emulator-chip-8-interpreter/ #[allow(dead_code)] pub struct Chipate { // Opcodes opcode: u16, // 
4K Memory memory: [u8; 4096], // The systems memory map: // 0x000-0x1FF - Chip 8 interpreter (contains font set in emu) // 0x050-0x0A0 - Used for the built in 4x5 pixel font set (0-F) // 0x200-0xFFF - Program ROM and work RAM // Registers v: [u8; 16], // Index i: u8, // Program Counter pc: u8, gfx: [u8; 64 * 32], // Timers delay_timer: u8, sound_timer: u8, // Stack stack: [u8; 16], sp: u8, // Keypad key: [u8; 16], program: &'static str, } impl Chipate { pub fn init(&mut self) { debug!("Initialize Chip"); } pub fn load_program(&mut self, program: &str) { debug!("Loading program {}", program); } pub fn emulate_cycle(&mut self) { debug!("Cycle Begin"); self.fetch_opcode(); let one_second = time::Duration::from_secs(1); thread::sleep(one_second); debug!("Cycle End"); } pub fn draw_screen(&mut self) { debug!("Drawing to Screen") } pub fn set_keys(&mut self) { debug!("Saving Key State") } pub fn setup_testing_memory(&mut self) { debug!("Setting test memory"); self.memory[self.pc as usize] = 0xA2; self.memory[(self.pc + 1) as usize] = 0xF0; debug!("Location: 0x{:x} data: 0x{:x}", self.pc, self.memory[self.pc as usize]); debug!("Location: 0x{:x} data: 0x{:x}", self.pc + 1, self.memory[(self.pc + 1) as usize]); } pub fn fetch_opcode(&mut self) { let op_a = self.memory[self.pc as usize]; self.opcode = op_a as u16; self.opcode = self.opcode << 8; // debug!("location: 0x{:x} data: 0x{:x}", self.pc, self.opcode); let op_b = self.memory[(self.pc + 1) as usize] as u16; // debug!("location: 0x{:x} data: 0x{:x}", self.pc, op_b); self.opcode = self.opcode | op_b; debug!("Opcode: 0x{:x}", self.opcode); } } pub fn new_chipate() -> Chipate { debug!("Creating New Chip"); let chip = Chipate { opcode: 0, memory: [0; 4096], v: [0; 16], i: 0, pc: 0, gfx: [0; 64 * 32], delay_timer: 0, sound_timer: 0, stack: [0; 16], sp: 0, key: [0; 16], program: "", }; return chip; }
// Copyright 2020 Google LLC // // Use of this source code is governed by an MIT-style license that can be found // in the LICENSE file or at https://opensource.org/licenses/MIT. //! Utilities for working with the chunked format. //! //! The chunked file format is extremely simple serialization procedure where //! each chunk of raw bytes is prepended with the size of the chunk (as 64-bit //! unsigned big-endian integer). //! //! Its primary application is streaming encoding and decoding of blobs of data //! in formats that do not support or are inefficient for these purposes (such //! as serialized Protocol Buffer messages). use std::io::Cursor; use byteorder::BigEndian; /// Encodes a given iterator over binary blobs into the chunked format. /// /// This is a streaming encoder and performs the encoding in a lazy way. It /// should compose well with other streaming encoders (e.g. these offered by /// the [`flate2`] crate). /// /// [`flate2`]: https://crates.io/crates/flate2 /// /// # Examples /// /// ```no_run /// use std::fs::File; /// /// let data = [b"foo", b"bar", b"baz"]; /// /// let mut stream = rrg::chunked::encode(data.iter().map(|blob| &blob[..])); /// let mut file = File::create("output.chunked").unwrap(); /// std::io::copy(&mut stream, &mut file).unwrap(); /// ``` pub fn encode<I, M>(iter: I) -> Encode<I> where I: Iterator<Item = M>, M: prost::Message, { Encode { iter: iter, cur: Cursor::new(vec!()), } } /// Decodes a buffer in the chunked format into binary blobs. /// /// This is a streaming decoder and performs the decoding in a lazy way. It /// should compose well with other streaming decoders (e.g. these offered by the /// [`flate2`] crate). 
/// /// [`flate2`]: https://crates.io/crates/flate2 /// /// # Examples /// /// ```no_run /// use std::fs::File; /// /// let file = File::open("input.chunked").unwrap(); /// for (idx, blob) in rrg::chunked::decode(file).enumerate() { /// println!("blob #{}: {:?}", idx, blob.unwrap()); /// } /// ``` pub fn decode<R, M>(buf: R) -> Decode<R, M> where R: std::io::Read, M: prost::Message, { Decode { reader: buf, buf: vec!(), marker: std::marker::PhantomData, } } /// Streaming encoder for the chunked format. /// /// It implements the `Read` trait, lazily polling the underlying chunk iterator /// as more bytes is needed. /// /// Instances of this type can be constructed using the [`encode`] function. /// /// [`encode`]: fn.encode.html pub struct Encode<I> { iter: I, cur: Cursor<Vec<u8>>, } impl<I, M> Encode<I> where I: Iterator<Item = M>, M: prost::Message, { /// Checks whether all the data from the underlying cursor has been read. fn is_empty(&self) -> bool { self.cur.position() == self.cur.get_ref().len() as u64 } /// Pulls another blob of data from the underlying iterator. fn pull(&mut self) -> std::io::Result<()> { use byteorder::WriteBytesExt as _; let msg = match self.iter.next() { Some(msg) => msg, None => return Ok(()), }; self.cur.get_mut().clear(); self.cur.set_position(0); self.cur.write_u64::<BigEndian>(msg.encoded_len() as u64)?; msg.encode(&mut self.cur.get_mut())?; self.cur.set_position(0); Ok(()) } } impl<I, M> std::io::Read for Encode<I> where I: Iterator<Item = M>, M: prost::Message, { fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { if self.is_empty() { self.pull()?; } self.cur.read(buf) } } /// Streaming decoder for the chunked format. /// /// It implements the `Iterator` trait yielding chunks of decoded blobs, lazily /// decoding data from the underlying buffer. /// /// Instances of this type can be constructed using the [`decode`] function. 
/// /// [`decode`]: fn.decode.html pub struct Decode<R, M> { reader: R, buf: Vec<u8>, marker: std::marker::PhantomData<M>, } impl<R: std::io::Read, M> Decode<R, M> { /// Reads a size tag from the underlying buffer. /// /// It will return `None` if the is no more data in the buffer. fn read_len(&mut self) -> std::io::Result<Option<usize>> { use byteorder::ReadBytesExt as _; // `read` might not always read all 8 bytes. On the other hand, we also // cannot use just `read_exact` because the stream might have ended // already. Hence, we combine the two. First we attempt to read some // bytes with `read`: it should either return 0 (indicating end of the // stream), 8 (indicating that we have filled the whole buffer fully) // or something in between. In the last case, we use `read_exact to get // the remaining bytes (which should be non-zero now). let mut buf = [0; 8]; match self.reader.read(&mut buf[..])? { 8 => (), 0 => return Ok(None), len => self.reader.read_exact(&mut buf[len..])?, } let len = (&buf[..]).read_u64::<BigEndian>()? 
as usize; Ok(Some(len)) } } impl<R, M> Iterator for Decode<R, M> where R: std::io::Read, M: prost::Message + Default, { type Item = std::io::Result<M>; fn next(&mut self) -> Option<std::io::Result<M>> { let len = match self.read_len() { Ok(Some(len)) => len, Ok(None) => return None, Err(error) => return Some(Err(error)), }; self.buf.resize(len, u8::default()); match self.reader.read_exact(&mut self.buf[..]) { Ok(()) => (), Err(error) => return Some(Err(error)), } let msg = match M::decode(&self.buf[..]) { Ok(msg) => msg, Err(error) => return Some(Err(error.into())), }; Some(Ok(msg)) } } #[cfg(test)] pub mod tests { use super::*; #[test] pub fn test_encode_empty_iter() { use std::io::Read as _; let mut stream = encode(std::iter::empty::<()>()); let mut output = vec!(); stream.read_to_end(&mut output).unwrap(); assert!(output.is_empty()); } #[test] pub fn test_decode_empty_buf() { let buf: &[u8] = b""; let mut iter = decode::<_, ()>(buf); assert!(iter.next().is_none()); } #[test] pub fn test_decode_incorrect_size_tag() { let buf: &[u8] = b"\x12\x34\x56"; let mut iter = decode::<_, ()>(buf); let error = iter.next().unwrap().unwrap_err(); assert_eq!(error.kind(), std::io::ErrorKind::UnexpectedEof); } #[test] pub fn test_decode_zero_size_tag() { let buf: &[u8] = b"\x00\x00\x00\x00\x00\x00\x00\x00"; let mut iter = decode::<_, ()>(buf); assert!(iter.next().is_none()); } #[test] pub fn test_decode_partial_size_tag() { // A simple reader that yields a 0-valued size tag byte by byte. 
struct Reader(u8); impl std::io::Read for Reader { fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { if self.0 > 8 { Ok(0) } else { buf[0] = 0; self.0 += 1; Ok(1) } } } let mut iter = decode::<_, ()>(Reader(0)); assert!(iter.next().is_none()); } #[test] pub fn test_decode_short_input() { let buf: &[u8] = b"\x00\x00\x00\x00\x00\x00\x00\x42foo"; let mut iter = decode::<_, ()>(buf); let error = iter.next().unwrap().unwrap_err(); assert_eq!(error.kind(), std::io::ErrorKind::UnexpectedEof); } #[test] pub fn test_encode_and_decode_single_message() { let mut iter = decode(encode(vec!(String::from("foo")).into_iter())) .map(Result::unwrap); assert_eq!(iter.next(), Some(String::from("foo"))); assert_eq!(iter.next(), None); } #[test] pub fn test_encode_and_decode_single_unit_message() { let mut iter = decode(encode(vec!(()).into_iter())) .map(Result::unwrap); assert_eq!(iter.next(), Some(())); assert_eq!(iter.next(), None); } #[test] pub fn test_encode_and_decode_multiple_messages() { let msgs = vec! { b"foo".to_vec(), b"bar".to_vec(), b"baz".to_vec(), }; let mut iter = decode(encode(msgs.into_iter())) .map(Result::unwrap); assert_eq!(iter.next(), Some(b"foo".to_vec())); assert_eq!(iter.next(), Some(b"bar".to_vec())); assert_eq!(iter.next(), Some(b"baz".to_vec())); assert_eq!(iter.next(), None); } #[test] pub fn test_encode_and_decode_multiple_unit_messages() { let msgs = vec!((), (), ()); let mut iter = decode(encode(msgs.into_iter())) .map(Result::unwrap); assert_eq!(iter.next(), Some(())); assert_eq!(iter.next(), Some(())); assert_eq!(iter.next(), Some(())); assert_eq!(iter.next(), None); } } Simplify tests for the chunked format utilities. // Copyright 2020 Google LLC // // Use of this source code is governed by an MIT-style license that can be found // in the LICENSE file or at https://opensource.org/licenses/MIT. //! Utilities for working with the chunked format. //! //! The chunked file format is extremely simple serialization procedure where //! 
each chunk of raw bytes is prepended with the size of the chunk (as 64-bit //! unsigned big-endian integer). //! //! Its primary application is streaming encoding and decoding of blobs of data //! in formats that do not support or are inefficient for these purposes (such //! as serialized Protocol Buffer messages). use std::io::Cursor; use byteorder::BigEndian; /// Encodes a given iterator over binary blobs into the chunked format. /// /// This is a streaming encoder and performs the encoding in a lazy way. It /// should compose well with other streaming encoders (e.g. these offered by /// the [`flate2`] crate). /// /// [`flate2`]: https://crates.io/crates/flate2 /// /// # Examples /// /// ```no_run /// use std::fs::File; /// /// let data = [b"foo", b"bar", b"baz"]; /// /// let mut stream = rrg::chunked::encode(data.iter().map(|blob| &blob[..])); /// let mut file = File::create("output.chunked").unwrap(); /// std::io::copy(&mut stream, &mut file).unwrap(); /// ``` pub fn encode<I, M>(iter: I) -> Encode<I> where I: Iterator<Item = M>, M: prost::Message, { Encode { iter: iter, cur: Cursor::new(vec!()), } } /// Decodes a buffer in the chunked format into binary blobs. /// /// This is a streaming decoder and performs the decoding in a lazy way. It /// should compose well with other streaming decoders (e.g. these offered by the /// [`flate2`] crate). /// /// [`flate2`]: https://crates.io/crates/flate2 /// /// # Examples /// /// ```no_run /// use std::fs::File; /// /// let file = File::open("input.chunked").unwrap(); /// for (idx, blob) in rrg::chunked::decode(file).enumerate() { /// println!("blob #{}: {:?}", idx, blob.unwrap()); /// } /// ``` pub fn decode<R, M>(buf: R) -> Decode<R, M> where R: std::io::Read, M: prost::Message, { Decode { reader: buf, buf: vec!(), marker: std::marker::PhantomData, } } /// Streaming encoder for the chunked format. /// /// It implements the `Read` trait, lazily polling the underlying chunk iterator /// as more bytes is needed. 
/// /// Instances of this type can be constructed using the [`encode`] function. /// /// [`encode`]: fn.encode.html pub struct Encode<I> { iter: I, cur: Cursor<Vec<u8>>, } impl<I, M> Encode<I> where I: Iterator<Item = M>, M: prost::Message, { /// Checks whether all the data from the underlying cursor has been read. fn is_empty(&self) -> bool { self.cur.position() == self.cur.get_ref().len() as u64 } /// Pulls another blob of data from the underlying iterator. fn pull(&mut self) -> std::io::Result<()> { use byteorder::WriteBytesExt as _; let msg = match self.iter.next() { Some(msg) => msg, None => return Ok(()), }; self.cur.get_mut().clear(); self.cur.set_position(0); self.cur.write_u64::<BigEndian>(msg.encoded_len() as u64)?; msg.encode(&mut self.cur.get_mut())?; self.cur.set_position(0); Ok(()) } } impl<I, M> std::io::Read for Encode<I> where I: Iterator<Item = M>, M: prost::Message, { fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { if self.is_empty() { self.pull()?; } self.cur.read(buf) } } /// Streaming decoder for the chunked format. /// /// It implements the `Iterator` trait yielding chunks of decoded blobs, lazily /// decoding data from the underlying buffer. /// /// Instances of this type can be constructed using the [`decode`] function. /// /// [`decode`]: fn.decode.html pub struct Decode<R, M> { reader: R, buf: Vec<u8>, marker: std::marker::PhantomData<M>, } impl<R: std::io::Read, M> Decode<R, M> { /// Reads a size tag from the underlying buffer. /// /// It will return `None` if the is no more data in the buffer. fn read_len(&mut self) -> std::io::Result<Option<usize>> { use byteorder::ReadBytesExt as _; // `read` might not always read all 8 bytes. On the other hand, we also // cannot use just `read_exact` because the stream might have ended // already. Hence, we combine the two. 
First we attempt to read some // bytes with `read`: it should either return 0 (indicating end of the // stream), 8 (indicating that we have filled the whole buffer fully) // or something in between. In the last case, we use `read_exact to get // the remaining bytes (which should be non-zero now). let mut buf = [0; 8]; match self.reader.read(&mut buf[..])? { 8 => (), 0 => return Ok(None), len => self.reader.read_exact(&mut buf[len..])?, } let len = (&buf[..]).read_u64::<BigEndian>()? as usize; Ok(Some(len)) } } impl<R, M> Iterator for Decode<R, M> where R: std::io::Read, M: prost::Message + Default, { type Item = std::io::Result<M>; fn next(&mut self) -> Option<std::io::Result<M>> { let len = match self.read_len() { Ok(Some(len)) => len, Ok(None) => return None, Err(error) => return Some(Err(error)), }; self.buf.resize(len, u8::default()); match self.reader.read_exact(&mut self.buf[..]) { Ok(()) => (), Err(error) => return Some(Err(error)), } let msg = match M::decode(&self.buf[..]) { Ok(msg) => msg, Err(error) => return Some(Err(error.into())), }; Some(Ok(msg)) } } #[cfg(test)] pub mod tests { use super::*; #[test] pub fn test_encode_empty_iter() { use std::io::Read as _; let mut stream = encode(std::iter::empty::<()>()); let mut output = vec!(); stream.read_to_end(&mut output).unwrap(); assert!(output.is_empty()); } #[test] pub fn test_decode_empty_buf() { let buf: &[u8] = b""; let mut iter = decode::<_, ()>(buf); assert!(iter.next().is_none()); } #[test] pub fn test_decode_incorrect_size_tag() { let buf: &[u8] = b"\x12\x34\x56"; let mut iter = decode::<_, ()>(buf); let error = iter.next().unwrap().unwrap_err(); assert_eq!(error.kind(), std::io::ErrorKind::UnexpectedEof); } #[test] pub fn test_decode_zero_size_tag() { let buf: &[u8] = b"\x00\x00\x00\x00\x00\x00\x00\x00"; let mut iter = decode::<_, ()>(buf); assert!(iter.next().is_none()); } #[test] pub fn test_decode_partial_size_tag() { // A simple reader that yields a 0-valued size tag byte by byte. 
struct Reader(u8); impl std::io::Read for Reader { fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { if self.0 > 8 { Ok(0) } else { buf[0] = 0; self.0 += 1; Ok(1) } } } let mut iter = decode::<_, ()>(Reader(0)); assert!(iter.next().is_none()); } #[test] pub fn test_decode_short_input() { let buf: &[u8] = b"\x00\x00\x00\x00\x00\x00\x00\x42foo"; let mut iter = decode::<_, ()>(buf); let error = iter.next().unwrap().unwrap_err(); assert_eq!(error.kind(), std::io::ErrorKind::UnexpectedEof); } #[test] pub fn test_encode_and_decode_single_message() { let mut iter = decode(encode(std::iter::once(String::from("foo")))) .map(Result::unwrap); assert_eq!(iter.next(), Some(String::from("foo"))); assert_eq!(iter.next(), None); } #[test] pub fn test_encode_and_decode_single_unit_message() { let mut iter = decode(encode(std::iter::once(()))) .map(Result::unwrap); assert_eq!(iter.next(), Some(())); assert_eq!(iter.next(), None); } #[test] pub fn test_encode_and_decode_multiple_messages() { let msgs = vec! { b"foo".to_vec(), b"bar".to_vec(), b"baz".to_vec(), }; let mut iter = decode(encode(msgs.into_iter())) .map(Result::unwrap); assert_eq!(iter.next(), Some(b"foo".to_vec())); assert_eq!(iter.next(), Some(b"bar".to_vec())); assert_eq!(iter.next(), Some(b"baz".to_vec())); assert_eq!(iter.next(), None); } #[test] pub fn test_encode_and_decode_multiple_unit_messages() { let msgs = vec!((), (), ()); let mut iter = decode(encode(msgs.into_iter())) .map(Result::unwrap); assert_eq!(iter.next(), Some(())); assert_eq!(iter.next(), Some(())); assert_eq!(iter.next(), Some(())); assert_eq!(iter.next(), None); } }
use std::cmp::Ordering; use rustc_serialize::json; use base64_vlq; static SOURCE_MAP_VERSION: u32 = 3; #[allow(dead_code)] #[allow(non_snake_case)] #[derive(RustcDecodable)] struct SourceMap { version: u32, sources: Vec<String>, names: Vec<String>, sourceRoot: Option<String>, mappings: String, file: Option<String> // We skip this. Keeping megabytes of data that we do not care about // in memory seems reckless to caches. //sourcesContent: Option<vec<String>>, } #[derive(Clone, Eq, PartialEq, Debug)] pub struct CodePosition { /** Line number in a code file, starting from 1 */ pub line: u32, /** Column number in a code file, starting from 0 */ pub column: u32 } #[derive(Clone, Eq, PartialEq, Debug)] pub struct Mapping { /** The position in the generated file */ pub generated: CodePosition, /** The position in the corresponding original source file */ pub original: CodePosition, /** The original source file */ pub source: String, /** The original source name of the function/class, if applicable */ pub name: String } pub struct Cache { generated_mappings: Vec<Mapping>, /** The path prefix of mapping source paths */ pub source_root: String } /** * consume parses a SourceMap into a cache that can be queried for mappings * * The only parameter is the raw source map as a JSON string. * According to the [source map spec][source-map-spec], source maps have the following attributes: * * - version: Which version of the source map spec this map is following. * - sources: An array of URLs to the original source files. * - names: An array of identifiers which can be referrenced by individual mappings. * - sourceRoot: Optional. The URL root from which all sources are relative. * - sourcesContent: Optional. An array of contents of the original source files. * - mappings: A string of base64 VLQs which contain the actual mappings. * - file: Optional. The generated file this source map is associated with. 
* * Here is an example source map: * * ```json * { * "version": 3, * "file": "out.js", * "sourceRoot" : "", * "sources": ["foo.js", "bar.js"], * "names": ["src", "maps", "are", "fun"], * "mappings": "AA,AB;;ABCDE;" * } * ``` * * [source-map-spec]: https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit?pli=1# */ pub fn consume(source_map_json: &str) -> Result<Cache, String> { let source_map: SourceMap = match json::decode(source_map_json) { Ok(x) => x, Err(err) => return Err(format!("{}", err)) }; parse_mappings(&source_map) } fn parse_mappings(source_map: &SourceMap) -> Result<Cache, String>{ if source_map.version != SOURCE_MAP_VERSION { return Err("Only Source Map version 3 is implemented".into()) } let mut generated_mappings: Vec<Mapping> = Vec::new(); let mut generated_line: u32 = 0; let mut previous_original_line: u32 = 0; let mut previous_original_column: u32 = 0; let mut previous_source: u32 = 0; let mut previous_name: u32 = 0; for line in source_map.mappings.as_bytes().split(|&x| x == (';' as u8)) { generated_line += 1; let mut previous_generated_column: u32 = 0; for segment in line.split(|&x| x == (',' as u8)) { let segment_length = segment.len(); let mut fields: Vec<i32> = Vec::new(); let mut character_index = 0; while character_index < segment_length { match base64_vlq::decode(&segment[character_index..segment_length]) { Some((value, field_length)) => { fields.push(value); character_index += field_length; }, None => return Err("Invalid VLQ mapping field".into()) }; } if fields.len() < 1 { continue; } if fields.len() == 2 { return Err("Found a source, but no line and column".into()); } if fields.len() == 3 { return Err("Found a source and line, but no column".into()); } let mut mapping = Mapping { generated: CodePosition { line: generated_line, column: ((previous_generated_column as i32) + fields[0]) as u32 }, original: CodePosition { line: 0, column: 0 }, source: "".into(), name: "".into() }; previous_generated_column = 
mapping.generated.column; if fields.len() > 1 { // Original source. previous_source = ((previous_source as i32) + fields[1]) as u32; mapping.source = source_map.sources[previous_source as usize].to_owned(); // Original line. previous_original_line = ((previous_original_line as i32) + fields[2]) as u32; // Lines are stored 0-based mapping.original.line = previous_original_line + 1; // Original column. previous_original_column = ((previous_original_column as i32) + fields[3]) as u32; mapping.original.column = previous_original_column; if fields.len() > 4 { // Original name. previous_name = ((previous_name as i32) + fields[4]) as u32; mapping.name = source_map.names[previous_name as usize].to_owned(); } } generated_mappings.push(mapping); } } fn sort_key(mapping: &Mapping) -> (u32, u32) { (mapping.generated.line, mapping.generated.column) } generated_mappings.sort_by(|a, b| sort_key(a).cmp(&sort_key(b))); Ok(Cache { generated_mappings: generated_mappings, source_root: match &source_map.sourceRoot { &Some(ref x) => x.to_owned(), &None => "".into() } }) } impl Cache { /** * Returns the original source, line, column and name information for the generated * source's line and column positions provided. * * # Arguments * * * line: The line number in the generated source. * * column: The column number in the generated source. 
* * # Examples * * ``` * use js_source_mapper::consume; * * let cache = consume(r#"{ "version": 3, "file": "foo.js", "sources": ["source.js"], "names": ["name1", "name1", "name3"], "mappings": ";EAACA;;IAEEA;;MAEEE", "sourceRoot": "http://example.com" }"#).unwrap(); * * println!("{:?}", cache.mapping_for_generated_position(2, 2)); * // => Mapping { * // generated: CodePosition { line: 2, column: 2 }, * // original: CodePosition { line: 1, column: 1 }, * // source: "source.js" * // name: "name1" * // } * ``` * */ pub fn mapping_for_generated_position(&self, line: u32, column: u32) -> Mapping { let matcher = |mapping: &Mapping| -> Ordering { (mapping.generated.line, mapping.generated.column).cmp(&(line, column)) }; match self.generated_mappings.binary_search_by(matcher) { Ok(index) => &self.generated_mappings[index], Err(index) => &self.generated_mappings[index] }.clone() } } macro_rules! assert_equal_mappings( ($a:expr, $b:expr) => ( if $a != $b { panic!(format!("\n\n{:?}\n\n!=\n\n{:?}\n\n", $a, $b)); } ); ); #[test] fn test_source_map_issue_64() { let cache = consume(r#"{ "version": 3, "file": "foo.js", "sourceRoot": "http://example.com/", "sources": ["/a"], "names": [], "mappings": "AACA", "sourcesContent": ["foo"] }"#).unwrap(); let expected = Mapping { generated: CodePosition { line: 1, column: 0 }, original: CodePosition { line: 2, column: 0 }, source: "/a".into(), name: "".into() }; let actual = cache.mapping_for_generated_position(1, 0); assert_equal_mappings!(actual, expected); } #[test] fn test_source_map_issue_72_duplicate_sources() { let cache = consume(r#"{ "version": 3, "file": "foo.js", "sources": ["source1.js", "source1.js", "source3.js"], "names": [], "mappings": ";EAAC;;IAEE;;MEEE", "sourceRoot": "http://example.com" }"#).unwrap(); { let expected = Mapping { generated: CodePosition { line: 2, column: 2 }, original: CodePosition { line: 1, column: 1 }, source: "source1.js".into(), name: "".into() }; let actual = 
cache.mapping_for_generated_position(2, 2); assert_equal_mappings!(actual, expected); } { let expected = Mapping { generated: CodePosition { line: 4, column: 4 }, original: CodePosition { line: 3, column: 3 }, source: "source1.js".into(), name: "".into() }; let actual = cache.mapping_for_generated_position(4, 4); assert_equal_mappings!(actual, expected); } { let expected = Mapping { generated: CodePosition { line: 6, column: 6 }, original: CodePosition { line: 5, column: 5 }, source: "source3.js".into(), name: "".into() }; let actual = cache.mapping_for_generated_position(6, 6); assert_equal_mappings!(actual, expected); } } #[test] fn test_source_map_issue_72_duplicate_names() { let cache = consume(r#"{ "version": 3, "file": "foo.js", "sources": ["source.js"], "names": ["name1", "name1", "name3"], "mappings": ";EAACA;;IAEEA;;MAEEE", "sourceRoot": "http://example.com" }"#).unwrap(); { let expected = Mapping { generated: CodePosition { line: 2, column: 2 }, original: CodePosition { line: 1, column: 1 }, source: "source.js".into(), name: "name1".into() }; let actual = cache.mapping_for_generated_position(2, 2); assert_equal_mappings!(actual, expected); } { let expected = Mapping { generated: CodePosition { line: 4, column: 4 }, original: CodePosition { line: 3, column: 3 }, source: "source.js".into(), name: "name1".into() }; let actual = cache.mapping_for_generated_position(4, 4); assert_equal_mappings!(actual, expected); } { let expected = Mapping { generated: CodePosition { line: 6, column: 6 }, original: CodePosition { line: 5, column: 5 }, source: "source.js".into(), name: "name3".into() }; let actual = cache.mapping_for_generated_position(6, 6); assert_equal_mappings!(actual, expected); } } Improve panic safety with malformed source maps use std::cmp::Ordering; use rustc_serialize::json; use base64_vlq; static SOURCE_MAP_VERSION: u32 = 3; #[allow(dead_code)] #[allow(non_snake_case)] #[derive(RustcDecodable)] struct SourceMap { version: u32, sources: Vec<String>, 
names: Vec<String>, sourceRoot: Option<String>, mappings: String, file: Option<String> // We skip this. Keeping megabytes of data that we do not care about // in memory seems reckless to caches. //sourcesContent: Option<vec<String>>, } #[derive(Clone, Eq, PartialEq, Debug)] pub struct CodePosition { /** Line number in a code file, starting from 1 */ pub line: u32, /** Column number in a code file, starting from 0 */ pub column: u32 } #[derive(Clone, Eq, PartialEq, Debug)] pub struct Mapping { /** The position in the generated file */ pub generated: CodePosition, /** The position in the corresponding original source file */ pub original: CodePosition, /** The original source file */ pub source: String, /** The original source name of the function/class, if applicable */ pub name: String } pub struct Cache { generated_mappings: Vec<Mapping>, /** The path prefix of mapping source paths */ pub source_root: String } /** * consume parses a SourceMap into a cache that can be queried for mappings * * The only parameter is the raw source map as a JSON string. * According to the [source map spec][source-map-spec], source maps have the following attributes: * * - version: Which version of the source map spec this map is following. * - sources: An array of URLs to the original source files. * - names: An array of identifiers which can be referrenced by individual mappings. * - sourceRoot: Optional. The URL root from which all sources are relative. * - sourcesContent: Optional. An array of contents of the original source files. * - mappings: A string of base64 VLQs which contain the actual mappings. * - file: Optional. The generated file this source map is associated with. 
* * Here is an example source map: * * ```json * { * "version": 3, * "file": "out.js", * "sourceRoot" : "", * "sources": ["foo.js", "bar.js"], * "names": ["src", "maps", "are", "fun"], * "mappings": "AA,AB;;ABCDE;" * } * ``` * * [source-map-spec]: https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit?pli=1# */ pub fn consume(source_map_json: &str) -> Result<Cache, String> { let source_map: SourceMap = match json::decode(source_map_json) { Ok(x) => x, Err(err) => return Err(format!("{}", err)) }; parse_mappings(&source_map) } fn parse_mappings(source_map: &SourceMap) -> Result<Cache, String>{ if source_map.version != SOURCE_MAP_VERSION { return Err("Only Source Map version 3 is implemented".into()) } let sources_length = source_map.sources.len() as u32; let names_length = source_map.names.len() as u32; let mut generated_mappings: Vec<Mapping> = Vec::new(); let mut generated_line: u32 = 0; let mut previous_original_line: u32 = 0; let mut previous_original_column: u32 = 0; let mut previous_source: u32 = 0; let mut previous_name: u32 = 0; for line in source_map.mappings.as_bytes().split(|&x| x == (';' as u8)) { generated_line += 1; let mut previous_generated_column: u32 = 0; for segment in line.split(|&x| x == (',' as u8)) { let segment_length = segment.len(); let mut fields: Vec<i32> = Vec::new(); let mut character_index = 0; while character_index < segment_length { match base64_vlq::decode(&segment[character_index..segment_length]) { Some((value, field_length)) => { fields.push(value); character_index += field_length; }, None => return Err("Invalid VLQ mapping field".into()) }; } if fields.len() < 1 { continue; } if fields.len() == 2 { return Err("Found a source, but no line and column".into()); } if fields.len() == 3 { return Err("Found a source and line, but no column".into()); } let mut mapping = Mapping { generated: CodePosition { line: generated_line, column: ((previous_generated_column as i32) + fields[0]) as u32 }, original: 
CodePosition { line: 0, column: 0 }, source: "".into(), name: "".into() }; previous_generated_column = mapping.generated.column; if fields.len() > 1 { // Original source. previous_source = ((previous_source as i32) + fields[1]) as u32; if previous_source < sources_length { mapping.source = source_map.sources[previous_source as usize].to_owned(); } else { return Err(format!("Invalid source map: reference to source index {} when source list length is {}", previous_source, sources_length)); } // Original line. previous_original_line = ((previous_original_line as i32) + fields[2]) as u32; // Lines are stored 0-based mapping.original.line = previous_original_line + 1; // Original column. previous_original_column = ((previous_original_column as i32) + fields[3]) as u32; mapping.original.column = previous_original_column; if fields.len() > 4 { // Original name. previous_name = ((previous_name as i32) + fields[4]) as u32; if previous_name < names_length { mapping.name = source_map.names[previous_name as usize].to_owned(); } else { return Err(format!("Invalid source map: reference to name index {} when name list length is {}", previous_name, names_length)); } } } generated_mappings.push(mapping); } } fn sort_key(mapping: &Mapping) -> (u32, u32) { (mapping.generated.line, mapping.generated.column) } generated_mappings.sort_by(|a, b| sort_key(a).cmp(&sort_key(b))); Ok(Cache { generated_mappings: generated_mappings, source_root: match &source_map.sourceRoot { &Some(ref x) => x.to_owned(), &None => "".into() } }) } impl Cache { /** * Returns the original source, line, column and name information for the generated * source's line and column positions provided. * * # Arguments * * * line: The line number in the generated source. * * column: The column number in the generated source. 
* * # Examples * * ``` * use js_source_mapper::consume; * * let cache = consume(r#"{ "version": 3, "file": "foo.js", "sources": ["source.js"], "names": ["name1", "name1", "name3"], "mappings": ";EAACA;;IAEEA;;MAEEE", "sourceRoot": "http://example.com" }"#).unwrap(); * * println!("{:?}", cache.mapping_for_generated_position(2, 2)); * // => Mapping { * // generated: CodePosition { line: 2, column: 2 }, * // original: CodePosition { line: 1, column: 1 }, * // source: "source.js" * // name: "name1" * // } * ``` * */ pub fn mapping_for_generated_position(&self, line: u32, column: u32) -> Mapping { let matcher = |mapping: &Mapping| -> Ordering { (mapping.generated.line, mapping.generated.column).cmp(&(line, column)) }; match self.generated_mappings.binary_search_by(matcher) { Ok(index) => &self.generated_mappings[index], Err(index) => &self.generated_mappings[index] }.clone() } } macro_rules! assert_equal_mappings( ($a:expr, $b:expr) => ( if $a != $b { panic!(format!("\n\n{:?}\n\n!=\n\n{:?}\n\n", $a, $b)); } ); ); #[test] fn test_source_map_issue_64() { let cache = consume(r#"{ "version": 3, "file": "foo.js", "sourceRoot": "http://example.com/", "sources": ["/a"], "names": [], "mappings": "AACA", "sourcesContent": ["foo"] }"#).unwrap(); let expected = Mapping { generated: CodePosition { line: 1, column: 0 }, original: CodePosition { line: 2, column: 0 }, source: "/a".into(), name: "".into() }; let actual = cache.mapping_for_generated_position(1, 0); assert_equal_mappings!(actual, expected); } #[test] fn test_source_map_issue_72_duplicate_sources() { let cache = consume(r#"{ "version": 3, "file": "foo.js", "sources": ["source1.js", "source1.js", "source3.js"], "names": [], "mappings": ";EAAC;;IAEE;;MEEE", "sourceRoot": "http://example.com" }"#).unwrap(); { let expected = Mapping { generated: CodePosition { line: 2, column: 2 }, original: CodePosition { line: 1, column: 1 }, source: "source1.js".into(), name: "".into() }; let actual = 
cache.mapping_for_generated_position(2, 2);
        assert_equal_mappings!(actual, expected);
    }
    {
        let expected = Mapping {
            generated: CodePosition { line: 4, column: 4 },
            original: CodePosition { line: 3, column: 3 },
            source: "source1.js".into(),
            name: "".into()
        };
        let actual = cache.mapping_for_generated_position(4, 4);
        assert_equal_mappings!(actual, expected);
    }
    {
        let expected = Mapping {
            generated: CodePosition { line: 6, column: 6 },
            original: CodePosition { line: 5, column: 5 },
            source: "source3.js".into(),
            name: "".into()
        };
        let actual = cache.mapping_for_generated_position(6, 6);
        assert_equal_mappings!(actual, expected);
    }
}

#[test]
fn test_source_map_issue_72_duplicate_names() {
    // Duplicate entries in `names` must be resolved per-index, not deduped.
    let cache = consume(r#"{ "version": 3, "file": "foo.js", "sources": ["source.js"], "names": ["name1", "name1", "name3"], "mappings": ";EAACA;;IAEEA;;MAEEE", "sourceRoot": "http://example.com" }"#).unwrap();

    for &(gen_pos, orig_pos, name) in &[(2u32, 1u32, "name1"), (4, 3, "name1"), (6, 5, "name3")] {
        let expected = Mapping {
            generated: CodePosition { line: gen_pos, column: gen_pos },
            original: CodePosition { line: orig_pos, column: orig_pos },
            source: "source.js".into(),
            name: name.into()
        };
        let actual = cache.mapping_for_generated_position(gen_pos, gen_pos);
        assert_equal_mappings!(actual, expected);
    }
}

#[test]
fn it_allows_omitting_source_root() {
    // `sourceRoot` is optional per the spec; its absence must not be an error.
    let cache_result: Result<Cache, String> = consume(r#"{ "version": 3, "file": "foo.js", "sources": ["source.js"], "names": ["name1", "name1", "name3"], "mappings": ";EAACA;;IAEEA;;MAEEE" }"#);
    if let Err(s) = cache_result {
        panic!(format!("Error due to omitting: '{}'", s))
    }
}

#[test]
fn it_rejects_older_source_map_revisions() {
    let cache_result = consume(r#"{ "version": 2, "file": "", "sources": ["source.js"], "names": ["name1", "name1", "name3"], "mappings": ";EAACA;;IAEEA;;MAEEE", "sourceRoot": "http://example.com" }"#);
    if cache_result.is_ok() {
        panic!("Source Map revision < 3 should be rejected")
    }
}

#[test]
fn it_does_not_panic_due_to_malformed_source_maps() {
    // Mappings reference sources/names that do not exist; parsing must
    // fail cleanly rather than index out of bounds.
    let cache_result = consume(r#"{ "version": 3, "file": "", "sources": [], "names": [], "mappings": ";EAACA;;IAEEA;;MAEEE" }"#);
    if cache_result.is_ok() {
        panic!("Invalid source maps should be rejected")
    }
}
#![allow(safe_extern_statics)] #![allow(non_upper_case_globals)] #![allow(dead_code)] #![allow(non_camel_case_types)] use ec; use partition::*; use partition::BlockSize::*; use partition::TxSize::*; use plane::*; const PLANES: usize = 3; const PARTITION_PLOFFSET: usize = 4; const PARTITION_CONTEXTS: usize = 16; const PARTITION_TYPES: usize = 4; const MI_SIZE_LOG2: usize = 2; const MI_SIZE: usize = (1 << MI_SIZE_LOG2); const MAX_MIB_SIZE_LOG2: usize = (MAX_SB_SIZE_LOG2 - MI_SIZE_LOG2); const MAX_MIB_SIZE: usize = (1 << MAX_MIB_SIZE_LOG2); const MAX_MIB_MASK: usize = (MAX_MIB_SIZE - 1); const MAX_SB_SIZE_LOG2: usize = 6; const MAX_SB_SIZE: usize = (1 << MAX_SB_SIZE_LOG2); const MAX_SB_SQUARE: usize = (MAX_SB_SIZE * MAX_SB_SIZE); const INTRA_MODES: usize = 13; const UV_INTRA_MODES: usize = 13; static mi_size_wide: [u8; BLOCK_SIZES_ALL] = [ 1, 1, 2, 2, 2, 4, 4, 4, 8, 8, 8, 16, 16, 1, 4, 2, 8, 4, 16]; static mi_size_high: [u8; BLOCK_SIZES_ALL] = [ 1, 2, 1, 2, 4, 2, 4, 8, 4, 8, 16, 8, 16, 4, 1, 8, 2, 16, 4]; static b_width_log2_lookup: [u8; BLOCK_SIZES_ALL] = [ 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 0, 2, 1, 3, 2, 4]; static b_height_log2_lookup: [u8; BLOCK_SIZES_ALL] = [ 0, 1, 0, 1, 2, 1, 2, 3, 2, 3, 4, 3, 4, 2, 0, 3, 1, 4, 2]; const tx_size_wide_log2: [usize; TX_SIZES_ALL] = [2, 3, 4, 5, 2, 3, 3, 4, 4, 5, 2, 4, 3, 5]; const tx_size_high_log2: [usize; TX_SIZES_ALL] = [2, 3, 4, 5, 3, 2, 4, 3, 5, 4, 4, 2, 5, 3]; const EXT_TX_SIZES: usize = 4; const EXT_TX_SET_TYPES: usize = 6; const EXT_TX_SETS_INTRA: usize = 3; const EXT_TX_SETS_INTER: usize = 4; // Number of transform types in each set type const num_ext_tx_set: [usize; EXT_TX_SET_TYPES] = [1, 2, 5, 7, 12, 16]; // Maps intra set index to the set type const ext_tx_set_type_intra: [TxSetType; EXT_TX_SETS_INTRA] = [ TxSetType::EXT_TX_SET_DCTONLY, TxSetType::EXT_TX_SET_DTT4_IDTX_1DDCT, TxSetType::EXT_TX_SET_DTT4_IDTX ]; // Maps inter set index to the set type #[allow(dead_code)] const ext_tx_set_type_inter: [TxSetType; 
EXT_TX_SETS_INTER] = [ TxSetType::EXT_TX_SET_DCTONLY, TxSetType::EXT_TX_SET_ALL16, TxSetType::EXT_TX_SET_DTT9_IDTX_1DDCT, TxSetType::EXT_TX_SET_DCT_IDTX ]; // Maps set types above to the indices used for intra const ext_tx_set_index_intra: [i8; EXT_TX_SET_TYPES] = [0, -1, 2, 1, -1, -1 ]; // Maps set types above to the indices used for inter const ext_tx_set_index_inter: [i8; EXT_TX_SET_TYPES] = [0, 3, -1, -1, 2, 1]; const av1_ext_tx_intra_ind: [[u32; TX_TYPES]; EXT_TX_SETS_INTRA] = [[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,],[1,5,6,4,0,0,0,0,0,0,2,3,0,0,0,0,],[1,3,4,2,0,0,0,0,0,0,0,0,0,0,0,0,],]; #[allow(dead_code)] const av1_ext_tx_inter_ind: [[usize; TX_TYPES]; EXT_TX_SETS_INTER] = [[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,],[1,5,6,4,0,0,0,0,0,0,2,3,0,0,0,0,],[1,3,4,2,0,0,0,0,0,0,0,0,0,0,0,0,],[0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,],]; const ext_tx_cnt_intra: [usize;EXT_TX_SETS_INTRA] = [ 1, 7, 5 ]; const av1_coefband_trans_4x4: [u8; 16] = [ 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5, ]; const TXSIZE_SQR_MAP: [TxSize; TX_SIZES_ALL] = [ TX_4X4, TX_8X8, TX_16X16, TX_32X32, TX_4X4, TX_4X4, TX_8X8, TX_8X8, TX_16X16, TX_16X16, TX_4X4, TX_4X4, TX_8X8, TX_8X8, ]; const TXSIZE_SQR_UP_MAP: [TxSize; TX_SIZES_ALL] = [ TX_4X4, TX_8X8, TX_16X16, TX_32X32, TX_8X8, TX_8X8, TX_16X16, TX_16X16, TX_32X32, TX_32X32, TX_16X16, TX_16X16, TX_32X32, TX_32X32, ]; // Generates 4 bit field in which each bit set to 1 represents // a blocksize partition 1111 means we split 64x64, 32x32, 16x16 // and 8x8. 
1000 means we just split the 64x64 to 32x32 static partition_context_lookup: [[u8; 2]; BLOCK_SIZES_ALL] = [ [ 15, 15 ], // 4X4 - [0b1111, 0b1111] [ 15, 14 ], // 4X8 - [0b1111, 0b1110] [ 14, 15 ], // 8X4 - [0b1110, 0b1111] [ 14, 14 ], // 8X8 - [0b1110, 0b1110] [ 14, 12 ], // 8X16 - [0b1110, 0b1100] [ 12, 14 ], // 16X8 - [0b1100, 0b1110] [ 12, 12 ], // 16X16 - [0b1100, 0b1100] [ 12, 8 ], // 16X32 - [0b1100, 0b1000] [ 8, 12 ], // 32X16 - [0b1000, 0b1100] [ 8, 8 ], // 32X32 - [0b1000, 0b1000] [ 8, 0 ], // 32X64 - [0b1000, 0b0000] [ 0, 8 ], // 64X32 - [0b0000, 0b1000] [ 0, 0 ], // 64X64 - [0b0000, 0b0000] [ 15, 12 ], // 4X16 - [0b1111, 0b1100] [ 12, 15 ], // 16X4 - [0b1100, 0b1111] [ 8, 14 ], // 8X32 - [0b1110, 0b1000] [ 14, 8 ], // 32X8 - [0b1000, 0b1110] [ 12, 0 ], // 16X64- [0b1100, 0b0000] [ 0, 12 ], // 64X16- [0b0000, 0b1100] ]; pub static subsize_lookup: [[BlockSize; BLOCK_SIZES_ALL]; PARTITION_TYPES] = [ [ // PARTITION_NONE // 4X4 BLOCK_4X4, // 4X8, 8X4, 8X8 BLOCK_4X8, BLOCK_8X4, BLOCK_8X8, // 8X16, 16X8, 16X16 BLOCK_8X16, BLOCK_16X8, BLOCK_16X16, // 16X32, 32X16, 32X32 BLOCK_16X32, BLOCK_32X16, BLOCK_32X32, // 32X64, 64X32, 64X64 BLOCK_32X64, BLOCK_64X32, BLOCK_64X64, // 4X16, 16X4, 8X32 BLOCK_4X16, BLOCK_16X4, BLOCK_8X32, // 32X8, 16X64, 64X16 BLOCK_32X8, BLOCK_16X64, BLOCK_64X16, ], [ // PARTITION_HORZ // 4X4 BLOCK_INVALID, // 4X8, 8X4, 8X8 BLOCK_INVALID, BLOCK_INVALID, BLOCK_8X4, // 8X16, 16X8, 16X16 BLOCK_INVALID, BLOCK_INVALID, BLOCK_16X8, // 16X32, 32X16, 32X32 BLOCK_INVALID, BLOCK_INVALID, BLOCK_32X16, // 32X64, 64X32, 64X64 BLOCK_INVALID, BLOCK_INVALID, BLOCK_64X32, // 4X16, 16X4, 8X32 BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID, // 32X8, 16X64, 64X16 BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID, ], [ // PARTITION_VERT // 4X4 BLOCK_INVALID, // 4X8, 8X4, 8X8 BLOCK_INVALID, BLOCK_INVALID, BLOCK_4X8, // 8X16, 16X8, 16X16 BLOCK_INVALID, BLOCK_INVALID, BLOCK_8X16, // 16X32, 32X16, 32X32 BLOCK_INVALID, BLOCK_INVALID, BLOCK_16X32, // 32X64, 64X32, 64X64 
BLOCK_INVALID, BLOCK_INVALID, BLOCK_32X64, // 4X16, 16X4, 8X32 BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID, // 32X8, 16X64, 64X16 BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID, ], [ // PARTITION_SPLIT // 4X4 BLOCK_INVALID, // 4X8, 8X4, 8X8 BLOCK_INVALID, BLOCK_INVALID, BLOCK_4X4, // 8X16, 16X8, 16X16 BLOCK_INVALID, BLOCK_INVALID, BLOCK_8X8, // 16X32, 32X16, 32X32 BLOCK_INVALID, BLOCK_INVALID, BLOCK_16X16, // 32X64, 64X32, 64X64 BLOCK_INVALID, BLOCK_INVALID, BLOCK_32X32, // 4X16, 16X4, 8X32 BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID, // 32X8, 16X64, 64X16 BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID, ] ]; #[derive(Copy,Clone,PartialEq)] #[allow(dead_code)] enum HeadToken { BlockZero = 0, Zero = 1, OneEOB = 2, OneNEOB = 3, TwoPlusEOB = 4, TwoPlusNEOB = 5, } #[derive(Copy,Clone,PartialEq)] #[allow(dead_code)] enum TailToken { Two = 0, Three = 1, Four = 2, Cat1 = 3, Cat2 = 4, Cat3 = 5, Cat4 = 6, Cat5 = 7, Cat6 = 8, } const PLANE_TYPES: usize = 2; const HEAD_TOKENS: usize = 5; const TAIL_TOKENS: usize = 9; const ENTROPY_TOKENS: usize = 12; const COEFF_CONTEXTS: usize = 6; const COEF_BANDS: usize = 6; const REF_TYPES: usize = 2; const SKIP_CONTEXTS: usize = 3; fn get_ext_tx_set_type(tx_size: TxSize, is_inter: bool, use_reduced_set: bool) -> TxSetType { let tx_size_sqr_up = TXSIZE_SQR_UP_MAP[tx_size as usize]; let tx_size_sqr = TXSIZE_SQR_MAP[tx_size as usize]; if tx_size_sqr > TxSize::TX_32X32 { TxSetType::EXT_TX_SET_DCTONLY } else if use_reduced_set { if is_inter { TxSetType::EXT_TX_SET_DCT_IDTX } else { TxSetType::EXT_TX_SET_DTT4_IDTX } } else if tx_size_sqr_up == TxSize::TX_32X32 { if is_inter { TxSetType::EXT_TX_SET_DCT_IDTX } else { TxSetType::EXT_TX_SET_DCTONLY } } else if is_inter { if tx_size_sqr == TxSize::TX_16X16 { TxSetType::EXT_TX_SET_DTT9_IDTX_1DDCT } else { TxSetType::EXT_TX_SET_ALL16 } } else { if tx_size_sqr == TxSize::TX_16X16 { TxSetType::EXT_TX_SET_DTT4_IDTX } else { TxSetType::EXT_TX_SET_DTT4_IDTX_1DDCT } } } fn get_ext_tx_set(tx_size: TxSize, 
is_inter: bool, use_reduced_set: bool) -> i8 { let set_type = get_ext_tx_set_type(tx_size, is_inter, use_reduced_set); if is_inter { ext_tx_set_index_inter[set_type as usize] } else { ext_tx_set_index_intra[set_type as usize] } } extern { static default_partition_cdf: [[u16; PARTITION_TYPES + 1]; PARTITION_CONTEXTS]; static default_kf_y_mode_cdf: [[[u16; INTRA_MODES + 1]; INTRA_MODES]; INTRA_MODES]; static default_uv_mode_cdf: [[u16; UV_INTRA_MODES + 1]; INTRA_MODES]; static default_intra_ext_tx_cdf: [[[[u16; TX_TYPES + 1]; INTRA_MODES]; EXT_TX_SIZES]; EXT_TX_SETS_INTRA]; static default_skip_cdfs: [[u16; 3];SKIP_CONTEXTS]; static default_coef_head_cdf_4x4: [CoeffModel; PLANE_TYPES]; static default_coef_head_cdf_8x8: [CoeffModel; PLANE_TYPES]; static default_coef_head_cdf_16x16: [CoeffModel; PLANE_TYPES]; static default_coef_head_cdf_32x32: [CoeffModel; PLANE_TYPES]; static default_coef_tail_cdf: [[CoeffModel; PLANE_TYPES]; TX_SIZES]; static av1_cat1_cdf0: [u16; 2]; static av1_cat2_cdf0: [u16; 4]; static av1_cat3_cdf0: [u16; 8]; static av1_cat4_cdf0: [u16; 16]; static av1_cat5_cdf0: [u16; 16]; static av1_cat5_cdf1: [u16; 2]; static av1_cat6_cdf0: [u16; 16]; static av1_cat6_cdf1: [u16; 16]; static av1_cat6_cdf2: [u16; 16]; static av1_cat6_cdf3: [u16; 16]; static av1_cat6_cdf4: [u16; 4]; static av1_intra_scan_orders: [[SCAN_ORDER; TX_TYPES]; TX_SIZES_ALL]; pub static exported_intra_mode_to_tx_type_context: &'static [TxType; INTRA_MODES]; } #[repr(C)] pub struct SCAN_ORDER { // FIXME: don't hardcode sizes pub scan: &'static [u16; 16], pub iscan: &'static [u16; 16], pub neighbors: &'static [u16; 17*2] } type CoeffModel = [[[[u16; ENTROPY_TOKENS + 1];COEFF_CONTEXTS];COEF_BANDS];REF_TYPES]; #[derive(Clone)] pub struct CDFContext { partition_cdf: [[u16; PARTITION_TYPES + 1]; PARTITION_CONTEXTS], kf_y_cdf: [[[u16; INTRA_MODES + 1]; INTRA_MODES]; INTRA_MODES], uv_mode_cdf: [[u16; INTRA_MODES + 1]; INTRA_MODES], intra_ext_tx_cdf: [[[[u16; TX_TYPES + 1]; INTRA_MODES]; 
EXT_TX_SIZES]; EXT_TX_SETS_INTRA], coef_head_cdfs: [[CoeffModel; PLANE_TYPES]; TX_SIZES], coef_tail_cdfs: [[CoeffModel; PLANE_TYPES]; TX_SIZES], skip_cdfs: [[u16; 3];SKIP_CONTEXTS], } impl CDFContext { pub fn new() -> CDFContext { CDFContext { partition_cdf: default_partition_cdf, kf_y_cdf: default_kf_y_mode_cdf, uv_mode_cdf: default_uv_mode_cdf, intra_ext_tx_cdf: default_intra_ext_tx_cdf, skip_cdfs: default_skip_cdfs, coef_head_cdfs: [default_coef_head_cdf_4x4, default_coef_head_cdf_8x8, default_coef_head_cdf_16x16, default_coef_head_cdf_32x32], coef_tail_cdfs: default_coef_tail_cdf, } } } const SUPERBLOCK_TO_PLANE_SHIFT: usize = MAX_SB_SIZE_LOG2; const SUPERBLOCK_TO_BLOCK_SHIFT: usize = MAX_MIB_SIZE_LOG2; const BLOCK_TO_PLANE_SHIFT: usize = SUPERBLOCK_TO_PLANE_SHIFT - SUPERBLOCK_TO_BLOCK_SHIFT; const LOCAL_BLOCK_MASK: usize = (1 << SUPERBLOCK_TO_BLOCK_SHIFT) - 1; /// Absolute offset in superblocks inside a plane, where a superblock is defined /// to be an N*N square where N = (1 << SUPERBLOCK_TO_PLANE_SHIFT). pub struct SuperBlockOffset { pub x: usize, pub y: usize } impl SuperBlockOffset { /// Offset of a block inside the current superblock. pub fn block_offset(&self, block_x: usize, block_y: usize) -> BlockOffset { BlockOffset { x: (self.x << SUPERBLOCK_TO_BLOCK_SHIFT) + block_x, y: (self.y << SUPERBLOCK_TO_BLOCK_SHIFT) + block_y, } } /// Offset of the top-left pixel of this block. pub fn plane_offset(&self, plane: &PlaneConfig) -> PlaneOffset { PlaneOffset { x: self.x << (SUPERBLOCK_TO_PLANE_SHIFT - plane.xdec), y: self.y << (SUPERBLOCK_TO_PLANE_SHIFT - plane.ydec), } } } /// Absolute offset in blocks inside a plane, where a block is defined /// to be an N*N square where N = (1 << BLOCK_TO_PLANE_SHIFT). pub struct BlockOffset { pub x: usize, pub y: usize } impl BlockOffset { /// Offset of the superblock in which this block is located. 
pub fn sb_offset(&self) -> SuperBlockOffset { SuperBlockOffset { x: self.x >> SUPERBLOCK_TO_BLOCK_SHIFT, y: self.y >> SUPERBLOCK_TO_BLOCK_SHIFT, } } /// Offset of the top-left pixel of this block. pub fn plane_offset(&self, plane: &PlaneConfig) -> PlaneOffset { let po = self.sb_offset().plane_offset(plane); let x_offset = self.x & LOCAL_BLOCK_MASK; let y_offset = self.y & LOCAL_BLOCK_MASK; PlaneOffset { x: po.x + (x_offset << BLOCK_TO_PLANE_SHIFT), y: po.y + (y_offset << BLOCK_TO_PLANE_SHIFT), } } pub fn y_in_sb(&self) -> usize { self.y % MAX_MIB_SIZE } } #[derive(Copy,Clone)] pub struct Block { pub mode: PredictionMode, pub skip: bool, } impl Block { pub fn default() -> Block { Block { mode: PredictionMode::DC_PRED, skip: false, } } pub fn is_inter(&self) -> bool { false } } #[derive(Clone, Default)] pub struct BlockContext { cols: usize, rows: usize, above_partition_context: Vec<u8>, left_partition_context: [u8; MAX_MIB_SIZE], above_coeff_context: [Vec<u8>; PLANES], left_coeff_context: [[u8; MAX_MIB_SIZE]; PLANES], blocks: Vec<Vec<Block>> } impl BlockContext { pub fn new(cols: usize, rows: usize) -> BlockContext { // Align power of two let aligned_cols = (cols + ((1 << MAX_MIB_SIZE_LOG2) - 1)) & !((1 << MAX_MIB_SIZE_LOG2) - 1); BlockContext { cols, rows, above_partition_context: vec![0; aligned_cols], left_partition_context: [0; MAX_MIB_SIZE], above_coeff_context: [vec![0; cols << (MI_SIZE_LOG2 - tx_size_wide_log2[0])], vec![0; cols << (MI_SIZE_LOG2 - tx_size_wide_log2[0])], vec![0; cols << (MI_SIZE_LOG2 - tx_size_wide_log2[0])],], left_coeff_context: [[0; MAX_MIB_SIZE]; PLANES], blocks: vec![vec![Block::default(); cols]; rows] } } pub fn at(&mut self, bo: &BlockOffset) -> &mut Block { &mut self.blocks[bo.y][bo.x] } pub fn above_of(&mut self, bo: &BlockOffset) -> Block { if bo.y > 0 { self.blocks[bo.y - 1][bo.x] } else { Block::default() } } pub fn left_of(&mut self, bo: &BlockOffset) -> Block { if bo.x > 0 { self.blocks[bo.y][bo.x - 1] } else { Block::default() 
} } fn coeff_context(&self, plane: usize, bo: &BlockOffset) -> usize { (self.above_coeff_context[plane][bo.x] + self.left_coeff_context[plane][bo.y_in_sb()]) as usize } fn set_coeff_context(&mut self, plane: usize, bo: &BlockOffset, value: bool) { let uvalue = value as u8; self.above_coeff_context[plane][bo.x] = uvalue; self.left_coeff_context[plane][bo.y_in_sb()] = uvalue; } pub fn reset_left_coeff_context(&mut self, plane: usize) { for c in self.left_coeff_context[plane].iter_mut() { *c = 0; } } fn partition_plane_context(&self, bo: &BlockOffset, bsize: BlockSize) -> usize { // TODO: this should be way simpler without sub8x8 let above_ctx = self.above_partition_context[bo.x]; let left_ctx = self.left_partition_context[bo.y_in_sb()]; let bsl = b_width_log2_lookup[bsize as usize] - b_width_log2_lookup[BlockSize::BLOCK_8X8 as usize]; let above = (above_ctx >> bsl) & 1; let left = (left_ctx >> bsl) & 1; assert!(b_width_log2_lookup[bsize as usize] == b_height_log2_lookup[bsize as usize]); ((left * 2 + above) + bsl) as usize * PARTITION_PLOFFSET } pub fn update_partition_context(&mut self, bo: &BlockOffset, subsize : BlockSize, bsize: BlockSize) { #[allow(dead_code)] // TODO(yushin): If CONFIG_EXT_PARTITION_TYPES is enabled, use bw and bh //let bw = mi_size_wide[bsize as usize]; //let bh = mi_size_high[bsize as usize]; let bs = mi_size_wide[bsize as usize]; let above_ctx = &mut self.above_partition_context[bo.x..bo.x + bs as usize]; let left_ctx = &mut self.left_partition_context[bo.y_in_sb()..bo.y_in_sb() + bs as usize]; // update the partition context at the end notes. set partition bits // of block sizes larger than the current one to be one, and partition // bits of smaller block sizes to be zero. 
for i in 0..bs { above_ctx[i as usize] = partition_context_lookup[subsize as usize][0]; } for i in 0..bs { left_ctx[i as usize] = partition_context_lookup[subsize as usize][1]; } } fn skip_context(&mut self, bo: &BlockOffset) -> usize { (self.above_of(bo).skip as usize) + (self.left_of(bo).skip as usize) } } #[derive(Clone)] pub struct ContextWriterCheckpoint { pub w: ec::WriterCheckpoint, pub fc: CDFContext, pub bc: BlockContext } pub struct ContextWriter { pub w: ec::Writer, pub fc: CDFContext, pub bc: BlockContext } impl ContextWriter { pub fn write_partition(&mut self, p: PartitionType, bsize: BlockSize) { let bo = BlockOffset { x: 0, y: 0 }; let ctx = self.bc.partition_plane_context(&bo, bsize); self.w.symbol(p as u32, &mut self.fc.partition_cdf[ctx], PARTITION_TYPES); } pub fn write_intra_mode_kf(&mut self, bo: &BlockOffset, mode: PredictionMode) { let above_mode = self.bc.above_of(bo).mode as usize; let left_mode = self.bc.left_of(bo).mode as usize; let cdf = &mut self.fc.kf_y_cdf[above_mode][left_mode]; self.w.symbol(mode as u32, cdf, INTRA_MODES); } pub fn write_intra_uv_mode(&mut self, uv_mode: PredictionMode, y_mode: PredictionMode) { let cdf = &mut self.fc.uv_mode_cdf[y_mode as usize]; self.w.symbol(uv_mode as u32, cdf, INTRA_MODES); } pub fn write_tx_type(&mut self, tx_type: TxType, y_mode: PredictionMode) { let tx_size = TxSize::TX_4X4; let square_tx_size = TXSIZE_SQR_MAP[tx_size as usize]; let eset = get_ext_tx_set(tx_size, false, true); if eset > 0 { self.w.symbol( av1_ext_tx_intra_ind[eset as usize][tx_type as usize], &mut self.fc.intra_ext_tx_cdf[eset as usize][square_tx_size as usize][y_mode as usize], ext_tx_cnt_intra[eset as usize]); } } pub fn write_skip(&mut self, bo: &BlockOffset, skip: bool) { let ctx = self.bc.skip_context(bo); self.w.symbol(skip as u32, &mut self.fc.skip_cdfs[ctx], 2); } pub fn write_token_block_zero(&mut self, plane: usize, bo: &BlockOffset) { let plane_type = if plane > 0 { 1 } else { 0 }; let tx_size_ctx = 
TXSIZE_SQR_MAP[TxSize::TX_4X4 as usize] as usize; let ref_type = 0; let band = 0; let ctx = self.bc.coeff_context(plane, bo); let cdf = &mut self.fc.coef_head_cdfs[tx_size_ctx][plane_type][ref_type][band][ctx]; //println!("encoding token band={} ctx={}", band, ctx); self.w.symbol(0, cdf, HEAD_TOKENS + 1); self.bc.set_coeff_context(plane, bo, false); } pub fn write_coeffs(&mut self, plane: usize, bo: &BlockOffset, coeffs_in: &[i32], tx_size: TxSize, tx_type: TxType) { let scan_order = &av1_intra_scan_orders[tx_size as usize][tx_type as usize]; let scan = scan_order.scan; let mut coeffs = [0 as i32; 16]; for i in 0..16 { coeffs[i] = coeffs_in[scan[i] as usize]; } let mut nz_coeff = 0; for (i, v) in coeffs.iter().enumerate() { if *v != 0 { nz_coeff = i + 1; } } if nz_coeff == 0 { self.write_token_block_zero(plane, bo); return; } let tx_size = TxSize::TX_4X4; let plane_type = if plane > 0 { 1 } else { 0 }; let tx_size_ctx = TXSIZE_SQR_MAP[tx_size as usize] as usize; let ref_type = 0; let neighbors = scan_order.neighbors; let mut token_cache = [0 as u8; 64*64]; for (i, v) in coeffs.iter().enumerate() { let vabs = v.abs() as u32; let first = i == 0; let last = i == (nz_coeff - 1); let band = av1_coefband_trans_4x4[i]; let ctx = if first { self.bc.coeff_context(plane, bo) } else { ((1 + token_cache[neighbors[2 * i + 0] as usize] + token_cache[neighbors[2 * i + 1] as usize]) >> 1) as usize }; let cdf = &mut self.fc.coef_head_cdfs[tx_size_ctx][plane_type][ref_type][band as usize][ctx]; match (vabs, last) { (0,_) => { self.w.symbol(HeadToken::Zero as u32 - !first as u32, cdf, HEAD_TOKENS + (first as usize)); continue }, (1, false) => self.w.symbol(HeadToken::OneNEOB as u32 - !first as u32, cdf, HEAD_TOKENS + (first as usize)), (1, true) => self.w.symbol(HeadToken::OneEOB as u32 - !first as u32, cdf, HEAD_TOKENS + (first as usize)), (_, false) => self.w.symbol(HeadToken::TwoPlusNEOB as u32 - !first as u32, cdf, HEAD_TOKENS + (first as usize)), (_, true) => 
self.w.symbol(HeadToken::TwoPlusEOB as u32 - !first as u32, cdf, HEAD_TOKENS + (first as usize)), }; let tailcdf = &mut self.fc.coef_tail_cdfs[tx_size_ctx][plane_type][ref_type][band as usize][ctx as usize]; match vabs { 0|1 => {}, 2 => self.w.symbol(TailToken::Two as u32, tailcdf, TAIL_TOKENS), 3 => self.w.symbol(TailToken::Three as u32, tailcdf, TAIL_TOKENS), 4 => self.w.symbol(TailToken::Four as u32, tailcdf, TAIL_TOKENS), 5...6 => { self.w.symbol(TailToken::Cat1 as u32, tailcdf, TAIL_TOKENS); self.w.cdf(vabs - 5, &av1_cat1_cdf0); } 7...10 => { self.w.symbol(TailToken::Cat2 as u32, tailcdf, TAIL_TOKENS); self.w.cdf(vabs - 7, &av1_cat2_cdf0); } 11...18 => { self.w.symbol(TailToken::Cat3 as u32, tailcdf, TAIL_TOKENS); self.w.cdf(vabs - 11, &av1_cat3_cdf0); } 19...34 => { self.w.symbol(TailToken::Cat4 as u32, tailcdf, TAIL_TOKENS); self.w.cdf(vabs - 19, &av1_cat4_cdf0); } 35...66 => { self.w.symbol(TailToken::Cat5 as u32, tailcdf, TAIL_TOKENS); self.w.cdf((vabs - 35) & 0xf, &av1_cat5_cdf0); self.w.cdf(((vabs - 35) >> 4) & 0x1, &av1_cat5_cdf1); } _ => { self.w.symbol(TailToken::Cat6 as u32, tailcdf, TAIL_TOKENS); let tx_offset = tx_size as u32 - TxSize::TX_4X4 as u32; let bit_depth = 8; let bits = bit_depth + 3 + tx_offset; self.w.cdf((vabs - 67) & 0xf, &av1_cat6_cdf0); self.w.cdf(((vabs - 67) >> 4) & 0xf, &av1_cat6_cdf1); self.w.cdf(((vabs - 67) >> 8) & 0xf, &av1_cat6_cdf2); if bits > 12 { self.w.cdf(((vabs - 67) >> 12) & 0xf, &av1_cat6_cdf3); } if bits > 16 { self.w.cdf(((vabs - 67) >> 16) & 0x3, &av1_cat6_cdf4); } } }; self.w.bool(*v < 0, 16384); let energy_class = match vabs { 0 => 0, 1 => 1, 2 => 2, 3|4 => 3, 5...10 => 4, _ => 5, }; token_cache[scan[i] as usize] = energy_class; if last { break; } } self.bc.set_coeff_context(plane, bo, true); } pub fn checkpoint(&mut self) -> ContextWriterCheckpoint { ContextWriterCheckpoint { w: self.w.checkpoint(), fc: self.fc.clone(), bc: self.bc.clone() } } pub fn rollback(&mut self, checkpoint: ContextWriterCheckpoint) { 
self.w.rollback(&checkpoint.w); self.fc = checkpoint.fc.clone(); self.bc = checkpoint.bc.clone(); } } Use static instead of const for array To avoid the multiple copies of array whenever it is referenced, also more importantly, the generated asm code seems slower if const is used w/o '&', i.e. referencing. #![allow(safe_extern_statics)] #![allow(non_upper_case_globals)] #![allow(dead_code)] #![allow(non_camel_case_types)] use ec; use partition::*; use partition::BlockSize::*; use partition::TxSize::*; use plane::*; const PLANES: usize = 3; const PARTITION_PLOFFSET: usize = 4; const PARTITION_CONTEXTS: usize = 16; const PARTITION_TYPES: usize = 4; const MI_SIZE_LOG2: usize = 2; const MI_SIZE: usize = (1 << MI_SIZE_LOG2); const MAX_MIB_SIZE_LOG2: usize = (MAX_SB_SIZE_LOG2 - MI_SIZE_LOG2); const MAX_MIB_SIZE: usize = (1 << MAX_MIB_SIZE_LOG2); const MAX_MIB_MASK: usize = (MAX_MIB_SIZE - 1); const MAX_SB_SIZE_LOG2: usize = 6; const MAX_SB_SIZE: usize = (1 << MAX_SB_SIZE_LOG2); const MAX_SB_SQUARE: usize = (MAX_SB_SIZE * MAX_SB_SIZE); const INTRA_MODES: usize = 13; const UV_INTRA_MODES: usize = 13; static mi_size_wide: [u8; BLOCK_SIZES_ALL] = [ 1, 1, 2, 2, 2, 4, 4, 4, 8, 8, 8, 16, 16, 1, 4, 2, 8, 4, 16]; static mi_size_high: [u8; BLOCK_SIZES_ALL] = [ 1, 2, 1, 2, 4, 2, 4, 8, 4, 8, 16, 8, 16, 4, 1, 8, 2, 16, 4]; static b_width_log2_lookup: [u8; 20] = [0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 0, 2, 1, 3]; static b_height_log2_lookup: [u8; 20] = [ 0, 0, 0, 0, 1, 0, 1, 2, 1, 2, 3, 2, 3, 4, 3, 4, 2, 0, 3, 1]; static tx_size_wide_log2: [usize; 14] = [2, 3, 4, 5, 2, 3, 3, 4, 4, 5, 2, 4, 3, 5]; static tx_size_high_log2: [usize; 14] = [2, 3, 4, 5, 3, 2, 4, 3, 5, 4, 4, 2, 5, 3]; const EXT_TX_SIZES: usize = 4; const EXT_TX_SET_TYPES: usize = 6; const EXT_TX_SETS_INTRA: usize = 3; const EXT_TX_SETS_INTER: usize = 4; // Number of transform types in each set type static num_ext_tx_set: [usize; EXT_TX_SET_TYPES] = [1, 2, 5, 7, 12, 16]; // Maps intra set index to the set type 
static ext_tx_set_type_intra: [TxSetType; EXT_TX_SETS_INTRA] = [ TxSetType::EXT_TX_SET_DCTONLY, TxSetType::EXT_TX_SET_DTT4_IDTX_1DDCT, TxSetType::EXT_TX_SET_DTT4_IDTX ]; // Maps inter set index to the set type #[allow(dead_code)] static ext_tx_set_type_inter: [TxSetType; EXT_TX_SETS_INTER] = [ TxSetType::EXT_TX_SET_DCTONLY, TxSetType::EXT_TX_SET_ALL16, TxSetType::EXT_TX_SET_DTT9_IDTX_1DDCT, TxSetType::EXT_TX_SET_DCT_IDTX ]; // Maps set types above to the indices used for intra static ext_tx_set_index_intra: [i8; EXT_TX_SET_TYPES] = [0, -1, 2, 1, -1, -1 ]; // Maps set types above to the indices used for inter static ext_tx_set_index_inter: [i8; EXT_TX_SET_TYPES] = [0, 3, -1, -1, 2, 1]; static av1_ext_tx_intra_ind: [[u32; TX_TYPES]; EXT_TX_SETS_INTRA] = [[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,],[1,5,6,4,0,0,0,0,0,0,2,3,0,0,0,0,],[1,3,4,2,0,0,0,0,0,0,0,0,0,0,0,0,],]; #[allow(dead_code)] static av1_ext_tx_inter_ind: [[usize; TX_TYPES]; EXT_TX_SETS_INTER] = [[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,],[1,5,6,4,0,0,0,0,0,0,2,3,0,0,0,0,],[1,3,4,2,0,0,0,0,0,0,0,0,0,0,0,0,],[0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,],]; static ext_tx_cnt_intra: [usize;EXT_TX_SETS_INTRA] = [ 1, 7, 5 ]; static av1_coefband_trans_4x4: [u8; 16] = [ 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5, ]; static TXSIZE_SQR_MAP: [TxSize; TX_SIZES_ALL] = [ TX_4X4, TX_8X8, TX_16X16, TX_32X32, TX_4X4, TX_4X4, TX_8X8, TX_8X8, TX_16X16, TX_16X16, TX_4X4, TX_4X4, TX_8X8, TX_8X8, ]; static TXSIZE_SQR_UP_MAP: [TxSize; TX_SIZES_ALL] = [ TX_4X4, TX_8X8, TX_16X16, TX_32X32, TX_8X8, TX_8X8, TX_16X16, TX_16X16, TX_32X32, TX_32X32, TX_16X16, TX_16X16, TX_32X32, TX_32X32, ]; // Generates 4 bit field in which each bit set to 1 represents // a blocksize partition 1111 means we split 64x64, 32x32, 16x16 // and 8x8. 
1000 means we just split the 64x64 to 32x32
// Per-blocksize partition context seeds: [above, left]. The above value is
// derived from the block *width*, the left value from the block *height*
// (wider/taller blocks have fewer bits set), as the binary comments show.
static partition_context_lookup: [[u8; 2]; BLOCK_SIZES_ALL] = [
  [ 15, 15 ],  // 4X4   - [0b1111, 0b1111]
  [ 15, 14 ],  // 4X8   - [0b1111, 0b1110]
  [ 14, 15 ],  // 8X4   - [0b1110, 0b1111]
  [ 14, 14 ],  // 8X8   - [0b1110, 0b1110]
  [ 14, 12 ],  // 8X16  - [0b1110, 0b1100]
  [ 12, 14 ],  // 16X8  - [0b1100, 0b1110]
  [ 12, 12 ],  // 16X16 - [0b1100, 0b1100]
  [ 12, 8 ],   // 16X32 - [0b1100, 0b1000]
  [ 8, 12 ],   // 32X16 - [0b1000, 0b1100]
  [ 8, 8 ],    // 32X32 - [0b1000, 0b1000]
  [ 8, 0 ],    // 32X64 - [0b1000, 0b0000]
  [ 0, 8 ],    // 64X32 - [0b0000, 0b1000]
  [ 0, 0 ],    // 64X64 - [0b0000, 0b0000]
  [ 15, 12 ],  // 4X16  - [0b1111, 0b1100]
  [ 12, 15 ],  // 16X4  - [0b1100, 0b1111]
  // BUGFIX: the 8X32 and 32X8 entries were swapped ([8, 14] / [14, 8]).
  // Above context comes from the width (8 -> 0b1110, 32 -> 0b1000); the
  // corrected values now match their binary comments and the 16X64/64X16
  // pattern below, and agree with libaom's partition_context_lookup.
  [ 14, 8 ],   // 8X32  - [0b1110, 0b1000]
  [ 8, 14 ],   // 32X8  - [0b1000, 0b1110]
  [ 12, 0 ],   // 16X64 - [0b1100, 0b0000]
  [ 0, 12 ],   // 64X16 - [0b0000, 0b1100]
];

/// subsize_lookup[partition][bsize] is the child block size produced by
/// applying `partition` to `bsize`; BLOCK_INVALID marks disallowed combos.
pub static subsize_lookup: [[BlockSize; BLOCK_SIZES_ALL]; PARTITION_TYPES] = [
  [     // PARTITION_NONE
    // 4X4
    BLOCK_4X4,
    // 4X8, 8X4, 8X8
    BLOCK_4X8, BLOCK_8X4, BLOCK_8X8,
    // 8X16, 16X8, 16X16
    BLOCK_8X16, BLOCK_16X8, BLOCK_16X16,
    // 16X32, 32X16, 32X32
    BLOCK_16X32, BLOCK_32X16, BLOCK_32X32,
    // 32X64, 64X32, 64X64
    BLOCK_32X64, BLOCK_64X32, BLOCK_64X64,
    // 4X16, 16X4, 8X32
    BLOCK_4X16, BLOCK_16X4, BLOCK_8X32,
    // 32X8, 16X64, 64X16
    BLOCK_32X8, BLOCK_16X64, BLOCK_64X16,
  ], [  // PARTITION_HORZ
    // 4X4
    BLOCK_INVALID,
    // 4X8, 8X4, 8X8
    BLOCK_INVALID, BLOCK_INVALID, BLOCK_8X4,
    // 8X16, 16X8, 16X16
    BLOCK_INVALID, BLOCK_INVALID, BLOCK_16X8,
    // 16X32, 32X16, 32X32
    BLOCK_INVALID, BLOCK_INVALID, BLOCK_32X16,
    // 32X64, 64X32, 64X64
    BLOCK_INVALID, BLOCK_INVALID, BLOCK_64X32,
    // 4X16, 16X4, 8X32
    BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID,
    // 32X8, 16X64, 64X16
    BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID,
  ], [  // PARTITION_VERT
    // 4X4
    BLOCK_INVALID,
    // 4X8, 8X4, 8X8
    BLOCK_INVALID, BLOCK_INVALID, BLOCK_4X8,
    // 8X16, 16X8, 16X16
    BLOCK_INVALID, BLOCK_INVALID, BLOCK_8X16,
    // 16X32, 32X16, 32X32
    BLOCK_INVALID, BLOCK_INVALID, BLOCK_16X32,
    // 32X64, 64X32, 64X64
BLOCK_INVALID, BLOCK_INVALID, BLOCK_32X64,
    // 4X16, 16X4, 8X32
    BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID,
    // 32X8, 16X64, 64X16
    BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID,
  ], [  // PARTITION_SPLIT
    // 4X4
    BLOCK_INVALID,
    // 4X8, 8X4, 8X8
    BLOCK_INVALID, BLOCK_INVALID, BLOCK_4X4,
    // 8X16, 16X8, 16X16
    BLOCK_INVALID, BLOCK_INVALID, BLOCK_8X8,
    // 16X32, 32X16, 32X32
    BLOCK_INVALID, BLOCK_INVALID, BLOCK_16X16,
    // 32X64, 64X32, 64X64
    BLOCK_INVALID, BLOCK_INVALID, BLOCK_32X32,
    // 4X16, 16X4, 8X32
    BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID,
    // 32X8, 16X64, 64X16
    BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID,
  ]
];

// First coefficient symbol: magnitude class (zero / one / two-plus) combined
// with whether this is the last nonzero coefficient (EOB vs NEOB). See
// write_coeffs, which subtracts 1 for non-first positions.
#[derive(Copy,Clone,PartialEq)]
#[allow(dead_code)]
enum HeadToken {
  BlockZero = 0,
  Zero = 1,
  OneEOB = 2,
  OneNEOB = 3,
  TwoPlusEOB = 4,
  TwoPlusNEOB = 5,
}

// Second coefficient symbol, refining magnitudes >= 2. The CatN tokens select
// an extra-bits category whose remainder is coded with the av1_catN_cdf* tables.
#[derive(Copy,Clone,PartialEq)]
#[allow(dead_code)]
enum TailToken {
  Two = 0,
  Three = 1,
  Four = 2,
  Cat1 = 3,
  Cat2 = 4,
  Cat3 = 5,
  Cat4 = 6,
  Cat5 = 7,
  Cat6 = 8,
}

const PLANE_TYPES: usize = 2;     // plane 0 vs. planes 1/2 (see write_coeffs)
const HEAD_TOKENS: usize = 5;
const TAIL_TOKENS: usize = 9;
const ENTROPY_TOKENS: usize = 12;
const COEFF_CONTEXTS: usize = 6;
const COEF_BANDS: usize = 6;
const REF_TYPES: usize = 2;
const SKIP_CONTEXTS: usize = 3;

// Picks the extended-transform set type for a given transform size,
// intra/inter mode, and the reduced-set flag.
fn get_ext_tx_set_type(tx_size: TxSize, is_inter: bool, use_reduced_set: bool) -> TxSetType {
  let tx_size_sqr_up = TXSIZE_SQR_UP_MAP[tx_size as usize];
  let tx_size_sqr = TXSIZE_SQR_MAP[tx_size as usize];

  if tx_size_sqr > TxSize::TX_32X32 {
    TxSetType::EXT_TX_SET_DCTONLY
  } else if use_reduced_set {
    if is_inter { TxSetType::EXT_TX_SET_DCT_IDTX } else { TxSetType::EXT_TX_SET_DTT4_IDTX }
  } else if tx_size_sqr_up == TxSize::TX_32X32 {
    if is_inter { TxSetType::EXT_TX_SET_DCT_IDTX } else { TxSetType::EXT_TX_SET_DCTONLY }
  } else if is_inter {
    if tx_size_sqr == TxSize::TX_16X16 {
      TxSetType::EXT_TX_SET_DTT9_IDTX_1DDCT
    } else {
      TxSetType::EXT_TX_SET_ALL16
    }
  } else {
    if tx_size_sqr == TxSize::TX_16X16 {
      TxSetType::EXT_TX_SET_DTT4_IDTX
    } else {
      TxSetType::EXT_TX_SET_DTT4_IDTX_1DDCT
    }
  }
}

// Maps the set type chosen above through the ext_tx_set_index_* tables to the
// cdf index used by the writer (-1 entries are unused combinations).
fn get_ext_tx_set(tx_size: TxSize,
is_inter: bool, use_reduced_set: bool) -> i8 { let set_type = get_ext_tx_set_type(tx_size, is_inter, use_reduced_set); if is_inter { ext_tx_set_index_inter[set_type as usize] } else { ext_tx_set_index_intra[set_type as usize] } } extern { static default_partition_cdf: [[u16; PARTITION_TYPES + 1]; PARTITION_CONTEXTS]; static default_kf_y_mode_cdf: [[[u16; INTRA_MODES + 1]; INTRA_MODES]; INTRA_MODES]; static default_uv_mode_cdf: [[u16; UV_INTRA_MODES + 1]; INTRA_MODES]; static default_intra_ext_tx_cdf: [[[[u16; TX_TYPES + 1]; INTRA_MODES]; EXT_TX_SIZES]; EXT_TX_SETS_INTRA]; static default_skip_cdfs: [[u16; 3];SKIP_CONTEXTS]; static default_coef_head_cdf_4x4: [CoeffModel; PLANE_TYPES]; static default_coef_head_cdf_8x8: [CoeffModel; PLANE_TYPES]; static default_coef_head_cdf_16x16: [CoeffModel; PLANE_TYPES]; static default_coef_head_cdf_32x32: [CoeffModel; PLANE_TYPES]; static default_coef_tail_cdf: [[CoeffModel; PLANE_TYPES]; TX_SIZES]; static av1_cat1_cdf0: [u16; 2]; static av1_cat2_cdf0: [u16; 4]; static av1_cat3_cdf0: [u16; 8]; static av1_cat4_cdf0: [u16; 16]; static av1_cat5_cdf0: [u16; 16]; static av1_cat5_cdf1: [u16; 2]; static av1_cat6_cdf0: [u16; 16]; static av1_cat6_cdf1: [u16; 16]; static av1_cat6_cdf2: [u16; 16]; static av1_cat6_cdf3: [u16; 16]; static av1_cat6_cdf4: [u16; 4]; static av1_intra_scan_orders: [[SCAN_ORDER; TX_TYPES]; TX_SIZES_ALL]; pub static exported_intra_mode_to_tx_type_context: &'static [TxType; INTRA_MODES]; } #[repr(C)] pub struct SCAN_ORDER { // FIXME: don't hardcode sizes pub scan: &'static [u16; 16], pub iscan: &'static [u16; 16], pub neighbors: &'static [u16; 17*2] } type CoeffModel = [[[[u16; ENTROPY_TOKENS + 1];COEFF_CONTEXTS];COEF_BANDS];REF_TYPES]; #[derive(Clone)] pub struct CDFContext { partition_cdf: [[u16; PARTITION_TYPES + 1]; PARTITION_CONTEXTS], kf_y_cdf: [[[u16; INTRA_MODES + 1]; INTRA_MODES]; INTRA_MODES], uv_mode_cdf: [[u16; INTRA_MODES + 1]; INTRA_MODES], intra_ext_tx_cdf: [[[[u16; TX_TYPES + 1]; INTRA_MODES]; 
EXT_TX_SIZES]; EXT_TX_SETS_INTRA],
  coef_head_cdfs: [[CoeffModel; PLANE_TYPES]; TX_SIZES],
  coef_tail_cdfs: [[CoeffModel; PLANE_TYPES]; TX_SIZES],
  skip_cdfs: [[u16; 3];SKIP_CONTEXTS],
}

impl CDFContext {
  /// Builds a CDF context seeded from the `default_*` probability tables
  /// declared in the extern block above.
  pub fn new() -> CDFContext {
    CDFContext {
      partition_cdf: default_partition_cdf,
      kf_y_cdf: default_kf_y_mode_cdf,
      uv_mode_cdf: default_uv_mode_cdf,
      intra_ext_tx_cdf: default_intra_ext_tx_cdf,
      skip_cdfs: default_skip_cdfs,
      // The head cdfs are exported one static per transform size; gather
      // them into a single TX_SIZES-indexed array here.
      coef_head_cdfs: [default_coef_head_cdf_4x4,
                       default_coef_head_cdf_8x8,
                       default_coef_head_cdf_16x16,
                       default_coef_head_cdf_32x32],
      coef_tail_cdfs: default_coef_tail_cdf,
    }
  }
}

const SUPERBLOCK_TO_PLANE_SHIFT: usize = MAX_SB_SIZE_LOG2;
const SUPERBLOCK_TO_BLOCK_SHIFT: usize = MAX_MIB_SIZE_LOG2;
// log2 of the pixel size of one block edge.
const BLOCK_TO_PLANE_SHIFT: usize = SUPERBLOCK_TO_PLANE_SHIFT - SUPERBLOCK_TO_BLOCK_SHIFT;
// Mask extracting a block's coordinate within its superblock.
const LOCAL_BLOCK_MASK: usize = (1 << SUPERBLOCK_TO_BLOCK_SHIFT) - 1;

/// Absolute offset in superblocks inside a plane, where a superblock is defined
/// to be an N*N square where N = (1 << SUPERBLOCK_TO_PLANE_SHIFT).
pub struct SuperBlockOffset { pub x: usize, pub y: usize }

impl SuperBlockOffset {
  /// Offset of a block inside the current superblock.
  pub fn block_offset(&self, block_x: usize, block_y: usize) -> BlockOffset {
    BlockOffset {
      x: (self.x << SUPERBLOCK_TO_BLOCK_SHIFT) + block_x,
      y: (self.y << SUPERBLOCK_TO_BLOCK_SHIFT) + block_y,
    }
  }

  /// Offset of the top-left pixel of this block.
  pub fn plane_offset(&self, plane: &PlaneConfig) -> PlaneOffset {
    // xdec/ydec reduce the shift for decimated planes.
    PlaneOffset {
      x: self.x << (SUPERBLOCK_TO_PLANE_SHIFT - plane.xdec),
      y: self.y << (SUPERBLOCK_TO_PLANE_SHIFT - plane.ydec),
    }
  }
}

/// Absolute offset in blocks inside a plane, where a block is defined
/// to be an N*N square where N = (1 << BLOCK_TO_PLANE_SHIFT).
pub struct BlockOffset { pub x: usize, pub y: usize }

impl BlockOffset {
  /// Offset of the superblock in which this block is located.
pub fn sb_offset(&self) -> SuperBlockOffset { SuperBlockOffset { x: self.x >> SUPERBLOCK_TO_BLOCK_SHIFT, y: self.y >> SUPERBLOCK_TO_BLOCK_SHIFT, } } /// Offset of the top-left pixel of this block. pub fn plane_offset(&self, plane: &PlaneConfig) -> PlaneOffset { let po = self.sb_offset().plane_offset(plane); let x_offset = self.x & LOCAL_BLOCK_MASK; let y_offset = self.y & LOCAL_BLOCK_MASK; PlaneOffset { x: po.x + (x_offset << BLOCK_TO_PLANE_SHIFT), y: po.y + (y_offset << BLOCK_TO_PLANE_SHIFT), } } pub fn y_in_sb(&self) -> usize { self.y % MAX_MIB_SIZE } } #[derive(Copy,Clone)] pub struct Block { pub mode: PredictionMode, pub skip: bool, } impl Block { pub fn default() -> Block { Block { mode: PredictionMode::DC_PRED, skip: false, } } pub fn is_inter(&self) -> bool { false } } #[derive(Clone, Default)] pub struct BlockContext { cols: usize, rows: usize, above_partition_context: Vec<u8>, left_partition_context: [u8; MAX_MIB_SIZE], above_coeff_context: [Vec<u8>; PLANES], left_coeff_context: [[u8; MAX_MIB_SIZE]; PLANES], blocks: Vec<Vec<Block>> } impl BlockContext { pub fn new(cols: usize, rows: usize) -> BlockContext { // Align power of two let aligned_cols = (cols + ((1 << MAX_MIB_SIZE_LOG2) - 1)) & !((1 << MAX_MIB_SIZE_LOG2) - 1); BlockContext { cols, rows, above_partition_context: vec![0; aligned_cols], left_partition_context: [0; MAX_MIB_SIZE], above_coeff_context: [vec![0; cols << (MI_SIZE_LOG2 - tx_size_wide_log2[0])], vec![0; cols << (MI_SIZE_LOG2 - tx_size_wide_log2[0])], vec![0; cols << (MI_SIZE_LOG2 - tx_size_wide_log2[0])],], left_coeff_context: [[0; MAX_MIB_SIZE]; PLANES], blocks: vec![vec![Block::default(); cols]; rows] } } pub fn at(&mut self, bo: &BlockOffset) -> &mut Block { &mut self.blocks[bo.y][bo.x] } pub fn above_of(&mut self, bo: &BlockOffset) -> Block { if bo.y > 0 { self.blocks[bo.y - 1][bo.x] } else { Block::default() } } pub fn left_of(&mut self, bo: &BlockOffset) -> Block { if bo.x > 0 { self.blocks[bo.y][bo.x - 1] } else { Block::default() 
} } fn coeff_context(&self, plane: usize, bo: &BlockOffset) -> usize { (self.above_coeff_context[plane][bo.x] + self.left_coeff_context[plane][bo.y_in_sb()]) as usize } fn set_coeff_context(&mut self, plane: usize, bo: &BlockOffset, value: bool) { let uvalue = value as u8; self.above_coeff_context[plane][bo.x] = uvalue; self.left_coeff_context[plane][bo.y_in_sb()] = uvalue; } pub fn reset_left_coeff_context(&mut self, plane: usize) { for c in self.left_coeff_context[plane].iter_mut() { *c = 0; } } fn partition_plane_context(&self, bo: &BlockOffset, bsize: BlockSize) -> usize { // TODO: this should be way simpler without sub8x8 let above_ctx = self.above_partition_context[bo.x]; let left_ctx = self.left_partition_context[bo.y_in_sb()]; let bsl = b_width_log2_lookup[bsize as usize] - b_width_log2_lookup[BlockSize::BLOCK_8X8 as usize]; let above = (above_ctx >> bsl) & 1; let left = (left_ctx >> bsl) & 1; assert!(b_width_log2_lookup[bsize as usize] == b_height_log2_lookup[bsize as usize]); ((left * 2 + above) + bsl) as usize * PARTITION_PLOFFSET } pub fn update_partition_context(&mut self, bo: &BlockOffset, subsize : BlockSize, bsize: BlockSize) { #[allow(dead_code)] // TODO(yushin): If CONFIG_EXT_PARTITION_TYPES is enabled, use bw and bh //let bw = mi_size_wide[bsize as usize]; //let bh = mi_size_high[bsize as usize]; let bs = mi_size_wide[bsize as usize]; let above_ctx = &mut self.above_partition_context[bo.x..bo.x + bs as usize]; let left_ctx = &mut self.left_partition_context[bo.y_in_sb()..bo.y_in_sb() + bs as usize]; // update the partition context at the end notes. set partition bits // of block sizes larger than the current one to be one, and partition // bits of smaller block sizes to be zero. 
for i in 0..bs { above_ctx[i as usize] = partition_context_lookup[subsize as usize][0]; } for i in 0..bs { left_ctx[i as usize] = partition_context_lookup[subsize as usize][1]; } } fn skip_context(&mut self, bo: &BlockOffset) -> usize { (self.above_of(bo).skip as usize) + (self.left_of(bo).skip as usize) } } #[derive(Clone)] pub struct ContextWriterCheckpoint { pub w: ec::WriterCheckpoint, pub fc: CDFContext, pub bc: BlockContext } pub struct ContextWriter { pub w: ec::Writer, pub fc: CDFContext, pub bc: BlockContext } impl ContextWriter { pub fn write_partition(&mut self, p: PartitionType, bsize: BlockSize) { let bo = BlockOffset { x: 0, y: 0 }; let ctx = self.bc.partition_plane_context(&bo, bsize); self.w.symbol(p as u32, &mut self.fc.partition_cdf[ctx], PARTITION_TYPES); } pub fn write_intra_mode_kf(&mut self, bo: &BlockOffset, mode: PredictionMode) { let above_mode = self.bc.above_of(bo).mode as usize; let left_mode = self.bc.left_of(bo).mode as usize; let cdf = &mut self.fc.kf_y_cdf[above_mode][left_mode]; self.w.symbol(mode as u32, cdf, INTRA_MODES); } pub fn write_intra_uv_mode(&mut self, uv_mode: PredictionMode, y_mode: PredictionMode) { let cdf = &mut self.fc.uv_mode_cdf[y_mode as usize]; self.w.symbol(uv_mode as u32, cdf, INTRA_MODES); } pub fn write_tx_type(&mut self, tx_type: TxType, y_mode: PredictionMode) { let tx_size = TxSize::TX_4X4; let square_tx_size = TXSIZE_SQR_MAP[tx_size as usize]; let eset = get_ext_tx_set(tx_size, false, true); if eset > 0 { self.w.symbol( av1_ext_tx_intra_ind[eset as usize][tx_type as usize], &mut self.fc.intra_ext_tx_cdf[eset as usize][square_tx_size as usize][y_mode as usize], ext_tx_cnt_intra[eset as usize]); } } pub fn write_skip(&mut self, bo: &BlockOffset, skip: bool) { let ctx = self.bc.skip_context(bo); self.w.symbol(skip as u32, &mut self.fc.skip_cdfs[ctx], 2); } pub fn write_token_block_zero(&mut self, plane: usize, bo: &BlockOffset) { let plane_type = if plane > 0 { 1 } else { 0 }; let tx_size_ctx = 
TXSIZE_SQR_MAP[TxSize::TX_4X4 as usize] as usize; let ref_type = 0; let band = 0; let ctx = self.bc.coeff_context(plane, bo); let cdf = &mut self.fc.coef_head_cdfs[tx_size_ctx][plane_type][ref_type][band][ctx]; //println!("encoding token band={} ctx={}", band, ctx); self.w.symbol(0, cdf, HEAD_TOKENS + 1); self.bc.set_coeff_context(plane, bo, false); } pub fn write_coeffs(&mut self, plane: usize, bo: &BlockOffset, coeffs_in: &[i32], tx_size: TxSize, tx_type: TxType) { let scan_order = &av1_intra_scan_orders[tx_size as usize][tx_type as usize]; let scan = scan_order.scan; let mut coeffs = [0 as i32; 16]; for i in 0..16 { coeffs[i] = coeffs_in[scan[i] as usize]; } let mut nz_coeff = 0; for (i, v) in coeffs.iter().enumerate() { if *v != 0 { nz_coeff = i + 1; } } if nz_coeff == 0 { self.write_token_block_zero(plane, bo); return; } let tx_size = TxSize::TX_4X4; let plane_type = if plane > 0 { 1 } else { 0 }; let tx_size_ctx = TXSIZE_SQR_MAP[tx_size as usize] as usize; let ref_type = 0; let neighbors = scan_order.neighbors; let mut token_cache = [0 as u8; 64*64]; for (i, v) in coeffs.iter().enumerate() { let vabs = v.abs() as u32; let first = i == 0; let last = i == (nz_coeff - 1); let band = av1_coefband_trans_4x4[i]; let ctx = if first { self.bc.coeff_context(plane, bo) } else { ((1 + token_cache[neighbors[2 * i + 0] as usize] + token_cache[neighbors[2 * i + 1] as usize]) >> 1) as usize }; let cdf = &mut self.fc.coef_head_cdfs[tx_size_ctx][plane_type][ref_type][band as usize][ctx]; match (vabs, last) { (0,_) => { self.w.symbol(HeadToken::Zero as u32 - !first as u32, cdf, HEAD_TOKENS + (first as usize)); continue }, (1, false) => self.w.symbol(HeadToken::OneNEOB as u32 - !first as u32, cdf, HEAD_TOKENS + (first as usize)), (1, true) => self.w.symbol(HeadToken::OneEOB as u32 - !first as u32, cdf, HEAD_TOKENS + (first as usize)), (_, false) => self.w.symbol(HeadToken::TwoPlusNEOB as u32 - !first as u32, cdf, HEAD_TOKENS + (first as usize)), (_, true) => 
self.w.symbol(HeadToken::TwoPlusEOB as u32 - !first as u32, cdf, HEAD_TOKENS + (first as usize)), }; let tailcdf = &mut self.fc.coef_tail_cdfs[tx_size_ctx][plane_type][ref_type][band as usize][ctx as usize]; match vabs { 0|1 => {}, 2 => self.w.symbol(TailToken::Two as u32, tailcdf, TAIL_TOKENS), 3 => self.w.symbol(TailToken::Three as u32, tailcdf, TAIL_TOKENS), 4 => self.w.symbol(TailToken::Four as u32, tailcdf, TAIL_TOKENS), 5...6 => { self.w.symbol(TailToken::Cat1 as u32, tailcdf, TAIL_TOKENS); self.w.cdf(vabs - 5, &av1_cat1_cdf0); } 7...10 => { self.w.symbol(TailToken::Cat2 as u32, tailcdf, TAIL_TOKENS); self.w.cdf(vabs - 7, &av1_cat2_cdf0); } 11...18 => { self.w.symbol(TailToken::Cat3 as u32, tailcdf, TAIL_TOKENS); self.w.cdf(vabs - 11, &av1_cat3_cdf0); } 19...34 => { self.w.symbol(TailToken::Cat4 as u32, tailcdf, TAIL_TOKENS); self.w.cdf(vabs - 19, &av1_cat4_cdf0); } 35...66 => { self.w.symbol(TailToken::Cat5 as u32, tailcdf, TAIL_TOKENS); self.w.cdf((vabs - 35) & 0xf, &av1_cat5_cdf0); self.w.cdf(((vabs - 35) >> 4) & 0x1, &av1_cat5_cdf1); } _ => { self.w.symbol(TailToken::Cat6 as u32, tailcdf, TAIL_TOKENS); let tx_offset = tx_size as u32 - TxSize::TX_4X4 as u32; let bit_depth = 8; let bits = bit_depth + 3 + tx_offset; self.w.cdf((vabs - 67) & 0xf, &av1_cat6_cdf0); self.w.cdf(((vabs - 67) >> 4) & 0xf, &av1_cat6_cdf1); self.w.cdf(((vabs - 67) >> 8) & 0xf, &av1_cat6_cdf2); if bits > 12 { self.w.cdf(((vabs - 67) >> 12) & 0xf, &av1_cat6_cdf3); } if bits > 16 { self.w.cdf(((vabs - 67) >> 16) & 0x3, &av1_cat6_cdf4); } } }; self.w.bool(*v < 0, 16384); let energy_class = match vabs { 0 => 0, 1 => 1, 2 => 2, 3|4 => 3, 5...10 => 4, _ => 5, }; token_cache[scan[i] as usize] = energy_class; if last { break; } } self.bc.set_coeff_context(plane, bo, true); } pub fn checkpoint(&mut self) -> ContextWriterCheckpoint { ContextWriterCheckpoint { w: self.w.checkpoint(), fc: self.fc.clone(), bc: self.bc.clone() } } pub fn rollback(&mut self, checkpoint: ContextWriterCheckpoint) { 
self.w.rollback(&checkpoint.w);
    // `checkpoint` is taken by value, so move its contents into place
    // instead of cloning: the previous `.clone()` calls made a second,
    // redundant deep copy of the CDF tables and the whole block context.
    self.fc = checkpoint.fc;
    self.bc = checkpoint.bc;
  }
}
//! 65816 emulator mod addressing; mod statusreg; use self::addressing::AddressingMode; use self::statusreg::StatusReg; use snes::Peripherals; /// Rudimentary memory access break points. Stores (bank, address)-tuples that cause a break on /// read access. const MEM_BREAK_LOAD: &'static [(u8, u16)] = &[ ]; const MEM_BREAK_STORE: &'static [(u8, u16)] = &[ ]; // Emulation mode vectors const IRQ_VEC8: u16 = 0xFFFE; const RESET_VEC8: u16 = 0xFFFC; const NMI_VEC8: u16 = 0xFFFA; #[allow(dead_code)] const ABORT_VEC8: u16 = 0xFFF8; #[allow(dead_code)] const COP_VEC8: u16 = 0xFFF4; // Native mode vectors const IRQ_VEC16: u16 = 0xFFEE; const NMI_VEC16: u16 = 0xFFEA; #[allow(dead_code)] const ABORT_VEC16: u16 = 0xFFE8; #[allow(dead_code)] const BRK_VEC16: u16 = 0xFFE6; #[allow(dead_code)] const COP_VEC16: u16 = 0xFFE4; /// One CPU cycle = 6 master clock cycles pub const CPU_CYCLE: u16 = 6; pub struct Cpu { a: u16, x: u16, y: u16, /// Stack pointer s: u16, /// Data bank register. Bank for all memory accesses. dbr: u8, /// Program bank register. Opcodes are fetched from this bank. pbr: u8, /// Direct (page) register. Address offset for all instruction using "direct addressing" mode. d: u16, /// Program counter. Note that PBR is not changed by the CPU, so code can not span multiple /// banks (without manual bank switching). pc: u16, p: StatusReg, emulation: bool, /// Master clock cycle counter for the current instruction. cy: u16, pub trace: bool, pub mem: Peripherals, } impl Cpu { /// Creates a new CPU and executes a reset. This will fetch the RESET vector from memory and /// put the CPU in emulation mode. 
pub fn new(mut mem: Peripherals) -> Cpu {
    // The RESET vector is fetched little-endian from bank 0 (emulation-mode
    // vector address).
    let pcl = mem.load(0, RESET_VEC8) as u16;
    let pch = mem.load(0, RESET_VEC8 + 1) as u16;
    let pc = (pch << 8) | pcl;
    debug!("RESET @ {:02X}", pc);

    Cpu {
        // Undefined according to datasheet
        a: 0,
        x: 0,
        y: 0,
        // High byte set to 1 since we're now in emulation mode
        s: 0x0100,
        // Initialized to 0
        dbr: 0,
        d: 0,
        pbr: 0,
        // Read from RESET vector above
        pc: pc,
        // Acc and index regs start in 8-bit mode, IRQs disabled, CPU in emulation mode
        p: StatusReg::new(),
        emulation: true,
        cy: 0,
        trace: false,
        mem: mem,
    }
}

/// Adds the time needed to access the given memory location to the cycle counter.
fn do_io_cycle(&mut self, bank: u8, addr: u16) {
    // Extra master-clock cycles charged on top of the base CPU cycle,
    // depending on which memory region (bank, addr) falls into.
    const FAST: u16 = 0;
    const SLOW: u16 = 2;
    const XSLOW: u16 = 6;

    self.cy += match bank {
        0x00 ... 0x3f => match addr {
            0x0000 ... 0x1fff | 0x6000 ... 0xffff => SLOW,
            0x4000 ... 0x41ff => XSLOW,
            _ => FAST,
        },
        0x40 ... 0x7f => SLOW,
        0x80 ... 0xbf => match addr {
            0x0000 ... 0x1fff | 0x6000 ... 0x7fff => SLOW,
            0x4000 ... 0x41ff => XSLOW,
            // FIXME Depends on bit 1 in $420d. Assume slow for now.
            0x8000 ... 0xffff => SLOW,
            _ => FAST
        },
        // FIXME Depends on bit 1 in $420d. Assume slow for now.
        0xc0 ... 0xff => SLOW,
        _ => FAST,
    }
}

/// Load a byte from memory. Will change the cycle counter according to the memory speed.
    fn loadb(&mut self, bank: u8, addr: u16) -> u8 {
        if MEM_BREAK_LOAD.iter().find(|&&(b, a)| bank == b && addr == a).is_some() {
            debug!("MEM-BREAK: Breakpoint triggered on load from ${:02X}:{:04X} (${:02X})",
                bank, addr, self.mem.load(bank, addr))
        }
        self.do_io_cycle(bank, addr);
        self.mem.load(bank, addr)
    }

    /// Load a 16-bit little-endian word from memory. Does not wrap across the bank boundary.
    fn loadw(&mut self, bank: u8, addr: u16) -> u16 {
        assert!(addr < 0xffff, "loadw on bank boundary");
        // ^ if this should be supported, make sure to fix the potential overflow below
        let lo = self.loadb(bank, addr) as u16;
        let hi = self.loadb(bank, addr + 1) as u16;
        (hi << 8) | lo
    }

    /// Store a byte to memory. Will change the cycle counter according to the memory speed.
    fn storeb(&mut self, bank: u8, addr: u16, value: u8) {
        if MEM_BREAK_STORE.iter().find(|&&(b, a)| bank == b && addr == a).is_some() {
            debug!("MEM-BREAK: Breakpoint triggered on store of ${:02X} to ${:02X}:{:04X}",
                value, bank, addr)
        }
        self.do_io_cycle(bank, addr);
        self.mem.store(bank, addr, value)
    }

    /// Store a 16-bit little-endian word to memory. Does not wrap across the bank boundary.
    fn storew(&mut self, bank: u8, addr: u16, value: u16) {
        assert!(addr < 0xffff, "storew on bank boundary");
        // ^ if this should be supported, make sure to fix the potential overflow below
        self.storeb(bank, addr, value as u8);
        self.storeb(bank, addr + 1, (value >> 8) as u8);
    }

    /// Fetches the byte PC points at, then increments PC
    fn fetchb(&mut self) -> u8 {
        let (pbr, pc) = (self.pbr, self.pc);
        let b = self.loadb(pbr, pc);
        self.pc += 1;
        b
    }

    /// Fetches a 16-bit word (little-endian) located at PC, by fetching 2 individual bytes
    fn fetchw(&mut self) -> u16 {
        let low = self.fetchb() as u16;
        let high = self.fetchb() as u16;
        (high << 8) | low
    }

    /// Pushes a byte onto the stack and decrements the stack pointer
    fn pushb(&mut self, value: u8) {
        let s = self.s;
        self.storeb(0, s, value);
        if self.emulation {
            // stack must stay in 0x01xx
            assert_eq!(self.s & 0xff00, 0x0100);
            // Only the low byte of S wraps in emulation mode
            let s = self.s as u8 - 1;
            self.s = (self.s & 0xff00) | s as u16;
        } else {
            self.s -= 1;
        }
    }

    /// Pushes a 16-bit word: high byte first, so the low byte ends up at the lower address
    /// (standard 65x stack order)
    fn pushw(&mut self, value: u16) {
        let hi = (value >> 8) as u8;
        let lo = value as u8;
        self.pushb(hi);
        self.pushb(lo);
    }

    /// Increments the stack pointer, then pops a byte off the stack
    fn popb(&mut self) -> u8 {
        if self.emulation {
            // stack must stay in 0x01xx
            assert_eq!(self.s & 0xff00, 0x0100);
            let s = self.s as u8 + 1;
            self.s = (self.s & 0xff00) | s as u16;
        } else {
            self.s += 1;
        }

        let s = self.s;
        self.loadb(0, s)
    }

    /// Pops a 16-bit word off the stack (low byte first, mirroring `pushw`)
    fn popw(&mut self) -> u16 {
        let lo = self.popb() as u16;
        let hi = self.popb() as u16;
        (hi << 8) | lo
    }

    /// Enters/exits emulation mode
    fn set_emulation(&mut self, value: bool) {
        if !self.emulation && value {
            // Enter emulation mode

            // Set high byte of stack ptr to 0x01 and set M/X bits to make A,X and Y 8-bit
            self.s = 0x0100 | (self.s & 0xff);
            self.p.set_small_acc(true);
            self.p.set_small_index(true);
            // "If the Index Select Bit (X) equals one, both registers will be 8 bits wide, and the
            // high byte is forced to zero"
            self.x &= 0xff;
            self.y &= 0xff;
        }
        self.emulation = value;
    }

    /// Logs the instruction about to be executed (PC, raw opcode byte, mnemonic, operand) along
    /// with the register state, at trace level. No-op unless trace logging is enabled at runtime
    /// and `self.trace` is set.
    fn trace_op(&self, pc: u16, raw: u8, op: &str, am: Option<&AddressingMode>) {
        use log::LogLevel::Trace;
        if !log_enabled!(Trace) || !self.trace { return }

        let opstr = match am {
            Some(am) => format!("{} {}", op, am),
            None => format!("{}", op),
        };
        trace!("${:02X}:{:04X} {:02X} {:14} a:{:04X} x:{:04X} y:{:04X} s:{:04X} d:{:04X} dbr:{:02X} emu:{} {}",
            self.pbr,
            pc,
            raw,
            opstr,
            self.a,
            self.x,
            self.y,
            self.s,
            self.d,
            self.dbr,
            self.emulation as u8,
            self.p,
        );
    }

    /// Executes a single opcode and returns the number of master clock cycles used.
    pub fn dispatch(&mut self) -> u16 {
        // CPU cycles each opcode takes (at the minimum).
        static CYCLE_TABLE: [u8; 256] = [
            7,6,7,4,5,3,5,6, 3,2,2,4,6,4,6,5,   // $00 - $0f
            2,5,5,7,5,4,6,6, 2,4,2,2,6,4,7,5,   // $10 - $1f
            6,6,8,4,3,3,5,6, 4,2,2,5,4,4,6,5,   // $20 - $2f
            2,5,5,7,4,4,6,6, 2,4,2,2,4,4,7,5,   // $30 - $3f
            7,6,2,4,7,3,5,6, 3,2,2,3,3,4,6,5,   // $40 - $4f
            2,5,5,7,7,4,6,6, 2,4,3,2,4,4,7,5,   // $50 - $5f
            7,6,6,4,3,3,5,6, 4,2,2,6,5,4,6,5,   // $60 - $6f
            2,5,5,7,4,4,6,6, 2,4,4,2,6,2,7,5,   // $70 - $7f
            2,6,3,4,3,3,3,2, 2,2,2,3,4,4,4,5,   // $80 - $8f
            2,6,5,7,4,4,4,6, 2,5,2,2,3,5,5,5,   // $90 - $9f
            2,6,2,4,3,3,3,6, 2,2,2,4,4,4,4,5,   // $a0 - $af
            2,5,5,7,4,4,4,6, 2,4,2,2,4,4,4,5,   // $b0 - $bf
            2,6,3,4,3,3,5,6, 2,2,2,3,4,4,6,5,   // $c0 - $cf
            2,5,5,7,6,4,6,6, 2,4,3,3,6,4,7,5,   // $d0 - $df
            2,6,3,4,3,3,5,6, 2,2,2,3,4,4,6,5,   // $e0 - $ef
            2,5,5,7,5,4,6,6, 2,4,4,2,6,4,7,5,   // $f0 - $ff
        ];

        let pc = self.pc;
        self.cy = 0;

        let op = self.fetchb();
        self.cy += CYCLE_TABLE[op as usize] as u16 * CPU_CYCLE + 4;
        // FIXME: The +4 is a timing correction. I'm not sure what causes the inaccuracy, but I
        // suspect the addressing mode / memory access timing is a bit off.

        // `instr!(name)` traces and executes an implied-operand opcode; `instr!(name am)` first
        // builds the addressing mode by calling `self.am()` (which fetches the operand bytes)
        macro_rules! instr {
            ( $name:ident ) => {{
                self.trace_op(pc, op, stringify!($name), None);
                self.$name()
            }};
            ( $name:ident $am:ident ) => {{
                let am = self.$am();
                self.trace_op(pc, op, stringify!($name), Some(&am));
                self.$name(am)
            }};
        }

        match op {
            // Stack operations
            0x4b => instr!(phk),
            0x0b => instr!(phd),
            0x2b => instr!(pld),
            0x8b => instr!(phb),
            0xab => instr!(plb),
            0x08 => instr!(php),
            0x28 => instr!(plp),
            0x48 => instr!(pha),
            0x68 => instr!(pla),
            0xda => instr!(phx),
            0xfa => instr!(plx),
            0x5a => instr!(phy),
            0x7a => instr!(ply),
            // Processor status
            0x18 => instr!(clc),
            0x38 => instr!(sec),
            0x58 => instr!(cli),
            0x78 => instr!(sei),
            0xfb => instr!(xce),
            0xc2 => instr!(rep immediate8),
            0xe2 => instr!(sep immediate8),
            // Arithmetic
            0x0a => instr!(asl_a),
            0x06 => instr!(asl direct),
            0x16 => instr!(asl direct_indexed_x),
            0x0e => instr!(asl absolute),
            0x2a => instr!(rol_a),
            0x26 => instr!(rol direct),
            0x2e => instr!(rol absolute),
            0x3e => instr!(rol absolute_indexed_x),
            0x36 => instr!(rol direct_indexed_x),
            0x4a => instr!(lsr_a),
            0x46 => instr!(lsr direct),
            0x6a => instr!(ror_a),
            0x7e => instr!(ror absolute_indexed_x),
            0x25 => instr!(and direct),
            0x21 => instr!(and direct_indexed_indirect),
            0x29 => instr!(and immediate_acc),
            0x2d => instr!(and absolute),
            0x3d => instr!(and absolute_indexed_x),
            0x39 => instr!(and absolute_indexed_y),
            0x2f => instr!(and absolute_long),
            0x3f => instr!(and absolute_long_indexed_x),
            0x03 => instr!(ora stack_rel),
            0x05 => instr!(ora direct),
            0x09 => instr!(ora immediate_acc),
            0x12 => instr!(ora direct_indirect),
            0x07 => instr!(ora direct_indirect_long),
            0x0d => instr!(ora absolute),
            0x1d => instr!(ora absolute_indexed_x),
            0x19 => instr!(ora absolute_indexed_y),
            0x0f => instr!(ora absolute_long),
            0x45 => instr!(eor direct),
            0x55 => instr!(eor direct_indexed_x),
            0x49 => instr!(eor immediate_acc),
            0x4d => instr!(eor absolute),
            0x5d => instr!(eor absolute_indexed_x),
            0x59 => instr!(eor absolute_indexed_y),
            0x4f => instr!(eor absolute_long),
            0x5f => instr!(eor absolute_long_indexed_x),
            0x65 => instr!(adc direct),
            0x75 => instr!(adc direct_indexed_x),
            0x69 => instr!(adc immediate_acc),
            0x6d => instr!(adc absolute),
            0x7d => instr!(adc absolute_indexed_x),
            0x79 => instr!(adc absolute_indexed_y),
            0x6f => instr!(adc absolute_long),
            0x7f => instr!(adc absolute_long_indexed_x),
            0x71 => instr!(adc direct_indirect),
            0xe5 => instr!(sbc direct),
            0xf5 => instr!(sbc direct_indexed_x),
            0xe9 => instr!(sbc immediate_acc),
            0xed => instr!(sbc absolute),
            0xf9 => instr!(sbc absolute_indexed_y),
            0xfd => instr!(sbc absolute_indexed_x),
            0xef => instr!(sbc absolute_long),
            0xff => instr!(sbc absolute_long_indexed_x),
            0xe6 => instr!(inc direct),
            0xf6 => instr!(inc direct_indexed_x),
            0xfe => instr!(inc absolute_indexed_x),
            0xee => instr!(inc absolute),
            0x1a => instr!(ina),
            0xe8 => instr!(inx),
            0xc8 => instr!(iny),
            0x3a => instr!(dea),
            0xc6 => instr!(dec direct),
            0xd6 => instr!(dec direct_indexed_x),
            0xce => instr!(dec absolute),
            0xde => instr!(dec absolute_indexed_x),
            0xca => instr!(dex),
            0x88 => instr!(dey),
            // Register and memory transfers
            0x5b => instr!(tcd),
            0x1b => instr!(tcs),
            0xaa => instr!(tax),
            0xa8 => instr!(tay),
            0x8a => instr!(txa),
            0x9b => instr!(txy),
            0x98 => instr!(tya),
            0xbb => instr!(tyx),
            0xeb => instr!(xba),
            0x85 => instr!(sta direct),
            0x95 => instr!(sta direct_indexed_x),
            0x92 => instr!(sta direct_indirect),
            0x87 => instr!(sta direct_indirect_long),
            0x97 => instr!(sta direct_indirect_long_idx),
            0x8d => instr!(sta absolute),
            0x8f => instr!(sta absolute_long),
            0x9d => instr!(sta absolute_indexed_x),
            0x99 => instr!(sta absolute_indexed_y),
            0x9f => instr!(sta absolute_long_indexed_x),
            0x86 => instr!(stx direct),
            0x96 => instr!(stx direct_indexed_y),
            0x8e => instr!(stx absolute),
            0x84 => instr!(sty direct),
            0x94 => instr!(sty direct_indexed_y),
            0x8c => instr!(sty absolute),
            0x64 => instr!(stz direct),
            0x9c => instr!(stz absolute),
            0x74 => instr!(stz direct_indexed_x),
            0x9e => instr!(stz absolute_indexed_x),
            0xa5 => instr!(lda direct),
            0xb5 => instr!(lda direct_indexed_x),
            0xb1 => instr!(lda direct_indirect_indexed),
            0xa9 => instr!(lda immediate_acc),
            0xb2 => instr!(lda direct_indirect),
            0xa7 => instr!(lda direct_indirect_long),
            0xb7 => instr!(lda direct_indirect_long_idx),
            0xad => instr!(lda absolute),
            0xbd => instr!(lda absolute_indexed_x),
            0xb9 => instr!(lda absolute_indexed_y),
            0xaf => instr!(lda absolute_long),
            0xbf => instr!(lda absolute_long_indexed_x),
            0xa6 => instr!(ldx direct),
            0xb6 => instr!(ldx direct_indexed_y),
            0xa2 => instr!(ldx immediate_index),
            0xae => instr!(ldx absolute),
            0xbe => instr!(ldx absolute_indexed_y),
            0xa4 => instr!(ldy direct),
            0xb4 => instr!(ldy direct_indexed_x),
            0xa0 => instr!(ldy immediate_index),
            0xac => instr!(ldy absolute),
            0xbc => instr!(ldy absolute_indexed_x),
            0x54 => instr!(mvn block_move),
            0x44 => instr!(mvp block_move),
            // Bit operations
            0x24 => instr!(bit direct),
            0x2c => instr!(bit absolute),
            0x34 => instr!(bit direct_indexed_x),
            0x3c => instr!(bit absolute_indexed_x),
            0x89 => instr!(bit immediate_acc),
            0x04 => instr!(tsb direct),
            0x0c => instr!(tsb absolute),
            0x14 => instr!(trb direct),
            0x1c => instr!(trb absolute),
            // Comparisons
            0xc9 => instr!(cmp immediate_acc),
            0xc5 => instr!(cmp direct),
            0xd5 => instr!(cmp direct_indexed_x),
            0xcd => instr!(cmp absolute),
            0xdd => instr!(cmp absolute_indexed_x),
            0xd9 => instr!(cmp absolute_indexed_y),
            0xcf => instr!(cmp absolute_long),
            0xdf => instr!(cmp absolute_long_indexed_x),
            0xd7 => instr!(cmp direct_indirect_long_idx),
            0xe0 => instr!(cpx immediate_index),
            0xe4 => instr!(cpx direct),
            0xec => instr!(cpx absolute),
            0xc0 => instr!(cpy immediate_index),
            0xc4 => instr!(cpy direct),
            0xcc => instr!(cpy absolute),
            // Branches
            0x80 => instr!(bra rel),
            0xf0 => instr!(beq rel),
            0xd0 => instr!(bne rel),
            0x10 => instr!(bpl rel),
            0x30 => instr!(bmi rel),
            0x50 => instr!(bvc rel),
            0x70 => instr!(bvs rel),
            0x90 => instr!(bcc rel),
            0xb0 => instr!(bcs rel),
            // Jumps, calls and returns
            0x4c => instr!(jmp absolute),
            0x5c => instr!(jml absolute_long),
            0x6c => instr!(jmp absolute_indirect),
            0x7c => instr!(jmp absolute_indexed_indirect),
            0xdc => instr!(jml absolute_indirect_long),
            0x20 => instr!(jsr absolute),
            0x22 => instr!(jsl absolute_long),
            0x40 => instr!(rti),
            0x60 => instr!(rts),
            0x6b => instr!(rtl),
            0xea => instr!(nop),
            _ => {
                instr!(ill);
                panic!("illegal CPU opcode: ${:02X}", op);
            }
        }

        self.cy
    }

    /// Immediately executes an IRQ sequence and jumps to the NMI handler.
    pub fn trigger_nmi(&mut self) {
        if self.emulation {
            self.interrupt(NMI_VEC8);
        } else {
            self.interrupt(NMI_VEC16);
        }
    }

    /// Immediately executes an IRQ sequence and jumps to the IRQ handler.
    pub fn trigger_irq(&mut self) {
        if self.emulation {
            self.interrupt(IRQ_VEC8);
        } else {
            self.interrupt(IRQ_VEC16);
        }
    }

    /// Execute an IRQ sequence. This pushes PBR, PC and the processor status register P on the
    /// stack, sets the PBR to 0, loads the handler address from the given vector, and jumps to the
    /// handler.
    fn interrupt(&mut self, vector: u16) {
        if !self.emulation {
            // PBR is only saved (and zeroed) in native mode
            let pbr = self.pbr;
            self.pushb(pbr);
            self.pbr = 0;
        }
        let pc = self.pc;
        self.pushw(pc);
        let p = self.p.0;
        self.pushb(p);

        let handler = self.loadw(0, vector);
        self.pc = handler;
        // NOTE(review): the real 65816 also sets the IRQ disable flag when taking an interrupt;
        // P is left unchanged here — confirm whether this is intentional.
    }

    /// Undoes `interrupt`: restores P and PC (and PBR, when in native mode) from the stack.
    fn return_from_interrupt(&mut self) {
        let p = self.popb();
        // NOTE(review): bypasses `set_p`, so index registers are not truncated if the restored P
        // sets the X flag — confirm intended.
        self.p.0 = p;
        let pc = self.popw();
        self.pc = pc;

        if !self.emulation {
            let pbr = self.popb();
            self.pbr = pbr;
        }
    }

    /// Common method for all comparison opcodes. Compares `a` to `b` by effectively computing
    /// `a-b`. This method only works correctly for 16-bit values.
    ///
    /// The Z flag is set if both numbers are equal.
    /// The C flag will be set to `a >= b`.
    /// The N flag is set to the most significant bit of `a-b`.
fn compare(&mut self, a: u16, b: u16) { self.p.set_zero(a == b); self.p.set_carry(a >= b); self.p.set_negative(a.wrapping_sub(b) & 0x8000 != 0); } /// Does the exact same thing as `compare`, but for 8-bit operands fn compare8(&mut self, a: u8, b: u8) { self.p.set_zero(a == b); self.p.set_carry(a >= b); self.p.set_negative(a.wrapping_sub(b) & 0x80 != 0); } /// Branch to an absolute address. This will overwrite the current program bank. fn branch(&mut self, target: (u8, u16)) { self.pbr = target.0; self.pc = target.1; } /// Changes the status register. fn set_p(&mut self, new: u8) { let small_idx = self.p.small_index(); self.p.0 = new; if !small_idx && self.p.small_index() { // "If the Index Select Bit (X) equals one, both registers will be 8 bits wide, and the // high byte is forced to zero" self.x &= 0xff; self.y &= 0xff; } } } /// Opcode implementations impl Cpu { /// Move Next (incrementing address). Copies C+1 (16-bit A) bytes from the address in X to the /// address in Y. fn mvn(&mut self, am: AddressingMode) { if let AddressingMode::BlockMove(destbank, srcbank) = am { while self.a != 0xffff { let (x, y) = (self.x, self.y); let val = self.loadb(srcbank, x); self.storeb(destbank, y, val); self.x = self.x.wrapping_add(1); self.y = self.y.wrapping_add(1); self.a = self.a.wrapping_sub(1); } } else { panic!("MVN with invalid addressing mode"); } } /// Move Previous (decrementing address) fn mvp(&mut self, am: AddressingMode) { if let AddressingMode::BlockMove(destbank, srcbank) = am { while self.a != 0xffff { let (x, y) = (self.x, self.y); let val = self.loadb(srcbank, x); self.storeb(destbank, y, val); self.x = self.x.wrapping_sub(1); self.y = self.y.wrapping_sub(1); self.a = self.a.wrapping_sub(1); } } else { panic!("MVP with invalid addressing mode"); } } /// Push Program Bank Register fn phk(&mut self) { let pbr = self.pbr; self.pushb(pbr); } /// Push Direct Page Register fn phd(&mut self) { let d = self.d; self.pushw(d); } /// Pull Direct Page Register fn 
pld(&mut self) { let d = self.popw(); self.d = d; } /// Push Data Bank Register fn phb(&mut self) { let dbr = self.dbr; self.pushb(dbr); } /// Pop Data Bank Register fn plb(&mut self) { let dbr = self.popb(); self.dbr = dbr; } /// Push Processor Status Register fn php(&mut self) { // Changes no flags let p = self.p.0; self.pushb(p); } /// Pull Processor Status Register fn plp(&mut self) { let p = self.popb(); self.set_p(p); } /// Push A on the stack fn pha(&mut self) { // No flags modified if self.p.small_acc() { let a = self.a as u8; self.pushb(a); } else { let a = self.a; self.pushw(a); self.cy += CPU_CYCLE; } } /// Pull Accumulator from stack fn pla(&mut self) { // Changes N and Z if self.p.small_acc() { let a = self.popb(); self.a = (self.a & 0xff00) | self.p.set_nz_8(a) as u16; } else { let a = self.popw(); self.a = self.p.set_nz(a); self.cy += CPU_CYCLE; } } /// Push Index Register X fn phx(&mut self) { if self.p.small_index() { let val = self.x as u8; self.pushb(val); } else { let val = self.x; self.pushw(val); self.cy += CPU_CYCLE; } } /// Pop Index Register X fn plx(&mut self) { // Changes N and Z if self.p.small_index() { let val = self.popb(); self.x = self.p.set_nz_8(val) as u16; } else { let val = self.popw(); self.x = self.p.set_nz(val); self.cy += CPU_CYCLE; } } /// Push Index Register Y fn phy(&mut self) { if self.p.small_index() { let val = self.y as u8; self.pushb(val); } else { let val = self.y; self.pushw(val); self.cy += CPU_CYCLE; } } /// Pop Index Register Y fn ply(&mut self) { // Changes N and Z if self.p.small_index() { let val = self.popb(); self.y = self.p.set_nz_8(val) as u16; } else { let val = self.popw(); self.y = self.p.set_nz(val); self.cy += CPU_CYCLE; } } /// AND Accumulator with Memory (or immediate) fn and(&mut self, am: AddressingMode) { // Sets N and Z if self.p.small_acc() { let val = am.loadb(self); let res = self.a as u8 & val; self.p.set_nz_8(res); self.a = (self.a & 0xff00) | res as u16; } else { let val = am.loadw(self); 
let res = self.a & val; self.a = self.p.set_nz(res); self.cy += CPU_CYCLE; } } /// OR Accumulator with Memory fn ora(&mut self, am: AddressingMode) { // Sets N and Z if self.p.small_acc() { let val = am.loadb(self); let res = self.a as u8 | val; self.p.set_nz_8(res); self.a = (self.a & 0xff00) | res as u16; } else { let val = am.loadw(self); let res = self.a | val; self.a = self.p.set_nz(res); self.cy += CPU_CYCLE; } } /// Exclusive Or Accumulator with Memory fn eor(&mut self, am: AddressingMode) { // Sets N and Z if self.p.small_acc() { let val = am.loadb(self); let res = self.a as u8 ^ val; self.p.set_nz_8(res); self.a = (self.a & 0xff00) | res as u16; } else { let val = am.loadw(self); let res = self.a ^ val; self.a = self.p.set_nz(res); self.cy += CPU_CYCLE; } } /// Add With Carry fn adc(&mut self, am: AddressingMode) { // Sets N, V, C and Z // FIXME is this correct? double-check this! let c = if self.p.carry() { 1 } else { 0 }; if self.p.small_acc() { let a = self.a as u8; let val = am.loadb(self); let res = a as u16 + val as u16 + c; self.p.set_carry(res > 255); let res = res as u8; self.p.set_overflow((a ^ val) & 0x80 == 0 && (a ^ res) & 0x80 == 0x80); self.a = (self.a & 0xff00) | self.p.set_nz_8(res) as u16; } else { let val = am.loadw(self); let res = self.a as u32 + val as u32 + c as u32; self.p.set_carry(res > 65535); let res = res as u16; self.p.set_overflow((self.a ^ val) & 0x8000 == 0 && (self.a ^ res) & 0x8000 == 0x8000); self.a = self.p.set_nz(res); self.cy += CPU_CYCLE; } } /// Subtract with Borrow from Accumulator fn sbc(&mut self, am: AddressingMode) { // Changes N, Z, C and V // FIXME Set V flag! 
let c = if self.p.carry() { 0 } else { 1 }; if self.p.small_acc() { let a = self.a as u8; let v = am.loadb(self); let res = a as i16 - v as i16 - c; self.p.set_carry(res < 0); self.a = (self.a & 0xff00) | self.p.set_nz_8(res as u8) as u16; } else { let v = am.loadw(self); let res = self.a as i32 - v as i32 - c as i32; self.p.set_carry(res < 0); self.a = self.p.set_nz(res as u16); self.cy += CPU_CYCLE; } } /// Shift accumulator left by 1 bit fn asl_a(&mut self) { // Sets N, Z and C. The rightmost bit is filled with 0. if self.p.small_acc() { let a = self.a as u8; self.p.set_carry(self.a & 0x80 != 0); self.a = (self.a & 0xff00) | self.p.set_nz_8(a << 1) as u16; } else { self.p.set_carry(self.a & 0x8000 != 0); self.a = self.p.set_nz(self.a << 1); } } /// Arithmetic left-shift: Shift a memory location left by 1 bit (Read-Modify-Write) fn asl(&mut self, am: AddressingMode) { // Sets N, Z and C. The rightmost bit is filled with 0. let (bank, addr) = am.address(self); if self.p.small_acc() { let val = self.loadb(bank, addr); self.p.set_carry(val & 0x80 != 0); let res = self.p.set_nz_8(val << 1); self.storeb(bank, addr, res); } else { let val = self.loadw(bank, addr); self.p.set_carry(val & 0x8000 != 0); let res = self.p.set_nz(val << 1); self.storew(bank, addr, res); self.cy += 2 * CPU_CYCLE; } } /// Rotate Accumulator Left fn rol_a(&mut self) { // Sets N, Z, and C. C is used to fill the rightmost bit. let c: u8 = if self.p.carry() { 1 } else { 0 }; if self.p.small_acc() { let a = self.a as u8; self.p.set_carry(self.a & 0x80 != 0); let res = (a << 1) | c; self.a = (self.a & 0xff00) | self.p.set_nz_8(res) as u16; } else { self.p.set_carry(self.a & 0x8000 != 0); let res = (self.a << 1) | c as u16; self.a = self.p.set_nz(res); self.cy += CPU_CYCLE; } } /// Rotate Memory Left fn rol(&mut self, am: AddressingMode) { // Sets N, Z, and C. C is used to fill the rightmost bit. 
let c: u8 = if self.p.carry() { 1 } else { 0 }; if self.p.small_acc() { let a = am.clone().loadb(self); self.p.set_carry(a & 0x80 != 0); let res = self.p.set_nz_8((a << 1) | c); am.storeb(self, res); } else { let a = am.clone().loadw(self); self.p.set_carry(a & 0x8000 != 0); let res = self.p.set_nz((a << 1) | c as u16); am.storew(self, res); self.cy += CPU_CYCLE; // FIXME times 2? } } /// Logical Shift Accumulator Right fn lsr_a(&mut self) { // Sets N (always cleared), Z and C. The leftmost bit is filled with 0. // FIXME New code, needs small review if self.p.small_acc() { let a = self.a as u8; self.p.set_carry(self.a & 0x01 != 0); self.a = (self.a & 0xff00) | self.p.set_nz_8(a >> 1) as u16; } else { self.p.set_carry(self.a & 0x0001 != 0); self.a = self.p.set_nz(self.a >> 1); } } /// Logical Shift Right fn lsr(&mut self, am: AddressingMode) { // Sets N (always cleared), Z and C. The leftmost bit is filled with 0. if self.p.small_acc() { let a = am.clone().loadb(self); self.p.set_carry(a & 0x01 != 0); let res = self.p.set_nz_8(a >> 1); am.storeb(self, res); } else { let a = am.clone().loadw(self); self.p.set_carry(a & 0x0001 != 0); let res = self.p.set_nz(a >> 1); am.storew(self, res); } } /// Rotate accumulator right fn ror_a(&mut self) { // Sets N, Z, and C. Memory width can be changed. C is used to fill the leftmost bit. let c: u8 = if self.p.carry() { 1 } else { 0 }; if self.p.small_acc() { let val = self.a as u8; self.p.set_carry(val & 0x80 != 0); let res = self.p.set_nz_8((val >> 1) | (c << 7)); self.a = (self.a & 0xff00) | res as u16; } else { let val = self.a; self.p.set_carry(val & 0x8000 != 0); let res = self.p.set_nz((val >> 1) | ((c as u16) << 15)); self.a = res; self.cy += 2 * CPU_CYCLE; } } /// Rotate Memory Right fn ror(&mut self, am: AddressingMode) { // Sets N, Z, and C. Memory width can be changed. C is used to fill the leftmost bit. 
// The `AddressingMode` is used for both loading and storing the value (Read-Modify-Write // instruction) let c: u8 = if self.p.carry() { 1 } else { 0 }; let (bank, addr) = am.address(self); if self.p.small_acc() { let val = self.loadb(bank, addr); self.p.set_carry(val & 0x80 != 0); let res = self.p.set_nz_8((val >> 1) | (c << 7)); self.storeb(bank, addr, res); } else { let val = self.loadw(bank, addr); self.p.set_carry(val & 0x8000 != 0); let res = self.p.set_nz((val >> 1) | ((c as u16) << 15)); self.storew(bank, addr, res); self.cy += 2 * CPU_CYCLE; } } /// Exchange B with A (B is the MSB of the accumulator, A is the LSB) fn xba(&mut self) { // Changes N and Z: "The flags are changed based on the new value of the low byte, the A // accumulator (that is, on the former value of the high byte, the B accumulator), even in // sixteen-bit accumulator mode." let lo = self.a & 0xff; let hi = self.a >> 8; self.a = (lo << 8) | self.p.set_nz_8(hi as u8) as u16; } /// Transfer Accumulator to Index Register X fn tax(&mut self) { // Changes N and Z if self.p.small_index() { self.x = (self.x & 0xff00) | self.p.set_nz_8(self.a as u8) as u16; } else { self.x = self.p.set_nz(self.a); } } /// Transfer Accumulator to Index register Y fn tay(&mut self) { // Changes N and Z if self.p.small_index() { self.y = (self.y & 0xff00) | self.p.set_nz_8(self.a as u8) as u16; } else { self.y = self.p.set_nz(self.a); } } /// Transfer X to A fn txa(&mut self) { // Changes N and Z if self.p.small_acc() { self.a = (self.a & 0xff00) | self.p.set_nz_8(self.x as u8) as u16; } else { self.a = self.p.set_nz(self.x); } } /// Transfer X to Y fn txy(&mut self) { // Changes N and Z if self.p.small_index() { self.y = self.p.set_nz_8(self.x as u8) as u16; } else { self.y = self.p.set_nz(self.x); } } /// Transfer Index Register Y to Accumulator fn tya(&mut self) { // Changes N and Z if self.p.small_acc() { self.a = (self.a & 0xff00) | self.p.set_nz_8(self.y as u8) as u16; } else { self.a = 
self.p.set_nz(self.y); } } /// Transfer Y to X fn tyx(&mut self) { // Changes N and Z if self.p.small_index() { self.x = self.p.set_nz_8(self.y as u8) as u16; } else { self.x = self.p.set_nz(self.y); } } /// Increment memory location fn inc(&mut self, am: AddressingMode) { let (bank, addr) = am.address(self); if self.p.small_acc() { let res = self.loadb(bank, addr).wrapping_add(1); self.p.set_nz_8(res); self.storeb(bank, addr, res); } else { let res = self.loadw(bank, addr).wrapping_add(1); self.p.set_nz(res); self.storew(bank, addr, res); } } /// Increment accumulator fn ina(&mut self) { // Changes N and Z. Timing does not depend on accumulator size. if self.p.small_acc() { let res = self.p.set_nz_8((self.a as u8).wrapping_add(1)); self.a = (self.a & 0xff00) | res as u16; } else { self.a = self.p.set_nz(self.a.wrapping_add(1)); } } /// Increment Index Register X fn inx(&mut self) { // Changes N and Z. Timing does not depend on index register size. if self.p.small_index() { let res = self.p.set_nz_8((self.x as u8).wrapping_add(1)); self.x = (self.x & 0xff00) | res as u16; } else { self.x = self.p.set_nz(self.x.wrapping_add(1)); } } /// Increment Index Register Y fn iny(&mut self) { // Changes N and Z. Timing does not depend on index register size. if self.p.small_index() { let res = self.p.set_nz_8((self.y as u8).wrapping_add(1)); self.y = (self.y & 0xff00) | res as u16; } else { self.y = self.p.set_nz(self.y.wrapping_add(1)); } } /// Decrement Accumulator fn dea(&mut self) { // Changes N and Z. Timing does not depend on accumulator size. 
if self.p.small_acc() { let res = self.p.set_nz_8((self.a as u8).wrapping_sub(1)); self.a = (self.a & 0xff00) | res as u16; } else { self.a = self.p.set_nz(self.a.wrapping_sub(1)); } } /// Decrement memory location fn dec(&mut self, am: AddressingMode) { let (bank, addr) = am.address(self); if self.p.small_acc() { let res = self.loadb(bank, addr).wrapping_sub(1); self.p.set_nz_8(res); self.storeb(bank, addr, res); } else { let res = self.loadw(bank, addr).wrapping_sub(1); self.p.set_nz(res); self.storew(bank, addr, res); } } /// Decrement X fn dex(&mut self) { // Changes N and Z. Timing does not depend on index register size. // NB According to the datasheet, this writes the result to A, not X! But since this // doesn't make sense when looking at the way it's used, I'm going to ignore the datasheet if self.p.small_index() { let res = self.p.set_nz_8((self.x as u8).wrapping_sub(1)); self.x = (self.x & 0xff00) | res as u16; } else { self.x = self.p.set_nz(self.x.wrapping_sub(1)); } } /// Decrement Y fn dey(&mut self) { // Changes N and Z. Timing does not depend on index register size. if self.p.small_index() { let res = self.p.set_nz_8((self.y as u8).wrapping_sub(1)); self.y = (self.y & 0xff00) | res as u16; } else { self.y = self.p.set_nz(self.y.wrapping_sub(1)); } } /// Jump long. Changes the PBR. 
fn jml(&mut self, am: AddressingMode) { let a = am.address(self); self.branch(a); } /// Jump inside current program bank fn jmp(&mut self, am: AddressingMode) { let (_, addr) = am.address(self); self.pc = addr; } /// Branch always (inside current program bank, but this isn't checked) fn bra(&mut self, am: AddressingMode) { let a = am.address(self); self.branch(a); } /// Branch if Plus (N = 0) fn bpl(&mut self, am: AddressingMode) { let a = am.address(self); if !self.p.negative() { self.branch(a); self.cy += CPU_CYCLE; } } /// Branch if Minus/Negative (N = 1) fn bmi(&mut self, am: AddressingMode) { let a = am.address(self); if self.p.negative() { self.branch(a); self.cy += CPU_CYCLE; } } /// Branch if Overflow Clear fn bvc(&mut self, am: AddressingMode) { let a = am.address(self); if !self.p.overflow() { self.branch(a); self.cy += CPU_CYCLE; } } /// Branch if Overflow Set fn bvs(&mut self, am: AddressingMode) { let a = am.address(self); if self.p.overflow() { self.branch(a); self.cy += CPU_CYCLE; } } /// Branch if carry clear fn bcc(&mut self, am: AddressingMode) { let a = am.address(self); if !self.p.carry() { self.branch(a); self.cy += CPU_CYCLE; } } /// Branch if carry set fn bcs(&mut self, am: AddressingMode) { let a = am.address(self); if self.p.carry() { self.branch(a); self.cy += CPU_CYCLE; } } /// Branch if Equal fn beq(&mut self, am: AddressingMode) { let a = am.address(self); if self.p.zero() { self.branch(a); self.cy += CPU_CYCLE; } } /// Branch if Not Equal (Branch if Z = 0) fn bne(&mut self, am: AddressingMode) { let a = am.address(self); if !self.p.zero() { self.branch(a); self.cy += CPU_CYCLE; } } /// Test memory bits against accumulator fn bit(&mut self, am: AddressingMode) { if self.p.small_index() { let val = am.clone().loadb(self); self.p.set_zero(val & self.a as u8 == 0); match am { AddressingMode::Immediate(_) | AddressingMode::Immediate8(_) => {} _ => { self.p.set_negative(val & 0x80 != 0); self.p.set_overflow(val & 0x40 != 0); } } } else { let 
val = am.clone().loadw(self); self.p.set_zero(val & self.a == 0); match am { AddressingMode::Immediate(_) | AddressingMode::Immediate8(_) => {} _ => { self.p.set_negative(val & 0x8000 != 0); self.p.set_overflow(val & 0x4000 != 0); } } self.cy += CPU_CYCLE; } } /// Test and set memory bits against accumulator fn tsb(&mut self, am: AddressingMode) { // Sets Z // FIXME Is this correct? if self.p.small_index() { let val = am.clone().loadb(self); self.p.set_zero(val & self.a as u8 == 0); let res = val | self.a as u8; am.storeb(self, res); } else { let val = am.clone().loadw(self); self.p.set_zero(val & self.a == 0); let res = val | self.a; am.storew(self, res); self.cy += 2 * CPU_CYCLE; } } /// Test and reset memory bits against accumulator fn trb(&mut self, am: AddressingMode) { // Sets Z // FIXME Is this correct? if self.p.small_index() { let val = am.clone().loadb(self); self.p.set_zero(val & self.a as u8 == 0); let res = val & !(self.a as u8); am.storeb(self, res); } else { let val = am.clone().loadw(self); self.p.set_zero(val & self.a == 0); let res = val & !self.a; am.storew(self, res); self.cy += 2 * CPU_CYCLE; } } /// Compare Accumulator with Memory fn cmp(&mut self, am: AddressingMode) { if self.p.small_acc() { let a = self.a as u8; let b = am.loadb(self); self.compare8(a, b); } else { let a = self.a; let b = am.loadw(self); self.compare(a, b); self.cy += CPU_CYCLE; } } /// Compare Index Register X with Memory fn cpx(&mut self, am: AddressingMode) { if self.p.small_index() { let val = am.loadb(self); let x = self.x as u8; self.compare8(x, val); } else { let val = am.loadw(self); let x = self.x; self.compare(x, val); self.cy += CPU_CYCLE; } } /// Compare Index Register Y with Memory fn cpy(&mut self, am: AddressingMode) { if self.p.small_index() { let val = am.loadb(self); let y = self.y as u8; self.compare8(y, val); } else { let val = am.loadw(self); let y = self.y; self.compare(y, val); self.cy += CPU_CYCLE; } } /// Jump to Subroutine (with short address). 
Doesn't change PBR. /// /// "The address saved is the address of the last byte of the JSR instruction (the address of /// the last byte of the operand), not the address of the next instruction as is the case with /// some other processors. The address is pushed onto the stack in standard 65x order – the /// low byte in the lower address, the high byte in the higher address – and done in standard /// 65x fashion – the first byte is stored at the location pointed to by the stack pointer, the /// stack pointer is decremented, the second byte is stored, and the stack pointer is /// decremented again." fn jsr(&mut self, am: AddressingMode) { // Changes no flags let pc = self.pc - 1; self.pushb((pc >> 8) as u8); self.pushb(pc as u8); self.pc = am.address(self).1; } /// Long jump to subroutine. Additionally saves PBR on the stack and sets it to the bank /// returned by `am.address()`. fn jsl(&mut self, am: AddressingMode) { // Changes no flags let pbr = self.pbr; self.pushb(pbr); let pc = self.pc - 1; self.pushb((pc >> 8) as u8); self.pushb(pc as u8); let (pbr, pc) = am.address(self); self.pbr = pbr; self.pc = pc; } /// Return from Interrupt fn rti(&mut self) { self.return_from_interrupt() } /// Return from Subroutine (Short - Like JSR) fn rts(&mut self) { let pcl = self.popb() as u16; let pch = self.popb() as u16; let pc = (pch << 8) | pcl; self.pc = pc + 1; // +1 since the last byte of the JSR was saved } /// Return from Subroutine called with `jsl`. /// /// This also restores the PBR. 
fn rtl(&mut self) { let pcl = self.popb() as u16; let pch = self.popb() as u16; let pbr = self.popb(); let pc = (pch << 8) | pcl; self.pbr = pbr; self.pc = pc + 1; // +1 since the last byte of the JSR was saved } fn cli(&mut self) { self.p.set_irq_disable(false) } fn sei(&mut self) { self.p.set_irq_disable(true) } fn clc(&mut self) { self.p.set_carry(false); } fn sec(&mut self) { self.p.set_carry(true); } /// Store 0 to memory fn stz(&mut self, am: AddressingMode) { if self.p.small_acc() { am.storeb(self, 0); } else { am.storew(self, 0); self.cy += CPU_CYCLE; } } /// Load accumulator from memory fn lda(&mut self, am: AddressingMode) { // Changes N and Z if self.p.small_acc() { let val = am.loadb(self); self.a = (self.a & 0xff00) | self.p.set_nz_8(val) as u16; } else { let val = am.loadw(self); self.a = self.p.set_nz(val); self.cy += CPU_CYCLE; } } /// Load X register from memory fn ldx(&mut self, am: AddressingMode) { // Changes N and Z if self.p.small_index() { let val = am.loadb(self); self.x = (self.x & 0xff00) | self.p.set_nz_8(val) as u16; } else { let val = am.loadw(self); self.x = self.p.set_nz(val); self.cy += CPU_CYCLE; } } /// Load Y register from memory fn ldy(&mut self, am: AddressingMode) { // Changes N and Z if self.p.small_index() { let val = am.loadb(self); self.y = (self.y & 0xff00) | self.p.set_nz_8(val) as u16; } else { let val = am.loadw(self); self.y = self.p.set_nz(val); self.cy += CPU_CYCLE; } } /// Store accumulator to memory fn sta(&mut self, am: AddressingMode) { // Changes no flags if self.p.small_acc() { let b = self.a as u8; am.storeb(self, b); } else { let w = self.a; am.storew(self, w); self.cy += CPU_CYCLE; } } fn stx(&mut self, am: AddressingMode) { // Changes no flags if self.p.small_index() { let b = self.x as u8; am.storeb(self, b); } else { let w = self.x; am.storew(self, w); self.cy += CPU_CYCLE; } } fn sty(&mut self, am: AddressingMode) { // Changes no flags if self.p.small_index() { let b = self.y as u8; am.storeb(self, b); } 
else { let w = self.y; am.storew(self, w); self.cy += CPU_CYCLE; } } /// Exchange carry and emulation flags fn xce(&mut self) { let carry = self.p.carry(); let e = self.emulation; self.p.set_carry(e); self.set_emulation(carry); } /// Reset status bits /// /// Clears the bits in the status register that are 1 in the argument (argument is interpreted /// as 8-bit) fn rep(&mut self, am: AddressingMode) { assert!(!self.emulation); let p = self.p.0 & !am.loadb(self); self.set_p(p); } /// Set Processor Status Bits fn sep(&mut self, am: AddressingMode) { assert!(!self.emulation); let p = self.p.0 | am.loadb(self); self.set_p(p); } /// Transfer 16-bit Accumulator to Direct Page Register fn tcd(&mut self) { self.d = self.p.set_nz(self.a); } /// Transfer 16-bit Accumulator to Stack Pointer fn tcs(&mut self) { if self.emulation { // "When in the Emulation mode, a 01 is forced into SH. In this case, the B Accumulator // will not be loaded into SH during a TCS instruction." // S = 16-bit A; B = High byte of S self.s = 0x0100 | (self.a & 0xff); } else { self.s = self.a; } } fn nop(&mut self) {} fn ill(&mut self) {} } /// Addressing mode construction impl Cpu { fn block_move(&mut self) -> AddressingMode { let dest = self.fetchb(); let src = self.fetchb(); AddressingMode::BlockMove(dest, src) } fn direct_indirect(&mut self) -> AddressingMode { AddressingMode::DirectIndirect(self.fetchb()) } fn direct_indirect_long(&mut self) -> AddressingMode { AddressingMode::DirectIndirectLong(self.fetchb()) } fn direct_indirect_long_idx(&mut self) -> AddressingMode { AddressingMode::DirectIndirectLongIdx(self.fetchb()) } fn absolute(&mut self) -> AddressingMode { AddressingMode::Absolute(self.fetchw()) } fn absolute_indexed_x(&mut self) -> AddressingMode { AddressingMode::AbsIndexedX(self.fetchw()) } fn absolute_indexed_y(&mut self) -> AddressingMode { AddressingMode::AbsIndexedY(self.fetchw()) } fn absolute_indexed_indirect(&mut self) -> AddressingMode { 
AddressingMode::AbsIndexedIndirect(self.fetchw()) } fn absolute_long(&mut self) -> AddressingMode { let addr = self.fetchw(); let bank = self.fetchb(); AddressingMode::AbsoluteLong(bank, addr) } fn absolute_long_indexed_x(&mut self) -> AddressingMode { let addr = self.fetchw(); let bank = self.fetchb(); AddressingMode::AbsLongIndexedX(bank, addr) } fn absolute_indirect(&mut self) -> AddressingMode { AddressingMode::AbsoluteIndirect(self.fetchw()) } fn absolute_indirect_long(&mut self) -> AddressingMode { AddressingMode::AbsoluteIndirectLong(self.fetchw()) } fn rel(&mut self) -> AddressingMode { AddressingMode::Rel(self.fetchb() as i8) } fn stack_rel(&mut self) -> AddressingMode { AddressingMode::StackRel(self.fetchb()) } fn direct(&mut self) -> AddressingMode { AddressingMode::Direct(self.fetchb()) } fn direct_indexed_x(&mut self) -> AddressingMode { AddressingMode::DirectIndexedX(self.fetchb()) } fn direct_indexed_y(&mut self) -> AddressingMode { AddressingMode::DirectIndexedY(self.fetchb()) } fn direct_indexed_indirect(&mut self) -> AddressingMode { AddressingMode::DirectIndexedIndirect(self.fetchb()) } fn direct_indirect_indexed(&mut self) -> AddressingMode { AddressingMode::DirectIndirectIndexed(self.fetchb()) } /// Immediate value with accumulator size fn immediate_acc(&mut self) -> AddressingMode { if self.p.small_acc() { AddressingMode::Immediate8(self.fetchb()) } else { self.cy += CPU_CYCLE; AddressingMode::Immediate(self.fetchw()) } } /// Immediate value with index register size fn immediate_index(&mut self) -> AddressingMode { if self.p.small_index() { AddressingMode::Immediate8(self.fetchb()) } else { self.cy += CPU_CYCLE; AddressingMode::Immediate(self.fetchw()) } } /// Immediate value, one byte fn immediate8(&mut self) -> AddressingMode { AddressingMode::Immediate8(self.fetchb()) } } CPU: tsc //! 
//! 65816 emulator

mod addressing;
mod statusreg;

use self::addressing::AddressingMode;
use self::statusreg::StatusReg;
use snes::Peripherals;

/// Rudimentary memory access break points. Stores (bank, address)-tuples that cause a break on
/// read access.
const MEM_BREAK_LOAD: &'static [(u8, u16)] = &[
];
/// Same as `MEM_BREAK_LOAD`, but triggers on writes.
const MEM_BREAK_STORE: &'static [(u8, u16)] = &[
];

// Emulation mode vectors
const IRQ_VEC8: u16 = 0xFFFE;
const RESET_VEC8: u16 = 0xFFFC;
const NMI_VEC8: u16 = 0xFFFA;
#[allow(dead_code)]
const ABORT_VEC8: u16 = 0xFFF8;
#[allow(dead_code)]
const COP_VEC8: u16 = 0xFFF4;

// Native mode vectors
const IRQ_VEC16: u16 = 0xFFEE;
const NMI_VEC16: u16 = 0xFFEA;
#[allow(dead_code)]
const ABORT_VEC16: u16 = 0xFFE8;
#[allow(dead_code)]
const BRK_VEC16: u16 = 0xFFE6;
#[allow(dead_code)]
const COP_VEC16: u16 = 0xFFE4;

/// One CPU cycle = 6 master clock cycles
pub const CPU_CYCLE: u16 = 6;

pub struct Cpu {
    // Accumulator and index registers (always stored 16-bit wide; the high
    // bytes are masked/preserved depending on the M and X status flags)
    a: u16,
    x: u16,
    y: u16,
    /// Stack pointer
    s: u16,
    /// Data bank register. Bank for all memory accesses.
    dbr: u8,
    /// Program bank register. Opcodes are fetched from this bank.
    pbr: u8,
    /// Direct (page) register. Address offset for all instruction using "direct addressing" mode.
    d: u16,
    /// Program counter. Note that PBR is not changed by the CPU, so code can not span multiple
    /// banks (without manual bank switching).
    pc: u16,
    p: StatusReg,
    emulation: bool,

    /// Master clock cycle counter for the current instruction.
    cy: u16,

    pub trace: bool,
    pub mem: Peripherals,
}

impl Cpu {
    /// Creates a new CPU and executes a reset. This will fetch the RESET vector from memory and
    /// put the CPU in emulation mode.
    pub fn new(mut mem: Peripherals) -> Cpu {
        let pcl = mem.load(0, RESET_VEC8) as u16;
        let pch = mem.load(0, RESET_VEC8 + 1) as u16;
        let pc = (pch << 8) | pcl;
        debug!("RESET @ {:02X}", pc);

        Cpu {
            // Undefined according to datasheet
            a: 0,
            x: 0,
            y: 0,
            // High byte set to 1 since we're now in emulation mode
            s: 0x0100,
            // Initialized to 0
            dbr: 0,
            d: 0,
            pbr: 0,
            // Read from RESET vector above
            pc: pc,
            // Acc and index regs start in 8-bit mode, IRQs disabled, CPU in emulation mode
            p: StatusReg::new(),
            emulation: true,
            cy: 0,
            trace: false,
            mem: mem,
        }
    }

    /// Adds the time needed to access the given memory location to the cycle counter.
    ///
    /// Memory speed depends on the bank/address region (FastROM vs. SlowROM vs. the
    /// extra-slow $4000-$41ff I/O region).
    fn do_io_cycle(&mut self, bank: u8, addr: u16) {
        const FAST: u16 = 0;
        const SLOW: u16 = 2;
        const XSLOW: u16 = 6;

        self.cy += match bank {
            0x00 ... 0x3f => match addr {
                0x0000 ... 0x1fff | 0x6000 ... 0xffff => SLOW,
                0x4000 ... 0x41ff => XSLOW,
                _ => FAST,
            },
            0x40 ... 0x7f => SLOW,
            0x80 ... 0xbf => match addr {
                0x0000 ... 0x1fff | 0x6000 ... 0x7fff => SLOW,
                0x4000 ... 0x41ff => XSLOW,
                // FIXME Depends on bit 1 in $420d. Assume slow for now.
                0x8000 ... 0xffff => SLOW,
                _ => FAST
            },
            // FIXME Depends on bit 1 in $420d. Assume slow for now.
            0xc0 ... 0xff => SLOW,
            _ => FAST,
        }
    }

    /// Load a byte from memory. Will change the cycle counter according to the memory speed.
    fn loadb(&mut self, bank: u8, addr: u16) -> u8 {
        if MEM_BREAK_LOAD.iter().find(|&&(b, a)| bank == b && addr == a).is_some() {
            debug!("MEM-BREAK: Breakpoint triggered on load from ${:02X}:{:04X} (${:02X})",
                bank, addr, self.mem.load(bank, addr))
        }
        self.do_io_cycle(bank, addr);
        self.mem.load(bank, addr)
    }

    /// Load a little-endian 16-bit word (two `loadb` accesses).
    fn loadw(&mut self, bank: u8, addr: u16) -> u16 {
        assert!(addr < 0xffff, "loadw on bank boundary");
        // ^ if this should be supported, make sure to fix the potential overflow below

        let lo = self.loadb(bank, addr) as u16;
        let hi = self.loadb(bank, addr + 1) as u16;
        (hi << 8) | lo
    }

    /// Store a byte to memory. Will change the cycle counter according to the memory speed.
    fn storeb(&mut self, bank: u8, addr: u16, value: u8) {
        if MEM_BREAK_STORE.iter().find(|&&(b, a)| bank == b && addr == a).is_some() {
            debug!("MEM-BREAK: Breakpoint triggered on store of ${:02X} to ${:02X}:{:04X}",
                value, bank, addr)
        }
        self.do_io_cycle(bank, addr);
        self.mem.store(bank, addr, value)
    }

    /// Store a little-endian 16-bit word (low byte first).
    fn storew(&mut self, bank: u8, addr: u16, value: u16) {
        assert!(addr < 0xffff, "storew on bank boundary");
        // ^ if this should be supported, make sure to fix the potential overflow below

        self.storeb(bank, addr, value as u8);
        self.storeb(bank, addr + 1, (value >> 8) as u8);
    }

    /// Fetches the byte PC points at, then increments PC
    fn fetchb(&mut self) -> u8 {
        let (pbr, pc) = (self.pbr, self.pc);
        let b = self.loadb(pbr, pc);
        self.pc += 1;
        b
    }

    /// Fetches a 16-bit word (little-endian) located at PC, by fetching 2 individual bytes
    fn fetchw(&mut self) -> u16 {
        let low = self.fetchb() as u16;
        let high = self.fetchb() as u16;
        (high << 8) | low
    }

    /// Pushes a byte onto the stack and decrements the stack pointer
    ///
    /// The stack always lives in bank 0; in emulation mode only the low byte of S wraps,
    /// keeping the stack pinned to page $01xx.
    fn pushb(&mut self, value: u8) {
        let s = self.s;
        self.storeb(0, s, value);
        if self.emulation {
            // stack must stay in 0x01xx
            assert_eq!(self.s & 0xff00, 0x0100);
            let s = self.s as u8 - 1;
            self.s = (self.s & 0xff00) | s as u16;
        } else {
            self.s -= 1;
        }
    }

    /// Pushes a 16-bit word in standard 65x order: high byte first (ends up at the
    /// higher address).
    fn pushw(&mut self, value: u16) {
        let hi = (value >> 8) as u8;
        let lo = value as u8;
        self.pushb(hi);
        self.pushb(lo);
    }

    /// Increments the stack pointer and pops a byte off the stack
    fn popb(&mut self) -> u8 {
        if self.emulation {
            // stack must stay in 0x01xx
            assert_eq!(self.s & 0xff00, 0x0100);
            let s = self.s as u8 + 1;
            self.s = (self.s & 0xff00) | s as u16;
        } else {
            self.s += 1;
        }

        let s = self.s;
        self.loadb(0, s)
    }

    /// Pops a 16-bit word (low byte first, mirroring `pushw`)
    fn popw(&mut self) -> u16 {
        let lo = self.popb() as u16;
        let hi = self.popb() as u16;
        (hi << 8) | lo
    }

    /// Enters/exits emulation mode
    fn set_emulation(&mut self, value: bool) {
        if !self.emulation && value {
            // Enter emulation mode

            // Set high byte of stack ptr to 0x01 and set M/X bits to make A,X and Y 8-bit
            self.s = 0x0100 | (self.s & 0xff);
            self.p.set_small_acc(true);
            self.p.set_small_index(true);
            // "If the Index Select Bit (X) equals one, both registers will be 8 bits wide, and the
            // high byte is forced to zero"
            self.x &= 0xff;
            self.y &= 0xff;
        }
        self.emulation = value;
    }

    /// Logs one executed opcode (with registers) when tracing is enabled.
    fn trace_op(&self, pc: u16, raw: u8, op: &str, am: Option<&AddressingMode>) {
        use log::LogLevel::Trace;
        if !log_enabled!(Trace) || !self.trace { return }

        let opstr = match am {
            Some(am) => format!("{} {}", op, am),
            None => format!("{}", op),
        };
        trace!("${:02X}:{:04X} {:02X} {:14} a:{:04X} x:{:04X} y:{:04X} s:{:04X} d:{:04X} dbr:{:02X} emu:{} {}",
            self.pbr,
            pc,
            raw,
            opstr,
            self.a,
            self.x,
            self.y,
            self.s,
            self.d,
            self.dbr,
            self.emulation as u8,
            self.p,
        );
    }

    /// Executes a single opcode and returns the number of master clock cycles used.
    pub fn dispatch(&mut self) -> u16 {
        // CPU cycles each opcode takes (at the minimum).
static CYCLE_TABLE: [u8; 256] = [
            // Base CPU cycles per opcode; variable costs (16-bit operands, memory speed)
            // are added on top by the opcode implementations and `do_io_cycle`.
            7,6,7,4,5,3,5,6, 3,2,2,4,6,4,6,5,   // $00 - $0f
            2,5,5,7,5,4,6,6, 2,4,2,2,6,4,7,5,   // $10 - $1f
            6,6,8,4,3,3,5,6, 4,2,2,5,4,4,6,5,   // $20 - $2f
            2,5,5,7,4,4,6,6, 2,4,2,2,4,4,7,5,   // $30 - $3f
            7,6,2,4,7,3,5,6, 3,2,2,3,3,4,6,5,   // $40 - $4f
            2,5,5,7,7,4,6,6, 2,4,3,2,4,4,7,5,   // $50 - $5f
            7,6,6,4,3,3,5,6, 4,2,2,6,5,4,6,5,   // $60 - $6f
            2,5,5,7,4,4,6,6, 2,4,4,2,6,2,7,5,   // $70 - $7f
            2,6,3,4,3,3,3,2, 2,2,2,3,4,4,4,5,   // $80 - $8f
            2,6,5,7,4,4,4,6, 2,5,2,2,3,5,5,5,   // $90 - $9f
            2,6,2,4,3,3,3,6, 2,2,2,4,4,4,4,5,   // $a0 - $af
            2,5,5,7,4,4,4,6, 2,4,2,2,4,4,4,5,   // $b0 - $bf
            2,6,3,4,3,3,5,6, 2,2,2,3,4,4,6,5,   // $c0 - $cf
            2,5,5,7,6,4,6,6, 2,4,3,3,6,4,7,5,   // $d0 - $df
            2,6,3,4,3,3,5,6, 2,2,2,3,4,4,6,5,   // $e0 - $ef
            2,5,5,7,5,4,6,6, 2,4,4,2,6,4,7,5,   // $f0 - $ff
        ];

        let pc = self.pc;
        self.cy = 0;

        let op = self.fetchb();
        self.cy += CYCLE_TABLE[op as usize] as u16 * CPU_CYCLE + 4;
        // FIXME: The +4 is a timing correction. I'm not sure what causes the inaccuracy, but I
        // suspect the addressing mode / memory access timing is a bit off.

        macro_rules!
instr {
            // Opcode without operand: trace it, then invoke the implementation.
            ( $name:ident ) => {{
                self.trace_op(pc, op, stringify!($name), None);
                self.$name()
            }};
            // Opcode with an addressing mode: build the mode (fetching operand bytes),
            // trace, then invoke the implementation with it.
            ( $name:ident $am:ident ) => {{
                let am = self.$am();
                self.trace_op(pc, op, stringify!($name), Some(&am));
                self.$name(am)
            }};
        }

        match op {
            // Stack operations
            0x4b => instr!(phk),
            0x0b => instr!(phd),
            0x2b => instr!(pld),
            0x8b => instr!(phb),
            0xab => instr!(plb),
            0x08 => instr!(php),
            0x28 => instr!(plp),
            0x48 => instr!(pha),
            0x68 => instr!(pla),
            0xda => instr!(phx),
            0xfa => instr!(plx),
            0x5a => instr!(phy),
            0x7a => instr!(ply),

            // Processor status
            0x18 => instr!(clc),
            0x38 => instr!(sec),
            0x58 => instr!(cli),
            0x78 => instr!(sei),
            0xfb => instr!(xce),
            0xc2 => instr!(rep immediate8),
            0xe2 => instr!(sep immediate8),

            // Arithmetic
            0x0a => instr!(asl_a),
            0x06 => instr!(asl direct),
            0x16 => instr!(asl direct_indexed_x),
            0x0e => instr!(asl absolute),
            0x2a => instr!(rol_a),
            0x26 => instr!(rol direct),
            0x2e => instr!(rol absolute),
            0x3e => instr!(rol absolute_indexed_x),
            0x36 => instr!(rol direct_indexed_x),
            0x4a => instr!(lsr_a),
            0x46 => instr!(lsr direct),
            0x6a => instr!(ror_a),
            0x7e => instr!(ror absolute_indexed_x),
            0x25 => instr!(and direct),
            0x21 => instr!(and direct_indexed_indirect),
            0x29 => instr!(and immediate_acc),
            0x2d => instr!(and absolute),
            0x3d => instr!(and absolute_indexed_x),
            0x39 => instr!(and absolute_indexed_y),
            0x2f => instr!(and absolute_long),
            0x3f => instr!(and absolute_long_indexed_x),
            0x03 => instr!(ora stack_rel),
            0x05 => instr!(ora direct),
            0x15 => instr!(ora direct_indexed_x),
            0x09 => instr!(ora immediate_acc),
            0x12 => instr!(ora direct_indirect),
            0x07 => instr!(ora direct_indirect_long),
            0x0d => instr!(ora absolute),
            0x1d => instr!(ora absolute_indexed_x),
            0x19 => instr!(ora absolute_indexed_y),
            0x0f => instr!(ora absolute_long),
            0x45 => instr!(eor direct),
            0x55 => instr!(eor direct_indexed_x),
            0x49 => instr!(eor immediate_acc),
            0x4d => instr!(eor absolute),
            0x5d => instr!(eor absolute_indexed_x),
            0x59 => instr!(eor absolute_indexed_y),
            0x4f =>
instr!(eor absolute_long), 0x5f => instr!(eor absolute_long_indexed_x), 0x65 => instr!(adc direct), 0x75 => instr!(adc direct_indexed_x), 0x69 => instr!(adc immediate_acc), 0x6d => instr!(adc absolute), 0x7d => instr!(adc absolute_indexed_x), 0x79 => instr!(adc absolute_indexed_y), 0x6f => instr!(adc absolute_long), 0x7f => instr!(adc absolute_long_indexed_x), 0x71 => instr!(adc direct_indirect), 0xe5 => instr!(sbc direct), 0xf5 => instr!(sbc direct_indexed_x), 0xe9 => instr!(sbc immediate_acc), 0xed => instr!(sbc absolute), 0xf9 => instr!(sbc absolute_indexed_y), 0xfd => instr!(sbc absolute_indexed_x), 0xef => instr!(sbc absolute_long), 0xff => instr!(sbc absolute_long_indexed_x), 0xe6 => instr!(inc direct), 0xf6 => instr!(inc direct_indexed_x), 0xfe => instr!(inc absolute_indexed_x), 0xee => instr!(inc absolute), 0x1a => instr!(ina), 0xe8 => instr!(inx), 0xc8 => instr!(iny), 0x3a => instr!(dea), 0xc6 => instr!(dec direct), 0xd6 => instr!(dec direct_indexed_x), 0xce => instr!(dec absolute), 0xde => instr!(dec absolute_indexed_x), 0xca => instr!(dex), 0x88 => instr!(dey), // Register and memory transfers 0x5b => instr!(tcd), 0x1b => instr!(tcs), 0x3b => instr!(tsc), 0xaa => instr!(tax), 0xa8 => instr!(tay), 0x8a => instr!(txa), 0x9b => instr!(txy), 0x98 => instr!(tya), 0xbb => instr!(tyx), 0xeb => instr!(xba), 0x85 => instr!(sta direct), 0x95 => instr!(sta direct_indexed_x), 0x92 => instr!(sta direct_indirect), 0x87 => instr!(sta direct_indirect_long), 0x97 => instr!(sta direct_indirect_long_idx), 0x8d => instr!(sta absolute), 0x8f => instr!(sta absolute_long), 0x9d => instr!(sta absolute_indexed_x), 0x99 => instr!(sta absolute_indexed_y), 0x9f => instr!(sta absolute_long_indexed_x), 0x86 => instr!(stx direct), 0x96 => instr!(stx direct_indexed_y), 0x8e => instr!(stx absolute), 0x84 => instr!(sty direct), 0x94 => instr!(sty direct_indexed_y), 0x8c => instr!(sty absolute), 0x64 => instr!(stz direct), 0x9c => instr!(stz absolute), 0x74 => instr!(stz 
direct_indexed_x),
            0x9e => instr!(stz absolute_indexed_x),
            0xa5 => instr!(lda direct),
            0xb5 => instr!(lda direct_indexed_x),
            0xb1 => instr!(lda direct_indirect_indexed),
            0xa9 => instr!(lda immediate_acc),
            0xb2 => instr!(lda direct_indirect),
            0xa7 => instr!(lda direct_indirect_long),
            0xb7 => instr!(lda direct_indirect_long_idx),
            0xad => instr!(lda absolute),
            0xbd => instr!(lda absolute_indexed_x),
            0xb9 => instr!(lda absolute_indexed_y),
            0xaf => instr!(lda absolute_long),
            0xbf => instr!(lda absolute_long_indexed_x),
            0xa6 => instr!(ldx direct),
            0xb6 => instr!(ldx direct_indexed_y),
            0xa2 => instr!(ldx immediate_index),
            0xae => instr!(ldx absolute),
            0xbe => instr!(ldx absolute_indexed_y),
            0xa4 => instr!(ldy direct),
            0xb4 => instr!(ldy direct_indexed_x),
            0xa0 => instr!(ldy immediate_index),
            0xac => instr!(ldy absolute),
            0xbc => instr!(ldy absolute_indexed_x),
            0x54 => instr!(mvn block_move),
            0x44 => instr!(mvp block_move),

            // Bit operations
            0x24 => instr!(bit direct),
            0x2c => instr!(bit absolute),
            0x34 => instr!(bit direct_indexed_x),
            0x3c => instr!(bit absolute_indexed_x),
            0x89 => instr!(bit immediate_acc),
            0x04 => instr!(tsb direct),
            0x0c => instr!(tsb absolute),
            0x14 => instr!(trb direct),
            0x1c => instr!(trb absolute),

            // Comparisons
            0xc9 => instr!(cmp immediate_acc),
            0xc5 => instr!(cmp direct),
            0xd5 => instr!(cmp direct_indexed_x),
            0xcd => instr!(cmp absolute),
            0xdd => instr!(cmp absolute_indexed_x),
            0xd9 => instr!(cmp absolute_indexed_y),
            0xcf => instr!(cmp absolute_long),
            0xdf => instr!(cmp absolute_long_indexed_x),
            0xd7 => instr!(cmp direct_indirect_long_idx),
            0xe0 => instr!(cpx immediate_index),
            0xe4 => instr!(cpx direct),
            0xec => instr!(cpx absolute),
            0xc0 => instr!(cpy immediate_index),
            0xc4 => instr!(cpy direct),
            0xcc => instr!(cpy absolute),

            // Branches
            0x80 => instr!(bra rel),
            0xf0 => instr!(beq rel),
            0xd0 => instr!(bne rel),
            0x10 => instr!(bpl rel),
            0x30 => instr!(bmi rel),
            0x50 => instr!(bvc rel),
            0x70 => instr!(bvs rel),
            0x90 => instr!(bcc rel),
            0xb0 =>
instr!(bcs rel),

            // Jumps, calls and returns
            0x4c => instr!(jmp absolute),
            0x5c => instr!(jml absolute_long),
            0x6c => instr!(jmp absolute_indirect),
            0x7c => instr!(jmp absolute_indexed_indirect),
            0xdc => instr!(jml absolute_indirect_long),
            0x20 => instr!(jsr absolute),
            0x22 => instr!(jsl absolute_long),
            0x40 => instr!(rti),
            0x60 => instr!(rts),
            0x6b => instr!(rtl),

            0xea => instr!(nop),
            _ => {
                // Trace the illegal opcode before aborting emulation.
                instr!(ill);
                panic!("illegal CPU opcode: ${:02X}", op);
            }
        }

        self.cy
    }

    /// Immediately executes an IRQ sequence and jumps to the NMI handler.
    pub fn trigger_nmi(&mut self) {
        if self.emulation {
            self.interrupt(NMI_VEC8);
        } else {
            self.interrupt(NMI_VEC16);
        }
    }

    /// Executes an IRQ sequence using the IRQ vector for the current mode.
    pub fn trigger_irq(&mut self) {
        if self.emulation {
            self.interrupt(IRQ_VEC8);
        } else {
            self.interrupt(IRQ_VEC16);
        }
    }

    /// Execute an IRQ sequence. This pushes PBR, PC and the processor status register P on the
    /// stack, sets the PBR to 0, loads the handler address from the given vector, and jumps to the
    /// handler.
    ///
    /// NOTE(review): real hardware also sets the I flag (and clears D) after pushing P;
    /// that is not done here — confirm and fix if interrupt nesting misbehaves.
    fn interrupt(&mut self, vector: u16) {
        if !self.emulation {
            // PBR is only saved (and zeroed) in native mode
            let pbr = self.pbr;
            self.pushb(pbr);
            self.pbr = 0;
        }
        let pc = self.pc;
        self.pushw(pc);
        let p = self.p.0;
        self.pushb(p);
        let handler = self.loadw(0, vector);
        self.pc = handler;
    }

    /// Undoes `interrupt`: pops P, PC and (in native mode) PBR.
    ///
    /// NOTE(review): P is restored via `self.p.0` directly, bypassing `set_p`'s
    /// index-register truncation — confirm this is intended.
    fn return_from_interrupt(&mut self) {
        let p = self.popb();
        self.p.0 = p;
        let pc = self.popw();
        self.pc = pc;
        if !self.emulation {
            let pbr = self.popb();
            self.pbr = pbr;
        }
    }

    /// Common method for all comparison opcodes. Compares `a` to `b` by effectively computing
    /// `a-b`. This method only works correctly for 16-bit values.
    ///
    /// The Z flag is set if both numbers are equal.
    /// The C flag will be set to `a >= b`.
    /// The N flag is set to the most significant bit of `a-b`.
fn compare(&mut self, a: u16, b: u16) {
        self.p.set_zero(a == b);
        self.p.set_carry(a >= b);
        self.p.set_negative(a.wrapping_sub(b) & 0x8000 != 0);
    }

    /// Does the exact same thing as `compare`, but for 8-bit operands
    fn compare8(&mut self, a: u8, b: u8) {
        self.p.set_zero(a == b);
        self.p.set_carry(a >= b);
        self.p.set_negative(a.wrapping_sub(b) & 0x80 != 0);
    }

    /// Branch to an absolute address. This will overwrite the current program bank.
    fn branch(&mut self, target: (u8, u16)) {
        self.pbr = target.0;
        self.pc = target.1;
    }

    /// Changes the status register.
    ///
    /// When the X flag transitions from 0 to 1 (index registers become 8-bit),
    /// the high bytes of X and Y are forced to zero.
    fn set_p(&mut self, new: u8) {
        let small_idx = self.p.small_index();
        self.p.0 = new;
        if !small_idx && self.p.small_index() {
            // "If the Index Select Bit (X) equals one, both registers will be 8 bits wide, and the
            // high byte is forced to zero"
            self.x &= 0xff;
            self.y &= 0xff;
        }
    }
}

/// Opcode implementations
impl Cpu {
    /// Move Next (incrementing address). Copies C+1 (16-bit A) bytes from the address in X to the
    /// address in Y.
    ///
    /// NOTE(review): hardware MVN also sets DBR to the destination bank and charges 7 cycles
    /// per byte moved; neither is done here — confirm against a reference.
    fn mvn(&mut self, am: AddressingMode) {
        if let AddressingMode::BlockMove(destbank, srcbank) = am {
            // A == 0xffff terminates: the instruction moves A+1 bytes
            while self.a != 0xffff {
                let (x, y) = (self.x, self.y);
                let val = self.loadb(srcbank, x);
                self.storeb(destbank, y, val);
                self.x = self.x.wrapping_add(1);
                self.y = self.y.wrapping_add(1);
                self.a = self.a.wrapping_sub(1);
            }
        } else {
            panic!("MVN with invalid addressing mode");
        }
    }

    /// Move Previous (decrementing address)
    ///
    /// NOTE(review): same DBR/cycle-count caveats as `mvn` above.
    fn mvp(&mut self, am: AddressingMode) {
        if let AddressingMode::BlockMove(destbank, srcbank) = am {
            while self.a != 0xffff {
                let (x, y) = (self.x, self.y);
                let val = self.loadb(srcbank, x);
                self.storeb(destbank, y, val);
                self.x = self.x.wrapping_sub(1);
                self.y = self.y.wrapping_sub(1);
                self.a = self.a.wrapping_sub(1);
            }
        } else {
            panic!("MVP with invalid addressing mode");
        }
    }

    /// Push Program Bank Register
    fn phk(&mut self) {
        let pbr = self.pbr;
        self.pushb(pbr);
    }

    /// Push Direct Page Register
    fn phd(&mut self) {
        let d = self.d;
        self.pushw(d);
    }

    /// Pull Direct Page Register
    fn
pld(&mut self) {
        // NOTE(review): on hardware PLD sets N and Z from the pulled value; not done here —
        // confirm.
        let d = self.popw();
        self.d = d;
    }

    /// Push Data Bank Register
    fn phb(&mut self) {
        let dbr = self.dbr;
        self.pushb(dbr);
    }

    /// Pop Data Bank Register
    fn plb(&mut self) {
        // NOTE(review): PLB also sets N and Z on hardware — confirm.
        let dbr = self.popb();
        self.dbr = dbr;
    }

    /// Push Processor Status Register
    fn php(&mut self) {
        // Changes no flags
        let p = self.p.0;
        self.pushb(p);
    }

    /// Pull Processor Status Register
    ///
    /// Goes through `set_p`, so switching the index registers to 8-bit truncates X/Y.
    fn plp(&mut self) {
        let p = self.popb();
        self.set_p(p);
    }

    /// Push A on the stack
    fn pha(&mut self) {
        // No flags modified
        if self.p.small_acc() {
            let a = self.a as u8;
            self.pushb(a);
        } else {
            let a = self.a;
            self.pushw(a);
            self.cy += CPU_CYCLE;
        }
    }

    /// Pull Accumulator from stack
    fn pla(&mut self) {
        // Changes N and Z
        if self.p.small_acc() {
            let a = self.popb();
            self.a = (self.a & 0xff00) | self.p.set_nz_8(a) as u16;
        } else {
            let a = self.popw();
            self.a = self.p.set_nz(a);
            self.cy += CPU_CYCLE;
        }
    }

    /// Push Index Register X
    fn phx(&mut self) {
        if self.p.small_index() {
            let val = self.x as u8;
            self.pushb(val);
        } else {
            let val = self.x;
            self.pushw(val);
            self.cy += CPU_CYCLE;
        }
    }

    /// Pop Index Register X
    fn plx(&mut self) {
        // Changes N and Z
        // In 8-bit index mode the high byte of X is already zero, so overwriting the whole
        // register is equivalent to replacing only the low byte.
        if self.p.small_index() {
            let val = self.popb();
            self.x = self.p.set_nz_8(val) as u16;
        } else {
            let val = self.popw();
            self.x = self.p.set_nz(val);
            self.cy += CPU_CYCLE;
        }
    }

    /// Push Index Register Y
    fn phy(&mut self) {
        if self.p.small_index() {
            let val = self.y as u8;
            self.pushb(val);
        } else {
            let val = self.y;
            self.pushw(val);
            self.cy += CPU_CYCLE;
        }
    }

    /// Pop Index Register Y
    fn ply(&mut self) {
        // Changes N and Z
        if self.p.small_index() {
            let val = self.popb();
            self.y = self.p.set_nz_8(val) as u16;
        } else {
            let val = self.popw();
            self.y = self.p.set_nz(val);
            self.cy += CPU_CYCLE;
        }
    }

    /// AND Accumulator with Memory (or immediate)
    fn and(&mut self, am: AddressingMode) {
        // Sets N and Z
        if self.p.small_acc() {
            let val = am.loadb(self);
            let res = self.a as u8 & val;
            self.p.set_nz_8(res);
            self.a = (self.a & 0xff00) | res as u16;
        } else {
            let val = am.loadw(self);
            let res = self.a & val;
            self.a = self.p.set_nz(res);
            self.cy += CPU_CYCLE;
        }
    }

    /// OR Accumulator with Memory
    fn ora(&mut self, am: AddressingMode) {
        // Sets N and Z
        if self.p.small_acc() {
            let val = am.loadb(self);
            let res = self.a as u8 | val;
            self.p.set_nz_8(res);
            self.a = (self.a & 0xff00) | res as u16;
        } else {
            let val = am.loadw(self);
            let res = self.a | val;
            self.a = self.p.set_nz(res);
            self.cy += CPU_CYCLE;
        }
    }

    /// Exclusive Or Accumulator with Memory
    fn eor(&mut self, am: AddressingMode) {
        // Sets N and Z
        if self.p.small_acc() {
            let val = am.loadb(self);
            let res = self.a as u8 ^ val;
            self.p.set_nz_8(res);
            self.a = (self.a & 0xff00) | res as u16;
        } else {
            let val = am.loadw(self);
            let res = self.a ^ val;
            self.a = self.p.set_nz(res);
            self.cy += CPU_CYCLE;
        }
    }

    /// Add With Carry
    ///
    /// NOTE(review): the D (decimal/BCD) flag is never consulted — BCD addition is
    /// unimplemented.
    fn adc(&mut self, am: AddressingMode) {
        // Sets N, V, C and Z
        // FIXME is this correct? double-check this!
        let c = if self.p.carry() { 1 } else { 0 };
        if self.p.small_acc() {
            let a = self.a as u8;
            let val = am.loadb(self);
            let res = a as u16 + val as u16 + c;
            self.p.set_carry(res > 255);
            let res = res as u8;
            // V: both operands have the same sign, but the result's sign differs
            self.p.set_overflow((a ^ val) & 0x80 == 0 && (a ^ res) & 0x80 == 0x80);
            self.a = (self.a & 0xff00) | self.p.set_nz_8(res) as u16;
        } else {
            let val = am.loadw(self);
            let res = self.a as u32 + val as u32 + c as u32;
            self.p.set_carry(res > 65535);
            let res = res as u16;
            self.p.set_overflow((self.a ^ val) & 0x8000 == 0 && (self.a ^ res) & 0x8000 == 0x8000);
            self.a = self.p.set_nz(res);
            self.cy += CPU_CYCLE;
        }
    }

    /// Subtract with Borrow from Accumulator
    fn sbc(&mut self, am: AddressingMode) {
        // Changes N, Z, C and V
        // FIXME Set V flag!
let c = if self.p.carry() { 0 } else { 1 };
        if self.p.small_acc() {
            let a = self.a as u8;
            let v = am.loadb(self);
            let res = a as i16 - v as i16 - c;
            // C works like in ADC/CMP: it is SET when no borrow was needed (res >= 0) and
            // cleared on underflow. (The original code had the polarity inverted.)
            self.p.set_carry(res >= 0);
            let res = res as u8;
            // V: the operands have opposite signs and the result's sign differs from A's
            // (signed overflow of A - M - !C). This resolves the old "FIXME Set V flag!".
            self.p.set_overflow((a ^ v) & 0x80 != 0 && (a ^ res) & 0x80 != 0);
            self.a = (self.a & 0xff00) | self.p.set_nz_8(res) as u16;
        } else {
            let a = self.a;
            let v = am.loadw(self);
            let res = a as i32 - v as i32 - c as i32;
            self.p.set_carry(res >= 0);
            let res = res as u16;
            self.p.set_overflow((a ^ v) & 0x8000 != 0 && (a ^ res) & 0x8000 != 0);
            self.a = self.p.set_nz(res);
            self.cy += CPU_CYCLE;
        }
    }

    /// Shift accumulator left by 1 bit
    fn asl_a(&mut self) {
        // Sets N, Z and C. The rightmost bit is filled with 0.
        if self.p.small_acc() {
            let a = self.a as u8;
            self.p.set_carry(self.a & 0x80 != 0);
            self.a = (self.a & 0xff00) | self.p.set_nz_8(a << 1) as u16;
        } else {
            self.p.set_carry(self.a & 0x8000 != 0);
            self.a = self.p.set_nz(self.a << 1);
        }
    }

    /// Arithmetic left-shift: Shift a memory location left by 1 bit (Read-Modify-Write)
    fn asl(&mut self, am: AddressingMode) {
        // Sets N, Z and C. The rightmost bit is filled with 0.
        let (bank, addr) = am.address(self);
        if self.p.small_acc() {
            let val = self.loadb(bank, addr);
            self.p.set_carry(val & 0x80 != 0);
            let res = self.p.set_nz_8(val << 1);
            self.storeb(bank, addr, res);
        } else {
            let val = self.loadw(bank, addr);
            self.p.set_carry(val & 0x8000 != 0);
            let res = self.p.set_nz(val << 1);
            self.storew(bank, addr, res);
            // Two extra cycles for the wider read-modify-write
            self.cy += 2 * CPU_CYCLE;
        }
    }

    /// Rotate Accumulator Left
    fn rol_a(&mut self) {
        // Sets N, Z, and C. C is used to fill the rightmost bit.
        // NOTE(review): the extra cycle charged in 16-bit mode is inconsistent with `asl_a`
        // and `lsr_a`, which charge none — confirm which is intended.
        let c: u8 = if self.p.carry() { 1 } else { 0 };
        if self.p.small_acc() {
            let a = self.a as u8;
            self.p.set_carry(self.a & 0x80 != 0);
            let res = (a << 1) | c;
            self.a = (self.a & 0xff00) | self.p.set_nz_8(res) as u16;
        } else {
            self.p.set_carry(self.a & 0x8000 != 0);
            let res = (self.a << 1) | c as u16;
            self.a = self.p.set_nz(res);
            self.cy += CPU_CYCLE;
        }
    }

    /// Rotate Memory Left (Read-Modify-Write)
    fn rol(&mut self, am: AddressingMode) {
        // Sets N, Z, and C. C is used to fill the rightmost bit.
        let c: u8 = if self.p.carry() { 1 } else { 0 };
        if self.p.small_acc() {
            let a = am.clone().loadb(self);
            self.p.set_carry(a & 0x80 != 0);
            let res = self.p.set_nz_8((a << 1) | c);
            am.storeb(self, res);
        } else {
            let a = am.clone().loadw(self);
            self.p.set_carry(a & 0x8000 != 0);
            let res = self.p.set_nz((a << 1) | c as u16);
            am.storew(self, res);
            // Two extra cycles for the wider read-modify-write, matching every other
            // memory RMW op (resolves the old "FIXME times 2?").
            self.cy += 2 * CPU_CYCLE;
        }
    }

    /// Logical Shift Accumulator Right
    fn lsr_a(&mut self) {
        // Sets N (always cleared), Z and C. The leftmost bit is filled with 0.
        if self.p.small_acc() {
            let a = self.a as u8;
            self.p.set_carry(self.a & 0x01 != 0);
            self.a = (self.a & 0xff00) | self.p.set_nz_8(a >> 1) as u16;
        } else {
            self.p.set_carry(self.a & 0x0001 != 0);
            self.a = self.p.set_nz(self.a >> 1);
        }
    }

    /// Logical Shift Right (Read-Modify-Write)
    fn lsr(&mut self, am: AddressingMode) {
        // Sets N (always cleared), Z and C. The leftmost bit is filled with 0.
        if self.p.small_acc() {
            let a = am.clone().loadb(self);
            self.p.set_carry(a & 0x01 != 0);
            let res = self.p.set_nz_8(a >> 1);
            am.storeb(self, res);
        } else {
            let a = am.clone().loadw(self);
            self.p.set_carry(a & 0x0001 != 0);
            let res = self.p.set_nz(a >> 1);
            am.storew(self, res);
            // Previously missing: the 16-bit RMW penalty every sibling op (asl/rol/ror/
            // tsb/trb) charges.
            self.cy += 2 * CPU_CYCLE;
        }
    }

    /// Rotate accumulator right
    fn ror_a(&mut self) {
        // Sets N, Z, and C. C is used to fill the leftmost bit.
        // NOTE(review): the 2-cycle charge in 16-bit mode is inconsistent with the other
        // accumulator shifts — confirm.
        let c: u8 = if self.p.carry() { 1 } else { 0 };
        if self.p.small_acc() {
            let val = self.a as u8;
            // ROR shifts right: bit 0 falls out into the carry (this was wrongly taken
            // from bit 7 before)
            self.p.set_carry(val & 0x01 != 0);
            let res = self.p.set_nz_8((val >> 1) | (c << 7));
            self.a = (self.a & 0xff00) | res as u16;
        } else {
            let val = self.a;
            self.p.set_carry(val & 0x0001 != 0);
            let res = self.p.set_nz((val >> 1) | ((c as u16) << 15));
            self.a = res;
            self.cy += 2 * CPU_CYCLE;
        }
    }

    /// Rotate Memory Right
    fn ror(&mut self, am: AddressingMode) {
        // Sets N, Z, and C. Memory width can be changed. C is used to fill the leftmost bit.
// The `AddressingMode` is used for both loading and storing the value (Read-Modify-Write // instruction) let c: u8 = if self.p.carry() { 1 } else { 0 }; let (bank, addr) = am.address(self); if self.p.small_acc() { let val = self.loadb(bank, addr); self.p.set_carry(val & 0x80 != 0); let res = self.p.set_nz_8((val >> 1) | (c << 7)); self.storeb(bank, addr, res); } else { let val = self.loadw(bank, addr); self.p.set_carry(val & 0x8000 != 0); let res = self.p.set_nz((val >> 1) | ((c as u16) << 15)); self.storew(bank, addr, res); self.cy += 2 * CPU_CYCLE; } } /// Exchange B with A (B is the MSB of the accumulator, A is the LSB) fn xba(&mut self) { // Changes N and Z: "The flags are changed based on the new value of the low byte, the A // accumulator (that is, on the former value of the high byte, the B accumulator), even in // sixteen-bit accumulator mode." let lo = self.a & 0xff; let hi = self.a >> 8; self.a = (lo << 8) | self.p.set_nz_8(hi as u8) as u16; } /// Transfer Accumulator to Index Register X fn tax(&mut self) { // Changes N and Z if self.p.small_index() { self.x = (self.x & 0xff00) | self.p.set_nz_8(self.a as u8) as u16; } else { self.x = self.p.set_nz(self.a); } } /// Transfer Accumulator to Index register Y fn tay(&mut self) { // Changes N and Z if self.p.small_index() { self.y = (self.y & 0xff00) | self.p.set_nz_8(self.a as u8) as u16; } else { self.y = self.p.set_nz(self.a); } } /// Transfer X to A fn txa(&mut self) { // Changes N and Z if self.p.small_acc() { self.a = (self.a & 0xff00) | self.p.set_nz_8(self.x as u8) as u16; } else { self.a = self.p.set_nz(self.x); } } /// Transfer X to Y fn txy(&mut self) { // Changes N and Z if self.p.small_index() { self.y = self.p.set_nz_8(self.x as u8) as u16; } else { self.y = self.p.set_nz(self.x); } } /// Transfer Index Register Y to Accumulator fn tya(&mut self) { // Changes N and Z if self.p.small_acc() { self.a = (self.a & 0xff00) | self.p.set_nz_8(self.y as u8) as u16; } else { self.a = 
        self.p.set_nz(self.y);
    }
}

/// Transfer Y to X
fn tyx(&mut self) {
    // Changes N and Z
    // NOTE(review): unlike `tax`/`tay`, the 8-bit path clears X's high byte
    // instead of preserving it — confirm this matches `txy`'s intent.
    if self.p.small_index() {
        self.x = self.p.set_nz_8(self.y as u8) as u16;
    } else {
        self.x = self.p.set_nz(self.y);
    }
}

/// Increment memory location
fn inc(&mut self, am: AddressingMode) {
    // Read-Modify-Write; wraps on overflow. Sets N and Z.
    let (bank, addr) = am.address(self);
    if self.p.small_acc() {
        let res = self.loadb(bank, addr).wrapping_add(1);
        self.p.set_nz_8(res);
        self.storeb(bank, addr, res);
    } else {
        let res = self.loadw(bank, addr).wrapping_add(1);
        self.p.set_nz(res);
        self.storew(bank, addr, res);
    }
}

/// Increment accumulator
fn ina(&mut self) {
    // Changes N and Z. Timing does not depend on accumulator size.
    if self.p.small_acc() {
        let res = self.p.set_nz_8((self.a as u8).wrapping_add(1));
        self.a = (self.a & 0xff00) | res as u16;
    } else {
        self.a = self.p.set_nz(self.a.wrapping_add(1));
    }
}

/// Increment Index Register X
fn inx(&mut self) {
    // Changes N and Z. Timing does not depend on index register size.
    if self.p.small_index() {
        let res = self.p.set_nz_8((self.x as u8).wrapping_add(1));
        self.x = (self.x & 0xff00) | res as u16;
    } else {
        self.x = self.p.set_nz(self.x.wrapping_add(1));
    }
}

/// Increment Index Register Y
fn iny(&mut self) {
    // Changes N and Z. Timing does not depend on index register size.
    if self.p.small_index() {
        let res = self.p.set_nz_8((self.y as u8).wrapping_add(1));
        self.y = (self.y & 0xff00) | res as u16;
    } else {
        self.y = self.p.set_nz(self.y.wrapping_add(1));
    }
}

/// Decrement Accumulator
fn dea(&mut self) {
    // Changes N and Z. Timing does not depend on accumulator size.
    if self.p.small_acc() {
        let res = self.p.set_nz_8((self.a as u8).wrapping_sub(1));
        self.a = (self.a & 0xff00) | res as u16;
    } else {
        self.a = self.p.set_nz(self.a.wrapping_sub(1));
    }
}

/// Decrement memory location
fn dec(&mut self, am: AddressingMode) {
    // Read-Modify-Write; wraps on underflow. Sets N and Z.
    let (bank, addr) = am.address(self);
    if self.p.small_acc() {
        let res = self.loadb(bank, addr).wrapping_sub(1);
        self.p.set_nz_8(res);
        self.storeb(bank, addr, res);
    } else {
        let res = self.loadw(bank, addr).wrapping_sub(1);
        self.p.set_nz(res);
        self.storew(bank, addr, res);
    }
}

/// Decrement X
fn dex(&mut self) {
    // Changes N and Z. Timing does not depend on index register size.
    // NB According to the datasheet, this writes the result to A, not X! But since this
    // doesn't make sense when looking at the way it's used, I'm going to ignore the datasheet
    if self.p.small_index() {
        let res = self.p.set_nz_8((self.x as u8).wrapping_sub(1));
        self.x = (self.x & 0xff00) | res as u16;
    } else {
        self.x = self.p.set_nz(self.x.wrapping_sub(1));
    }
}

/// Decrement Y
fn dey(&mut self) {
    // Changes N and Z. Timing does not depend on index register size.
    if self.p.small_index() {
        let res = self.p.set_nz_8((self.y as u8).wrapping_sub(1));
        self.y = (self.y & 0xff00) | res as u16;
    } else {
        self.y = self.p.set_nz(self.y.wrapping_sub(1));
    }
}

/// Jump long. Changes the PBR.
fn jml(&mut self, am: AddressingMode) { let a = am.address(self); self.branch(a); } /// Jump inside current program bank fn jmp(&mut self, am: AddressingMode) { let (_, addr) = am.address(self); self.pc = addr; } /// Branch always (inside current program bank, but this isn't checked) fn bra(&mut self, am: AddressingMode) { let a = am.address(self); self.branch(a); } /// Branch if Plus (N = 0) fn bpl(&mut self, am: AddressingMode) { let a = am.address(self); if !self.p.negative() { self.branch(a); self.cy += CPU_CYCLE; } } /// Branch if Minus/Negative (N = 1) fn bmi(&mut self, am: AddressingMode) { let a = am.address(self); if self.p.negative() { self.branch(a); self.cy += CPU_CYCLE; } } /// Branch if Overflow Clear fn bvc(&mut self, am: AddressingMode) { let a = am.address(self); if !self.p.overflow() { self.branch(a); self.cy += CPU_CYCLE; } } /// Branch if Overflow Set fn bvs(&mut self, am: AddressingMode) { let a = am.address(self); if self.p.overflow() { self.branch(a); self.cy += CPU_CYCLE; } } /// Branch if carry clear fn bcc(&mut self, am: AddressingMode) { let a = am.address(self); if !self.p.carry() { self.branch(a); self.cy += CPU_CYCLE; } } /// Branch if carry set fn bcs(&mut self, am: AddressingMode) { let a = am.address(self); if self.p.carry() { self.branch(a); self.cy += CPU_CYCLE; } } /// Branch if Equal fn beq(&mut self, am: AddressingMode) { let a = am.address(self); if self.p.zero() { self.branch(a); self.cy += CPU_CYCLE; } } /// Branch if Not Equal (Branch if Z = 0) fn bne(&mut self, am: AddressingMode) { let a = am.address(self); if !self.p.zero() { self.branch(a); self.cy += CPU_CYCLE; } } /// Test memory bits against accumulator fn bit(&mut self, am: AddressingMode) { if self.p.small_index() { let val = am.clone().loadb(self); self.p.set_zero(val & self.a as u8 == 0); match am { AddressingMode::Immediate(_) | AddressingMode::Immediate8(_) => {} _ => { self.p.set_negative(val & 0x80 != 0); self.p.set_overflow(val & 0x40 != 0); } } } else { let 
val = am.clone().loadw(self); self.p.set_zero(val & self.a == 0); match am { AddressingMode::Immediate(_) | AddressingMode::Immediate8(_) => {} _ => { self.p.set_negative(val & 0x8000 != 0); self.p.set_overflow(val & 0x4000 != 0); } } self.cy += CPU_CYCLE; } } /// Test and set memory bits against accumulator fn tsb(&mut self, am: AddressingMode) { // Sets Z // FIXME Is this correct? if self.p.small_index() { let val = am.clone().loadb(self); self.p.set_zero(val & self.a as u8 == 0); let res = val | self.a as u8; am.storeb(self, res); } else { let val = am.clone().loadw(self); self.p.set_zero(val & self.a == 0); let res = val | self.a; am.storew(self, res); self.cy += 2 * CPU_CYCLE; } } /// Test and reset memory bits against accumulator fn trb(&mut self, am: AddressingMode) { // Sets Z // FIXME Is this correct? if self.p.small_index() { let val = am.clone().loadb(self); self.p.set_zero(val & self.a as u8 == 0); let res = val & !(self.a as u8); am.storeb(self, res); } else { let val = am.clone().loadw(self); self.p.set_zero(val & self.a == 0); let res = val & !self.a; am.storew(self, res); self.cy += 2 * CPU_CYCLE; } } /// Compare Accumulator with Memory fn cmp(&mut self, am: AddressingMode) { if self.p.small_acc() { let a = self.a as u8; let b = am.loadb(self); self.compare8(a, b); } else { let a = self.a; let b = am.loadw(self); self.compare(a, b); self.cy += CPU_CYCLE; } } /// Compare Index Register X with Memory fn cpx(&mut self, am: AddressingMode) { if self.p.small_index() { let val = am.loadb(self); let x = self.x as u8; self.compare8(x, val); } else { let val = am.loadw(self); let x = self.x; self.compare(x, val); self.cy += CPU_CYCLE; } } /// Compare Index Register Y with Memory fn cpy(&mut self, am: AddressingMode) { if self.p.small_index() { let val = am.loadb(self); let y = self.y as u8; self.compare8(y, val); } else { let val = am.loadw(self); let y = self.y; self.compare(y, val); self.cy += CPU_CYCLE; } } /// Jump to Subroutine (with short address). 
/// Doesn't change PBR.
///
/// "The address saved is the address of the last byte of the JSR instruction (the address of
/// the last byte of the operand), not the address of the next instruction as is the case with
/// some other processors. The address is pushed onto the stack in standard 65x order – the
/// low byte in the lower address, the high byte in the higher address – and done in standard
/// 65x fashion – the first byte is stored at the location pointed to by the stack pointer, the
/// stack pointer is decremented, the second byte is stored, and the stack pointer is
/// decremented again."
fn jsr(&mut self, am: AddressingMode) {
    // Changes no flags
    // Push the address of the instruction's last byte, high byte first.
    let pc = self.pc - 1;
    self.pushb((pc >> 8) as u8);
    self.pushb(pc as u8);
    self.pc = am.address(self).1;
}

/// Long jump to subroutine. Additionally saves PBR on the stack and sets it to the bank
/// returned by `am.address()`.
fn jsl(&mut self, am: AddressingMode) {
    // Changes no flags
    // Push order: PBR first, then PC high, then PC low (matches `rtl`'s pops).
    let pbr = self.pbr;
    self.pushb(pbr);
    let pc = self.pc - 1;
    self.pushb((pc >> 8) as u8);
    self.pushb(pc as u8);
    let (pbr, pc) = am.address(self);
    self.pbr = pbr;
    self.pc = pc;
}

/// Return from Interrupt
fn rti(&mut self) {
    self.return_from_interrupt()
}

/// Return from Subroutine (Short - Like JSR)
fn rts(&mut self) {
    // Pop low byte first (reverse of the push order in `jsr`).
    let pcl = self.popb() as u16;
    let pch = self.popb() as u16;
    let pc = (pch << 8) | pcl;
    self.pc = pc + 1; // +1 since the last byte of the JSR was saved
}

/// Return from Subroutine called with `jsl`.
///
/// This also restores the PBR.
fn rtl(&mut self) {
    // Pops PC (low byte first), then the saved program bank (reverse of `jsl`).
    let pcl = self.popb() as u16;
    let pch = self.popb() as u16;
    let pbr = self.popb();
    let pc = (pch << 8) | pcl;
    self.pbr = pbr;
    self.pc = pc + 1; // +1 since the last byte of the JSR was saved
}

/// Clear the interrupt-disable flag (enables IRQs)
fn cli(&mut self) {
    self.p.set_irq_disable(false)
}

/// Set the interrupt-disable flag (masks IRQs)
fn sei(&mut self) {
    self.p.set_irq_disable(true)
}

/// Clear the carry flag
fn clc(&mut self) {
    self.p.set_carry(false);
}

/// Set the carry flag
fn sec(&mut self) {
    self.p.set_carry(true);
}

/// Store 0 to memory
fn stz(&mut self, am: AddressingMode) {
    // Operand width follows the accumulator/memory select flag (m).
    if self.p.small_acc() {
        am.storeb(self, 0);
    } else {
        am.storew(self, 0);
        self.cy += CPU_CYCLE;
    }
}

/// Load accumulator from memory
fn lda(&mut self, am: AddressingMode) {
    // Changes N and Z
    if self.p.small_acc() {
        // 8-bit mode loads only the low byte; B (high byte) is preserved.
        let val = am.loadb(self);
        self.a = (self.a & 0xff00) | self.p.set_nz_8(val) as u16;
    } else {
        let val = am.loadw(self);
        self.a = self.p.set_nz(val);
        self.cy += CPU_CYCLE;
    }
}

/// Load X register from memory
fn ldx(&mut self, am: AddressingMode) {
    // Changes N and Z
    if self.p.small_index() {
        let val = am.loadb(self);
        self.x = (self.x & 0xff00) | self.p.set_nz_8(val) as u16;
    } else {
        let val = am.loadw(self);
        self.x = self.p.set_nz(val);
        self.cy += CPU_CYCLE;
    }
}

/// Load Y register from memory
fn ldy(&mut self, am: AddressingMode) {
    // Changes N and Z
    if self.p.small_index() {
        let val = am.loadb(self);
        self.y = (self.y & 0xff00) | self.p.set_nz_8(val) as u16;
    } else {
        let val = am.loadw(self);
        self.y = self.p.set_nz(val);
        self.cy += CPU_CYCLE;
    }
}

/// Store accumulator to memory
fn sta(&mut self, am: AddressingMode) {
    // Changes no flags
    if self.p.small_acc() {
        let b = self.a as u8;
        am.storeb(self, b);
    } else {
        let w = self.a;
        am.storew(self, w);
        self.cy += CPU_CYCLE;
    }
}

/// Store X register to memory (changes no flags)
fn stx(&mut self, am: AddressingMode) {
    // Changes no flags
    if self.p.small_index() {
        let b = self.x as u8;
        am.storeb(self, b);
    } else {
        let w = self.x;
        am.storew(self, w);
        self.cy += CPU_CYCLE;
    }
}

/// Store Y register to memory (changes no flags)
fn sty(&mut self, am: AddressingMode) {
    // Changes no flags
    if self.p.small_index() {
        let b = self.y as u8;
        am.storeb(self, b);
    }
    else {
        let w = self.y;
        am.storew(self, w);
        self.cy += CPU_CYCLE;
    }
}

/// Exchange carry and emulation flags
fn xce(&mut self) {
    // Swaps C with the (hidden) emulation flag E; this is how 65816 code
    // enters/leaves emulation mode.
    let carry = self.p.carry();
    let e = self.emulation;
    self.p.set_carry(e);
    self.set_emulation(carry);
}

/// Reset status bits
///
/// Clears the bits in the status register that are 1 in the argument (argument is interpreted
/// as 8-bit)
fn rep(&mut self, am: AddressingMode) {
    assert!(!self.emulation);
    let p = self.p.0 & !am.loadb(self);
    self.set_p(p);
}

/// Set Processor Status Bits
fn sep(&mut self, am: AddressingMode) {
    assert!(!self.emulation);
    let p = self.p.0 | am.loadb(self);
    self.set_p(p);
}

/// Transfer 16-bit Accumulator to Direct Page Register
fn tcd(&mut self) {
    // Always a full 16-bit transfer; sets N and Z.
    self.d = self.p.set_nz(self.a);
}

/// Transfer 16-bit Accumulator to Stack Pointer
fn tcs(&mut self) {
    if self.emulation {
        // "When in the Emulation mode, a 01 is forced into SH. In this case, the B Accumulator
        // will not be loaded into SH during a TCS instruction."
        // S = 16-bit A; B = High byte of S
        self.s = 0x0100 | (self.a & 0xff);
    } else {
        self.s = self.a;
    }
}

/// Transfer Stack Pointer to 16-bit Accumulator
fn tsc(&mut self) {
    // NOTE(review): hardware TSC also sets N and Z from the transferred value;
    // no flags are set here — confirm whether that is intentional.
    self.a = self.s;
}

/// No operation
fn nop(&mut self) {}
/// Illegal/unimplemented opcode — silently ignored
fn ill(&mut self) {}
}

/// Addressing mode construction
impl Cpu {
    // Each helper fetches the operand bytes that follow the opcode and wraps
    // them in the matching `AddressingMode` variant.
    fn block_move(&mut self) -> AddressingMode {
        // Operand order in the instruction stream: destination bank, then source bank.
        let dest = self.fetchb();
        let src = self.fetchb();
        AddressingMode::BlockMove(dest, src)
    }
    fn direct_indirect(&mut self) -> AddressingMode {
        AddressingMode::DirectIndirect(self.fetchb())
    }
    fn direct_indirect_long(&mut self) -> AddressingMode {
        AddressingMode::DirectIndirectLong(self.fetchb())
    }
    fn direct_indirect_long_idx(&mut self) -> AddressingMode {
        AddressingMode::DirectIndirectLongIdx(self.fetchb())
    }
    fn absolute(&mut self) -> AddressingMode {
        AddressingMode::Absolute(self.fetchw())
    }
    fn absolute_indexed_x(&mut self) -> AddressingMode {
        AddressingMode::AbsIndexedX(self.fetchw())
    }
    fn absolute_indexed_y(&mut self) -> AddressingMode {
        AddressingMode::AbsIndexedY(self.fetchw())
    }
    fn
    absolute_indexed_indirect(&mut self) -> AddressingMode {
        AddressingMode::AbsIndexedIndirect(self.fetchw())
    }
    fn absolute_long(&mut self) -> AddressingMode {
        // 24-bit operand: 16-bit address first, then the bank byte.
        let addr = self.fetchw();
        let bank = self.fetchb();
        AddressingMode::AbsoluteLong(bank, addr)
    }
    fn absolute_long_indexed_x(&mut self) -> AddressingMode {
        let addr = self.fetchw();
        let bank = self.fetchb();
        AddressingMode::AbsLongIndexedX(bank, addr)
    }
    fn absolute_indirect(&mut self) -> AddressingMode {
        AddressingMode::AbsoluteIndirect(self.fetchw())
    }
    fn absolute_indirect_long(&mut self) -> AddressingMode {
        AddressingMode::AbsoluteIndirectLong(self.fetchw())
    }
    fn rel(&mut self) -> AddressingMode {
        // Signed 8-bit displacement for short branches.
        AddressingMode::Rel(self.fetchb() as i8)
    }
    fn stack_rel(&mut self) -> AddressingMode {
        AddressingMode::StackRel(self.fetchb())
    }
    fn direct(&mut self) -> AddressingMode {
        AddressingMode::Direct(self.fetchb())
    }
    fn direct_indexed_x(&mut self) -> AddressingMode {
        AddressingMode::DirectIndexedX(self.fetchb())
    }
    fn direct_indexed_y(&mut self) -> AddressingMode {
        AddressingMode::DirectIndexedY(self.fetchb())
    }
    fn direct_indexed_indirect(&mut self) -> AddressingMode {
        AddressingMode::DirectIndexedIndirect(self.fetchb())
    }
    fn direct_indirect_indexed(&mut self) -> AddressingMode {
        AddressingMode::DirectIndirectIndexed(self.fetchb())
    }
    /// Immediate value with accumulator size
    fn immediate_acc(&mut self) -> AddressingMode {
        if self.p.small_acc() {
            AddressingMode::Immediate8(self.fetchb())
        } else {
            // Fetching the extra operand byte costs one cycle.
            self.cy += CPU_CYCLE;
            AddressingMode::Immediate(self.fetchw())
        }
    }
    /// Immediate value with index register size
    fn immediate_index(&mut self) -> AddressingMode {
        if self.p.small_index() {
            AddressingMode::Immediate8(self.fetchb())
        } else {
            self.cy += CPU_CYCLE;
            AddressingMode::Immediate(self.fetchw())
        }
    }
    /// Immediate value, one byte
    fn immediate8(&mut self) -> AddressingMode {
        AddressingMode::Immediate8(self.fetchb())
    }
}
use if_chain::if_chain;
use rustc_errors::Applicability;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::{def, BindingAnnotation, Block, Expr, ExprKind, MatchSource, PatKind, StmtKind};
use rustc_lint::{LateContext, LateLintPass};
use rustc_session::{declare_lint_pass, declare_tool_lint};

use crate::utils::paths::{OPTION, OPTION_NONE};
use crate::utils::sugg::Sugg;
use crate::utils::{
    higher, match_def_path, match_qpath, match_type, snippet_with_applicability, span_lint_and_then,
    SpanlessEq,
};

declare_clippy_lint! {
    /// **What it does:** Checks for expressions that could be replaced by the question mark operator.
    ///
    /// **Why is this bad?** Question mark usage is more idiomatic.
    ///
    /// **Known problems:** None
    ///
    /// **Example:**
    /// ```ignore
    /// if option.is_none() {
    ///     return None;
    /// }
    /// ```
    ///
    /// Could be written:
    ///
    /// ```ignore
    /// option?;
    /// ```
    pub QUESTION_MARK,
    style,
    "checks for expressions that could be replaced by the question mark operator"
}

declare_lint_pass!(QuestionMark => [QUESTION_MARK]);

impl QuestionMark {
    /// Checks if the given expression on the given context matches the following structure:
    ///
    /// ```ignore
    /// if option.is_none() {
    ///     return None;
    /// }
    /// ```
    ///
    /// If it matches, it will suggest to use the question mark operator instead
    fn check_is_none_and_early_return_none(cx: &LateContext<'_, '_>, expr: &Expr<'_>) {
        if_chain! {
            // Matches `if <subject>.is_none() { <body that returns None> }`.
            if let Some((if_expr, body, else_)) = higher::if_block(&expr);
            if let ExprKind::MethodCall(segment, _, args) = &if_expr.kind;
            if segment.ident.name == sym!(is_none);
            if Self::expression_returns_none(cx, body);
            if let Some(subject) = args.get(0);
            if Self::is_option(cx, subject);
            then {
                let mut applicability = Applicability::MachineApplicable;
                let receiver_str = snippet_with_applicability(cx, subject.span, "..", &mut applicability);
                let mut replacement: Option<String> = None;
                if let Some(else_) = else_ {
                    if_chain!
                    {
                        // The else branch just evaluates to the subject itself, so the
                        // whole if/else maps to `Some(<subject>?)`.
                        if let ExprKind::Block(block, None) = &else_.kind;
                        if block.stmts.is_empty();
                        if let Some(block_expr) = &block.expr;
                        if SpanlessEq::new(cx).ignore_fn().eq_expr(subject, block_expr);
                        then {
                            replacement = Some(format!("Some({}?)", receiver_str));
                        }
                    }
                } else if Self::moves_by_default(cx, subject) {
                    // Non-Copy subject: suggest `.as_ref()?` so the suggestion does not move it.
                    replacement = Some(format!("{}.as_ref()?;", receiver_str));
                } else {
                    replacement = Some(format!("{}?;", receiver_str));
                }

                if let Some(replacement_str) = replacement {
                    span_lint_and_then(
                        cx,
                        QUESTION_MARK,
                        expr.span,
                        "this block may be rewritten with the `?` operator",
                        |db| {
                            db.span_suggestion(
                                expr.span,
                                "replace it with",
                                replacement_str,
                                applicability,
                            );
                        }
                    )
                }
            }
        }
    }

    /// Matches the desugared `if let Some(x) = subject { x } else { return None }`.
    fn check_if_let_some_and_early_return_none(cx: &LateContext<'_, '_>, expr: &Expr<'_>) {
        if_chain! {
            if let ExprKind::Match(subject, arms, source) = &expr.kind;
            if *source == MatchSource::IfLetDesugar { contains_else_clause: true };
            if Self::is_option(cx, subject);

            // First arm: `Some(<bind>) => { <bind> }`.
            if let PatKind::TupleStruct(path1, fields, None) = &arms[0].pat.kind;
            if match_qpath(path1, &["Some"]);
            if let PatKind::Binding(annot, _, bind, _) = &fields[0].kind;
            let by_ref = matches!(annot, BindingAnnotation::Ref | BindingAnnotation::RefMut);

            if let ExprKind::Block(block, None) = &arms[0].body.kind;
            if block.stmts.is_empty();
            if let Some(trailing_expr) = &block.expr;
            if let ExprKind::Path(path) = &trailing_expr.kind;
            if match_qpath(path, &[&bind.as_str()]);

            // Second arm: wildcard that returns None.
            if let PatKind::Wild = arms[1].pat.kind;
            if Self::expression_returns_none(cx, arms[1].body);
            then {
                let mut applicability = Applicability::MachineApplicable;
                let receiver_str = snippet_with_applicability(cx, subject.span, "..", &mut applicability);
                let replacement = format!(
                    "{}{}?",
                    receiver_str,
                    if by_ref { ".as_ref()" } else { "" },
                );

                span_lint_and_then(
                    cx,
                    QUESTION_MARK,
                    expr.span,
                    "this if-let-else may be rewritten with the `?` operator",
                    |db| {
                        db.span_suggestion(
                            expr.span,
                            "replace it with",
                            replacement,
                            applicability,
                        );
                    }
                )
            }
        }
    }

    fn moves_by_default(cx: &LateContext<'_, '_>,
                         expression: &Expr<'_>) -> bool {
        // `true` when the expression's type is not Copy, i.e. using it would move it.
        let expr_ty = cx.tables.expr_ty(expression);

        !expr_ty.is_copy_modulo_regions(cx.tcx, cx.param_env, expression.span)
    }

    /// `true` if the expression's type is `Option<_>`.
    fn is_option(cx: &LateContext<'_, '_>, expression: &Expr<'_>) -> bool {
        let expr_ty = cx.tables.expr_ty(expression);

        match_type(cx, expr_ty, &OPTION)
    }

    /// Recursively checks whether an expression evaluates to / returns `None`.
    fn expression_returns_none(cx: &LateContext<'_, '_>, expression: &Expr<'_>) -> bool {
        match expression.kind {
            ExprKind::Block(ref block, _) => {
                if let Some(return_expression) = Self::return_expression(block) {
                    return Self::expression_returns_none(cx, &return_expression);
                }

                false
            },
            ExprKind::Ret(Some(ref expr)) => Self::expression_returns_none(cx, expr),
            ExprKind::Path(ref qp) => {
                // A bare path counts when it resolves to the `Option::None` constructor.
                if let Res::Def(DefKind::Ctor(def::CtorOf::Variant, def::CtorKind::Const), def_id) =
                    cx.tables.qpath_res(qp, expression.hir_id)
                {
                    return match_def_path(cx, def_id, &OPTION_NONE);
                }

                false
            },
            _ => false,
        }
    }

    fn return_expression<'tcx>(block: &Block<'tcx>) -> Option<&'tcx Expr<'tcx>> {
        // Check if last expression is a return statement. Then, return the expression
        if_chain! {
            if block.stmts.len() == 1;
            if let Some(expr) = block.stmts.iter().last();
            if let StmtKind::Semi(ref expr) = expr.kind;
            if let ExprKind::Ret(ret_expr) = expr.kind;
            if let Some(ret_expr) = ret_expr;
            then {
                return Some(ret_expr);
            }
        }

        // Check for `return` without a semicolon.
        if_chain!
        {
            if block.stmts.is_empty();
            if let Some(ExprKind::Ret(Some(ret_expr))) = block.expr.as_ref().map(|e| &e.kind);
            then {
                return Some(ret_expr);
            }
        }

        None
    }
}

impl<'a, 'tcx> LateLintPass<'a, 'tcx> for QuestionMark {
    // Entry point: run both checks on every visited expression.
    fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr<'_>) {
        Self::check_is_none_and_early_return_none(cx, expr);
        Self::check_if_let_some_and_early_return_none(cx, expr);
    }
}

// Apply suggestions from code review
// Co-Authored-By: Philipp Krones <aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d@philkrones.com>
//
// NOTE(review): the two lines above are a commit message embedded in this
// extract; everything below is the post-review revision of the same lint file
// (key changes: `Sugg::hir_with_applicability` and `span_lint_and_sugg`).

use if_chain::if_chain;
use rustc_errors::Applicability;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::{def, BindingAnnotation, Block, Expr, ExprKind, MatchSource, PatKind, StmtKind};
use rustc_lint::{LateContext, LateLintPass};
use rustc_session::{declare_lint_pass, declare_tool_lint};

use crate::utils::paths::{OPTION, OPTION_NONE};
use crate::utils::sugg::Sugg;
use crate::utils::{
    higher, match_def_path, match_qpath, match_type, snippet_with_applicability, span_lint_and_sugg,
    SpanlessEq,
};

declare_clippy_lint! {
    /// **What it does:** Checks for expressions that could be replaced by the question mark operator.
    ///
    /// **Why is this bad?** Question mark usage is more idiomatic.
    ///
    /// **Known problems:** None
    ///
    /// **Example:**
    /// ```ignore
    /// if option.is_none() {
    ///     return None;
    /// }
    /// ```
    ///
    /// Could be written:
    ///
    /// ```ignore
    /// option?;
    /// ```
    pub QUESTION_MARK,
    style,
    "checks for expressions that could be replaced by the question mark operator"
}

declare_lint_pass!(QuestionMark => [QUESTION_MARK]);

impl QuestionMark {
    /// Checks if the given expression on the given context matches the following structure:
    ///
    /// ```ignore
    /// if option.is_none() {
    ///     return None;
    /// }
    /// ```
    ///
    /// If it matches, it will suggest to use the question mark operator instead
    fn check_is_none_and_early_return_none(cx: &LateContext<'_, '_>, expr: &Expr<'_>) {
        if_chain!
        {
            // Matches `if <subject>.is_none() { <body that returns None> }`.
            if let Some((if_expr, body, else_)) = higher::if_block(&expr);
            if let ExprKind::MethodCall(segment, _, args) = &if_expr.kind;
            if segment.ident.name == sym!(is_none);
            if Self::expression_returns_none(cx, body);
            if let Some(subject) = args.get(0);
            if Self::is_option(cx, subject);

            then {
                let mut applicability = Applicability::MachineApplicable;
                // Post-review: use `Sugg` (adds parentheses where needed)
                // instead of a raw source snippet.
                let receiver_str = &Sugg::hir_with_applicability(cx, subject, "..", &mut applicability);
                let mut replacement: Option<String> = None;
                if let Some(else_) = else_ {
                    if_chain! {
                        // The else branch just evaluates to the subject, so the
                        // whole if/else maps to `Some(<subject>?)`.
                        if let ExprKind::Block(block, None) = &else_.kind;
                        if block.stmts.is_empty();
                        if let Some(block_expr) = &block.expr;
                        if SpanlessEq::new(cx).ignore_fn().eq_expr(subject, block_expr);
                        then {
                            replacement = Some(format!("Some({}?)", receiver_str));
                        }
                    }
                } else if Self::moves_by_default(cx, subject) {
                    // Non-Copy subject: suggest `.as_ref()?` to avoid moving it.
                    replacement = Some(format!("{}.as_ref()?;", receiver_str));
                } else {
                    replacement = Some(format!("{}?;", receiver_str));
                }

                if let Some(replacement_str) = replacement {
                    span_lint_and_sugg(
                        cx,
                        QUESTION_MARK,
                        expr.span,
                        "this block may be rewritten with the `?` operator",
                        "replace it with",
                        replacement_str,
                        applicability,
                    )
                }
            }
        }
    }

    /// Matches the desugared `if let Some(x) = subject { x } else { return None }`.
    fn check_if_let_some_and_early_return_none(cx: &LateContext<'_, '_>, expr: &Expr<'_>) {
        if_chain!
        {
            if let ExprKind::Match(subject, arms, source) = &expr.kind;
            if *source == MatchSource::IfLetDesugar { contains_else_clause: true };
            if Self::is_option(cx, subject);

            // First arm: `Some(<bind>) => { <bind> }`.
            if let PatKind::TupleStruct(path1, fields, None) = &arms[0].pat.kind;
            if match_qpath(path1, &["Some"]);
            if let PatKind::Binding(annot, _, bind, _) = &fields[0].kind;
            let by_ref = matches!(annot, BindingAnnotation::Ref | BindingAnnotation::RefMut);

            if let ExprKind::Block(block, None) = &arms[0].body.kind;
            if block.stmts.is_empty();
            if let Some(trailing_expr) = &block.expr;
            if let ExprKind::Path(path) = &trailing_expr.kind;
            if match_qpath(path, &[&bind.as_str()]);

            // Second arm: wildcard that returns None.
            if let PatKind::Wild = arms[1].pat.kind;
            if Self::expression_returns_none(cx, arms[1].body);
            then {
                let mut applicability = Applicability::MachineApplicable;
                let receiver_str = snippet_with_applicability(cx, subject.span, "..", &mut applicability);
                let replacement = format!(
                    "{}{}?",
                    receiver_str,
                    if by_ref { ".as_ref()" } else { "" },
                );

                span_lint_and_sugg(
                    cx,
                    QUESTION_MARK,
                    expr.span,
                    "this if-let-else may be rewritten with the `?` operator",
                    "replace it with",
                    replacement,
                    applicability,
                )
            }
        }
    }

    fn moves_by_default(cx: &LateContext<'_, '_>, expression: &Expr<'_>) -> bool {
        // `true` when the expression's type is not Copy, i.e. using it would move it.
        let expr_ty = cx.tables.expr_ty(expression);

        !expr_ty.is_copy_modulo_regions(cx.tcx, cx.param_env, expression.span)
    }

    /// `true` if the expression's type is `Option<_>`.
    fn is_option(cx: &LateContext<'_, '_>, expression: &Expr<'_>) -> bool {
        let expr_ty = cx.tables.expr_ty(expression);

        match_type(cx, expr_ty, &OPTION)
    }

    /// Recursively checks whether an expression evaluates to / returns `None`.
    fn expression_returns_none(cx: &LateContext<'_, '_>, expression: &Expr<'_>) -> bool {
        match expression.kind {
            ExprKind::Block(ref block, _) => {
                if let Some(return_expression) = Self::return_expression(block) {
                    return Self::expression_returns_none(cx, &return_expression);
                }

                false
            },
            ExprKind::Ret(Some(ref expr)) => Self::expression_returns_none(cx, expr),
            ExprKind::Path(ref qp) => {
                // A bare path counts when it resolves to the `Option::None` constructor.
                if let Res::Def(DefKind::Ctor(def::CtorOf::Variant, def::CtorKind::Const), def_id) =
                    cx.tables.qpath_res(qp,
                                        expression.hir_id)
                {
                    return match_def_path(cx, def_id, &OPTION_NONE);
                }

                false
            },
            _ => false,
        }
    }

    fn return_expression<'tcx>(block: &Block<'tcx>) -> Option<&'tcx Expr<'tcx>> {
        // Check if last expression is a return statement. Then, return the expression
        if_chain! {
            if block.stmts.len() == 1;
            if let Some(expr) = block.stmts.iter().last();
            if let StmtKind::Semi(ref expr) = expr.kind;
            if let ExprKind::Ret(ret_expr) = expr.kind;
            if let Some(ret_expr) = ret_expr;
            then {
                return Some(ret_expr);
            }
        }

        // Check for `return` without a semicolon.
        if_chain! {
            if block.stmts.is_empty();
            if let Some(ExprKind::Ret(Some(ret_expr))) = block.expr.as_ref().map(|e| &e.kind);
            then {
                return Some(ret_expr);
            }
        }

        None
    }
}

impl<'a, 'tcx> LateLintPass<'a, 'tcx> for QuestionMark {
    // Entry point: run both checks on every visited expression.
    fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr<'_>) {
        Self::check_is_none_and_early_return_none(cx, expr);
        Self::check_if_let_some_and_early_return_none(cx, expr);
    }
}
use Error;
use num::{FromPrimitive, One, ToPrimitive, Zero};
use std::cmp::*;
use std::cmp::Ordering::Equal;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::iter::repeat;
use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Rem, RemAssign, Sub, SubAssign};
use std::str::FromStr;

// Sign mask for the flags field. A value of zero in this bit indicates a
// positive Decimal value, and a value of one in this bit indicates a
// negative Decimal value.
const SIGN_MASK: u32 = 0x8000_0000;

// Scale mask for the flags field. This byte in the flags field contains
// the power of 10 to divide the Decimal value by. The scale byte must
// contain a value between 0 and 28 inclusive.
const SCALE_MASK: u32 = 0x00FF_0000;
// Masks for extracting single bytes / low 32 bits during (de)serialization.
const U8_MASK: u32 = 0x0000_00FF;
const U32_MASK: u64 = 0xFFFF_FFFF;

// Number of bits scale is shifted by.
const SCALE_SHIFT: u32 = 16;
// The maximum supported precision
const MAX_PRECISION: u32 = 28;

// 96-bit little-endian representation of the integer 1 ([lo, mid, hi]).
static ONE_INTERNAL_REPR: [u32; 3] = [1, 0, 0];

lazy_static! {
    // MIN/MAX: full 96-bit magnitude (all mantissa bits set) at scale 0,
    // with the sign bit set (MIN) or clear (MAX).
    static ref MIN: Decimal = Decimal {
        flags: 2_147_483_648,
        lo: 4_294_967_295,
        mid: 4_294_967_295,
        hi: 4_294_967_295
    };
    static ref MAX: Decimal = Decimal {
        flags: 0,
        lo: 4_294_967_295,
        mid: 4_294_967_295,
        hi: 4_294_967_295
    };
}

// Fast access for 10^n where n is 0-9
static POWERS_10: [u32; 10] = [
    1, 10, 100, 1_000, 10_000, 100_000, 1_000_000, 10_000_000, 100_000_000, 1_000_000_000,
];
// Fast access for 10^n where n is 10-19
#[allow(dead_code)]
static BIG_POWERS_10: [u64; 10] = [
    10_000_000_000,
    100_000_000_000,
    1_000_000_000_000,
    10_000_000_000_000,
    100_000_000_000_000,
    1_000_000_000_000_000,
    10_000_000_000_000_000,
    100_000_000_000_000_000,
    1_000_000_000_000_000_000,
    10_000_000_000_000_000_000,
];

/// `Decimal` represents a 128 bit representation of a fixed-precision decimal number.
/// The finite set of values of type `Decimal` are of the form m / 10<sup>e</sup>, /// where m is an integer such that -2<sup>96</sup> <= m <= 2<sup>96</sup>, and e is an integer /// between 0 and 28 inclusive. #[derive(Clone, Copy, Debug)] pub struct Decimal { // Bits 0-15: unused // Bits 16-23: Contains "e", a value between 0-28 that indicates the scale // Bits 24-30: unused // Bit 31: the sign of the Decimal value, 0 meaning positive and 1 meaning negative. flags: u32, // The lo, mid, hi, and flags fields contain the representation of the // Decimal value as a 96-bit integer. hi: u32, lo: u32, mid: u32, } #[allow(dead_code)] impl Decimal { /// Returns a `Decimal` with a 64 bit `m` representation and corresponding `e` scale. /// /// # Arguments /// /// * `num` - An i64 that represents the `m` portion of the decimal number /// * `scale` - A u32 representing the `e` portion of the decimal number. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let pi = Decimal::new(3141, 3); /// assert_eq!(pi.to_string(), "3.141"); /// ``` pub fn new(num: i64, scale: u32) -> Decimal { if scale > MAX_PRECISION { panic!( "Scale exceeds the maximum precision allowed: {} > {}", scale, MAX_PRECISION ); } let flags: u32 = scale << SCALE_SHIFT; if num < 0 { return Decimal { flags: flags | SIGN_MASK, hi: 0, lo: (num.abs() as u64 & U32_MASK) as u32, mid: ((num.abs() as u64 >> 32) & U32_MASK) as u32, }; } Decimal { flags: flags, hi: 0, lo: (num as u64 & U32_MASK) as u32, mid: ((num as u64 >> 32) & U32_MASK) as u32, } } /// Returns a `Decimal` using the instances constituent parts. /// /// # Arguments /// /// * `lo` - The low 32 bits of a 96-bit integer. /// * `mid` - The middle 32 bits of a 96-bit integer. /// * `hi` - The high 32 bits of a 96-bit integer. /// * `negative` - `true` to indicate a negative number. /// * `scale` - A power of 10 ranging from 0 to 28. 
/// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let pi = Decimal::from_parts(1102470952, 185874565, 1703060790, false, 28); /// assert_eq!(pi.to_string(), "3.1415926535897932384626433832"); /// ``` pub fn from_parts(lo: u32, mid: u32, hi: u32, negative: bool, scale: u32) -> Decimal { Decimal { lo: lo, mid: mid, hi: hi, flags: flags(negative, scale), } } /// Returns a `Result` which if successful contains the `Decimal` constitution of /// the scientific notation provided by `value`. /// /// # Arguments /// /// * `value` - The scientific notation of the `Decimal`. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let value = Decimal::from_scientific("9.7e-7").unwrap(); /// assert_eq!(value.to_string(), "0.00000097"); /// ``` pub fn from_scientific(value: &str) -> Result<Decimal, Error> { let err = Error::new("Failed to parse"); let mut split = value.splitn(2, 'e'); let base = split.next().ok_or(err.clone())?; let mut scale = split.next().ok_or(err.clone())?.to_string(); let mut ret = Decimal::from_str(base)?; if scale.contains('-') { scale.remove(0); let scale: u32 = scale.as_str().parse().map_err(move |_| err.clone())?; let current_scale = ret.scale(); ret.set_scale(current_scale+ scale)?; } else { if scale.contains('+') { scale.remove(0); } let pow: u32 = scale.as_str().parse().map_err(move |_| err.clone())?; ret *= Decimal::from_i64(10_i64.pow(pow)).unwrap(); ret = ret.normalize(); } Ok(ret) } /// Returns the scale of the decimal number, otherwise known as `e`. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let num = Decimal::new(1234, 3); /// assert_eq!(num.scale(), 3u32); /// ``` #[inline] pub fn scale(&self) -> u32 { ((self.flags & SCALE_MASK) >> SCALE_SHIFT) as u32 } /// An optimized method for changing the sign of a decimal number. /// /// # Arguments /// /// * `positive`: true if the resulting decimal should be positive. 
/// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let mut one = Decimal::new(1, 0); /// one.set_sign(false); /// assert_eq!(one.to_string(), "-1"); /// ``` pub fn set_sign(&mut self, positive: bool) { if positive { if self.is_sign_negative() { self.flags ^= SIGN_MASK; } } else { self.flags |= SIGN_MASK; } } /// An optimized method for changing the scale of a decimal number. /// /// # Arguments /// /// * `scale`: the new scale of the number /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let mut one = Decimal::new(1, 0); /// one.set_scale(5); /// assert_eq!(one.to_string(), "0.00001"); /// ``` pub fn set_scale(&mut self, scale: u32) -> Result<(), Error> { if scale > MAX_PRECISION { return Err(Error::new("Scale exceeds maximum precision")); } self.flags = (scale << SCALE_SHIFT) | (self.flags & SIGN_MASK); Ok(()) } /// Returns a serialized version of the decimal number. /// The resulting byte array will have the following representation: /// /// * Bytes 1-4: flags /// * Bytes 5-8: lo portion of `m` /// * Bytes 9-12: mid portion of `m` /// * Bytes 13-16: high portion of `m` pub fn serialize(&self) -> [u8; 16] { [ (self.flags & U8_MASK) as u8, ((self.flags >> 8) & U8_MASK) as u8, ((self.flags >> 16) & U8_MASK) as u8, ((self.flags >> 24) & U8_MASK) as u8, (self.lo & U8_MASK) as u8, ((self.lo >> 8) & U8_MASK) as u8, ((self.lo >> 16) & U8_MASK) as u8, ((self.lo >> 24) & U8_MASK) as u8, (self.mid & U8_MASK) as u8, ((self.mid >> 8) & U8_MASK) as u8, ((self.mid >> 16) & U8_MASK) as u8, ((self.mid >> 24) & U8_MASK) as u8, (self.hi & U8_MASK) as u8, ((self.hi >> 8) & U8_MASK) as u8, ((self.hi >> 16) & U8_MASK) as u8, ((self.hi >> 24) & U8_MASK) as u8, ] } /// Deserializes the given bytes into a decimal number. 
/// The deserialized byte representation must be 16 bytes and adhere to the followign convention: /// /// * Bytes 1-4: flags /// * Bytes 5-8: lo portion of `m` /// * Bytes 9-12: mid portion of `m` /// * Bytes 13-16: high portion of `m` pub fn deserialize(bytes: [u8; 16]) -> Decimal { Decimal { flags: u32::from(bytes[0]) | u32::from(bytes[1]) << 8 | u32::from(bytes[2]) << 16 | u32::from(bytes[3]) << 24, lo: u32::from(bytes[4]) | u32::from(bytes[5]) << 8 | u32::from(bytes[6]) << 16 | u32::from(bytes[7]) << 24, mid: u32::from(bytes[8]) | u32::from(bytes[9]) << 8 | u32::from(bytes[10]) << 16 | u32::from(bytes[11]) << 24, hi: u32::from(bytes[12]) | u32::from(bytes[13]) << 8 | u32::from(bytes[14]) << 16 | u32::from(bytes[15]) << 24, } } /// Returns `true` if the decimal is negative. #[deprecated(since = "0.6.3", note = "please use `is_sign_negative` instead")] pub fn is_negative(&self) -> bool { self.is_sign_negative() } /// Returns `true` if the decimal is positive. #[deprecated(since = "0.6.3", note = "please use `is_sign_positive` instead")] pub fn is_positive(&self) -> bool { self.is_sign_positive() } /// Returns `true` if the decimal is negative. #[inline(always)] pub fn is_sign_negative(&self) -> bool { self.flags & SIGN_MASK > 0 } /// Returns `true` if the decimal is positive. pub fn is_sign_positive(&self) -> bool { self.flags & SIGN_MASK == 0 } /// Returns the minimum possible number that `Decimal` can represent. pub fn min_value() -> Decimal { *MIN } /// Returns the maximum possible number that `Decimal` can represent. pub fn max_value() -> Decimal { *MAX } /// Returns a new `Decimal` integral with no fractional portion. /// This is a true truncation whereby no rounding is performed. 
/// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let pi = Decimal::new(3141, 3); /// let trunc = Decimal::new(3, 0); /// // note that it returns a decimal /// assert_eq!(pi.trunc(), trunc); /// ``` pub fn trunc(&self) -> Decimal { let mut scale = self.scale(); if scale == 0 { // Nothing to do return *self; } let mut working = [self.lo, self.mid, self.hi]; while scale > 0 { // We're removing precision, so we don't care about overflow if scale < 10 { div_by_u32(&mut working, POWERS_10[scale as usize]); break; } else { div_by_u32(&mut working, POWERS_10[9]); // Only 9 as this array starts with 1 scale -= 9; } } Decimal { lo: working[0], mid: working[1], hi: working[2], flags: flags(self.is_sign_negative(), 0), } } /// Returns a new `Decimal` representing the fractional portion of the number. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let pi = Decimal::new(3141, 3); /// let fract = Decimal::new(141, 3); /// // note that it returns a decimal /// assert_eq!(pi.fract(), fract); /// ``` pub fn fract(&self) -> Decimal { // This is essentially the original number minus the integral. // Could possibly be optimized in the future *self - self.trunc() } /// Computes the absolute value of `self`. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let num = Decimal::new(-3141, 3); /// assert_eq!(num.abs().to_string(), "3.141"); /// ``` pub fn abs(&self) -> Decimal { let mut me = *self; me.set_sign(true); me } /// Returns the largest integer less than or equal to a number. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let num = Decimal::new(3641, 3); /// assert_eq!(num.floor().to_string(), "3"); /// ``` pub fn floor(&self) -> Decimal { // Opportunity for optimization here self.trunc() } /// Returns the smallest integer greater than or equal to a number. 
/// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let num = Decimal::new(3141, 3); /// assert_eq!(num.ceil().to_string(), "4"); /// let num = Decimal::new(3, 0); /// assert_eq!(num.ceil().to_string(), "3"); /// ``` pub fn ceil(&self) -> Decimal { // Opportunity for optimization here if self.fract().is_zero() { *self } else { self.trunc() + Decimal::one() } } /// Strips any trailing zero's from a `Decimal`. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let number = Decimal::new(3100, 3); /// // note that it returns a decimal, without the extra scale /// assert_eq!(number.normalize().to_string(), "3.1"); /// ``` pub fn normalize(&self) -> Decimal { let mut scale = self.scale(); if scale == 0 { // Nothing to do return *self; } let mut result = [self.lo, self.mid, self.hi]; let mut working = [self.lo, self.mid, self.hi]; while scale > 0 { if div_by_u32(&mut working, 10) > 0 { break; } scale -= 1; result.copy_from_slice(&working); } Decimal { lo: result[0], mid: result[1], hi: result[2], flags: flags(self.is_sign_negative(), scale), } } /// Returns a new `Decimal` number with no fractional portion (i.e. an integer). /// Rounding currently follows "Bankers Rounding" rules. e.g. 6.5 -> 6, 7.5 -> 8 /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// // Demonstrating bankers rounding... /// let number_down = Decimal::new(65, 1); /// let number_up = Decimal::new(75, 1); /// assert_eq!(number_down.round().to_string(), "6"); /// assert_eq!(number_up.round().to_string(), "8"); /// ``` pub fn round(&self) -> Decimal { self.round_dp(0) } /// Returns a new `Decimal` number with the specified number of decimal points for fractional portion. /// Rounding currently follows "Bankers Rounding" rules. e.g. 6.5 -> 6, 7.5 -> 8 /// /// # Arguments /// * `dp`: the number of decimal points to round to. 
    ///
    /// # Example
    ///
    /// ```
    /// use rust_decimal::Decimal;
    /// use std::str::FromStr;
    ///
    /// let pi = Decimal::from_str("3.1415926535897932384626433832").unwrap();
    /// assert_eq!(pi.round_dp(2).to_string(), "3.14");
    /// ```
    pub fn round_dp(&self, dp: u32) -> Decimal {
        let old_scale = self.scale();
        // Only do the work if we are actually losing decimal places.
        if dp < old_scale {
            // Short circuit for zero
            if self.is_zero() {
                return Decimal {
                    lo: 0,
                    mid: 0,
                    hi: 0,
                    flags: flags(self.is_sign_negative(), dp),
                };
            }
            let mut value = [self.lo, self.mid, self.hi];
            let mut value_scale = self.scale();
            let negative = self.is_sign_negative();
            value_scale -= dp;
            // Rescale to zero so it's easier to work with
            while value_scale > 0 {
                if value_scale < 10 {
                    div_by_u32(&mut value, POWERS_10[value_scale as usize]);
                    value_scale = 0;
                } else {
                    div_by_u32(&mut value, POWERS_10[9]);
                    value_scale -= 9;
                }
            }
            // Do some midpoint rounding checks
            // We're actually doing two things here.
            //  1. Figuring out midpoint rounding when we're right on the boundary. e.g. 2.50000
            //  2. Figuring out whether to add one or not e.g. 2.51
            // For this, we need to figure out the fractional portion that is additional to
            // the rounded number. e.g. for 0.12345 rounding to 2dp we'd want 345.
            // We're doing the equivalent of losing precision (e.g. to get 0.12)
            // then increasing the precision back up to 0.12000
            let mut offset = [self.lo, self.mid, self.hi];
            let mut diff = old_scale - dp;
            // First pass: divide away the digits beyond dp...
            while diff > 0 {
                if diff < 10 {
                    div_by_u32(&mut offset, POWERS_10[diff as usize]);
                    break;
                } else {
                    div_by_u32(&mut offset, POWERS_10[9]); // Only 9 as this array starts with 1
                    diff -= 9;
                }
            }
            // ...second pass: multiply back so `offset` has the same scale as
            // the original but with the dropped digits zeroed.
            let mut diff = old_scale - dp;
            while diff > 0 {
                if diff < 10 {
                    mul_by_u32(&mut offset, POWERS_10[diff as usize]);
                    break;
                } else {
                    mul_by_u32(&mut offset, POWERS_10[9]); // Only 9 as this array starts with 1
                    diff -= 9;
                }
            }
            // decimal_portion = the digits that were dropped (e.g. 345 above).
            let mut decimal_portion = [self.lo, self.mid, self.hi];
            sub_internal(&mut decimal_portion, &offset);
            // If the decimal_portion is zero then we round based on the other data
            // `cap` is exactly half of one unit at the target precision (5 * 10^(k-1)).
            let mut cap = [5, 0, 0];
            for _ in 0..(old_scale - dp - 1) {
                mul_by_u32(&mut cap, 10);
            }
            let order = cmp_internal(&decimal_portion, &cap);
            match order {
                Ordering::Equal => {
                    // Exactly on the midpoint: bankers rounding — round to even,
                    // i.e. only bump when the kept value is currently odd.
                    if (value[0] & 1) == 1 {
                        add_internal(&mut value, &ONE_INTERNAL_REPR);
                    }
                }
                Ordering::Greater => {
                    // Doesn't matter about the decimal portion
                    add_internal(&mut value, &ONE_INTERNAL_REPR);
                }
                _ => {}
            }
            Decimal {
                lo: value[0],
                mid: value[1],
                hi: value[2],
                flags: flags(negative, dp),
            }
        } else {
            *self
        }
    }

    /// Converts a base-2 (IEEE floating point) mantissa/exponent pair into a
    /// `Decimal`, rounding away precision beyond what the float guarantees.
    /// Returns `None` when the value cannot be represented (overflow).
    ///
    /// * `bits` - the 96-bit mantissa (mutated in place during conversion).
    /// * `exponent2` - the binary exponent of the float.
    /// * `positive` - the sign of the float.
    /// * `is64` - `true` for f64 input (≈16 significant dp), `false` for f32 (≈7 dp).
    fn base2_to_decimal(bits: &mut [u32; 3], exponent2: i32, positive: bool, is64: bool) -> Option<Self> {
        // 2^exponent2 = (10^exponent2)/(5^exponent2)
        //             = (5^-exponent2)*(10^exponent2)
        let mut exponent5 = -exponent2;
        let mut exponent10 = exponent2; // Ultimately, we want this for the scale

        while exponent5 > 0 {
            // Check to see if the mantissa is divisible by 2
            if bits[0] & 0x1 == 0 {
                exponent10 += 1;
                exponent5 -= 1;

                // We can divide by 2 without losing precision
                let hi_carry = bits[2] & 0x1 == 1;
                bits[2] >>= 1;
                let mid_carry = bits[1] & 0x1 == 1;
                // SIGN_MASK doubles here as the 0x8000_0000 top-bit constant
                // used to carry the shifted-out bit into the lower word.
                bits[1] = (bits[1] >> 1) | if hi_carry { SIGN_MASK } else { 0 };
                bits[0] = (bits[0] >> 1) | if mid_carry { SIGN_MASK } else { 0 };
            } else {
                // The mantissa is NOT divisible by 2. Therefore the mantissa should
                // be multiplied by 5, unless the multiplication overflows.
                exponent5 -= 1;

                let mut temp = [bits[0], bits[1], bits[2]];
                if mul_by_u32(&mut temp, 5) == 0 {
                    // Multiplication succeeded without overflow, so copy result back
                    bits[0] = temp[0];
                    bits[1] = temp[1];
                    bits[2] = temp[2];
                } else {
                    // Multiplication by 5 overflows. The mantissa should be divided
                    // by 2, and therefore will lose significant digits.
                    exponent10 += 1;

                    // Shift right
                    let hi_carry = bits[2] & 0x1 == 1;
                    bits[2] >>= 1;
                    let mid_carry = bits[1] & 0x1 == 1;
                    bits[1] = (bits[1] >> 1) | if hi_carry { SIGN_MASK } else { 0 };
                    bits[0] = (bits[0] >> 1) | if mid_carry { SIGN_MASK } else { 0 };
                }
            }
        }

        // In order to divide the value by 5, it is best to multiply by 2/10.
        // Therefore, exponent10 is decremented, and the mantissa should be multiplied by 2
        while exponent5 < 0 {
            if bits[2] & SIGN_MASK == 0 {
                // No far left bit, the mantissa can withstand a shift-left without overflowing
                exponent10 -= 1;
                exponent5 += 1;
                shl_internal(bits, 1, 0);
            } else {
                // The mantissa would overflow if shifted. Therefore it should be
                // directly divided by 5. This will lose significant digits, unless
                // by chance the mantissa happens to be divisible by 5.
                exponent5 += 1;
                div_by_u32(bits, 5);
            }
        }

        // At this point, the mantissa has assimilated the exponent5, but
        // exponent10 might not be suitable for assignment. exponent10 must be
        // in the range [-MAX_PRECISION..0], so the mantissa must be scaled up or
        // down appropriately.
        while exponent10 > 0 {
            // In order to bring exponent10 down to 0, the mantissa should be
            // multiplied by 10 to compensate. If the exponent10 is too big, this
            // will cause the mantissa to overflow.
            if mul_by_u32(bits, 10) == 0 {
                exponent10 -= 1;
            } else {
                // Overflowed - return?
                return None;
            }
        }

        // In order to bring exponent up to -MAX_PRECISION, the mantissa should
        // be divided by 10 to compensate. If the exponent10 is too small, this
        // will cause the mantissa to underflow and become 0.
        while exponent10 < -(MAX_PRECISION as i32) {
            let rem10 = div_by_u32(bits, 10);
            exponent10 += 1;
            if is_all_zero(bits) {
                // Underflow, unable to keep dividing
                exponent10 = 0;
            } else if rem10 >= 5 {
                // Round half away from zero on the dropped digit.
                add_internal(bits, &ONE_INTERNAL_REPR);
            }
        }

        // This step is required in order to remove excess bits of precision from the
        // end of the bit representation, down to the precision guaranteed by the
        // floating point number
        if is64 {
            // Guaranteed to about 16 dp
            while exponent10 < 0 && (bits[2] != 0 || (bits[1] & 0xFFE0_0000) != 0) {
                let rem10 = div_by_u32(bits, 10);
                exponent10 += 1;
                if rem10 >= 5 {
                    add_internal(bits, &ONE_INTERNAL_REPR);
                }
            }
        } else {
            // Guaranteed to about 7 dp
            while exponent10 < 0 &&
                (bits[2] != 0 || bits[1] != 0 || (bits[2] == 0 && bits[1] == 0 && (bits[0] & 0xFF00_0000) != 0))
            {
                let rem10 = div_by_u32(bits, 10);
                exponent10 += 1;
                if rem10 >= 5 {
                    add_internal(bits, &ONE_INTERNAL_REPR);
                }
            }
        }

        // Remove multiples of 10 from the representation
        while exponent10 < 0 {
            let mut temp = [bits[0], bits[1], bits[2]];
            let remainder = div_by_u32(&mut temp, 10);
            if remainder == 0 {
                exponent10 += 1;
                bits[0] = temp[0];
                bits[1] = temp[1];
                bits[2] = temp[2];
            } else {
                break;
            }
        }

        Some(Decimal {
            lo: bits[0],
            mid: bits[1],
            hi: bits[2],
            flags: flags(!positive, -exponent10 as u32),
        })
    }
}

/// Packs a sign and scale into a `Decimal` flags word
/// (scale in bits 16-23, sign in bit 31).
#[inline]
fn flags(neg: bool, scale: u32) -> u32 {
    (scale << SCALE_SHIFT) | if neg { SIGN_MASK } else { 0 }
}

/// Rescales the given decimals to equivalent scales.
/// It will firstly try to scale both the left and the right side to
/// the maximum scale of left/right. If it is unable to do that it
/// will try to reduce the accuracy of the other argument.
/// e.g.
/// with 1.23 and 2.345 it'll rescale the first arg to 1.230
#[inline(always)]
fn rescale(left: &mut [u32; 3], left_scale: &mut u32, right: &mut [u32; 3], right_scale: &mut u32) {
    if left_scale == right_scale {
        // Nothing to do
        return;
    }

    enum Target {
        Left,
        Right,
    }

    let target; // The target which we're aiming for
    let mut diff;
    let my;
    let other;
    // `my` is the side with the SMALLER scale (to be scaled up);
    // `other` is the side we may have to round down if `my` overflows.
    if left_scale > right_scale {
        diff = *left_scale - *right_scale;
        my = right;
        other = left;
        target = Target::Left;
    } else {
        diff = *right_scale - *left_scale;
        my = left;
        other = right;
        target = Target::Right;
    };

    // Multiply `my` by 10 until the scales match or multiplication would overflow.
    let mut working = [my[0], my[1], my[2]];
    while diff > 0 && mul_by_10(&mut working) == 0 {
        my.copy_from_slice(&working);
        diff -= 1;
    }
    match target {
        Target::Left => *right_scale = *left_scale,
        Target::Right => *left_scale = *right_scale,
    }
    if diff == 0 {
        // We're done - same scale
        return;
    }

    // Scaling further isn't possible since we got an overflow
    // In this case we need to reduce the accuracy of the "side to keep"
    // Now do the necessary rounding
    let mut remainder = 0;
    while diff > 0 && !is_all_zero(other) {
        diff -= 1;
        *left_scale -= 1;
        *right_scale -= 1;
        // Any remainder is discarded if diff > 0 still (i.e. lost precision)
        remainder = div_by_10(other);
    }
    // Round half away from zero on the last dropped digit, propagating the carry.
    if remainder >= 5 {
        for part in other.iter_mut() {
            let digit = u64::from(*part) + 1u64;
            remainder = if digit > 0xFFFF_FFFF { 1 } else { 0 };
            *part = (digit & 0xFFFF_FFFF) as u32;
            if remainder == 0 {
                break;
            }
        }
    }
}

// This method should only be used where copy from slice cannot be
/// Copies `from` into `into`, stopping at the shorter of the two lengths.
#[inline]
fn copy_array_diff_lengths(into: &mut [u32], from: &[u32]) {
    for i in 0..into.len() {
        if i >= from.len() {
            break;
        }
        into[i] = from[i];
    }
}

/// Splits a u64 into two little-endian u32 words (low word first).
#[inline]
fn u64_to_array(value: u64) -> [u32; 2] {
    [
        (value & U32_MASK) as u32,
        (value >> 32 & U32_MASK) as u32,
    ]
}

/// Arbitrary-width addition `value += by` over little-endian u32 limbs.
/// Returns the final carry. Panics when the array lengths are incompatible
/// (i.e. `by` is more than one limb longer than `value`).
fn add_internal(value: &mut [u32], by: &[u32]) -> u32 {
    let mut carry: u64 = 0;
    let vl = value.len();
    let bl = by.len();
    if vl >= bl {
        let mut sum: u64;
        for i in 0..bl {
            sum = u64::from(value[i]) + u64::from(by[i]) + carry;
            value[i] = (sum & U32_MASK) as u32;
            carry = sum >> 32;
        }
        if vl > bl && carry > 0 {
            // Propagate the carry through the remaining high limbs of `value`.
            for i in value.iter_mut().skip(bl) {
                sum = u64::from(*i) + carry;
                *i = (sum & U32_MASK) as u32;
                carry = sum >> 32;
                if carry == 0 {
                    break;
                }
            }
        }
    } else if vl + 1 == bl {
        // Overflow, by default, is anything in the high portion of by
        let mut sum: u64;
        for i in 0..vl {
            sum = u64::from(value[i]) + u64::from(by[i]) + carry;
            value[i] = (sum & U32_MASK) as u32;
            carry = sum >> 32;
        }
        if by[vl] > 0 {
            carry += u64::from(by[vl]);
        }
    } else {
        panic!("Internal error: add using incompatible length arrays. {} <- {}", vl, bl);
    }
    carry as u32
}

/// Fixed-width 96-bit addition `value += by`; returns the carry out (0 or 1).
#[inline]
fn add3_internal(value: &mut [u32; 3], by: &[u32; 3]) -> u32 {
    let mut carry: u32 = 0;
    let bl = by.len();
    for i in 0..bl {
        let res1 = value[i].overflowing_add(by[i]);
        let res2 = res1.0.overflowing_add(carry);
        value[i] = res2.0;
        carry = (res1.1 | res2.1) as u32;
    }
    carry
}

/// Adds two scaled quotients together after aligning their scales,
/// used by the division algorithm. Returns `true` on underflow
/// (i.e. the result degenerated to zero while aligning).
fn add_with_scale_internal(
    quotient: &mut [u32; 3],
    quotient_scale: &mut i32,
    working_quotient: &mut [u32; 4],
    working_scale: &mut i32,
) -> bool {
    // Add quotient and the working (i.e. quotient = quotient + working)
    if is_all_zero(quotient) {
        // Quotient is zero so we can just copy the working quotient in directly
        // First, make sure they are both 96 bit.
        while working_quotient[3] != 0 {
            div_by_u32(working_quotient, 10);
            *working_scale -= 1;
        }
        copy_array_diff_lengths(quotient, working_quotient);
        *quotient_scale = *working_scale;
        return false;
    }

    if is_all_zero(working_quotient) {
        return false;
    }

    // We have ensured that our working is not zero so we should do the addition

    // If our two quotients are different then
    // try to scale down the one with the bigger scale
    let mut temp3 = [0u32, 0u32, 0u32];
    let mut temp4 = [0u32, 0u32, 0u32, 0u32];
    if *quotient_scale != *working_scale {
        // TODO: Remove necessity for temp (without performance impact)
        // Lossless scale-down: only divides while the remainder is zero.
        fn div_by_10(target: &mut [u32], temp: &mut [u32], scale: &mut i32, target_scale: i32) {
            // Copy to the temp array
            temp.copy_from_slice(target);
            // divide by 10 until target scale is reached
            while *scale > target_scale {
                let remainder = div_by_u32(temp, 10);
                if remainder == 0 {
                    *scale -= 1;
                    target.copy_from_slice(&temp);
                } else {
                    break;
                }
            }
        }

        if *quotient_scale < *working_scale {
            div_by_10(working_quotient, &mut temp4, working_scale, *quotient_scale);
        } else {
            div_by_10(quotient, &mut temp3, quotient_scale, *working_scale);
        }
    }

    // If our two quotients are still different then
    // try to scale up the smaller scale
    if *quotient_scale != *working_scale {
        // TODO: Remove necessity for temp (without performance impact)
        // Scale-up: multiplies by 10 while it fits without overflow.
        fn mul_by_10(target: &mut [u32], temp: &mut [u32], scale: &mut i32, target_scale: i32) {
            temp.copy_from_slice(target);
            let mut overflow = 0;
            // Multiply by 10 until target scale reached or overflow
            while *scale < target_scale && overflow == 0 {
                overflow = mul_by_u32(temp, 10);
                if overflow == 0 {
                    // Still no overflow
                    *scale += 1;
                    target.copy_from_slice(&temp);
                }
            }
        }

        if *quotient_scale > *working_scale {
            mul_by_10(working_quotient, &mut temp4, working_scale, *quotient_scale);
        } else {
            mul_by_10(quotient, &mut temp3, quotient_scale, *working_scale);
        }
    }

    // If our two quotients are still different then
    // try to scale down the one with the bigger scale
    // (ultimately losing significant digits)
    if *quotient_scale != *working_scale {
        // TODO: Remove necessity for temp (without performance impact)
        // Lossy scale-down: the remainder is simply discarded.
        fn div_by_10_lossy(target: &mut [u32], temp: &mut [u32], scale: &mut i32, target_scale: i32) {
            temp.copy_from_slice(target);
            // divide by 10 until target scale is reached
            while *scale > target_scale {
                div_by_u32(temp, 10);
                *scale -= 1;
                target.copy_from_slice(&temp);
            }
        }
        if *quotient_scale < *working_scale {
            div_by_10_lossy(working_quotient, &mut temp4, working_scale, *quotient_scale);
        } else {
            div_by_10_lossy(quotient, &mut temp3, quotient_scale, *working_scale);
        }
    }

    // If quotient or working are zero we have an underflow condition
    if is_all_zero(quotient) || is_all_zero(working_quotient) {
        // Underflow
        return true;
    } else {
        // Both numbers have the same scale and can be added.
        // We just need to know whether we can fit them in
        let mut underflow = false;
        let mut temp = [0u32, 0u32, 0u32];
        while !underflow {
            temp.copy_from_slice(quotient);

            // Add the working quotient
            let overflow = add_internal(&mut temp, working_quotient);
            if overflow == 0 {
                // addition was successful
                quotient.copy_from_slice(&temp);
                break;
            } else {
                // addition overflowed - remove significant digits and try again
                div_by_u32(quotient, 10);
                *quotient_scale -= 1;
                div_by_u32(working_quotient, 10);
                *working_scale -= 1;
                // Check for underflow
                underflow = is_all_zero(quotient) || is_all_zero(working_quotient);
            }
        }
        if underflow {
            return true;
        }
    }
    false
}

/// Adds two u32 limbs, returning `(low_word, carry_word)`.
#[inline]
fn add_part(left: u32, right: u32) -> (u32, u32) {
    let added = u64::from(left) + u64::from(right);
    (
        (added & U32_MASK) as u32,
        (added >> 32 & U32_MASK) as u32,
    )
}

/// Fixed-width 96-bit subtraction `value -= by` (wrapping; assumes value >= by).
#[inline(always)]
fn sub3_internal(value: &mut [u32; 3], by: &[u32; 3]) {
    let mut overflow = 0;
    let vl = value.len();
    for i in 0..vl {
        // Borrow-propagating limb subtraction: add 2^32 up front so the
        // intermediate never goes negative, then recover the borrow bit.
        let part = (0x1_0000_0000u64 + u64::from(value[i])) -
            (u64::from(by[i]) + overflow);
        value[i] = part as u32;
        overflow = 1 - (part >> 32);
    }
}

/// Arbitrary-width subtraction `value -= by` over little-endian u32 limbs.
/// Returns the final borrow (non-zero means `by` exceeded `value`).
fn sub_internal(value: &mut [u32], by: &[u32]) -> u32 {
    // The way this works is similar to long subtraction
    // Let's assume we're working with bytes for simpliciy in an example:
    //   257 - 8 = 249
    //   0000_0001 0000_0001 - 0000_0000 0000_1000 = 0000_0000 1111_1001
    // We start by doing the first byte...
    //   Overflow = 0
    //   Left = 0000_0001 (1)
    //   Right = 0000_1000 (8)
    // Firstly, we make sure the left and right are scaled up to twice the size
    //   Left = 0000_0000 0000_0001
    //   Right = 0000_0000 0000_1000
    // We then subtract right from left
    //   Result = Left - Right = 1111_1111 1111_1001
    // We subtract the overflow, which in this case is 0.
    // Because left < right (1 < 8) we invert the high part.
    //   Lo = 1111_1001
    //   Hi = 1111_1111 -> 0000_0001
    // Lo is the field, hi is the overflow.
    //
    // We do the same for the second byte...
    //   Overflow = 1
    //   Left = 0000_0001
    //   Right = 0000_0000
    //   Result = Left - Right = 0000_0000 0000_0001
    // We subtract the overflow...
    //   Result = 0000_0000 0000_0001 - 1 = 0
    // And we invert the high, just because (invert 0 = 0).
    // So our result is:
    //   0000_0000 1111_1001
    let mut overflow = 0;
    let vl = value.len();
    let bl = by.len();
    for i in 0..vl {
        if i >= bl {
            break;
        }
        let (lo, hi) = sub_part(value[i], by[i], overflow);
        value[i] = lo;
        overflow = hi;
    }
    overflow
}

/// Single-limb subtraction with borrow: returns `(result, borrow_out)`.
fn sub_part(left: u32, right: u32, overflow: u32) -> (u32, u32) {
    // Add 2^32 so the intermediate can't underflow; bit 32 of the result
    // tells us whether a borrow occurred.
    let part = 0x1_0000_0000u64 + u64::from(left) - (u64::from(right) + u64::from(overflow));
    let lo = part as u32;
    let hi = 1 - ((part >> 32) as u32);
    (lo, hi)
}

// Returns overflow
/// Multiplies a 96-bit value in place by 10; returns the overflow word.
#[inline]
fn mul_by_10(bits: &mut [u32; 3]) -> u32 {
    let mut overflow = 0u64;

    for b in bits.iter_mut() {
        let result = u64::from(*b) * 10u64 + overflow;
        let hi = (result >> 32) & U32_MASK;
        let lo = (result & U32_MASK) as u32;
        *b = lo;
        overflow = hi;
    }

    overflow as u32
}

// Returns overflow
/// Multiplies an arbitrary-width value in place by `m`; returns the overflow word.
fn mul_by_u32(bits: &mut [u32], m: u32) -> u32 {
    let mut overflow = 0;
    for b in bits.iter_mut() {
        let (lo, hi) = mul_part(*b, m, overflow);
        *b = lo;
        overflow = hi;
    }
    overflow
}

/// Single-limb multiply-accumulate: `left * right + high`, returned as `(lo, hi)`.
fn mul_part(left: u32, right: u32, high: u32) -> (u32, u32) {
    let result = u64::from(left) * u64::from(right) + u64::from(high);
    let hi = ((result >> 32) & U32_MASK) as u32;
    let lo = (result & U32_MASK) as u32;
    (lo, hi)
}

/// 128-bit / 96-bit binary long division. On entry `quotient` holds the
/// dividend; on exit it holds the quotient and `remainder` the remainder.
fn div_internal(quotient: &mut [u32; 4], remainder: &mut [u32; 4], divisor: &[u32; 3]) {
    // There are a couple of ways to do division on binary numbers:
    //   1. Using long division
    //   2. Using the complement method
    // ref: https://www.wikihow.com/Divide-Binary-Numbers
    // The complement method basically keeps trying to subtract the
    // divisor until it can't anymore and placing the rest in remainder.
let mut complement = [ divisor[0] ^ 0xFFFF_FFFF, divisor[1] ^ 0xFFFF_FFFF, divisor[2] ^ 0xFFFF_FFFF, 0xFFFF_FFFF, ]; // Add one onto the complement add_internal(&mut complement, &[1u32]); // Make sure the remainder is 0 remainder.iter_mut().for_each(|x| *x = 0); // If we have nothing in our hi+ block then shift over till we do let mut blocks_to_process = 0; while blocks_to_process < 4 && quotient[3] == 0 { // Shift whole blocks to the "left" shl_internal(quotient, 32, 0); // Incremember the counter blocks_to_process += 1; } // Let's try and do the addition... let mut block = blocks_to_process << 5; let mut working = [0u32, 0u32, 0u32, 0u32]; while block < 128 { // << 1 for quotient AND remainder let carry = shl_internal(quotient, 1, 0); shl_internal(remainder, 1, carry); // Copy the remainder of working into sub working.copy_from_slice(remainder); // Add the remainder with the complement add_internal(&mut working, &complement); // Check for the significant bit - move over to the quotient // as necessary if (working[3] & 0x8000_0000) == 0 { remainder.copy_from_slice(&working); quotient[0] |= 1; } // Increment our pointer block += 1; } } // Returns remainder fn div_by_u32(bits: &mut [u32], divisor: u32) -> u32 { if divisor == 0 { // Divide by zero panic!("Internal error: divide by zero"); } else if divisor == 1 { // dividend remains unchanged 0 } else { let mut remainder = 0u32; let divisor = u64::from(divisor); for part in bits.iter_mut().rev() { let temp = (u64::from(remainder) << 32) + u64::from(*part); remainder = (temp % divisor) as u32; *part = (temp / divisor) as u32; } remainder } } fn div_by_10(bits: &mut [u32; 3]) -> u32 { let mut remainder = 0u32; let divisor = 10u64; for part in bits.iter_mut().rev() { let temp = (u64::from(remainder) << 32) + u64::from(*part); remainder = (temp % divisor) as u32; *part = (temp / divisor) as u32; } remainder } #[inline] fn shl_internal(bits: &mut [u32], shift: u32, carry: u32) -> u32 { let mut shift = shift; // Whole 
blocks first while shift >= 32 { // memcpy would be useful here for i in (1..bits.len()).rev() { bits[i] = bits[i - 1]; } bits[0] = 0; shift -= 32; } // Continue with the rest if shift > 0 { let mut carry = carry; for part in bits.iter_mut() { let b = *part >> (32 - shift); *part = (*part << shift) | carry; carry = b; } carry } else { 0 } } #[inline] fn cmp_internal(left: &[u32; 3], right: &[u32; 3]) -> Ordering { let left_hi: u32 = left[2]; let right_hi: u32 = right[2]; let left_lo: u64 = u64::from(left[1]) << 32 | u64::from(left[0]); let right_lo: u64 = u64::from(right[1]) << 32 | u64::from(right[0]); if left_hi < right_hi || (left_hi <= right_hi && left_lo < right_lo) { Ordering::Less } else if left_hi == right_hi && left_lo == right_lo { Ordering::Equal } else { Ordering::Greater } } #[inline] fn is_all_zero(bits: &[u32]) -> bool { bits.iter().all(|b| *b == 0) } macro_rules! impl_from { ($T:ty, $from_ty:path) => { impl From<$T> for Decimal { #[inline] fn from(t: $T) -> Decimal { $from_ty(t).unwrap() } } } } impl_from!(isize, FromPrimitive::from_isize); impl_from!(i8, FromPrimitive::from_i8); impl_from!(i16, FromPrimitive::from_i16); impl_from!(i32, FromPrimitive::from_i32); impl_from!(i64, FromPrimitive::from_i64); impl_from!(usize, FromPrimitive::from_usize); impl_from!(u8, FromPrimitive::from_u8); impl_from!(u16, FromPrimitive::from_u16); impl_from!(u32, FromPrimitive::from_u32); impl_from!(u64, FromPrimitive::from_u64); macro_rules! forward_val_val_binop { (impl $imp:ident for $res:ty, $method:ident) => { impl $imp<$res> for $res { type Output = $res; #[inline] fn $method(self, other: $res) -> $res { (&self).$method(&other) } } } } macro_rules! forward_ref_val_binop { (impl $imp:ident for $res:ty, $method:ident) => { impl<'a> $imp<$res> for &'a $res { type Output = $res; #[inline] fn $method(self, other: $res) -> $res { self.$method(&other) } } } } macro_rules! 
forward_val_ref_binop {
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl<'a> $imp<&'a $res> for $res {
            type Output = $res;

            #[inline]
            fn $method(self, other: &$res) -> $res {
                (&self).$method(other)
            }
        }
    }
}

// Emits all three owned/borrowed forwarding impls for a binary operator.
macro_rules! forward_all_binop {
    (impl $imp:ident for $res:ty, $method:ident) => {
        forward_val_val_binop!(impl $imp for $res, $method);
        forward_ref_val_binop!(impl $imp for $res, $method);
        forward_val_ref_binop!(impl $imp for $res, $method);
    };
}

impl Zero for Decimal {
    fn is_zero(&self) -> bool {
        // Zero is any value whose 96-bit magnitude is zero, regardless of
        // sign or scale (e.g. "-0.00" is zero).
        self.lo.is_zero() && self.mid.is_zero() && self.hi.is_zero()
    }

    fn zero() -> Decimal {
        Decimal {
            flags: 0,
            hi: 0,
            lo: 0,
            mid: 0,
        }
    }
}

impl One for Decimal {
    fn one() -> Decimal {
        Decimal {
            flags: 0,
            hi: 0,
            lo: 1,
            mid: 0,
        }
    }
}

impl FromStr for Decimal {
    type Err = Error;

    /// Parses a plain (non-scientific) decimal string such as "-1_234.56".
    /// Accepts an optional leading sign, underscores as digit separators,
    /// and rounds half-up when more than MAX_PRECISION digits are supplied.
    fn from_str(value: &str) -> Result<Decimal, Self::Err> {
        if value.is_empty() {
            return Err(Error::new("Invalid decimal: empty"));
        }

        let mut offset = 0;
        let mut len = value.len();
        let bytes: Vec<u8> = value.bytes().collect();
        let mut negative = false; // assume positive

        // handle the sign
        if bytes[offset] == b'-' {
            negative = true; // leading minus means negative
            offset += 1;
            len -= 1;
        } else if bytes[offset] == b'+' {
            // leading + allowed
            offset += 1;
            len -= 1;
        }

        // should now be at numeric part of the significand
        let mut dot_offset: i32 = -1; // '.' offset, -1 if none
        let cfirst = offset; // record start of integer
        let mut coeff = Vec::new(); // integer significand array

        while len > 0 {
            let b = bytes[offset];
            match b {
                b'0'...b'9' => {
                    coeff.push(u32::from(b - b'0'));
                    offset += 1;
                    len -= 1;

                    // If the coefficient is longer than 29 then it'll affect the scale, so exit early
                    if coeff.len() as u32 > MAX_PRECISION {
                        // Before we exit, do some rounding if necessary
                        if offset < bytes.len() {
                            // We only need to look at the next significant digit
                            let next_byte = bytes[offset];
                            match next_byte {
                                b'0'...b'9' => {
                                    let digit = u32::from(next_byte - b'0');
                                    if digit >= 5 {
                                        // Round half-up: propagate a +1 carry back
                                        // through the collected digits.
                                        let mut index = coeff.len() - 1;
                                        loop {
                                            let new_digit = coeff[index] + 1;
                                            if new_digit <= 9 {
                                                coeff[index] = new_digit;
                                                break;
                                            } else {
                                                coeff[index] = 0;
                                                if index == 0 {
                                                    // Carried past the top digit: prepend 1 and
                                                    // drop the lowest digit to keep the length,
                                                    // shifting the decimal point accordingly.
                                                    coeff.insert(0, 1u32);
                                                    dot_offset += 1;
                                                    coeff.pop();
                                                    break;
                                                }
                                            }
                                            index -= 1;
                                        }
                                    }
                                }
                                b'_' => {}
                                b'.' => {
                                    // Still an error if we have a second dp
                                    if dot_offset >= 0 {
                                        return Err(Error::new("Invalid decimal: two decimal points"));
                                    }
                                }
                                _ => return Err(Error::new("Invalid decimal: unknown character")),
                            }
                        }
                        break;
                    }
                }
                b'.' => {
                    if dot_offset >= 0 {
                        return Err(Error::new("Invalid decimal: two decimal points"));
                    }
                    dot_offset = offset as i32;
                    offset += 1;
                    len -= 1;
                }
                b'_' => {
                    // Must start with a number...
                    if coeff.is_empty() {
                        return Err(Error::new("Invalid decimal: must start lead with a number"));
                    }
                    offset += 1;
                    len -= 1;
                }
                _ => return Err(Error::new("Invalid decimal: unknown character")),
            }
        }

        // here when no characters left
        if coeff.is_empty() {
            return Err(Error::new("Invalid decimal: no digits found"));
        }

        let scale = if dot_offset >= 0 {
            // we had a decimal place so set the scale
            (coeff.len() as u32) - (dot_offset as u32 - cfirst as u32)
        } else {
            0
        };

        // Parse this using base 10 (future allow using radix?)
        let mut data = [0u32, 0u32, 0u32];
        for digit in coeff {
            // If the data is going to overflow then we should go into recovery mode
            let overflow = mul_by_u32(&mut data, 10u32);
            if overflow > 0 {
                // This indicates a bug in the coeeficient rounding above
                return Err(Error::new("Invalid decimal: overflow"));
            }
            let carry = add_internal(&mut data, &[digit]);
            if carry > 0 {
                // Highly unlikely scenario which is more indicative of a bug
                return Err(Error::new("Invalid decimal: overflow"));
            }
        }

        Ok(Decimal {
            lo: data[0],
            mid: data[1],
            hi: data[2],
            flags: flags(negative, scale),
        })
    }
}

impl FromPrimitive for Decimal {
    fn from_i32(n: i32) -> Option<Decimal> {
        let flags: u32;
        let value_copy: i32;
        if n >= 0 {
            flags = 0;
            value_copy = n;
        } else {
            flags = SIGN_MASK;
            // NOTE(review): `-n` overflows for i32::MIN (panics in debug builds);
            // wrapping_neg + a cast through u32 would be safe — TODO confirm.
            value_copy = -n;
        }
        Some(Decimal {
            flags: flags,
            lo: value_copy as u32,
            mid: 0,
            hi: 0,
        })
    }

    fn from_i64(n: i64) -> Option<Decimal> {
        let flags: u32;
        let value_copy: i64;
        if n >= 0 {
            flags = 0;
            value_copy = n;
        } else {
            flags = SIGN_MASK;
            // NOTE(review): `-n` overflows for i64::MIN (panics in debug builds) — TODO confirm.
            value_copy = -n;
        }
        Some(Decimal {
            flags: flags,
            lo: value_copy as u32,
            mid: (value_copy >> 32) as u32,
            hi: 0,
        })
    }

    fn from_u32(n: u32) -> Option<Decimal> {
        Some(Decimal {
            flags: 0,
            lo: n,
            mid: 0,
            hi: 0,
        })
    }

    fn from_u64(n: u64) -> Option<Decimal> {
        Some(Decimal {
            flags: 0,
            lo: n as u32,
            mid: (n >> 32) as u32,
            hi: 0,
        })
    }

    fn from_f32(n: f32) -> Option<Decimal> {
        // Handle the case if it is NaN, Infinity or -Infinity
        if !n.is_finite() {
            return None;
        }

        // It's a shame we can't use a union for this due to it being broken up by bits
        // i.e. 1/8/23 (sign, exponent, mantissa)
        // See https://en.wikipedia.org/wiki/IEEE_754-1985
        // n = (sign*-1) * 2^exp * mantissa
        // Decimal of course stores this differently... 10^-exp * significand
        let raw = n.to_bits();
        let positive = (raw >> 31) == 0;
        let biased_exponent = ((raw >> 23) & 0xFF) as i32;
        let mantissa = raw & 0x007F_FFFF;

        // Handle the special zero case
        if biased_exponent == 0 && mantissa == 0 {
            let mut zero = Decimal::zero();
            if !positive {
                zero.set_sign(false);
            }
            return Some(zero);
        }

        // Get the bits and exponent2
        let mut exponent2 = biased_exponent - 127;
        let mut bits = [mantissa, 0u32, 0u32];
        if biased_exponent == 0 {
            // Denormalized number - correct the exponent
            exponent2 += 1;
        } else {
            // Add extra hidden bit to mantissa
            bits[0] |= 0x0080_0000;
        }

        // The act of copying a mantissa as integer bits is equivalent to shifting
        // left the mantissa 23 bits. The exponent is reduced to compensate.
        exponent2 -= 23;

        // Convert to decimal
        Decimal::base2_to_decimal(&mut bits, exponent2, positive, false)
    }

    fn from_f64(n: f64) -> Option<Decimal> {
        // Handle the case if it is NaN, Infinity or -Infinity
        if !n.is_finite() {
            return None;
        }

        // It's a shame we can't use a union for this due to it being broken up by bits
        // i.e. 1/11/52 (sign, exponent, mantissa)
        // See https://en.wikipedia.org/wiki/IEEE_754-1985
        // n = (sign*-1) * 2^exp * mantissa
        // Decimal of course stores this differently...
10^-exp * significand let raw = n.to_bits(); let positive = (raw >> 63) == 0; let biased_exponent = ((raw >> 52) & 0x7FF) as i32; let mantissa = raw & 0x000F_FFFF_FFFF_FFFF; // Handle the special zero case if biased_exponent == 0 && mantissa == 0 { let mut zero = Decimal::zero(); if !positive { zero.set_sign(false); } return Some(zero); } // Get the bits and exponent2 let mut exponent2 = biased_exponent - 1023; let mut bits = [ (mantissa & 0xFFFF_FFFF) as u32, ((mantissa >> 32) & 0xFFFF_FFFF) as u32, 0u32, ]; if biased_exponent == 0 { // Denormalized number - correct the exponent exponent2 += 1; } else { // Add extra hidden bit to mantissa bits[1] |= 0x0010_0000; } // The act of copying a mantissa as integer bits is equivalent to shifting // left the mantissa 52 bits. The exponent is reduced to compensate. exponent2 -= 52; // Convert to decimal Decimal::base2_to_decimal(&mut bits, exponent2, positive, true) } } impl ToPrimitive for Decimal { fn to_f64(&self) -> Option<f64> { if self.scale() == 0 { let integer = self.to_i64(); match integer { Some(i) => Some(i as f64), None => None, } } else { // TODO: Utilize mantissa algorithm. 
match self.to_string().parse::<f64>() { Ok(s) => Some(s), Err(_) => None, } } } fn to_i64(&self) -> Option<i64> { let d = self.trunc(); // Quick overflow check if d.hi != 0 || (d.mid & 0x8000_0000) > 0 { // Overflow return None; } let raw: i64 = (i64::from(d.mid) << 32) | i64::from(d.lo); if self.is_sign_negative() { Some(-raw) } else { Some(raw) } } fn to_u64(&self) -> Option<u64> { if self.is_sign_negative() { return None; } let d = self.trunc(); if d.hi != 0 { // Overflow return None; } Some((u64::from(d.mid) << 32) | u64::from(d.lo)) } } impl fmt::Display for Decimal { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { // Get the scale - where we need to put the decimal point let mut scale = self.scale() as usize; // Convert to a string and manipulate that (neg at front, inject decimal) let mut chars = Vec::new(); let mut working = [self.lo, self.mid, self.hi]; while !is_all_zero(&working) { let remainder = div_by_u32(&mut working, 10u32); chars.push(char::from(b'0' + remainder as u8)); } while scale > chars.len() { chars.push('0'); } let mut rep = chars.iter().rev().collect::<String>(); let len = rep.len(); if let Some(n_dp) = f.precision() { if n_dp < scale { rep.truncate(len - scale + n_dp) } else { let zeros = repeat("0").take(n_dp - scale).collect::<String>(); rep.push_str(&zeros[..]); } scale = n_dp; } let len = rep.len(); // Inject the decimal point if scale > 0 { // Must be a low fractional // TODO: Remove this condition as it's no longer possible for `scale > len` if scale > len { let mut new_rep = String::new(); let zeros = repeat("0").take(scale as usize - len).collect::<String>(); new_rep.push_str("0."); new_rep.push_str(&zeros[..]); new_rep.push_str(&rep[..]); rep = new_rep; } else if scale == len { rep.insert(0, '.'); rep.insert(0, '0'); } else { rep.insert(len - scale as usize, '.'); } } else if rep.is_empty() { // corner case for when we truncated everything in a low fractional rep.insert(0, '0'); } 
f.pad_integral(self.is_sign_positive(), "", &rep) } } impl Neg for Decimal { type Output = Decimal; fn neg(self) -> Decimal { -&self } } impl<'a> Neg for &'a Decimal { type Output = Decimal; fn neg(self) -> Decimal { Decimal { flags: flags(!self.is_sign_negative(), self.scale()), hi: self.hi, lo: self.lo, mid: self.mid, } } } forward_all_binop!(impl Add for Decimal, add); impl<'a, 'b> Add<&'b Decimal> for &'a Decimal { type Output = Decimal; #[inline(always)] fn add(self, other: &Decimal) -> Decimal { // Convert to the same scale let mut my = [self.lo, self.mid, self.hi]; let mut my_scale = self.scale(); let mut ot = [other.lo, other.mid, other.hi]; let mut other_scale = other.scale(); rescale(&mut my, &mut my_scale, &mut ot, &mut other_scale); let mut final_scale = my_scale.max(other_scale); // Add the items together let my_negative = self.is_sign_negative(); let other_negative = other.is_sign_negative(); let mut negative = false; let carry; if !(my_negative ^ other_negative) { negative = my_negative; carry = add3_internal(&mut my, &ot); } else { let cmp = cmp_internal(&my, &ot); // -x + y // if x > y then it's negative (i.e. -2 + 1) match cmp { Ordering::Less => { negative = other_negative; sub3_internal(&mut ot, &my); my[0] = ot[0]; my[1] = ot[1]; my[2] = ot[2]; } Ordering::Greater => { negative = my_negative; sub3_internal(&mut my, &ot); } Ordering::Equal => { // -2 + 2 my[0] = 0; my[1] = 0; my[2] = 0; } } carry = 0; } // If we have a carry we underflowed. 
// We need to lose some significant digits (if possible) if carry > 0 { if final_scale == 0 { panic!("Addition overflowed"); } // Copy it over to a temp array for modification let mut temp = [my[0], my[1], my[2], carry]; while final_scale > 0 && temp[3] != 0 { div_by_u32(&mut temp, 10); final_scale -= 1; } // If we still have a carry bit then we overflowed if temp[3] > 0 { panic!("Addition overflowed"); } // Copy it back - we're done my[0] = temp[0]; my[1] = temp[1]; my[2] = temp[2]; } Decimal { lo: my[0], mid: my[1], hi: my[2], flags: flags(negative, final_scale), } } } impl AddAssign for Decimal { fn add_assign(&mut self, other: Decimal) { let result = self.add(other); self.lo = result.lo; self.mid = result.mid; self.hi = result.hi; self.flags = result.flags; } } forward_all_binop!(impl Sub for Decimal, sub); impl<'a, 'b> Sub<&'b Decimal> for &'a Decimal { type Output = Decimal; #[inline(always)] fn sub(self, other: &Decimal) -> Decimal { let negated_other = Decimal { lo: other.lo, mid: other.mid, hi: other.hi, flags: other.flags ^ SIGN_MASK, }; self.add(negated_other) } } impl SubAssign for Decimal { fn sub_assign(&mut self, other: Decimal) { let result = self.sub(other); self.lo = result.lo; self.mid = result.mid; self.hi = result.hi; self.flags = result.flags; } } forward_all_binop!(impl Mul for Decimal, mul); impl<'a, 'b> Mul<&'b Decimal> for &'a Decimal { type Output = Decimal; #[inline] fn mul(self, other: &Decimal) -> Decimal { // Early exit if either is zero if self.is_zero() || other.is_zero() { return Decimal::zero(); } // We are only resulting in a negative if we have mismatched signs let negative = self.is_sign_negative() ^ other.is_sign_negative(); // We get the scale of the result by adding the operands. This may be too big, however // we'll correct later let mut final_scale = self.scale() + other.scale(); // First of all, if ONLY the lo parts of both numbers is filled // then we can simply do a standard 64 bit calculation. 
It's a minor // optimization however prevents the need for long form multiplication if self.mid == 0 && self.hi == 0 && other.mid == 0 && other.hi == 0 { // Simply multiplication let mut u64_result = u64_to_array(u64::from(self.lo) * u64::from(other.lo)); // If we're above max precision then this is a very small number if final_scale > MAX_PRECISION { final_scale -= MAX_PRECISION; // If the number is above 19 then this will equate to zero. // This is because the max value in 64 bits is 1.84E19 if final_scale > 19 { return Decimal::zero(); } let mut rem_lo = 0; let mut power; if final_scale > 9 { // Since 10^10 doesn't fit into u32, we divide by 10^10/4 // and multiply the next divisor by 4. rem_lo = div_by_u32(&mut u64_result, 2500000000); power = POWERS_10[final_scale as usize - 10] << 2; } else { power = POWERS_10[final_scale as usize]; } // Divide fits in 32 bits let rem_hi = div_by_u32(&mut u64_result, power); // Round the result. Since the divisor is a power of 10 // we check to see if the remainder is >= 1/2 divisor power >>= 1; if rem_hi >= power && (rem_hi > power || (rem_lo | (u64_result[0] & 0x1)) != 0) { u64_result[0] += 1; } final_scale = MAX_PRECISION; } return Decimal { lo: u64_result[0], mid: u64_result[1], hi: 0, flags: flags(negative, final_scale), }; } // We're using some of the high bits, so we essentially perform // long form multiplication. We compute the 9 partial products // into a 192 bit result array. // // [my-h][my-m][my-l] // x [ot-h][ot-m][ot-l] // -------------------------------------- // 1. [r-hi][r-lo] my-l * ot-l [0, 0] // 2. [r-hi][r-lo] my-l * ot-m [0, 1] // 3. [r-hi][r-lo] my-m * ot-l [1, 0] // 4. [r-hi][r-lo] my-m * ot-m [1, 1] // 5. [r-hi][r-lo] my-l * ot-h [0, 2] // 6. [r-hi][r-lo] my-h * ot-l [2, 0] // 7. [r-hi][r-lo] my-m * ot-h [1, 2] // 8. 
[r-hi][r-lo] my-h * ot-m [2, 1] // 9.[r-hi][r-lo] my-h * ot-h [2, 2] let my = [self.lo, self.mid, self.hi]; let ot = [other.lo, other.mid, other.hi]; let mut product = [0u32, 0u32, 0u32, 0u32, 0u32, 0u32]; // We can perform a minor short circuit here. If the // high portions are both 0 then we can skip portions 5-9 let to = if my[2] == 0 && ot[2] == 0 { 2 } else { 3 }; for my_index in 0..to { for ot_index in 0..to { let (mut rlo, mut rhi) = mul_part(my[my_index], ot[ot_index], 0); // Get the index for the lo portion of the product for prod in product.iter_mut().skip(my_index + ot_index) { let (res, overflow) = add_part(rlo, *prod); *prod = res; // If we have something in rhi from before then promote that if rhi > 0 { // If we overflowed in the last add, add that with rhi if overflow > 0 { let (nlo, nhi) = add_part(rhi, overflow); rlo = nlo; rhi = nhi; } else { rlo = rhi; rhi = 0; } } else if overflow > 0 { rlo = overflow; rhi = 0; } else { break; } // If nothing to do next round then break out if rlo == 0 { break; } } } } // If our result has used up the high portion of the product // then we either have an overflow or an underflow situation // Overflow will occur if we can't scale it back, whereas underflow // with kick in rounding let mut remainder = 0; while final_scale > 0 && (product[3] != 0 || product[4] != 0 || product[5] != 0) { remainder = div_by_u32(&mut product, 10u32); final_scale -= 1; } // Round up the carry if we need to if remainder >= 5 { for part in product.iter_mut() { if remainder == 0 { break; } let digit: u64 = u64::from(*part) + 1; remainder = if digit > 0xFFFF_FFFF { 1 } else { 0 }; *part = (digit & 0xFFFF_FFFF) as u32; } } // If we're still above max precision then we'll try again to // reduce precision - we may be dealing with a limit of "0" if final_scale > MAX_PRECISION { // We're in an underflow situation // The easiest way to remove precision is to divide off the result while final_scale > MAX_PRECISION && !is_all_zero(&product) { 
div_by_u32(&mut product, 10); final_scale -= 1; } // If we're still at limit then we can't represent any // siginificant decimal digits and will return an integer only // Can also be invoked while representing 0. if final_scale > MAX_PRECISION { final_scale = 0; } } else if !(product[3] == 0 && product[4] == 0 && product[5] == 0) { // We're in an overflow situation - we're within our precision bounds // but still have bits in overflow panic!("Multiplication overflowed"); } Decimal { lo: product[0], mid: product[1], hi: product[2], flags: flags(negative, final_scale), } } } impl MulAssign for Decimal { fn mul_assign(&mut self, other: Decimal) { let result = self.mul(other); self.lo = result.lo; self.mid = result.mid; self.hi = result.hi; self.flags = result.flags; } } forward_all_binop!(impl Div for Decimal, div); impl<'a, 'b> Div<&'b Decimal> for &'a Decimal { type Output = Decimal; #[inline] fn div(self, other: &Decimal) -> Decimal { if other.is_zero() { panic!("Division by zero"); } if self.is_zero() { return Decimal::zero(); } let dividend = [self.lo, self.mid, self.hi]; let divisor = [other.lo, other.mid, other.hi]; let mut quotient = [0u32, 0u32, 0u32]; let mut quotient_scale: i32 = self.scale() as i32 - other.scale() as i32; // We supply an extra overflow word for each of the dividend and the remainder let mut working_quotient = [ dividend[0], dividend[1], dividend[2], 0u32, ]; let mut working_remainder = [ 0u32, 0u32, 0u32, 0u32, ]; let mut working_scale = quotient_scale; let mut remainder_scale = quotient_scale; let mut underflow; loop { div_internal(&mut working_quotient, &mut working_remainder, &divisor); underflow = add_with_scale_internal( &mut quotient, &mut quotient_scale, &mut working_quotient, &mut working_scale, ); // Multiply the remainder by 10 let mut overflow = 0; for part in working_remainder.iter_mut() { let (lo, hi) = mul_part(*part, 10, overflow); *part = lo; overflow = hi; } // Copy temp remainder into the temp quotient section 
working_quotient.copy_from_slice(&working_remainder); remainder_scale += 1; working_scale = remainder_scale; if underflow || is_all_zero(&working_remainder) { break; } } // If we have a really big number try to adjust the scale to 0 while quotient_scale < 0 { copy_array_diff_lengths(&mut working_quotient, &quotient); working_quotient[3] = 0; working_remainder.iter_mut().for_each(|x| *x = 0); // Mul 10 let mut overflow = 0; for part in &mut working_quotient { let (lo, hi) = mul_part(*part, 10, overflow); *part = lo; overflow = hi; } for part in &mut working_remainder { let (lo, hi) = mul_part(*part, 10, overflow); *part = lo; overflow = hi; } if working_quotient[3] == 0 && is_all_zero(&working_remainder) { quotient_scale += 1; quotient[0] = working_quotient[0]; quotient[1] = working_quotient[1]; quotient[2] = working_quotient[2]; } else { // Overflow panic!("Division overflowed"); } } if quotient_scale > 255 { quotient[0] = 0; quotient[1] = 0; quotient[2] = 0; quotient_scale = 0; } let mut quotient_negative = self.is_sign_negative() ^ other.is_sign_negative(); // Check for underflow let mut final_scale: u32 = quotient_scale as u32; if final_scale > MAX_PRECISION { let mut remainder = 0; // Division underflowed. We must remove some significant digits over using // an invalid scale. 
while final_scale > MAX_PRECISION && !is_all_zero(&quotient) { remainder = div_by_u32(&mut quotient, 10); final_scale -= 1; } if final_scale > MAX_PRECISION { // Result underflowed so set to zero final_scale = 0; quotient_negative = false; } else if remainder >= 5 { for part in &mut quotient { if remainder == 0 { break; } let digit: u64 = u64::from(*part) + 1; remainder = if digit > 0xFFFF_FFFF { 1 } else { 0 }; *part = (digit & 0xFFFF_FFFF) as u32; } } } Decimal { lo: quotient[0], mid: quotient[1], hi: quotient[2], flags: flags(quotient_negative, final_scale), } } } impl DivAssign for Decimal { fn div_assign(&mut self, other: Decimal) { let result = self.div(other); self.lo = result.lo; self.mid = result.mid; self.hi = result.hi; self.flags = result.flags; } } forward_all_binop!(impl Rem for Decimal, rem); impl<'a, 'b> Rem<&'b Decimal> for &'a Decimal { type Output = Decimal; #[inline] fn rem(self, other: &Decimal) -> Decimal { if other.is_zero() { panic!("Division by zero"); } if self.is_zero() { return Decimal::zero(); } // Working is the remainder + the quotient // We use an aligned array since we'll be using it alot. 
let mut working_quotient = [self.lo, self.mid, self.hi, 0u32]; let mut working_remainder = [0u32, 0u32, 0u32, 0u32]; let divisor = [other.lo, other.mid, other.hi]; div_internal(&mut working_quotient, &mut working_remainder, &divisor); // Remainder has no scale however does have a sign (the same as self) Decimal { lo: working_remainder[0], mid: working_remainder[1], hi: working_remainder[2], flags: if self.is_sign_negative() { SIGN_MASK } else { 0 }, } } } impl RemAssign for Decimal { fn rem_assign(&mut self, other: Decimal) { let result = self.rem(other); self.lo = result.lo; self.mid = result.mid; self.hi = result.hi; self.flags = result.flags; } } impl PartialEq for Decimal { #[inline] fn eq(&self, other: &Decimal) -> bool { self.cmp(other) == Equal } } impl Eq for Decimal {} impl Hash for Decimal { fn hash<H: Hasher>(&self, state: &mut H) { self.lo.hash(state); self.mid.hash(state); self.hi.hash(state); self.flags.hash(state); } } impl PartialOrd for Decimal { #[inline] fn partial_cmp(&self, other: &Decimal) -> Option<Ordering> { Some(self.cmp(other)) } } impl Ord for Decimal { fn cmp(&self, other: &Decimal) -> Ordering { // Quick exit if major differences let self_negative = self.is_sign_negative(); let other_negative = other.is_sign_negative(); if self_negative && !other_negative { return Ordering::Less; } else if !self_negative && other_negative { return Ordering::Greater; } // If we have 1.23 and 1.2345 then we have // 123 scale 2 and 12345 scale 4 // We need to convert the first to // 12300 scale 4 so we can compare equally let left : &Decimal; let right : &Decimal; if self_negative && other_negative { // Both are negative, so reverse cmp left = other; right = self; } else { left = self; right = other; } let mut left_scale = left.scale(); let mut right_scale = right.scale(); if left_scale == right_scale { // Fast path for same scale if left.hi != right.hi { return left.hi.cmp(&right.hi); } if left.mid != right.mid { return left.mid.cmp(&right.mid); } return 
left.lo.cmp(&right.lo); } // Rescale and compare let mut left_raw = [left.lo, left.mid, left.hi]; let mut right_raw = [right.lo, right.mid, right.hi]; rescale( &mut left_raw, &mut left_scale, &mut right_raw, &mut right_scale, ); cmp_internal(&left_raw, &right_raw) } } #[cfg(test)] mod test { // Tests on private methods. // // All public tests should go under `tests/`. use super::*; #[test] fn it_can_rescale() { fn extract(value: &str) -> ([u32; 3], u32) { let v = Decimal::from_str(value).unwrap(); ([v.lo, v.mid, v.hi], v.scale()) } let tests = &[ ("1", "1", "1", "1"), ("1", "1.0", "1.0", "1.0"), ("1", "1.00000", "1.00000", "1.00000"), ("1", "1.0000000000", "1.0000000000", "1.0000000000"), ( "1", "1.00000000000000000000", "1.00000000000000000000", "1.00000000000000000000", ), ("1.1", "1.1", "1.1", "1.1"), ("1.1", "1.10000", "1.10000", "1.10000"), ("1.1", "1.1000000000", "1.1000000000", "1.1000000000"), ( "1.1", "1.10000000000000000000", "1.10000000000000000000", "1.10000000000000000000", ), ( "0.6386554621848739495798319328", "11.815126050420168067226890757", "0.638655462184873949579831933", "11.815126050420168067226890757", ), ( "0.0872727272727272727272727272", // Scale 28 "843.65000000", // Scale 8 "0.0872727272727272727272727", // 25 "843.6500000000000000000000000", // 25 ), ]; for &(left_raw, right_raw, expected_left, expected_right) in tests { // Left = the value to rescale // Right = the new scale we're scaling to // Expected = the expected left value after rescale let (expected_left, expected_lscale) = extract(expected_left); let (expected_right, expected_rscale) = extract(expected_right); let (mut left, mut left_scale) = extract(left_raw); let (mut right, mut right_scale) = extract(right_raw); rescale(&mut left, &mut left_scale, &mut right, &mut right_scale); assert_eq!(left, expected_left); assert_eq!(left_scale, expected_lscale); assert_eq!(right, expected_right); assert_eq!(right_scale, expected_rscale); // Also test the transitive case let (mut left, 
mut left_scale) = extract(left_raw);
            let (mut right, mut right_scale) = extract(right_raw);
            rescale(&mut right, &mut right_scale, &mut left, &mut left_scale);
            assert_eq!(left, expected_left);
            assert_eq!(left_scale, expected_lscale);
            assert_eq!(right, expected_right);
            assert_eq!(right_scale, expected_rscale);
        }
    }
}

// NOTE: according to benches, `#[inline]` in this case makes speed go down.
use Error;
use num::{FromPrimitive, One, ToPrimitive, Zero};
use std::cmp::*;
use std::cmp::Ordering::Equal;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::iter::repeat;
use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Rem, RemAssign, Sub, SubAssign};
use std::str::FromStr;

// Sign mask for the flags field. A value of zero in this bit indicates a
// positive Decimal value, and a value of one in this bit indicates a
// negative Decimal value.
const SIGN_MASK: u32 = 0x8000_0000;

// Scale mask for the flags field. This byte in the flags field contains
// the power of 10 to divide the Decimal value by. The scale byte must
// contain a value between 0 and 28 inclusive.
const SCALE_MASK: u32 = 0x00FF_0000;
const U8_MASK: u32 = 0x0000_00FF;
const U32_MASK: u64 = 0xFFFF_FFFF;

// Number of bits scale is shifted by.
const SCALE_SHIFT: u32 = 16;

// The maximum supported precision
const MAX_PRECISION: u32 = 28;

static ONE_INTERNAL_REPR: [u32; 3] = [1, 0, 0];

lazy_static!
{ static ref MIN: Decimal = Decimal { flags: 2_147_483_648, lo: 4_294_967_295, mid: 4_294_967_295, hi: 4_294_967_295 }; static ref MAX: Decimal = Decimal { flags: 0, lo: 4_294_967_295, mid: 4_294_967_295, hi: 4_294_967_295 }; } // Fast access for 10^n where n is 0-9 static POWERS_10: [u32; 10] = [ 1, 10, 100, 1_000, 10_000, 100_000, 1_000_000, 10_000_000, 100_000_000, 1_000_000_000, ]; // Fast access for 10^n where n is 10-19 #[allow(dead_code)] static BIG_POWERS_10: [u64; 10] = [ 10_000_000_000, 100_000_000_000, 1_000_000_000_000, 10_000_000_000_000, 100_000_000_000_000, 1_000_000_000_000_000, 10_000_000_000_000_000, 100_000_000_000_000_000, 1_000_000_000_000_000_000, 10_000_000_000_000_000_000, ]; /// `Decimal` represents a 128 bit representation of a fixed-precision decimal number. /// The finite set of values of type `Decimal` are of the form m / 10<sup>e</sup>, /// where m is an integer such that -2<sup>96</sup> <= m <= 2<sup>96</sup>, and e is an integer /// between 0 and 28 inclusive. #[derive(Clone, Copy, Debug)] pub struct Decimal { // Bits 0-15: unused // Bits 16-23: Contains "e", a value between 0-28 that indicates the scale // Bits 24-30: unused // Bit 31: the sign of the Decimal value, 0 meaning positive and 1 meaning negative. flags: u32, // The lo, mid, hi, and flags fields contain the representation of the // Decimal value as a 96-bit integer. hi: u32, lo: u32, mid: u32, } #[allow(dead_code)] impl Decimal { /// Returns a `Decimal` with a 64 bit `m` representation and corresponding `e` scale. /// /// # Arguments /// /// * `num` - An i64 that represents the `m` portion of the decimal number /// * `scale` - A u32 representing the `e` portion of the decimal number. 
///
/// # Example
///
/// ```
/// use rust_decimal::Decimal;
///
/// let pi = Decimal::new(3141, 3);
/// assert_eq!(pi.to_string(), "3.141");
/// ```
pub fn new(num: i64, scale: u32) -> Decimal {
    if scale > MAX_PRECISION {
        panic!(
            "Scale exceeds the maximum precision allowed: {} > {}",
            scale, MAX_PRECISION
        );
    }
    let flags: u32 = scale << SCALE_SHIFT;
    if num < 0 {
        // FIX: `num.abs()` overflows (panics in debug builds) for `i64::MIN`.
        // `wrapping_abs` produces the same bit pattern, and reinterpreting it
        // as u64 yields the correct magnitude 2^63. Also compute it once
        // instead of twice.
        let pos = num.wrapping_abs() as u64;
        return Decimal {
            flags: flags | SIGN_MASK,
            hi: 0,
            lo: (pos & U32_MASK) as u32,
            mid: ((pos >> 32) & U32_MASK) as u32,
        };
    }
    Decimal {
        flags: flags,
        hi: 0,
        lo: (num as u64 & U32_MASK) as u32,
        mid: ((num as u64 >> 32) & U32_MASK) as u32,
    }
}

/// Returns a `Decimal` using the instances constituent parts.
///
/// # Arguments
///
/// * `lo` - The low 32 bits of a 96-bit integer.
/// * `mid` - The middle 32 bits of a 96-bit integer.
/// * `hi` - The high 32 bits of a 96-bit integer.
/// * `negative` - `true` to indicate a negative number.
/// * `scale` - A power of 10 ranging from 0 to 28.
///
/// # Example
///
/// ```
/// use rust_decimal::Decimal;
///
/// let pi = Decimal::from_parts(1102470952, 185874565, 1703060790, false, 28);
/// assert_eq!(pi.to_string(), "3.1415926535897932384626433832");
/// ```
pub fn from_parts(lo: u32, mid: u32, hi: u32, negative: bool, scale: u32) -> Decimal {
    Decimal {
        lo: lo,
        mid: mid,
        hi: hi,
        flags: flags(negative, scale),
    }
}

/// Returns a `Result` which if successful contains the `Decimal` constitution of
/// the scientific notation provided by `value`.
///
/// # Arguments
///
/// * `value` - The scientific notation of the `Decimal`.
/// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let value = Decimal::from_scientific("9.7e-7").unwrap(); /// assert_eq!(value.to_string(), "0.00000097"); /// ``` pub fn from_scientific(value: &str) -> Result<Decimal, Error> { let err = Error::new("Failed to parse"); let mut split = value.splitn(2, 'e'); let base = split.next().ok_or(err.clone())?; let mut scale = split.next().ok_or(err.clone())?.to_string(); let mut ret = Decimal::from_str(base)?; if scale.contains('-') { scale.remove(0); let scale: u32 = scale.as_str().parse().map_err(move |_| err.clone())?; let current_scale = ret.scale(); ret.set_scale(current_scale+ scale)?; } else { if scale.contains('+') { scale.remove(0); } let pow: u32 = scale.as_str().parse().map_err(move |_| err.clone())?; ret *= Decimal::from_i64(10_i64.pow(pow)).unwrap(); ret = ret.normalize(); } Ok(ret) } /// Returns the scale of the decimal number, otherwise known as `e`. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let num = Decimal::new(1234, 3); /// assert_eq!(num.scale(), 3u32); /// ``` #[inline] pub fn scale(&self) -> u32 { ((self.flags & SCALE_MASK) >> SCALE_SHIFT) as u32 } /// An optimized method for changing the sign of a decimal number. /// /// # Arguments /// /// * `positive`: true if the resulting decimal should be positive. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let mut one = Decimal::new(1, 0); /// one.set_sign(false); /// assert_eq!(one.to_string(), "-1"); /// ``` pub fn set_sign(&mut self, positive: bool) { if positive { if self.is_sign_negative() { self.flags ^= SIGN_MASK; } } else { self.flags |= SIGN_MASK; } } /// An optimized method for changing the scale of a decimal number. 
/// /// # Arguments /// /// * `scale`: the new scale of the number /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let mut one = Decimal::new(1, 0); /// one.set_scale(5); /// assert_eq!(one.to_string(), "0.00001"); /// ``` pub fn set_scale(&mut self, scale: u32) -> Result<(), Error> { if scale > MAX_PRECISION { return Err(Error::new("Scale exceeds maximum precision")); } self.flags = (scale << SCALE_SHIFT) | (self.flags & SIGN_MASK); Ok(()) } /// Returns a serialized version of the decimal number. /// The resulting byte array will have the following representation: /// /// * Bytes 1-4: flags /// * Bytes 5-8: lo portion of `m` /// * Bytes 9-12: mid portion of `m` /// * Bytes 13-16: high portion of `m` pub fn serialize(&self) -> [u8; 16] { [ (self.flags & U8_MASK) as u8, ((self.flags >> 8) & U8_MASK) as u8, ((self.flags >> 16) & U8_MASK) as u8, ((self.flags >> 24) & U8_MASK) as u8, (self.lo & U8_MASK) as u8, ((self.lo >> 8) & U8_MASK) as u8, ((self.lo >> 16) & U8_MASK) as u8, ((self.lo >> 24) & U8_MASK) as u8, (self.mid & U8_MASK) as u8, ((self.mid >> 8) & U8_MASK) as u8, ((self.mid >> 16) & U8_MASK) as u8, ((self.mid >> 24) & U8_MASK) as u8, (self.hi & U8_MASK) as u8, ((self.hi >> 8) & U8_MASK) as u8, ((self.hi >> 16) & U8_MASK) as u8, ((self.hi >> 24) & U8_MASK) as u8, ] } /// Deserializes the given bytes into a decimal number. 
/// The deserialized byte representation must be 16 bytes and adhere to the followign convention: /// /// * Bytes 1-4: flags /// * Bytes 5-8: lo portion of `m` /// * Bytes 9-12: mid portion of `m` /// * Bytes 13-16: high portion of `m` pub fn deserialize(bytes: [u8; 16]) -> Decimal { Decimal { flags: u32::from(bytes[0]) | u32::from(bytes[1]) << 8 | u32::from(bytes[2]) << 16 | u32::from(bytes[3]) << 24, lo: u32::from(bytes[4]) | u32::from(bytes[5]) << 8 | u32::from(bytes[6]) << 16 | u32::from(bytes[7]) << 24, mid: u32::from(bytes[8]) | u32::from(bytes[9]) << 8 | u32::from(bytes[10]) << 16 | u32::from(bytes[11]) << 24, hi: u32::from(bytes[12]) | u32::from(bytes[13]) << 8 | u32::from(bytes[14]) << 16 | u32::from(bytes[15]) << 24, } } /// Returns `true` if the decimal is negative. #[deprecated(since = "0.6.3", note = "please use `is_sign_negative` instead")] pub fn is_negative(&self) -> bool { self.is_sign_negative() } /// Returns `true` if the decimal is positive. #[deprecated(since = "0.6.3", note = "please use `is_sign_positive` instead")] pub fn is_positive(&self) -> bool { self.is_sign_positive() } /// Returns `true` if the decimal is negative. #[inline(always)] pub fn is_sign_negative(&self) -> bool { self.flags & SIGN_MASK > 0 } /// Returns `true` if the decimal is positive. pub fn is_sign_positive(&self) -> bool { self.flags & SIGN_MASK == 0 } /// Returns the minimum possible number that `Decimal` can represent. pub fn min_value() -> Decimal { *MIN } /// Returns the maximum possible number that `Decimal` can represent. pub fn max_value() -> Decimal { *MAX } /// Returns a new `Decimal` integral with no fractional portion. /// This is a true truncation whereby no rounding is performed. 
///
/// # Example
///
/// ```
/// use rust_decimal::Decimal;
///
/// let pi = Decimal::new(3141, 3);
/// let trunc = Decimal::new(3, 0);
/// // note that it returns a decimal
/// assert_eq!(pi.trunc(), trunc);
/// ```
pub fn trunc(&self) -> Decimal {
    let mut scale = self.scale();
    if scale == 0 {
        // Nothing to do
        return *self;
    }
    let mut working = [self.lo, self.mid, self.hi];
    while scale > 0 {
        // We're removing precision, so we don't care about overflow
        if scale < 10 {
            div_by_u32(&mut working, POWERS_10[scale as usize]);
            break;
        } else {
            div_by_u32(&mut working, POWERS_10[9]); // Only 9 as this array starts with 1
            scale -= 9;
        }
    }
    Decimal {
        lo: working[0],
        mid: working[1],
        hi: working[2],
        flags: flags(self.is_sign_negative(), 0),
    }
}

/// Returns a new `Decimal` representing the fractional portion of the number.
///
/// # Example
///
/// ```
/// use rust_decimal::Decimal;
///
/// let pi = Decimal::new(3141, 3);
/// let fract = Decimal::new(141, 3);
/// // note that it returns a decimal
/// assert_eq!(pi.fract(), fract);
/// ```
pub fn fract(&self) -> Decimal {
    // This is essentially the original number minus the integral.
    // Could possibly be optimized in the future
    *self - self.trunc()
}

/// Computes the absolute value of `self`.
///
/// # Example
///
/// ```
/// use rust_decimal::Decimal;
///
/// let num = Decimal::new(-3141, 3);
/// assert_eq!(num.abs().to_string(), "3.141");
/// ```
pub fn abs(&self) -> Decimal {
    let mut me = *self;
    me.set_sign(true);
    me
}

/// Returns the largest integer less than or equal to a number.
///
/// # Example
///
/// ```
/// use rust_decimal::Decimal;
///
/// let num = Decimal::new(3641, 3);
/// assert_eq!(num.floor().to_string(), "3");
/// let num = Decimal::new(-3641, 3);
/// assert_eq!(num.floor().to_string(), "-4");
/// ```
pub fn floor(&self) -> Decimal {
    let truncated = self.trunc();
    // FIX: truncation rounds toward zero, which only equals the floor for
    // non-negative values. For a negative non-integer the largest integer
    // less than or equal to it is one below the truncation
    // (e.g. floor(-3.641) is -4, not -3).
    if self.is_sign_negative() && !self.fract().is_zero() {
        truncated - Decimal::one()
    } else {
        truncated
    }
}

/// Returns the smallest integer greater than or equal to a number.
///
/// # Example
///
/// ```
/// use rust_decimal::Decimal;
///
/// let num = Decimal::new(3141, 3);
/// assert_eq!(num.ceil().to_string(), "4");
/// let num = Decimal::new(3, 0);
/// assert_eq!(num.ceil().to_string(), "3");
/// let num = Decimal::new(-3141, 3);
/// assert_eq!(num.ceil().to_string(), "-3");
/// ```
pub fn ceil(&self) -> Decimal {
    if self.fract().is_zero() {
        // Already an integer - nothing to do (scale is preserved)
        *self
    } else if self.is_sign_negative() {
        // FIX: truncation rounds toward zero, which for a negative
        // non-integer is already the smallest integer >= self; the old
        // code incorrectly added one here (e.g. (-3.141).ceil() gave -2).
        self.trunc()
    } else {
        self.trunc() + Decimal::one()
    }
}

/// Strips any trailing zero's from a `Decimal`.
///
/// # Example
///
/// ```
/// use rust_decimal::Decimal;
///
/// let number = Decimal::new(3100, 3);
/// // note that it returns a decimal, without the extra scale
/// assert_eq!(number.normalize().to_string(), "3.1");
/// ```
pub fn normalize(&self) -> Decimal {
    let mut scale = self.scale();
    if scale == 0 {
        // Nothing to do
        return *self;
    }
    let mut result = [self.lo, self.mid, self.hi];
    let mut working = [self.lo, self.mid, self.hi];
    while scale > 0 {
        if div_by_u32(&mut working, 10) > 0 {
            break;
        }
        scale -= 1;
        result.copy_from_slice(&working);
    }
    Decimal {
        lo: result[0],
        mid: result[1],
        hi: result[2],
        flags: flags(self.is_sign_negative(), scale),
    }
}

/// Returns a new `Decimal` number with no fractional portion (i.e. an integer).
/// Rounding currently follows "Bankers Rounding" rules. e.g. 6.5 -> 6, 7.5 -> 8
///
/// # Example
///
/// ```
/// use rust_decimal::Decimal;
///
/// // Demonstrating bankers rounding...
/// let number_down = Decimal::new(65, 1);
/// let number_up = Decimal::new(75, 1);
/// assert_eq!(number_down.round().to_string(), "6");
/// assert_eq!(number_up.round().to_string(), "8");
/// ```
pub fn round(&self) -> Decimal {
    self.round_dp(0)
}

/// Returns a new `Decimal` number with the specified number of decimal points for fractional portion.
/// Rounding currently follows "Bankers Rounding" rules. e.g. 6.5 -> 6, 7.5 -> 8
///
/// # Arguments
/// * `dp`: the number of decimal points to round to.
///
    /// # Example
    ///
    /// ```
    /// use rust_decimal::Decimal;
    /// use std::str::FromStr;
    ///
    /// let pi = Decimal::from_str("3.1415926535897932384626433832").unwrap();
    /// assert_eq!(pi.round_dp(2).to_string(), "3.14");
    /// ```
    pub fn round_dp(&self, dp: u32) -> Decimal {
        let old_scale = self.scale();
        // Only do any work if we are actually reducing the scale; rounding to
        // a scale >= the current one is a no-op.
        if dp < old_scale {
            // Short circuit for zero
            if self.is_zero() {
                return Decimal {
                    lo: 0,
                    mid: 0,
                    hi: 0,
                    flags: flags(self.is_sign_negative(), dp),
                };
            }

            let mut value = [self.lo, self.mid, self.hi];
            let mut value_scale = self.scale();
            let negative = self.is_sign_negative();

            value_scale -= dp;

            // Rescale to zero so it's easier to work with
            while value_scale > 0 {
                if value_scale < 10 {
                    div_by_u32(&mut value, POWERS_10[value_scale as usize]);
                    value_scale = 0;
                } else {
                    div_by_u32(&mut value, POWERS_10[9]);
                    value_scale -= 9;
                }
            }

            // Do some midpoint rounding checks
            // We're actually doing two things here.
            //  1. Figuring out midpoint rounding when we're right on the boundary. e.g. 2.50000
            //  2. Figuring out whether to add one or not e.g. 2.51
            // For this, we need to figure out the fractional portion that is additional to
            // the rounded number. e.g. for 0.12345 rounding to 2dp we'd want 345.
            // We're doing the equivalent of losing precision (e.g. to get 0.12)
            // then increasing the precision back up to 0.12000
            let mut offset = [self.lo, self.mid, self.hi];
            let mut diff = old_scale - dp;

            while diff > 0 {
                if diff < 10 {
                    div_by_u32(&mut offset, POWERS_10[diff as usize]);
                    break;
                } else {
                    div_by_u32(&mut offset, POWERS_10[9]); // Only 9 as this array starts with 1
                    diff -= 9;
                }
            }

            let mut diff = old_scale - dp;

            // Multiply back up by the same power so `offset` holds the value
            // with the dropped digits zeroed out.
            while diff > 0 {
                if diff < 10 {
                    mul_by_u32(&mut offset, POWERS_10[diff as usize]);
                    break;
                } else {
                    mul_by_u32(&mut offset, POWERS_10[9]); // Only 9 as this array starts with 1
                    diff -= 9;
                }
            }

            // decimal_portion = the digits that were rounded away.
            let mut decimal_portion = [self.lo, self.mid, self.hi];
            sub_internal(&mut decimal_portion, &offset);

            // If the decimal_portion is zero then we round based on the other data
            // `cap` is 5 * 10^(digits dropped - 1), i.e. the exact midpoint.
            let mut cap = [5, 0, 0];
            for _ in 0..(old_scale - dp - 1) {
                mul_by_u32(&mut cap, 10);
            }
            let order = cmp_internal(&decimal_portion, &cap);
            match order {
                Ordering::Equal => {
                    // Exactly on the midpoint: bankers rounding - only round
                    // up when the retained value is odd.
                    if (value[0] & 1) == 1 {
                        add_internal(&mut value, &ONE_INTERNAL_REPR);
                    }
                }
                Ordering::Greater => {
                    // Doesn't matter about the decimal portion
                    add_internal(&mut value, &ONE_INTERNAL_REPR);
                }
                _ => {}
            }
            Decimal {
                lo: value[0],
                mid: value[1],
                hi: value[2],
                flags: flags(negative, dp),
            }
        } else {
            *self
        }
    }

    // Converts a base-2 mantissa (`bits`, 96-bit little-endian limbs) times
    // 2^exponent2 into a `Decimal`, trimming to the precision the source
    // float (f32 when `is64` is false, f64 when true) actually guarantees.
    // Returns `None` if the value cannot fit.
    fn base2_to_decimal(bits: &mut [u32; 3], exponent2: i32, positive: bool, is64: bool) -> Option<Self> {
        // 2^exponent2 = (10^exponent2)/(5^exponent2)
        //             = (5^-exponent2)*(10^exponent2)
        let mut exponent5 = -exponent2;
        let mut exponent10 = exponent2; // Ultimately, we want this for the scale

        while exponent5 > 0 {
            // Check to see if the mantissa is divisible by 2
            if bits[0] & 0x1 == 0 {
                exponent10 += 1;
                exponent5 -= 1;

                // We can divide by 2 without losing precision
                let hi_carry = bits[2] & 0x1 == 1;
                bits[2] >>= 1;
                let mid_carry = bits[1] & 0x1 == 1;
                bits[1] = (bits[1] >> 1) | if hi_carry { SIGN_MASK } else { 0 };
                bits[0] = (bits[0] >> 1) | if mid_carry { SIGN_MASK } else { 0 };
            } else {
                // The mantissa is NOT divisible by 2. Therefore the mantissa should
                // be multiplied by 5, unless the multiplication overflows.
                exponent5 -= 1;

                let mut temp = [bits[0], bits[1], bits[2]];
                if mul_by_u32(&mut temp, 5) == 0 {
                    // Multiplication succeeded without overflow, so copy result back
                    bits[0] = temp[0];
                    bits[1] = temp[1];
                    bits[2] = temp[2];
                } else {
                    // Multiplication by 5 overflows. The mantissa should be divided
                    // by 2, and therefore will lose significant digits.
                    exponent10 += 1;

                    // Shift right
                    let hi_carry = bits[2] & 0x1 == 1;
                    bits[2] >>= 1;
                    let mid_carry = bits[1] & 0x1 == 1;
                    bits[1] = (bits[1] >> 1) | if hi_carry { SIGN_MASK } else { 0 };
                    bits[0] = (bits[0] >> 1) | if mid_carry { SIGN_MASK } else { 0 };
                }
            }
        }

        // In order to divide the value by 5, it is best to multiply by 2/10.
        // Therefore, exponent10 is decremented, and the mantissa should be multiplied by 2
        while exponent5 < 0 {
            if bits[2] & SIGN_MASK == 0 {
                // No far left bit, the mantissa can withstand a shift-left without overflowing
                exponent10 -= 1;
                exponent5 += 1;
                shl_internal(bits, 1, 0);
            } else {
                // The mantissa would overflow if shifted. Therefore it should be
                // directly divided by 5. This will lose significant digits, unless
                // by chance the mantissa happens to be divisible by 5.
                exponent5 += 1;
                div_by_u32(bits, 5);
            }
        }

        // At this point, the mantissa has assimilated the exponent5, but
        // exponent10 might not be suitable for assignment. exponent10 must be
        // in the range [-MAX_PRECISION..0], so the mantissa must be scaled up or
        // down appropriately.
        while exponent10 > 0 {
            // In order to bring exponent10 down to 0, the mantissa should be
            // multiplied by 10 to compensate. If the exponent10 is too big, this
            // will cause the mantissa to overflow.
            if mul_by_u32(bits, 10) == 0 {
                exponent10 -= 1;
            } else {
                // Overflowed - return?
                return None;
            }
        }

        // In order to bring exponent up to -MAX_PRECISION, the mantissa should
        // be divided by 10 to compensate. If the exponent10 is too small, this
        // will cause the mantissa to underflow and become 0.
        while exponent10 < -(MAX_PRECISION as i32) {
            let rem10 = div_by_u32(bits, 10);
            exponent10 += 1;
            if is_all_zero(bits) {
                // Underflow, unable to keep dividing
                exponent10 = 0;
            } else if rem10 >= 5 {
                add_internal(bits, &ONE_INTERNAL_REPR);
            }
        }

        // This step is required in order to remove excess bits of precision from the
        // end of the bit representation, down to the precision guaranteed by the
        // floating point number
        if is64 {
            // Guaranteed to about 16 dp
            while exponent10 < 0 && (bits[2] != 0 || (bits[1] & 0xFFE0_0000) != 0) {
                let rem10 = div_by_u32(bits, 10);
                exponent10 += 1;
                if rem10 >= 5 {
                    add_internal(bits, &ONE_INTERNAL_REPR);
                }
            }
        } else {
            // Guaranteed to about 7 dp
            while exponent10 < 0 &&
                (bits[2] != 0 || bits[1] != 0 ||
                    (bits[2] == 0 && bits[1] == 0 && (bits[0] & 0xFF00_0000) != 0))
            {
                let rem10 = div_by_u32(bits, 10);
                exponent10 += 1;
                if rem10 >= 5 {
                    add_internal(bits, &ONE_INTERNAL_REPR);
                }
            }
        }

        // Remove multiples of 10 from the representation
        while exponent10 < 0 {
            let mut temp = [bits[0], bits[1], bits[2]];
            let remainder = div_by_u32(&mut temp, 10);
            if remainder == 0 {
                exponent10 += 1;
                bits[0] = temp[0];
                bits[1] = temp[1];
                bits[2] = temp[2];
            } else {
                break;
            }
        }

        Some(Decimal {
            lo: bits[0],
            mid: bits[1],
            hi: bits[2],
            flags: flags(!positive, -exponent10 as u32),
        })
    }
}

// Packs a sign bit and a scale into the `flags` word layout.
#[inline]
fn flags(neg: bool, scale: u32) -> u32 {
    (scale << SCALE_SHIFT) | if neg { SIGN_MASK } else { 0 }
}

/// Rescales the given decimals to equivalent scales.
/// It will firstly try to scale both the left and the right side to
/// the maximum scale of left/right. If it is unable to do that it
/// will try to reduce the accuracy of the other argument.
/// e.g. with 1.23 and 2.345 it'll rescale the first arg to 1.230
#[inline(always)]
fn rescale(left: &mut [u32; 3], left_scale: &mut u32, right: &mut [u32; 3], right_scale: &mut u32) {
    if left_scale == right_scale {
        // Nothing to do
        return;
    }

    enum Target {
        Left,
        Right,
    }

    let target; // The target which we're aiming for
    let mut diff;
    let my; // the side being scaled UP toward the target's scale
    let other; // the side that may have to LOSE accuracy if we overflow
    if left_scale > right_scale {
        diff = *left_scale - *right_scale;
        my = right;
        other = left;
        target = Target::Left;
    } else {
        diff = *right_scale - *left_scale;
        my = left;
        other = right;
        target = Target::Right;
    };

    // Multiply `my` up by 10 while it still fits in 96 bits.
    let mut working = [my[0], my[1], my[2]];
    while diff > 0 && mul_by_10(&mut working) == 0 {
        my.copy_from_slice(&working);
        diff -= 1;
    }

    match target {
        Target::Left => *right_scale = *left_scale,
        Target::Right => *left_scale = *right_scale,
    }

    if diff == 0 {
        // We're done - same scale
        return;
    }

    // Scaling further isn't possible since we got an overflow
    // In this case we need to reduce the accuracy of the "side to keep"

    // Now do the necessary rounding
    let mut remainder = 0;
    while diff > 0 && !is_all_zero(other) {
        diff -= 1;
        *left_scale -= 1;
        *right_scale -= 1;

        // Any remainder is discarded if diff > 0 still (i.e. lost precision)
        remainder = div_by_10(other);
    }
    // Round half-up using the last discarded digit, rippling any carry.
    if remainder >= 5 {
        for part in other.iter_mut() {
            let digit = u64::from(*part) + 1u64;
            remainder = if digit > 0xFFFF_FFFF { 1 } else { 0 };
            *part = (digit & 0xFFFF_FFFF) as u32;
            if remainder == 0 {
                break;
            }
        }
    }
}

// This method should only be used where copy from slice cannot be
// (i.e. the two arrays may have different lengths).
#[inline]
fn copy_array_diff_lengths(into: &mut [u32], from: &[u32]) {
    for i in 0..into.len() {
        if i >= from.len() {
            break;
        }
        into[i] = from[i];
    }
}

// Splits a u64 into two little-endian u32 limbs.
#[inline]
fn u64_to_array(value: u64) -> [u32; 2] {
    [
        (value & U32_MASK) as u32,
        (value >> 32 & U32_MASK) as u32,
    ]
}

// Adds `by` onto `value` in place (both little-endian u32 limb arrays);
// returns the final carry out of the top limb.
fn add_internal(value: &mut [u32], by: &[u32]) -> u32 {
    let mut carry: u64 = 0;
    let vl = value.len();
    let bl = by.len();
    if vl >= bl {
        let mut sum: u64;
        for i in 0..bl {
            sum = u64::from(value[i]) + u64::from(by[i]) + carry;
            value[i] = (sum & U32_MASK) as u32;
            carry = sum >> 32;
        }
        if vl > bl && carry > 0 {
            // Propagate the carry through the remaining high limbs.
            for i in value.iter_mut().skip(bl) {
                sum = u64::from(*i) + carry;
                *i = (sum & U32_MASK) as u32;
                carry = sum >> 32;
                if carry == 0 {
                    break;
                }
            }
        }
    } else if vl + 1 == bl {
        // Overflow, by default, is anything in the high portion of by
        let mut sum: u64;
        for i in 0..vl {
            sum = u64::from(value[i]) + u64::from(by[i]) + carry;
            value[i] = (sum & U32_MASK) as u32;
            carry = sum >> 32;
        }
        if by[vl] > 0 {
            carry += u64::from(by[vl]);
        }
    } else {
        panic!("Internal error: add using incompatible length arrays. {} <- {}", vl, bl);
    }
    carry as u32
}

// Fixed 3-limb addition; returns the carry out of the top limb.
#[inline]
fn add3_internal(value: &mut [u32; 3], by: &[u32; 3]) -> u32 {
    let mut carry: u32 = 0;
    let bl = by.len();
    for i in 0..bl {
        let res1 = value[i].overflowing_add(by[i]);
        let res2 = res1.0.overflowing_add(carry);
        value[i] = res2.0;
        carry = (res1.1 | res2.1) as u32;
    }
    carry
}

// Accumulates `working_quotient` (128-bit) into `quotient` (96-bit),
// reconciling their scales first. Returns `true` on underflow (the sum
// degraded to zero while shedding digits to fit).
fn add_with_scale_internal(
    quotient: &mut [u32; 3],
    quotient_scale: &mut i32,
    working_quotient: &mut [u32; 4],
    working_scale: &mut i32,
) -> bool {
    // Add quotient and the working (i.e. quotient = quotient + working)
    if is_all_zero(quotient) {
        // Quotient is zero so we can just copy the working quotient in directly
        // First, make sure they are both 96 bit.
        while working_quotient[3] != 0 {
            div_by_u32(working_quotient, 10);
            *working_scale -= 1;
        }
        copy_array_diff_lengths(quotient, working_quotient);
        *quotient_scale = *working_scale;
        return false;
    }

    if is_all_zero(working_quotient) {
        return false;
    }

    // We have ensured that our working is not zero so we should do the addition

    // If our two quotients are different then
    // try to scale down the one with the bigger scale
    let mut temp3 = [0u32, 0u32, 0u32];
    let mut temp4 = [0u32, 0u32, 0u32, 0u32];
    if *quotient_scale != *working_scale {
        // TODO: Remove necessity for temp (without performance impact)
        // NOTE: this local helper shadows the module-level `div_by_10`; it
        // only divides while the division is exact (remainder zero).
        fn div_by_10(target: &mut [u32], temp: &mut [u32], scale: &mut i32, target_scale: i32) {
            // Copy to the temp array
            temp.copy_from_slice(target);
            // divide by 10 until target scale is reached
            while *scale > target_scale {
                let remainder = div_by_u32(temp, 10);
                if remainder == 0 {
                    *scale -= 1;
                    target.copy_from_slice(&temp);
                } else {
                    break;
                }
            }
        }

        if *quotient_scale < *working_scale {
            div_by_10(working_quotient, &mut temp4, working_scale, *quotient_scale);
        } else {
            div_by_10(quotient, &mut temp3, quotient_scale, *working_scale);
        }
    }

    // If our two quotients are still different then
    // try to scale up the smaller scale
    if *quotient_scale != *working_scale {
        // TODO: Remove necessity for temp (without performance impact)
        fn mul_by_10(target: &mut [u32], temp: &mut [u32], scale: &mut i32, target_scale: i32) {
            temp.copy_from_slice(target);
            let mut overflow = 0;
            // Multiply by 10 until target scale reached or overflow
            while *scale < target_scale && overflow == 0 {
                overflow = mul_by_u32(temp, 10);
                if overflow == 0 {
                    // Still no overflow
                    *scale += 1;
                    target.copy_from_slice(&temp);
                }
            }
        }

        if *quotient_scale > *working_scale {
            mul_by_10(working_quotient, &mut temp4, working_scale, *quotient_scale);
        } else {
            mul_by_10(quotient, &mut temp3, quotient_scale, *working_scale);
        }
    }

    // If our two quotients are still different then
    // try to scale down the one with the bigger scale
    // (ultimately losing significant digits)
    if *quotient_scale != *working_scale {
        // TODO: Remove necessity for temp (without performance impact)
        fn div_by_10_lossy(target: &mut [u32], temp: &mut [u32], scale: &mut i32, target_scale: i32) {
            temp.copy_from_slice(target);
            // divide by 10 until target scale is reached
            while *scale > target_scale {
                div_by_u32(temp, 10);
                *scale -= 1;
                target.copy_from_slice(&temp);
            }
        }
        if *quotient_scale < *working_scale {
            div_by_10_lossy(working_quotient, &mut temp4, working_scale, *quotient_scale);
        } else {
            div_by_10_lossy(quotient, &mut temp3, quotient_scale, *working_scale);
        }
    }

    // If quotient or working are zero we have an underflow condition
    if is_all_zero(quotient) || is_all_zero(working_quotient) {
        // Underflow
        return true;
    } else {
        // Both numbers have the same scale and can be added.
        // We just need to know whether we can fit them in
        let mut underflow = false;
        let mut temp = [0u32, 0u32, 0u32];
        while !underflow {
            temp.copy_from_slice(quotient);

            // Add the working quotient
            let overflow = add_internal(&mut temp, working_quotient);
            if overflow == 0 {
                // addition was successful
                quotient.copy_from_slice(&temp);
                break;
            } else {
                // addition overflowed - remove significant digits and try again
                div_by_u32(quotient, 10);
                *quotient_scale -= 1;
                div_by_u32(working_quotient, 10);
                *working_scale -= 1;
                // Check for underflow
                underflow = is_all_zero(quotient) || is_all_zero(working_quotient);
            }
        }
        if underflow {
            return true;
        }
    }
    false
}

// Adds two u32s, returning (low word of sum, carry word).
#[inline]
fn add_part(left: u32, right: u32) -> (u32, u32) {
    let added = u64::from(left) + u64::from(right);
    (
        (added & U32_MASK) as u32,
        (added >> 32 & U32_MASK) as u32,
    )
}

// Fixed 3-limb in-place subtraction: value -= by. Assumes value >= by
// (callers compare with cmp_internal first); borrows ripple via `overflow`.
#[inline(always)]
fn sub3_internal(value: &mut [u32; 3], by: &[u32; 3]) {
    let mut overflow = 0;
    let vl = value.len();
    for i in 0..vl {
        let part = (0x1_0000_0000u64 + u64::from(value[i])) - (u64::from(by[i]) + overflow);
        value[i] = part as u32;
        overflow = 1 - (part >> 32);
    }
}

// Variable-length limb subtraction: value -= by; returns the final borrow.
fn sub_internal(value: &mut [u32], by: &[u32]) -> u32 {
    // The way this works is similar to long subtraction
    // Let's assume we're working with bytes for simplicity in an example:
    //   257 - 8 = 249
    //   0000_0001 0000_0001 - 0000_0000 0000_1000 = 0000_0000 1111_1001
    // We start by doing the first byte...
    //   Overflow = 0
    //   Left = 0000_0001 (1)
    //   Right = 0000_1000 (8)
    // Firstly, we make sure the left and right are scaled up to twice the size
    //   Left = 0000_0000 0000_0001
    //   Right = 0000_0000 0000_1000
    // We then subtract right from left
    //   Result = Left - Right = 1111_1111 1111_1001
    // We subtract the overflow, which in this case is 0.
    // Because left < right (1 < 8) we invert the high part.
    //   Lo = 1111_1001
    //   Hi = 1111_1111 -> 0000_0001
    // Lo is the field, hi is the overflow.
    // We do the same for the second byte...
    //   Overflow = 1
    //   Left = 0000_0001
    //   Right = 0000_0000
    //   Result = Left - Right = 0000_0000 0000_0001
    // We subtract the overflow...
    //   Result = 0000_0000 0000_0001 - 1 = 0
    // And we invert the high, just because (invert 0 = 0).
// So our result is:
    //   0000_0000 1111_1001
    let mut overflow = 0;
    let vl = value.len();
    let bl = by.len();
    for i in 0..vl {
        if i >= bl {
            break;
        }
        let (lo, hi) = sub_part(value[i], by[i], overflow);
        value[i] = lo;
        overflow = hi;
    }
    overflow
}

// One limb of long subtraction: left - right - overflow, returning
// (low word, borrow out).
fn sub_part(left: u32, right: u32, overflow: u32) -> (u32, u32) {
    let part = 0x1_0000_0000u64 + u64::from(left) - (u64::from(right) + u64::from(overflow));
    let lo = part as u32;
    let hi = 1 - ((part >> 32) as u32);
    (lo, hi)
}

// Multiplies the 3-limb value by 10 in place. Returns overflow
#[inline]
fn mul_by_10(bits: &mut [u32; 3]) -> u32 {
    let mut overflow = 0u64;
    for b in bits.iter_mut() {
        let result = u64::from(*b) * 10u64 + overflow;
        let hi = (result >> 32) & U32_MASK;
        let lo = (result & U32_MASK) as u32;
        *b = lo;
        overflow = hi;
    }
    overflow as u32
}

// Multiplies a limb array by `m` in place. Returns overflow
fn mul_by_u32(bits: &mut [u32], m: u32) -> u32 {
    let mut overflow = 0;
    for b in bits.iter_mut() {
        let (lo, hi) = mul_part(*b, m, overflow);
        *b = lo;
        overflow = hi;
    }
    overflow
}

// One limb of long multiplication: left * right + high -> (low, carry).
fn mul_part(left: u32, right: u32, high: u32) -> (u32, u32) {
    let result = u64::from(left) * u64::from(right) + u64::from(high);
    let hi = ((result >> 32) & U32_MASK) as u32;
    let lo = (result & U32_MASK) as u32;
    (lo, hi)
}

// Bit-by-bit restoring division: on exit `quotient` holds the quotient and
// `remainder` the remainder of the original 128-bit quotient / 96-bit divisor.
fn div_internal(quotient: &mut [u32; 4], remainder: &mut [u32; 4], divisor: &[u32; 3]) {
    // There are a couple of ways to do division on binary numbers:
    //   1. Using long division
    //   2. Using the complement method
    //  ref: https://www.wikihow.com/Divide-Binary-Numbers
    // The complement method basically keeps trying to subtract the
    // divisor until it can't anymore and placing the rest in remainder.
    let mut complement = [
        divisor[0] ^ 0xFFFF_FFFF,
        divisor[1] ^ 0xFFFF_FFFF,
        divisor[2] ^ 0xFFFF_FFFF,
        0xFFFF_FFFF,
    ];

    // Add one onto the complement
    add_internal(&mut complement, &[1u32]);

    // Make sure the remainder is 0
    remainder.iter_mut().for_each(|x| *x = 0);

    // If we have nothing in our hi+ block then shift over till we do
    let mut blocks_to_process = 0;
    while blocks_to_process < 4 && quotient[3] == 0 {
        // Shift whole blocks to the "left"
        shl_internal(quotient, 32, 0);

        // Increment the counter
        blocks_to_process += 1;
    }

    // Let's try and do the addition...
    let mut block = blocks_to_process << 5;
    let mut working = [0u32, 0u32, 0u32, 0u32];
    while block < 128 {
        // << 1 for quotient AND remainder
        let carry = shl_internal(quotient, 1, 0);
        shl_internal(remainder, 1, carry);

        // Copy the remainder of working into sub
        working.copy_from_slice(remainder);

        // Add the remainder with the complement
        add_internal(&mut working, &complement);

        // Check for the significant bit - move over to the quotient
        // as necessary
        if (working[3] & 0x8000_0000) == 0 {
            remainder.copy_from_slice(&working);
            quotient[0] |= 1;
        }

        // Increment our pointer
        block += 1;
    }
}

// Divides a limb array in place by a u32 divisor. Returns remainder
fn div_by_u32(bits: &mut [u32], divisor: u32) -> u32 {
    if divisor == 0 {
        // Divide by zero
        panic!("Internal error: divide by zero");
    } else if divisor == 1 {
        // dividend remains unchanged
        0
    } else {
        let mut remainder = 0u32;
        let divisor = u64::from(divisor);
        // Standard schoolbook division, most significant limb first.
        for part in bits.iter_mut().rev() {
            let temp = (u64::from(remainder) << 32) + u64::from(*part);
            remainder = (temp % divisor) as u32;
            *part = (temp / divisor) as u32;
        }

        remainder
    }
}

// Specialized divide-by-ten for the common 3-limb case; returns the remainder.
fn div_by_10(bits: &mut [u32; 3]) -> u32 {
    let mut remainder = 0u32;
    let divisor = 10u64;
    for part in bits.iter_mut().rev() {
        let temp = (u64::from(remainder) << 32) + u64::from(*part);
        remainder = (temp % divisor) as u32;
        *part = (temp / divisor) as u32;
    }

    remainder
}

// Shifts the limb array left by `shift` bits, feeding `carry` into the low
// bits. Returns the bits carried out of the final sub-word shift (bits
// pushed out by whole-word moves are discarded).
#[inline]
fn shl_internal(bits: &mut [u32], shift: u32, carry: u32) -> u32 {
    let mut shift = shift;

    // Whole blocks first
    while shift >= 32 {
        // memcpy would be useful here
        for i in (1..bits.len()).rev() {
            bits[i] = bits[i - 1];
        }
        bits[0] = 0;
        shift -= 32;
    }

    // Continue with the rest
    if shift > 0 {
        let mut carry = carry;
        for part in bits.iter_mut() {
            let b = *part >> (32 - shift);
            *part = (*part << shift) | carry;
            carry = b;
        }
        carry
    } else {
        0
    }
}

// Unsigned comparison of two 96-bit (3-limb little-endian) magnitudes.
#[inline]
fn cmp_internal(left: &[u32; 3], right: &[u32; 3]) -> Ordering {
    let left_hi: u32 = left[2];
    let right_hi: u32 = right[2];
    let left_lo: u64 = u64::from(left[1]) << 32 | u64::from(left[0]);
    let right_lo: u64 = u64::from(right[1]) << 32 | u64::from(right[0]);
    if left_hi < right_hi || (left_hi <= right_hi && left_lo < right_lo) {
        Ordering::Less
    } else if left_hi == right_hi && left_lo == right_lo {
        Ordering::Equal
    } else {
        Ordering::Greater
    }
}

#[inline]
fn is_all_zero(bits: &[u32]) -> bool {
    bits.iter().all(|b| *b == 0)
}

// Generates `From<$T> for Decimal` by delegating to the FromPrimitive
// constructor (which is infallible for these integer widths, hence unwrap).
macro_rules! impl_from {
    ($T:ty, $from_ty:path) => {
        impl From<$T> for Decimal {
            #[inline]
            fn from(t: $T) -> Decimal {
                $from_ty(t).unwrap()
            }
        }
    }
}

impl_from!(isize, FromPrimitive::from_isize);
impl_from!(i8, FromPrimitive::from_i8);
impl_from!(i16, FromPrimitive::from_i16);
impl_from!(i32, FromPrimitive::from_i32);
impl_from!(i64, FromPrimitive::from_i64);
impl_from!(usize, FromPrimitive::from_usize);
impl_from!(u8, FromPrimitive::from_u8);
impl_from!(u16, FromPrimitive::from_u16);
impl_from!(u32, FromPrimitive::from_u32);
impl_from!(u64, FromPrimitive::from_u64);

// The following three macros forward owned/borrowed operator combinations
// onto the single &T op &T implementation.
macro_rules! forward_val_val_binop {
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl $imp<$res> for $res {
            type Output = $res;

            #[inline]
            fn $method(self, other: $res) -> $res {
                (&self).$method(&other)
            }
        }
    }
}

macro_rules! forward_ref_val_binop {
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl<'a> $imp<$res> for &'a $res {
            type Output = $res;

            #[inline]
            fn $method(self, other: $res) -> $res {
                self.$method(&other)
            }
        }
    }
}

macro_rules! forward_val_ref_binop {
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl<'a> $imp<&'a $res> for $res {
            type Output = $res;

            #[inline]
            fn $method(self, other: &$res) -> $res {
                (&self).$method(other)
            }
        }
    }
}

macro_rules! forward_all_binop {
    (impl $imp:ident for $res:ty, $method:ident) => {
        forward_val_val_binop!(impl $imp for $res, $method);
        forward_ref_val_binop!(impl $imp for $res, $method);
        forward_val_ref_binop!(impl $imp for $res, $method);
    };
}

impl Zero for Decimal {
    fn is_zero(&self) -> bool {
        // Zero regardless of sign or scale flags.
        self.lo.is_zero() && self.mid.is_zero() && self.hi.is_zero()
    }

    fn zero() -> Decimal {
        Decimal {
            flags: 0,
            hi: 0,
            lo: 0,
            mid: 0,
        }
    }
}

impl One for Decimal {
    fn one() -> Decimal {
        Decimal {
            flags: 0,
            hi: 0,
            lo: 1,
            mid: 0,
        }
    }
}

impl FromStr for Decimal {
    type Err = Error;

    fn from_str(value: &str) -> Result<Decimal, Self::Err> {
        if value.is_empty() {
            return Err(Error::new("Invalid decimal: empty"));
        }

        let mut offset = 0;
        let mut len = value.len();
        let bytes: Vec<u8> = value.bytes().collect();
        let mut negative = false; // assume positive

        // handle the sign
        if bytes[offset] == b'-' {
            negative = true; // leading minus means negative
            offset += 1;
            len -= 1;
        } else if bytes[offset] == b'+' {
            // leading + allowed
            offset += 1;
            len -= 1;
        }

        // should now be at numeric part of the significand
        let mut dot_offset: i32 = -1; // '.' offset, -1 if none
        let cfirst = offset; // record start of integer
        let mut coeff = Vec::new(); // integer significand array

        while len > 0 {
            let b = bytes[offset];
            match b {
                b'0'...b'9' => {
                    coeff.push(u32::from(b - b'0'));
                    offset += 1;
                    len -= 1;

                    // If the coefficient is longer than 29 then it'll affect the scale, so exit early
                    if coeff.len() as u32 > MAX_PRECISION {
                        // Before we exit, do some rounding if necessary
                        if offset < bytes.len() {
                            // We only need to look at the next significant digit
                            let next_byte = bytes[offset];
                            match next_byte {
                                b'0'...b'9' => {
                                    let digit = u32::from(next_byte - b'0');
                                    if digit >= 5 {
                                        // Round half-up: increment the last kept
                                        // digit, rippling any 9 -> 0 carries left.
                                        let mut index = coeff.len() - 1;
                                        loop {
                                            let new_digit = coeff[index] + 1;
                                            if new_digit <= 9 {
                                                coeff[index] = new_digit;
                                                break;
                                            } else {
                                                coeff[index] = 0;
                                                if index == 0 {
                                                    // Carried past the most significant digit:
                                                    // prepend a 1 and drop the trailing digit
                                                    // to stay within precision.
                                                    coeff.insert(0, 1u32);
                                                    dot_offset += 1;
                                                    coeff.pop();
                                                    break;
                                                }
                                            }
                                            index -= 1;
                                        }
                                    }
                                }
                                b'_' => {}
                                b'.' => {
                                    // Still an error if we have a second dp
                                    if dot_offset >= 0 {
                                        return Err(Error::new("Invalid decimal: two decimal points"));
                                    }
                                }
                                _ => return Err(Error::new("Invalid decimal: unknown character")),
                            }
                        }
                        break;
                    }
                }
                b'.' => {
                    if dot_offset >= 0 {
                        return Err(Error::new("Invalid decimal: two decimal points"));
                    }
                    dot_offset = offset as i32;
                    offset += 1;
                    len -= 1;
                }
                b'_' => {
                    // Must start with a number...
                    if coeff.is_empty() {
                        return Err(Error::new("Invalid decimal: must start lead with a number"));
                    }
                    offset += 1;
                    len -= 1;
                }
                _ => return Err(Error::new("Invalid decimal: unknown character")),
            }
        }

        // here when no characters left
        if coeff.is_empty() {
            return Err(Error::new("Invalid decimal: no digits found"));
        }

        let scale = if dot_offset >= 0 {
            // we had a decimal place so set the scale
            (coeff.len() as u32) - (dot_offset as u32 - cfirst as u32)
        } else {
            0
        };

        // Parse this using base 10 (future allow using radix?)
let mut data = [0u32, 0u32, 0u32]; for digit in coeff { // If the data is going to overflow then we should go into recovery mode let overflow = mul_by_u32(&mut data, 10u32); if overflow > 0 { // This indicates a bug in the coeeficient rounding above return Err(Error::new("Invalid decimal: overflow")); } let carry = add_internal(&mut data, &[digit]); if carry > 0 { // Highly unlikely scenario which is more indicative of a bug return Err(Error::new("Invalid decimal: overflow")); } } Ok(Decimal { lo: data[0], mid: data[1], hi: data[2], flags: flags(negative, scale), }) } } impl FromPrimitive for Decimal { fn from_i32(n: i32) -> Option<Decimal> { let flags: u32; let value_copy: i32; if n >= 0 { flags = 0; value_copy = n; } else { flags = SIGN_MASK; value_copy = -n; } Some(Decimal { flags: flags, lo: value_copy as u32, mid: 0, hi: 0, }) } fn from_i64(n: i64) -> Option<Decimal> { let flags: u32; let value_copy: i64; if n >= 0 { flags = 0; value_copy = n; } else { flags = SIGN_MASK; value_copy = -n; } Some(Decimal { flags: flags, lo: value_copy as u32, mid: (value_copy >> 32) as u32, hi: 0, }) } fn from_u32(n: u32) -> Option<Decimal> { Some(Decimal { flags: 0, lo: n, mid: 0, hi: 0, }) } fn from_u64(n: u64) -> Option<Decimal> { Some(Decimal { flags: 0, lo: n as u32, mid: (n >> 32) as u32, hi: 0, }) } fn from_f32(n: f32) -> Option<Decimal> { // Handle the case if it is NaN, Infinity or -Infinity if !n.is_finite() { return None; } // It's a shame we can't use a union for this due to it being broken up by bits // i.e. 1/8/23 (sign, exponent, mantissa) // See https://en.wikipedia.org/wiki/IEEE_754-1985 // n = (sign*-1) * 2^exp * mantissa // Decimal of course stores this differently... 
10^-exp * significand let raw = n.to_bits(); let positive = (raw >> 31) == 0; let biased_exponent = ((raw >> 23) & 0xFF) as i32; let mantissa = raw & 0x007F_FFFF; // Handle the special zero case if biased_exponent == 0 && mantissa == 0 { let mut zero = Decimal::zero(); if !positive { zero.set_sign(false); } return Some(zero); } // Get the bits and exponent2 let mut exponent2 = biased_exponent - 127; let mut bits = [mantissa, 0u32, 0u32]; if biased_exponent == 0 { // Denormalized number - correct the exponent exponent2 += 1; } else { // Add extra hidden bit to mantissa bits[0] |= 0x0080_0000; } // The act of copying a mantissa as integer bits is equivalent to shifting // left the mantissa 23 bits. The exponent is reduced to compensate. exponent2 -= 23; // Convert to decimal Decimal::base2_to_decimal(&mut bits, exponent2, positive, false) } fn from_f64(n: f64) -> Option<Decimal> { // Handle the case if it is NaN, Infinity or -Infinity if !n.is_finite() { return None; } // It's a shame we can't use a union for this due to it being broken up by bits // i.e. 1/11/52 (sign, exponent, mantissa) // See https://en.wikipedia.org/wiki/IEEE_754-1985 // n = (sign*-1) * 2^exp * mantissa // Decimal of course stores this differently... 
10^-exp * significand let raw = n.to_bits(); let positive = (raw >> 63) == 0; let biased_exponent = ((raw >> 52) & 0x7FF) as i32; let mantissa = raw & 0x000F_FFFF_FFFF_FFFF; // Handle the special zero case if biased_exponent == 0 && mantissa == 0 { let mut zero = Decimal::zero(); if !positive { zero.set_sign(false); } return Some(zero); } // Get the bits and exponent2 let mut exponent2 = biased_exponent - 1023; let mut bits = [ (mantissa & 0xFFFF_FFFF) as u32, ((mantissa >> 32) & 0xFFFF_FFFF) as u32, 0u32, ]; if biased_exponent == 0 { // Denormalized number - correct the exponent exponent2 += 1; } else { // Add extra hidden bit to mantissa bits[1] |= 0x0010_0000; } // The act of copying a mantissa as integer bits is equivalent to shifting // left the mantissa 52 bits. The exponent is reduced to compensate. exponent2 -= 52; // Convert to decimal Decimal::base2_to_decimal(&mut bits, exponent2, positive, true) } } impl ToPrimitive for Decimal { fn to_f64(&self) -> Option<f64> { if self.scale() == 0 { let integer = self.to_i64(); match integer { Some(i) => Some(i as f64), None => None, } } else { // TODO: Utilize mantissa algorithm. 
// Fall back to formatting and re-parsing; precision may be lost.
            match self.to_string().parse::<f64>() {
                Ok(s) => Some(s),
                Err(_) => None,
            }
        }
    }

    fn to_i64(&self) -> Option<i64> {
        let d = self.trunc();
        // Quick overflow check
        // NOTE(review): rejecting a set bit 31 of `mid` also rejects a
        // magnitude of exactly 2^63 (i.e. i64::MIN), which is conservative.
        if d.hi != 0 || (d.mid & 0x8000_0000) > 0 {
            // Overflow
            return None;
        }

        let raw: i64 = (i64::from(d.mid) << 32) | i64::from(d.lo);
        if self.is_sign_negative() {
            Some(-raw)
        } else {
            Some(raw)
        }
    }

    fn to_u64(&self) -> Option<u64> {
        // Negative values cannot be represented, regardless of magnitude.
        if self.is_sign_negative() {
            return None;
        }

        let d = self.trunc();
        if d.hi != 0 {
            // Overflow
            return None;
        }

        Some((u64::from(d.mid) << 32) | u64::from(d.lo))
    }
}

impl fmt::Display for Decimal {
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Get the scale - where we need to put the decimal point
        let mut scale = self.scale() as usize;

        // Convert to a string and manipulate that (neg at front, inject decimal)
        // Digits are generated least-significant first, then reversed below.
        let mut chars = Vec::new();
        let mut working = [self.lo, self.mid, self.hi];
        while !is_all_zero(&working) {
            let remainder = div_by_u32(&mut working, 10u32);
            chars.push(char::from(b'0' + remainder as u8));
        }
        // Pad so a low fractional like 0.001 has enough digits for its scale.
        while scale > chars.len() {
            chars.push('0');
        }

        let mut rep = chars.iter().rev().collect::<String>();
        let len = rep.len();

        // Honor a `{:.N}` format precision.
        // NOTE(review): when N < scale this TRUNCATES the extra digits rather
        // than rounding them.
        if let Some(n_dp) = f.precision() {
            if n_dp < scale {
                rep.truncate(len - scale + n_dp)
            } else {
                let zeros = repeat("0").take(n_dp - scale).collect::<String>();
                rep.push_str(&zeros[..]);
            }
            scale = n_dp;
        }
        let len = rep.len();

        // Inject the decimal point
        if scale > 0 {
            // Must be a low fractional
            // TODO: Remove this condition as it's no longer possible for `scale > len`
            if scale > len {
                let mut new_rep = String::new();
                let zeros = repeat("0").take(scale as usize - len).collect::<String>();
                new_rep.push_str("0.");
                new_rep.push_str(&zeros[..]);
                new_rep.push_str(&rep[..]);
                rep = new_rep;
            } else if scale == len {
                // All digits are fractional: prefix "0."
                rep.insert(0, '.');
                rep.insert(0, '0');
            } else {
                rep.insert(len - scale as usize, '.');
            }
        } else if rep.is_empty() {
            // corner case for when we truncated everything in a low fractional
            rep.insert(0, '0');
        }
// pad_integral applies width/fill/alignment and emits the '-' sign
        // for us when the first argument is false.
        f.pad_integral(self.is_sign_positive(), "", &rep)
    }
}

impl Neg for Decimal {
    type Output = Decimal;

    fn neg(self) -> Decimal {
        -&self
    }
}

impl<'a> Neg for &'a Decimal {
    type Output = Decimal;

    fn neg(self) -> Decimal {
        // Negation only flips the sign flag; magnitude and scale are untouched.
        Decimal {
            flags: flags(!self.is_sign_negative(), self.scale()),
            hi: self.hi,
            lo: self.lo,
            mid: self.mid,
        }
    }
}

forward_all_binop!(impl Add for Decimal, add);

impl<'a, 'b> Add<&'b Decimal> for &'a Decimal {
    type Output = Decimal;

    #[inline(always)]
    fn add(self, other: &Decimal) -> Decimal {
        // Convert to the same scale
        let mut my = [self.lo, self.mid, self.hi];
        let mut my_scale = self.scale();
        let mut ot = [other.lo, other.mid, other.hi];
        let mut other_scale = other.scale();
        rescale(&mut my, &mut my_scale, &mut ot, &mut other_scale);
        let mut final_scale = my_scale.max(other_scale);

        // Add the items together
        let my_negative = self.is_sign_negative();
        let other_negative = other.is_sign_negative();
        let mut negative = false;
        let carry;
        if !(my_negative ^ other_negative) {
            // Same sign: add magnitudes, keep the shared sign.
            negative = my_negative;
            carry = add3_internal(&mut my, &ot);
        } else {
            // Mixed signs: subtract the smaller magnitude from the larger
            // and take the sign of the larger.
            let cmp = cmp_internal(&my, &ot);
            // -x + y
            // if x > y then it's negative (i.e. -2 + 1)
            match cmp {
                Ordering::Less => {
                    negative = other_negative;
                    sub3_internal(&mut ot, &my);
                    my[0] = ot[0];
                    my[1] = ot[1];
                    my[2] = ot[2];
                }
                Ordering::Greater => {
                    negative = my_negative;
                    sub3_internal(&mut my, &ot);
                }
                Ordering::Equal => {
                    // -2 + 2
                    my[0] = 0;
                    my[1] = 0;
                    my[2] = 0;
                }
            }
            carry = 0;
        }

        // If we have a carry we underflowed.
// We need to lose some significant digits (if possible) if carry > 0 { if final_scale == 0 { panic!("Addition overflowed"); } // Copy it over to a temp array for modification let mut temp = [my[0], my[1], my[2], carry]; while final_scale > 0 && temp[3] != 0 { div_by_u32(&mut temp, 10); final_scale -= 1; } // If we still have a carry bit then we overflowed if temp[3] > 0 { panic!("Addition overflowed"); } // Copy it back - we're done my[0] = temp[0]; my[1] = temp[1]; my[2] = temp[2]; } Decimal { lo: my[0], mid: my[1], hi: my[2], flags: flags(negative, final_scale), } } } impl AddAssign for Decimal { fn add_assign(&mut self, other: Decimal) { let result = self.add(other); self.lo = result.lo; self.mid = result.mid; self.hi = result.hi; self.flags = result.flags; } } forward_all_binop!(impl Sub for Decimal, sub); impl<'a, 'b> Sub<&'b Decimal> for &'a Decimal { type Output = Decimal; #[inline(always)] fn sub(self, other: &Decimal) -> Decimal { let negated_other = Decimal { lo: other.lo, mid: other.mid, hi: other.hi, flags: other.flags ^ SIGN_MASK, }; self.add(negated_other) } } impl SubAssign for Decimal { fn sub_assign(&mut self, other: Decimal) { let result = self.sub(other); self.lo = result.lo; self.mid = result.mid; self.hi = result.hi; self.flags = result.flags; } } forward_all_binop!(impl Mul for Decimal, mul); impl<'a, 'b> Mul<&'b Decimal> for &'a Decimal { type Output = Decimal; #[inline] fn mul(self, other: &Decimal) -> Decimal { // Early exit if either is zero if self.is_zero() || other.is_zero() { return Decimal::zero(); } // We are only resulting in a negative if we have mismatched signs let negative = self.is_sign_negative() ^ other.is_sign_negative(); // We get the scale of the result by adding the operands. This may be too big, however // we'll correct later let mut final_scale = self.scale() + other.scale(); // First of all, if ONLY the lo parts of both numbers is filled // then we can simply do a standard 64 bit calculation. 
It's a minor // optimization however prevents the need for long form multiplication if self.mid == 0 && self.hi == 0 && other.mid == 0 && other.hi == 0 { // Simply multiplication let mut u64_result = u64_to_array(u64::from(self.lo) * u64::from(other.lo)); // If we're above max precision then this is a very small number if final_scale > MAX_PRECISION { final_scale -= MAX_PRECISION; // If the number is above 19 then this will equate to zero. // This is because the max value in 64 bits is 1.84E19 if final_scale > 19 { return Decimal::zero(); } let mut rem_lo = 0; let mut power; if final_scale > 9 { // Since 10^10 doesn't fit into u32, we divide by 10^10/4 // and multiply the next divisor by 4. rem_lo = div_by_u32(&mut u64_result, 2500000000); power = POWERS_10[final_scale as usize - 10] << 2; } else { power = POWERS_10[final_scale as usize]; } // Divide fits in 32 bits let rem_hi = div_by_u32(&mut u64_result, power); // Round the result. Since the divisor is a power of 10 // we check to see if the remainder is >= 1/2 divisor power >>= 1; if rem_hi >= power && (rem_hi > power || (rem_lo | (u64_result[0] & 0x1)) != 0) { u64_result[0] += 1; } final_scale = MAX_PRECISION; } return Decimal { lo: u64_result[0], mid: u64_result[1], hi: 0, flags: flags(negative, final_scale), }; } // We're using some of the high bits, so we essentially perform // long form multiplication. We compute the 9 partial products // into a 192 bit result array. // // [my-h][my-m][my-l] // x [ot-h][ot-m][ot-l] // -------------------------------------- // 1. [r-hi][r-lo] my-l * ot-l [0, 0] // 2. [r-hi][r-lo] my-l * ot-m [0, 1] // 3. [r-hi][r-lo] my-m * ot-l [1, 0] // 4. [r-hi][r-lo] my-m * ot-m [1, 1] // 5. [r-hi][r-lo] my-l * ot-h [0, 2] // 6. [r-hi][r-lo] my-h * ot-l [2, 0] // 7. [r-hi][r-lo] my-m * ot-h [1, 2] // 8. 
[r-hi][r-lo] my-h * ot-m [2, 1] // 9.[r-hi][r-lo] my-h * ot-h [2, 2] let my = [self.lo, self.mid, self.hi]; let ot = [other.lo, other.mid, other.hi]; let mut product = [0u32, 0u32, 0u32, 0u32, 0u32, 0u32]; // We can perform a minor short circuit here. If the // high portions are both 0 then we can skip portions 5-9 let to = if my[2] == 0 && ot[2] == 0 { 2 } else { 3 }; for my_index in 0..to { for ot_index in 0..to { let (mut rlo, mut rhi) = mul_part(my[my_index], ot[ot_index], 0); // Get the index for the lo portion of the product for prod in product.iter_mut().skip(my_index + ot_index) { let (res, overflow) = add_part(rlo, *prod); *prod = res; // If we have something in rhi from before then promote that if rhi > 0 { // If we overflowed in the last add, add that with rhi if overflow > 0 { let (nlo, nhi) = add_part(rhi, overflow); rlo = nlo; rhi = nhi; } else { rlo = rhi; rhi = 0; } } else if overflow > 0 { rlo = overflow; rhi = 0; } else { break; } // If nothing to do next round then break out if rlo == 0 { break; } } } } // If our result has used up the high portion of the product // then we either have an overflow or an underflow situation // Overflow will occur if we can't scale it back, whereas underflow // with kick in rounding let mut remainder = 0; while final_scale > 0 && (product[3] != 0 || product[4] != 0 || product[5] != 0) { remainder = div_by_u32(&mut product, 10u32); final_scale -= 1; } // Round up the carry if we need to if remainder >= 5 { for part in product.iter_mut() { if remainder == 0 { break; } let digit: u64 = u64::from(*part) + 1; remainder = if digit > 0xFFFF_FFFF { 1 } else { 0 }; *part = (digit & 0xFFFF_FFFF) as u32; } } // If we're still above max precision then we'll try again to // reduce precision - we may be dealing with a limit of "0" if final_scale > MAX_PRECISION { // We're in an underflow situation // The easiest way to remove precision is to divide off the result while final_scale > MAX_PRECISION && !is_all_zero(&product) { 
div_by_u32(&mut product, 10); final_scale -= 1; } // If we're still at limit then we can't represent any // siginificant decimal digits and will return an integer only // Can also be invoked while representing 0. if final_scale > MAX_PRECISION { final_scale = 0; } } else if !(product[3] == 0 && product[4] == 0 && product[5] == 0) { // We're in an overflow situation - we're within our precision bounds // but still have bits in overflow panic!("Multiplication overflowed"); } Decimal { lo: product[0], mid: product[1], hi: product[2], flags: flags(negative, final_scale), } } } impl MulAssign for Decimal { fn mul_assign(&mut self, other: Decimal) { let result = self.mul(other); self.lo = result.lo; self.mid = result.mid; self.hi = result.hi; self.flags = result.flags; } } forward_all_binop!(impl Div for Decimal, div); impl<'a, 'b> Div<&'b Decimal> for &'a Decimal { type Output = Decimal; fn div(self, other: &Decimal) -> Decimal { if other.is_zero() { panic!("Division by zero"); } if self.is_zero() { return Decimal::zero(); } let dividend = [self.lo, self.mid, self.hi]; let divisor = [other.lo, other.mid, other.hi]; let mut quotient = [0u32, 0u32, 0u32]; let mut quotient_scale: i32 = self.scale() as i32 - other.scale() as i32; // We supply an extra overflow word for each of the dividend and the remainder let mut working_quotient = [ dividend[0], dividend[1], dividend[2], 0u32, ]; let mut working_remainder = [ 0u32, 0u32, 0u32, 0u32, ]; let mut working_scale = quotient_scale; let mut remainder_scale = quotient_scale; let mut underflow; loop { div_internal(&mut working_quotient, &mut working_remainder, &divisor); underflow = add_with_scale_internal( &mut quotient, &mut quotient_scale, &mut working_quotient, &mut working_scale, ); // Multiply the remainder by 10 let mut overflow = 0; for part in working_remainder.iter_mut() { let (lo, hi) = mul_part(*part, 10, overflow); *part = lo; overflow = hi; } // Copy temp remainder into the temp quotient section 
working_quotient.copy_from_slice(&working_remainder); remainder_scale += 1; working_scale = remainder_scale; if underflow || is_all_zero(&working_remainder) { break; } } // If we have a really big number try to adjust the scale to 0 while quotient_scale < 0 { copy_array_diff_lengths(&mut working_quotient, &quotient); working_quotient[3] = 0; working_remainder.iter_mut().for_each(|x| *x = 0); // Mul 10 let mut overflow = 0; for part in &mut working_quotient { let (lo, hi) = mul_part(*part, 10, overflow); *part = lo; overflow = hi; } for part in &mut working_remainder { let (lo, hi) = mul_part(*part, 10, overflow); *part = lo; overflow = hi; } if working_quotient[3] == 0 && is_all_zero(&working_remainder) { quotient_scale += 1; quotient[0] = working_quotient[0]; quotient[1] = working_quotient[1]; quotient[2] = working_quotient[2]; } else { // Overflow panic!("Division overflowed"); } } if quotient_scale > 255 { quotient[0] = 0; quotient[1] = 0; quotient[2] = 0; quotient_scale = 0; } let mut quotient_negative = self.is_sign_negative() ^ other.is_sign_negative(); // Check for underflow let mut final_scale: u32 = quotient_scale as u32; if final_scale > MAX_PRECISION { let mut remainder = 0; // Division underflowed. We must remove some significant digits over using // an invalid scale. 
while final_scale > MAX_PRECISION && !is_all_zero(&quotient) { remainder = div_by_u32(&mut quotient, 10); final_scale -= 1; } if final_scale > MAX_PRECISION { // Result underflowed so set to zero final_scale = 0; quotient_negative = false; } else if remainder >= 5 { for part in &mut quotient { if remainder == 0 { break; } let digit: u64 = u64::from(*part) + 1; remainder = if digit > 0xFFFF_FFFF { 1 } else { 0 }; *part = (digit & 0xFFFF_FFFF) as u32; } } } Decimal { lo: quotient[0], mid: quotient[1], hi: quotient[2], flags: flags(quotient_negative, final_scale), } } } impl DivAssign for Decimal { fn div_assign(&mut self, other: Decimal) { let result = self.div(other); self.lo = result.lo; self.mid = result.mid; self.hi = result.hi; self.flags = result.flags; } } forward_all_binop!(impl Rem for Decimal, rem); impl<'a, 'b> Rem<&'b Decimal> for &'a Decimal { type Output = Decimal; #[inline] fn rem(self, other: &Decimal) -> Decimal { if other.is_zero() { panic!("Division by zero"); } if self.is_zero() { return Decimal::zero(); } // Working is the remainder + the quotient // We use an aligned array since we'll be using it alot. 
let mut working_quotient = [self.lo, self.mid, self.hi, 0u32]; let mut working_remainder = [0u32, 0u32, 0u32, 0u32]; let divisor = [other.lo, other.mid, other.hi]; div_internal(&mut working_quotient, &mut working_remainder, &divisor); // Remainder has no scale however does have a sign (the same as self) Decimal { lo: working_remainder[0], mid: working_remainder[1], hi: working_remainder[2], flags: if self.is_sign_negative() { SIGN_MASK } else { 0 }, } } } impl RemAssign for Decimal { fn rem_assign(&mut self, other: Decimal) { let result = self.rem(other); self.lo = result.lo; self.mid = result.mid; self.hi = result.hi; self.flags = result.flags; } } impl PartialEq for Decimal { #[inline] fn eq(&self, other: &Decimal) -> bool { self.cmp(other) == Equal } } impl Eq for Decimal {} impl Hash for Decimal { fn hash<H: Hasher>(&self, state: &mut H) { self.lo.hash(state); self.mid.hash(state); self.hi.hash(state); self.flags.hash(state); } } impl PartialOrd for Decimal { #[inline] fn partial_cmp(&self, other: &Decimal) -> Option<Ordering> { Some(self.cmp(other)) } } impl Ord for Decimal { fn cmp(&self, other: &Decimal) -> Ordering { // Quick exit if major differences let self_negative = self.is_sign_negative(); let other_negative = other.is_sign_negative(); if self_negative && !other_negative { return Ordering::Less; } else if !self_negative && other_negative { return Ordering::Greater; } // If we have 1.23 and 1.2345 then we have // 123 scale 2 and 12345 scale 4 // We need to convert the first to // 12300 scale 4 so we can compare equally let left : &Decimal; let right : &Decimal; if self_negative && other_negative { // Both are negative, so reverse cmp left = other; right = self; } else { left = self; right = other; } let mut left_scale = left.scale(); let mut right_scale = right.scale(); if left_scale == right_scale { // Fast path for same scale if left.hi != right.hi { return left.hi.cmp(&right.hi); } if left.mid != right.mid { return left.mid.cmp(&right.mid); } return 
left.lo.cmp(&right.lo); } // Rescale and compare let mut left_raw = [left.lo, left.mid, left.hi]; let mut right_raw = [right.lo, right.mid, right.hi]; rescale( &mut left_raw, &mut left_scale, &mut right_raw, &mut right_scale, ); cmp_internal(&left_raw, &right_raw) } } #[cfg(test)] mod test { // Tests on private methods. // // All public tests should go under `tests/`. use super::*; #[test] fn it_can_rescale() { fn extract(value: &str) -> ([u32; 3], u32) { let v = Decimal::from_str(value).unwrap(); ([v.lo, v.mid, v.hi], v.scale()) } let tests = &[ ("1", "1", "1", "1"), ("1", "1.0", "1.0", "1.0"), ("1", "1.00000", "1.00000", "1.00000"), ("1", "1.0000000000", "1.0000000000", "1.0000000000"), ( "1", "1.00000000000000000000", "1.00000000000000000000", "1.00000000000000000000", ), ("1.1", "1.1", "1.1", "1.1"), ("1.1", "1.10000", "1.10000", "1.10000"), ("1.1", "1.1000000000", "1.1000000000", "1.1000000000"), ( "1.1", "1.10000000000000000000", "1.10000000000000000000", "1.10000000000000000000", ), ( "0.6386554621848739495798319328", "11.815126050420168067226890757", "0.638655462184873949579831933", "11.815126050420168067226890757", ), ( "0.0872727272727272727272727272", // Scale 28 "843.65000000", // Scale 8 "0.0872727272727272727272727", // 25 "843.6500000000000000000000000", // 25 ), ]; for &(left_raw, right_raw, expected_left, expected_right) in tests { // Left = the value to rescale // Right = the new scale we're scaling to // Expected = the expected left value after rescale let (expected_left, expected_lscale) = extract(expected_left); let (expected_right, expected_rscale) = extract(expected_right); let (mut left, mut left_scale) = extract(left_raw); let (mut right, mut right_scale) = extract(right_raw); rescale(&mut left, &mut left_scale, &mut right, &mut right_scale); assert_eq!(left, expected_left); assert_eq!(left_scale, expected_lscale); assert_eq!(right, expected_right); assert_eq!(right_scale, expected_rscale); // Also test the transitive case let (mut left, 
mut left_scale) = extract(left_raw); let (mut right, mut right_scale) = extract(right_raw); rescale(&mut right, &mut right_scale, &mut left, &mut left_scale); assert_eq!(left, expected_left); assert_eq!(left_scale, expected_lscale); assert_eq!(right, expected_right); assert_eq!(right_scale, expected_rscale); } } }
use {types as ot, errors as oe, utils}; use std::io; use std::io::{Read, BufRead}; use byteorder::{BigEndian, ReadBytesExt}; /// Common MTP size for ethernet pub const MTP: usize = 1536; pub fn decode(msg: &[u8]) -> ot::OscResult<ot::OscPacket> { match msg[0] as char { '/' => { decode_message(msg) } '#' => { decode_bundle(msg) } _ => Err(oe::OscError::BadPacket("Unknown message format.".to_string())), } } fn decode_message(msg: &[u8]) -> ot::OscResult<ot::OscPacket> { let mut cursor: io::Cursor<&[u8]> = io::Cursor::new(msg); match read_osc_string(&mut cursor) { Ok(s) => { let addr: String = s; match read_osc_string(&mut cursor) { Ok(type_tags) => { if type_tags.len() > 1 { match read_osc_args(&mut cursor, type_tags) { Ok(args) => { Ok(ot::OscPacket::Message(ot::OscMessage { addr: addr, args: Some(args), })) } Err(e) => Err(e), } } else { Ok(ot::OscPacket::Message(ot::OscMessage { addr: addr, args: None, })) } } Err(e) => Err(e), } } Err(e) => Err(e), } } fn decode_bundle(msg: &[u8]) -> ot::OscResult<ot::OscPacket> { let mut cursor: io::Cursor<&[u8]> = io::Cursor::new(msg); match read_osc_string(&mut cursor).map(|s| s == "bundle") { Ok(b) => { if b { match read_time_tag(&mut cursor) { Ok(tt) => { let mut bundle: Vec<ot::OscPacket> = Vec::new(); match cursor.read_u32::<BigEndian>() .map_err(|e| oe::OscError::ByteOrderError(e)) { Ok(size) => { let mut buf: Vec<u8> = Vec::new(); match cursor.take(size as u64).read_to_end(&mut buf) { Ok(cnt) => { if cnt == (size as usize) { match decode(&mut buf) { Ok(p) => bundle.push(p), Err(e) => return Err(e), } } else { return Err(oe::OscError::BadBundle); } } Err(e) => return Err(oe::OscError::ReadError(e)), } } Err(e) => return Err(e), } Ok(ot::OscPacket::Bundle(ot::OscBundle { timetag: tt, content: bundle, })) } Err(e) => Err(e), } } else { Err(oe::OscError::BadBundle) } } Err(e) => Err(e), } } fn read_osc_string(cursor: &mut io::Cursor<&[u8]>) -> ot::OscResult<String> { let mut str_buf: Vec<u8> = Vec::new(); match 
cursor.read_until(0, &mut str_buf) { Ok(_) => { pad_cursor(cursor); String::from_utf8(str_buf) .map_err(|e| oe::OscError::StringError(e)) .map(|s| s.trim_matches(0u8 as char).to_string()) } Err(e) => Err(oe::OscError::ReadError(e)), } } fn read_osc_args(cursor: &mut io::Cursor<&[u8]>, raw_type_tags: String) -> ot::OscResult<Vec<ot::OscType>> { let type_tags: Vec<char> = raw_type_tags.chars() .skip(1) .map(|c| c as char) .collect(); let mut args: Vec<ot::OscType> = Vec::with_capacity(type_tags.len()); for tag in type_tags { match read_osc_arg(cursor, tag) { Ok(arg) => { args.push(arg); } Err(e) => return Err(e), } } Ok(args) } fn read_osc_arg(cursor: &mut io::Cursor<&[u8]>, tag: char) -> ot::OscResult<ot::OscType> { match tag { 'f' => { cursor.read_f32::<BigEndian>() .map(|f| ot::OscType::Float(f)) .map_err(|e| oe::OscError::ByteOrderError(e)) } 'd' => { cursor.read_f64::<BigEndian>() .map(|d| ot::OscType::Double(d)) .map_err(|e| oe::OscError::ByteOrderError(e)) } 'i' => { cursor.read_i32::<BigEndian>() .map(|i| ot::OscType::Int(i)) .map_err(|e| oe::OscError::ByteOrderError(e)) } 'h' => { cursor.read_i64::<BigEndian>() .map(|l| ot::OscType::Long(l)) .map_err(|e| oe::OscError::ByteOrderError(e)) } 's' => { read_osc_string(cursor).map(|s| ot::OscType::String(s)) } 't' => { // http://opensoundcontrol.org/node/3/#timetags read_time_tag(cursor) } 'b' => { match cursor.read_u32::<BigEndian>() { Ok(size) => { let mut buf: Vec<u8> = Vec::with_capacity(size as usize); match cursor.take(size as u64).read(&mut buf) { Ok(blob_size) => Ok(ot::OscType::Blob(buf)), Err(e) => Err(oe::OscError::ReadError(e)), } } // TODO: use correct error type Err(e) => return Err(oe::OscError::BadBundle), } } _ => Err(oe::OscError::BadArg(format!("Type tag \"{}\" is not implemented!", tag))), } } fn read_time_tag(cursor: &mut io::Cursor<&[u8]>) -> ot::OscResult<ot::OscType> { match cursor.read_u32::<BigEndian>() { Ok(date) => { match cursor.read_u32::<BigEndian>() { Ok(frac) => 
Ok(ot::OscType::Time(date, frac)), Err(e) => Err(oe::OscError::ByteOrderError(e)), } } Err(e) => Err(oe::OscError::ByteOrderError(e)), } } fn pad_cursor(cursor: &mut io::Cursor<&[u8]>) { let pos = cursor.position(); cursor.set_position(utils::pad(pos)); } Refactor Osc bundle decoding use {types as ot, errors as oe, utils}; use std::io; use std::io::{Read, BufRead}; use byteorder::{BigEndian, ReadBytesExt}; /// Common MTP size for ethernet pub const MTP: usize = 1536; pub fn decode(msg: &[u8]) -> ot::OscResult<ot::OscPacket> { match msg[0] as char { '/' => { decode_message(msg) } '#' => { decode_bundle(msg) } _ => Err(oe::OscError::BadPacket("Unknown message format.".to_string())), } } fn decode_message(msg: &[u8]) -> ot::OscResult<ot::OscPacket> { let mut cursor: io::Cursor<&[u8]> = io::Cursor::new(msg); match read_osc_string(&mut cursor) { Ok(s) => { let addr: String = s; match read_osc_string(&mut cursor) { Ok(type_tags) => { if type_tags.len() > 1 { match read_osc_args(&mut cursor, type_tags) { Ok(args) => { Ok(ot::OscPacket::Message(ot::OscMessage { addr: addr, args: Some(args), })) } Err(e) => Err(e), } } else { Ok(ot::OscPacket::Message(ot::OscMessage { addr: addr, args: None, })) } } Err(e) => Err(e), } } Err(e) => Err(e), } } fn decode_bundle(msg: &[u8]) -> ot::OscResult<ot::OscPacket> { let mut cursor: io::Cursor<&[u8]> = io::Cursor::new(msg); let b = try!(read_osc_string(&mut cursor)); if b != "bundle" { return Err(oe::OscError::BadBundle); } let time_tag = try!(read_time_tag(&mut cursor)); let mut bundle: Vec<ot::OscPacket> = Vec::new(); let size: usize = try!(cursor.read_u32::<BigEndian>() .map_err(oe::OscError::ByteOrderError)) as usize; let mut buf: Vec<u8> = Vec::new(); let cnt: usize = try!(cursor.take(size as u64) .read_to_end(&mut buf) .map_err(oe::OscError::ReadError)); if cnt == size { try!(decode(&mut buf).map(|p| bundle.push(p))); } else { return Err(oe::OscError::BadBundle); } Ok(ot::OscPacket::Bundle(ot::OscBundle { timetag: time_tag, 
content: bundle, })) } fn read_osc_string(cursor: &mut io::Cursor<&[u8]>) -> ot::OscResult<String> { let mut str_buf: Vec<u8> = Vec::new(); match cursor.read_until(0, &mut str_buf) { Ok(_) => { pad_cursor(cursor); String::from_utf8(str_buf) .map_err(|e| oe::OscError::StringError(e)) .map(|s| s.trim_matches(0u8 as char).to_string()) } Err(e) => Err(oe::OscError::ReadError(e)), } } fn read_osc_args(cursor: &mut io::Cursor<&[u8]>, raw_type_tags: String) -> ot::OscResult<Vec<ot::OscType>> { let type_tags: Vec<char> = raw_type_tags.chars() .skip(1) .map(|c| c as char) .collect(); let mut args: Vec<ot::OscType> = Vec::with_capacity(type_tags.len()); for tag in type_tags { match read_osc_arg(cursor, tag) { Ok(arg) => { args.push(arg); } Err(e) => return Err(e), } } Ok(args) } fn read_osc_arg(cursor: &mut io::Cursor<&[u8]>, tag: char) -> ot::OscResult<ot::OscType> { match tag { 'f' => { cursor.read_f32::<BigEndian>() .map(|f| ot::OscType::Float(f)) .map_err(|e| oe::OscError::ByteOrderError(e)) } 'd' => { cursor.read_f64::<BigEndian>() .map(|d| ot::OscType::Double(d)) .map_err(|e| oe::OscError::ByteOrderError(e)) } 'i' => { cursor.read_i32::<BigEndian>() .map(|i| ot::OscType::Int(i)) .map_err(|e| oe::OscError::ByteOrderError(e)) } 'h' => { cursor.read_i64::<BigEndian>() .map(|l| ot::OscType::Long(l)) .map_err(|e| oe::OscError::ByteOrderError(e)) } 's' => { read_osc_string(cursor).map(|s| ot::OscType::String(s)) } 't' => { // http://opensoundcontrol.org/node/3/#timetags read_time_tag(cursor) } 'b' => { match cursor.read_u32::<BigEndian>() { Ok(size) => { let mut buf: Vec<u8> = Vec::with_capacity(size as usize); match cursor.take(size as u64).read(&mut buf) { Ok(blob_size) => Ok(ot::OscType::Blob(buf)), Err(e) => Err(oe::OscError::ReadError(e)), } } // TODO: use correct error type Err(e) => return Err(oe::OscError::BadBundle), } } _ => Err(oe::OscError::BadArg(format!("Type tag \"{}\" is not implemented!", tag))), } } fn read_time_tag(cursor: &mut io::Cursor<&[u8]>) -> 
ot::OscResult<ot::OscType> { match cursor.read_u32::<BigEndian>() { Ok(date) => { match cursor.read_u32::<BigEndian>() { Ok(frac) => Ok(ot::OscType::Time(date, frac)), Err(e) => Err(oe::OscError::ByteOrderError(e)), } } Err(e) => Err(oe::OscError::ByteOrderError(e)), } } fn pad_cursor(cursor: &mut io::Cursor<&[u8]>) { let pos = cursor.position(); cursor.set_position(utils::pad(pos)); }
use std::borrow::IntoCow; use std::collections::hash_map::HashMap; use std::old_io::{IoResult, MemReader}; use std::old_io::Reader as IoReader; use std::mem::transmute; use byteorder::{ReaderBytesExt, BigEndian}; use rustc_serialize::Decodable; use rustc_decoder::CborDecoder; use { Cbor, CborUnsigned, CborSigned, CborFloat, CborBytes, CborTag, Type, CborResult, CborError, ReadError, }; /// Read CBOR data items into Rust values from the underlying reader `R`. pub struct Decoder<R> { rdr: CborReader<R>, } impl<R: IoReader> Decoder<R> { /// Create a new CBOR decoder from the underlying reader. pub fn from_reader(rdr: R) -> Decoder<R> { Decoder { rdr: CborReader::new(rdr) } } /// Decode a sequence of top-level CBOR data items into Rust values. /// /// # Example /// /// This shows how to encode and decode a sequence of data items: /// /// ```rust /// use cbor::{Decoder, Encoder}; /// /// let data = vec![("a".to_string(), 1), ("b".to_string(), 2), /// ("c".to_string(), 3)]; /// /// let mut enc = Encoder::from_memory(); /// enc.encode(&data).unwrap(); /// /// let mut dec = Decoder::from_bytes(enc.as_bytes()); /// let items: Vec<(String, i32)> = dec.decode() /// .collect::<Result<_, _>>() /// .unwrap(); /// /// assert_eq!(items, data); /// ``` pub fn decode<D: Decodable>(&mut self) -> DecodedItems<R, D> { DecodedItems { it: self.items(), _phantom: ::std::marker::PhantomData, } } /// Read a sequence of top-level CBOR data items. /// /// This yields data items represented by the `Cbor` type, which is its /// abstract syntax. (Using the `decode` iterator is probably much more /// convenient, but this is useful when you need to do more sophisticated /// analysis on the CBOR data.) 
/// /// # Example /// /// This shows how to encode and decode a sequence of data items: /// /// ```rust /// use cbor::{Cbor, CborUnsigned, Decoder, Encoder}; /// /// let mut enc = Encoder::from_memory(); /// enc.encode(vec![("a", 1), ("b", 2), ("c", 3)]).unwrap(); /// /// let mut dec = Decoder::from_bytes(enc.as_bytes()); /// let items = dec.items().collect::<Result<Vec<_>, _>>().unwrap(); /// /// assert_eq!(items, vec![ /// Cbor::Array(vec![ /// Cbor::Unicode("a".to_string()), /// Cbor::Unsigned(CborUnsigned::UInt8(1)), /// ]), /// Cbor::Array(vec![ /// Cbor::Unicode("b".to_string()), /// Cbor::Unsigned(CborUnsigned::UInt8(2)), /// ]), /// Cbor::Array(vec![ /// Cbor::Unicode("c".to_string()), /// Cbor::Unsigned(CborUnsigned::UInt8(3)), /// ]), /// ]); /// ``` pub fn items(&mut self) -> Items<R> { Items { dec: self } } fn read_data_item(&mut self, first: Option<u8>) -> CborResult<Cbor> { let first = match first { Some(first) => first, None => try!(self.rdr.read_byte()), }; match (first & 0b111_00000) >> 5 { 0 => self.read_uint(first).map(Cbor::Unsigned), 1 => self.read_int(first).map(Cbor::Signed), 2 => self.read_bytes(first), 3 => self.read_string(first), 4 => self.read_array(first), 5 => self.read_map(first), 6 => self.read_tag(first), 7 => match first & 0b000_11111 { v @ 0...23 => self.read_simple_value(v), 24 => { let b = try!(self.rdr.read_byte()); self.read_simple_value(b) } 25...27 => self.read_float(first).map(Cbor::Float), v @ 28...30 => Err(self.errat( ReadError::Unassigned { major: 7, add: v })), 31 => Ok(Cbor::Break), // Because max(byte & 0b000_11111) == 2^5 - 1 == 31 _ => unreachable!(), }, // This is truly unreachable because `byte & 0b111_00000 >> 5` // can only produce 8 distinct values---each of which are handled // above. 
---AG _ => unreachable!(), } } fn read_simple_value(&mut self, val: u8) -> CborResult<Cbor> { Ok(match val { v @ 0...19 => return Err(self.errat( ReadError::Unassigned { major: 7, add: v })), 20 => Cbor::Bool(false), 21 => Cbor::Bool(true), 22 => Cbor::Null, 23 => Cbor::Undefined, v @ 24...31 => return Err(self.errat( ReadError::Reserved { major: 7, add: v })), v /* 32...255 */ => return Err(self.errat( ReadError::Unassigned { major: 7, add: v })), }) } fn read_float(&mut self, first: u8) -> CborResult<CborFloat> { Ok(match first & 0b000_11111 { 25 => { // Rust doesn't have a `f16` type, so just read a u16 and // cast it to a u32 and then a f32. // I think this is wrong. ---AG let n = try!(self.rdr.read_u16::<BigEndian>()); CborFloat::Float16(unsafe { transmute(n as u32) }) } 26 => CborFloat::Float32(try!(self.rdr.read_f32::<BigEndian>())), 27 => CborFloat::Float64(try!(self.rdr.read_f64::<BigEndian>())), // Reaching this case is probably a bug. ---AG v => return Err(self.errat( ReadError::InvalidAddValue { ty: Type::Float, val: v })), }) } fn read_tag(&mut self, first: u8) -> CborResult<Cbor> { let tag = try!(self.read_uint(first)); let tag = try!(tag.to_u64().map_err(|err| self.errat(err))); let data = try!(self.read_data_item(None)); Ok(Cbor::Tag(CborTag { tag: tag, data: Box::new(data) })) } fn read_map(&mut self, first: u8) -> CborResult<Cbor> { let len = try!(self.read_len(first)); let mut map = HashMap::with_capacity(len); let at = self.rdr.bytes_read; // for coherent error reporting for _ in 0..len { let key = match try!(self.read_data_item(None)) { Cbor::Unicode(s) => s, v => return Err(CborError::AtOffset { kind: ReadError::mismatch(Type::Unicode, &v), offset: at, }), }; let val = try!(self.read_data_item(None)); map.insert(key, val); } Ok(Cbor::Map(map)) } fn read_array(&mut self, first: u8) -> CborResult<Cbor> { let len = try!(self.read_len(first)); let mut array = Vec::with_capacity(len); for _ in 0..len { let v = try!(self.read_data_item(None)); 
array.push(v); } Ok(Cbor::Array(array)) } fn read_string(&mut self, first: u8) -> CborResult<Cbor> { let len = try!(self.read_len(first)); let mut buf = vec_from_elem(len, 0u8); try!(self.rdr.read_full(&mut buf)); String::from_utf8(buf) .map(Cbor::Unicode) .map_err(|err| self.errstr(err.utf8_error().to_string())) } fn read_bytes(&mut self, first: u8) -> CborResult<Cbor> { let len = try!(self.read_len(first)); let mut buf = vec_from_elem(len, 0u8); try!(self.rdr.read_full(&mut buf)); Ok(Cbor::Bytes(CborBytes(buf))) } fn read_len(&mut self, first: u8) -> CborResult<usize> { self.read_uint(first) .and_then(|v| v.to_usize().map_err(|err| self.errat(err))) } fn read_uint(&mut self, first: u8) -> CborResult<CborUnsigned> { Ok(match first & 0b000_11111 { n @ 0...23 => CborUnsigned::UInt8(n), 24 => CborUnsigned::UInt8(try!(self.rdr.read_byte())), 25 => CborUnsigned::UInt16(try!(self.rdr.read_u16::<BigEndian>())), 26 => CborUnsigned::UInt32(try!(self.rdr.read_u32::<BigEndian>())), 27 => CborUnsigned::UInt64(try!(self.rdr.read_u64::<BigEndian>())), v => return Err(self.errat( ReadError::InvalidAddValue { ty: Type::UInt, val: v })), }) } fn read_int(&mut self, first: u8) -> CborResult<CborSigned> { Ok(match first & 0b000_11111 { n @ 0...23 => CborSigned::Int8(-1 - (n as i8)), 24 => { let n = try!(self.rdr.read_byte()); if n > ::std::i8::MAX as u8 { CborSigned::Int16(-1 - (n as i16)) } else { CborSigned::Int8(-1 - (n as i8)) } } 25 => { let n = try!(self.rdr.read_u16::<BigEndian>()); if n > ::std::i16::MAX as u16 { CborSigned::Int32(-1 - (n as i32)) } else { CborSigned::Int16(-1 - (n as i16)) } } 26 => { let n = try!(self.rdr.read_u32::<BigEndian>()); if n > ::std::i32::MAX as u32 { CborSigned::Int64(-1 - (n as i64)) } else { CborSigned::Int32(-1 - (n as i32)) } } 27 => { let n = try!(self.rdr.read_u64::<BigEndian>()); if n > ::std::i64::MAX as u64 { return Err(self.errstr(format!( "Negative integer out of range: {:?}", n))); } CborSigned::Int64(-1 - (n as i64)) } v => return 
Err(self.errat( ReadError::InvalidAddValue { ty: Type::Int, val: v })), }) } } impl Decoder<MemReader> { /// Create a new CBOR decoder that reads from the buffer given. /// /// The buffer is usually given as either a `Vec<u8>` or a `&[u8]`. pub fn from_bytes<'a, T>(bytes: T) -> Decoder<MemReader> where T: IntoCow<'a, [u8]> { Decoder::from_reader(MemReader::new(bytes.into_cow().into_owned())) } } impl<R: IoReader> Decoder<R> { fn errat(&self, err: ReadError) -> CborError { CborError::AtOffset { kind: err, offset: self.rdr.last_offset } } fn errstr(&self, s: String) -> CborError { self.errat(ReadError::Other(s)) } } /// An iterator over items decoded from CBOR into Rust values. /// /// `D` represents the type of the Rust value being decoded into, `R` /// represents the underlying reader and `'a` is the lifetime of the decoder. pub struct DecodedItems<'a, R: 'a, D> { it: Items<'a, R>, _phantom: ::std::marker::PhantomData<D>, } impl<'a, R: IoReader, D: Decodable> Iterator for DecodedItems<'a, R, D> { type Item = CborResult<D>; fn next(&mut self) -> Option<CborResult<D>> { self.it.next().map(|result| { result.and_then(|v| Decodable::decode(&mut CborDecoder::new(v))) }) } } /// An iterator over CBOR items in terms of the abstract syntax. /// /// `R` represents the underlying reader and `'a` is the lifetime of the /// decoder. pub struct Items<'a, R: 'a> { dec: &'a mut Decoder<R>, } impl<'a, R: IoReader> Iterator for Items<'a, R> { type Item = CborResult<Cbor>; fn next(&mut self) -> Option<CborResult<Cbor>> { match self.dec.read_data_item(None) { Err(ref err) if err.is_eof() => None, Err(err) => Some(Err(err)), Ok(v) => Some(Ok(v)), } } } /// A very light layer over a basic reader that keeps track of offset /// information at the byte level. 
struct CborReader<R> { rdr: R, // used for error reporting last_offset: usize, bytes_read: usize, } impl<R: IoReader> IoReader for CborReader<R> { fn read(&mut self, buf: &mut [u8]) -> IoResult<usize> { let n = try!(self.rdr.read(buf)); self.last_offset = self.bytes_read; self.bytes_read += n; Ok(n) } } impl<R: IoReader> CborReader<R> { fn new(rdr: R) -> CborReader<R> { CborReader { rdr: rdr, last_offset: 0, bytes_read: 0 } } fn read_full(&mut self, buf: &mut [u8]) -> IoResult<()> { let mut n = 0usize; while n < buf.len() { n += try!(self.read(&mut buf[n..])); } Ok(()) } } fn vec_from_elem<T: Copy>(len: usize, v: T) -> Vec<T> { let mut xs = Vec::with_capacity(len); unsafe { xs.set_len(len); } for x in &mut xs { *x = v; } xs } Major lapse --- use a buffered reader in the decoder. use std::borrow::IntoCow; use std::collections::hash_map::HashMap; use std::old_io::{BufferedReader, IoResult, MemReader}; use std::old_io::Reader as IoReader; use std::mem::transmute; use byteorder::{ReaderBytesExt, BigEndian}; use rustc_serialize::Decodable; use rustc_decoder::CborDecoder; use { Cbor, CborUnsigned, CborSigned, CborFloat, CborBytes, CborTag, Type, CborResult, CborError, ReadError, }; /// Read CBOR data items into Rust values from the underlying reader `R`. pub struct Decoder<R> { rdr: CborReader<R>, } impl<R: IoReader> Decoder<BufferedReader<R>> { /// Create a new CBOR decoder from the underlying reader. pub fn from_reader(rdr: R) -> Decoder<BufferedReader<R>> { Decoder { rdr: CborReader::new(BufferedReader::new(rdr)) } } } impl<R: IoReader> Decoder<R> { /// Decode a sequence of top-level CBOR data items into Rust values. 
/// /// # Example /// /// This shows how to encode and decode a sequence of data items: /// /// ```rust /// use cbor::{Decoder, Encoder}; /// /// let data = vec![("a".to_string(), 1), ("b".to_string(), 2), /// ("c".to_string(), 3)]; /// /// let mut enc = Encoder::from_memory(); /// enc.encode(&data).unwrap(); /// /// let mut dec = Decoder::from_bytes(enc.as_bytes()); /// let items: Vec<(String, i32)> = dec.decode() /// .collect::<Result<_, _>>() /// .unwrap(); /// /// assert_eq!(items, data); /// ``` pub fn decode<D: Decodable>(&mut self) -> DecodedItems<R, D> { DecodedItems { it: self.items(), _phantom: ::std::marker::PhantomData, } } /// Read a sequence of top-level CBOR data items. /// /// This yields data items represented by the `Cbor` type, which is its /// abstract syntax. (Using the `decode` iterator is probably much more /// convenient, but this is useful when you need to do more sophisticated /// analysis on the CBOR data.) /// /// # Example /// /// This shows how to encode and decode a sequence of data items: /// /// ```rust /// use cbor::{Cbor, CborUnsigned, Decoder, Encoder}; /// /// let mut enc = Encoder::from_memory(); /// enc.encode(vec![("a", 1), ("b", 2), ("c", 3)]).unwrap(); /// /// let mut dec = Decoder::from_bytes(enc.as_bytes()); /// let items = dec.items().collect::<Result<Vec<_>, _>>().unwrap(); /// /// assert_eq!(items, vec![ /// Cbor::Array(vec![ /// Cbor::Unicode("a".to_string()), /// Cbor::Unsigned(CborUnsigned::UInt8(1)), /// ]), /// Cbor::Array(vec![ /// Cbor::Unicode("b".to_string()), /// Cbor::Unsigned(CborUnsigned::UInt8(2)), /// ]), /// Cbor::Array(vec![ /// Cbor::Unicode("c".to_string()), /// Cbor::Unsigned(CborUnsigned::UInt8(3)), /// ]), /// ]); /// ``` pub fn items(&mut self) -> Items<R> { Items { dec: self } } fn read_data_item(&mut self, first: Option<u8>) -> CborResult<Cbor> { let first = match first { Some(first) => first, None => try!(self.rdr.read_byte()), }; match (first & 0b111_00000) >> 5 { 0 => 
self.read_uint(first).map(Cbor::Unsigned), 1 => self.read_int(first).map(Cbor::Signed), 2 => self.read_bytes(first), 3 => self.read_string(first), 4 => self.read_array(first), 5 => self.read_map(first), 6 => self.read_tag(first), 7 => match first & 0b000_11111 { v @ 0...23 => self.read_simple_value(v), 24 => { let b = try!(self.rdr.read_byte()); self.read_simple_value(b) } 25...27 => self.read_float(first).map(Cbor::Float), v @ 28...30 => Err(self.errat( ReadError::Unassigned { major: 7, add: v })), 31 => Ok(Cbor::Break), // Because max(byte & 0b000_11111) == 2^5 - 1 == 31 _ => unreachable!(), }, // This is truly unreachable because `byte & 0b111_00000 >> 5` // can only produce 8 distinct values---each of which are handled // above. ---AG _ => unreachable!(), } } fn read_simple_value(&mut self, val: u8) -> CborResult<Cbor> { Ok(match val { v @ 0...19 => return Err(self.errat( ReadError::Unassigned { major: 7, add: v })), 20 => Cbor::Bool(false), 21 => Cbor::Bool(true), 22 => Cbor::Null, 23 => Cbor::Undefined, v @ 24...31 => return Err(self.errat( ReadError::Reserved { major: 7, add: v })), v /* 32...255 */ => return Err(self.errat( ReadError::Unassigned { major: 7, add: v })), }) } fn read_float(&mut self, first: u8) -> CborResult<CborFloat> { Ok(match first & 0b000_11111 { 25 => { // Rust doesn't have a `f16` type, so just read a u16 and // cast it to a u32 and then a f32. // I think this is wrong. ---AG let n = try!(self.rdr.read_u16::<BigEndian>()); CborFloat::Float16(unsafe { transmute(n as u32) }) } 26 => CborFloat::Float32(try!(self.rdr.read_f32::<BigEndian>())), 27 => CborFloat::Float64(try!(self.rdr.read_f64::<BigEndian>())), // Reaching this case is probably a bug. 
---AG v => return Err(self.errat( ReadError::InvalidAddValue { ty: Type::Float, val: v })), }) } fn read_tag(&mut self, first: u8) -> CborResult<Cbor> { let tag = try!(self.read_uint(first)); let tag = try!(tag.to_u64().map_err(|err| self.errat(err))); let data = try!(self.read_data_item(None)); Ok(Cbor::Tag(CborTag { tag: tag, data: Box::new(data) })) } fn read_map(&mut self, first: u8) -> CborResult<Cbor> { let len = try!(self.read_len(first)); let mut map = HashMap::with_capacity(len); let at = self.rdr.bytes_read; // for coherent error reporting for _ in 0..len { let key = match try!(self.read_data_item(None)) { Cbor::Unicode(s) => s, v => return Err(CborError::AtOffset { kind: ReadError::mismatch(Type::Unicode, &v), offset: at, }), }; let val = try!(self.read_data_item(None)); map.insert(key, val); } Ok(Cbor::Map(map)) } fn read_array(&mut self, first: u8) -> CborResult<Cbor> { let len = try!(self.read_len(first)); let mut array = Vec::with_capacity(len); for _ in 0..len { let v = try!(self.read_data_item(None)); array.push(v); } Ok(Cbor::Array(array)) } fn read_string(&mut self, first: u8) -> CborResult<Cbor> { let len = try!(self.read_len(first)); let mut buf = vec_from_elem(len, 0u8); try!(self.rdr.read_full(&mut buf)); String::from_utf8(buf) .map(Cbor::Unicode) .map_err(|err| self.errstr(err.utf8_error().to_string())) } fn read_bytes(&mut self, first: u8) -> CborResult<Cbor> { let len = try!(self.read_len(first)); let mut buf = vec_from_elem(len, 0u8); try!(self.rdr.read_full(&mut buf)); Ok(Cbor::Bytes(CborBytes(buf))) } fn read_len(&mut self, first: u8) -> CborResult<usize> { self.read_uint(first) .and_then(|v| v.to_usize().map_err(|err| self.errat(err))) } fn read_uint(&mut self, first: u8) -> CborResult<CborUnsigned> { Ok(match first & 0b000_11111 { n @ 0...23 => CborUnsigned::UInt8(n), 24 => CborUnsigned::UInt8(try!(self.rdr.read_byte())), 25 => CborUnsigned::UInt16(try!(self.rdr.read_u16::<BigEndian>())), 26 => 
CborUnsigned::UInt32(try!(self.rdr.read_u32::<BigEndian>())), 27 => CborUnsigned::UInt64(try!(self.rdr.read_u64::<BigEndian>())), v => return Err(self.errat( ReadError::InvalidAddValue { ty: Type::UInt, val: v })), }) } fn read_int(&mut self, first: u8) -> CborResult<CborSigned> { Ok(match first & 0b000_11111 { n @ 0...23 => CborSigned::Int8(-1 - (n as i8)), 24 => { let n = try!(self.rdr.read_byte()); if n > ::std::i8::MAX as u8 { CborSigned::Int16(-1 - (n as i16)) } else { CborSigned::Int8(-1 - (n as i8)) } } 25 => { let n = try!(self.rdr.read_u16::<BigEndian>()); if n > ::std::i16::MAX as u16 { CborSigned::Int32(-1 - (n as i32)) } else { CborSigned::Int16(-1 - (n as i16)) } } 26 => { let n = try!(self.rdr.read_u32::<BigEndian>()); if n > ::std::i32::MAX as u32 { CborSigned::Int64(-1 - (n as i64)) } else { CborSigned::Int32(-1 - (n as i32)) } } 27 => { let n = try!(self.rdr.read_u64::<BigEndian>()); if n > ::std::i64::MAX as u64 { return Err(self.errstr(format!( "Negative integer out of range: {:?}", n))); } CborSigned::Int64(-1 - (n as i64)) } v => return Err(self.errat( ReadError::InvalidAddValue { ty: Type::Int, val: v })), }) } } impl Decoder<MemReader> { /// Create a new CBOR decoder that reads from the buffer given. /// /// The buffer is usually given as either a `Vec<u8>` or a `&[u8]`. pub fn from_bytes<'a, T>(bytes: T) -> Decoder<MemReader> where T: IntoCow<'a, [u8]> { let rdr = MemReader::new(bytes.into_cow().into_owned()); Decoder { rdr: CborReader::new(rdr) } } } impl<R: IoReader> Decoder<R> { fn errat(&self, err: ReadError) -> CborError { CborError::AtOffset { kind: err, offset: self.rdr.last_offset } } fn errstr(&self, s: String) -> CborError { self.errat(ReadError::Other(s)) } } /// An iterator over items decoded from CBOR into Rust values. /// /// `D` represents the type of the Rust value being decoded into, `R` /// represents the underlying reader and `'a` is the lifetime of the decoder. 
pub struct DecodedItems<'a, R: 'a, D> { it: Items<'a, R>, _phantom: ::std::marker::PhantomData<D>, } impl<'a, R: IoReader, D: Decodable> Iterator for DecodedItems<'a, R, D> { type Item = CborResult<D>; fn next(&mut self) -> Option<CborResult<D>> { self.it.next().map(|result| { result.and_then(|v| Decodable::decode(&mut CborDecoder::new(v))) }) } } /// An iterator over CBOR items in terms of the abstract syntax. /// /// `R` represents the underlying reader and `'a` is the lifetime of the /// decoder. pub struct Items<'a, R: 'a> { dec: &'a mut Decoder<R>, } impl<'a, R: IoReader> Iterator for Items<'a, R> { type Item = CborResult<Cbor>; fn next(&mut self) -> Option<CborResult<Cbor>> { match self.dec.read_data_item(None) { Err(ref err) if err.is_eof() => None, Err(err) => Some(Err(err)), Ok(v) => Some(Ok(v)), } } } /// A very light layer over a basic reader that keeps track of offset /// information at the byte level. struct CborReader<R> { rdr: R, // used for error reporting last_offset: usize, bytes_read: usize, } impl<R: IoReader> IoReader for CborReader<R> { fn read(&mut self, buf: &mut [u8]) -> IoResult<usize> { let n = try!(self.rdr.read(buf)); self.last_offset = self.bytes_read; self.bytes_read += n; Ok(n) } } impl<R: IoReader> CborReader<R> { fn new(rdr: R) -> CborReader<R> { CborReader { rdr: rdr, last_offset: 0, bytes_read: 0, } } fn read_full(&mut self, buf: &mut [u8]) -> IoResult<()> { let mut n = 0usize; while n < buf.len() { n += try!(self.read(&mut buf[n..])); } Ok(()) } } fn vec_from_elem<T: Copy>(len: usize, v: T) -> Vec<T> { let mut xs = Vec::with_capacity(len); unsafe { xs.set_len(len); } for x in &mut xs { *x = v; } xs }
#![allow(dead_code)] use std::cmp; use std::io::{self, Read}; use encoding_rs::{Decoder, Encoding, UTF_8}; /// A BOM is at least 2 bytes and at most 3 bytes. /// /// If fewer than 2 bytes are available to be read at the beginning of a /// reader, then a BOM is `None`. #[derive(Clone, Copy, Debug, Eq, PartialEq)] struct Bom { bytes: [u8; 3], len: usize, } impl Bom { fn as_slice(&self) -> &[u8] { &self.bytes[0..self.len] } fn decoder(&self) -> Option<Decoder> { let bom = self.as_slice(); if bom.len() < 3 { return None; } if let Some((enc, _)) = Encoding::for_bom(bom) { if enc != UTF_8 { return Some(enc.new_decoder_with_bom_removal()); } } None } } /// BomPeeker wraps `R` and satisfies the `io::Read` interface while also /// providing a peek at the BOM if one exists. Peeking at the BOM does not /// advance the reader. struct BomPeeker<R> { rdr: R, bom: Option<Bom>, nread: usize, } impl<R: io::Read> BomPeeker<R> { /// Create a new BomPeeker. /// /// The first three bytes can be read using the `peek_bom` method, but /// will not advance the reader. fn new(rdr: R) -> BomPeeker<R> { BomPeeker { rdr: rdr, bom: None, nread: 0 } } /// Peek at the first three bytes of the underlying reader. /// /// This does not advance the reader provided by `BomPeeker`. /// /// If the underlying reader does not have at least two bytes available, /// then `None` is returned. 
fn peek_bom(&mut self) -> io::Result<Bom> { if let Some(bom) = self.bom { return Ok(bom); } self.bom = Some(Bom { bytes: [0; 3], len: 0 }); let mut buf = [0u8; 3]; let bom_len = try!(read_full(&mut self.rdr, &mut buf)); self.bom = Some(Bom { bytes: buf, len: bom_len }); Ok(self.bom.unwrap()) } } impl<R: io::Read> io::Read for BomPeeker<R> { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { if self.nread < 3 { let bom = try!(self.peek_bom()); let bom = bom.as_slice(); if self.nread < bom.len() { let rest = &bom[self.nread..]; let len = cmp::min(buf.len(), rest.len()); buf[..len].copy_from_slice(&rest[..len]); self.nread += len; return Ok(len); } } let nread = try!(self.rdr.read(buf)); self.nread += nread; Ok(nread) } } /// Like io::Read::read_exact, except it never returns UnexpectedEof and /// instead returns the number of bytes read if EOF is seen before filling /// `buf`. fn read_full<R: io::Read>( mut rdr: R, mut buf: &mut [u8], ) -> io::Result<usize> { let mut nread = 0; while !buf.is_empty() { match rdr.read(buf) { Ok(0) => break, Ok(n) => { nread += n; let tmp = buf; buf = &mut tmp[n..]; } Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {} Err(e) => return Err(e), } } Ok(nread) } /// A reader that transcodes to UTF-8. The source encoding is determined by /// inspecting the BOM from the stream read from `R`, if one exists. If a /// UTF-16 BOM exists, then the source stream is trancoded to UTF-8 with /// invalid UTF-16 sequences translated to the Unicode replacement character. /// In all other cases, the underlying reader is passed through unchanged. /// /// `R` is the type of the underlying reader and `B` is the type of an internal /// buffer used to store the results of trancoding. /// /// Note that not all methods on `io::Read` work with this implementation. /// For example, the `bytes` adapter method attempts to read a single byte at /// a time, but this implementation requires a buffer of size at least `4`. 
If /// a buffer of size less than 4 is given, then an error is returned. pub struct DecodeReader<R, B> { /// The underlying reader, wrapped in a peeker for reading a BOM if one /// exists. rdr: BomPeeker<R>, /// The internal buffer to store transcoded bytes before they are read by /// callers. buf: B, /// The current position in `buf`. Subsequent reads start here. pos: usize, /// The number of transcoded bytes in `buf`. Subsequent reads end here. buflen: usize, /// Whether this is the first read or not (in which we inspect the BOM). first: bool, /// Whether a "last" read has occurred. After this point, EOF will always /// be returned. last: bool, /// The underlying text decoder derived from the BOM, if one exists. decoder: Option<Decoder>, } impl<R: io::Read, B: AsMut<[u8]>> DecodeReader<R, B> { /// Create a new transcoder that converts a source stream to valid UTF-8. /// /// If an encoding is specified, then it is used to transcode `rdr` to /// UTF-8. Otherwise, if no encoding is specified, and if a UTF-16 BOM is /// found, then the corresponding UTF-16 encoding is used to transcode /// `rdr` to UTF-8. In all other cases, `rdr` is assumed to be at least /// ASCII-compatible and passed through untouched. /// /// Errors in the encoding of `rdr` are handled with the Unicode /// replacement character. If no encoding of `rdr` is specified, then /// errors are not handled. pub fn new( rdr: R, buf: B, enc: Option<&'static Encoding>, ) -> DecodeReader<R, B> { DecodeReader { rdr: BomPeeker::new(rdr), buf: buf, buflen: 0, pos: 0, first: enc.is_none(), last: false, decoder: enc.map(|enc| enc.new_decoder_with_bom_removal()), } } /// Fill the internal buffer from the underlying reader. /// /// If there are unread bytes in the internal buffer, then we move them /// to the beginning of the internal buffer and fill the remainder. /// /// If the internal buffer is too small to read additional bytes, then an /// error is returned. #[inline(always)] // massive perf benefit (???) 
fn fill(&mut self) -> io::Result<()> { if self.pos < self.buflen { if self.buflen >= self.buf.as_mut().len() { return Err(io::Error::new( io::ErrorKind::Other, "DecodeReader: internal buffer exhausted")); } let newlen = self.buflen - self.pos; let mut tmp = Vec::with_capacity(newlen); tmp.extend_from_slice(&self.buf.as_mut()[self.pos..self.buflen]); self.buf.as_mut()[..newlen].copy_from_slice(&tmp); self.buflen = newlen; } else { self.buflen = 0; } self.pos = 0; self.buflen += try!(self.rdr.read(&mut self.buf.as_mut()[self.buflen..])); Ok(()) } /// Transcode the inner stream to UTF-8 in `buf`. This assumes that there /// is a decoder capable of transcoding the inner stream to UTF-8. This /// returns the number of bytes written to `buf`. /// /// When this function returns, exactly one of the following things will /// be true: /// /// 1. A non-zero number of bytes were written to `buf`. /// 2. The underlying reader reached EOF. /// 3. An error is returned: the internal buffer ran out of room. /// 4. An I/O error occurred. /// /// Note that `buf` must have at least 4 bytes of space. fn transcode(&mut self, buf: &mut [u8]) -> io::Result<usize> { assert!(buf.len() >= 4); if self.last { return Ok(0); } if self.pos >= self.buflen { try!(self.fill()); } let mut nwrite = 0; loop { let (_, nin, nout, _) = self.decoder.as_mut().unwrap().decode_to_utf8( &self.buf.as_mut()[self.pos..self.buflen], buf, false); self.pos += nin; nwrite += nout; // If we've written at least one byte to the caller-provided // buffer, then our mission is complete. if nwrite > 0 { break; } // Otherwise, we know that our internal buffer has insufficient // data to transcode at least one char, so we attempt to refill it. try!(self.fill()); // Quit on EOF. 
if self.buflen == 0 { self.pos = 0; self.last = true; let (_, _, nout, _) = self.decoder.as_mut().unwrap().decode_to_utf8( &[], buf, true); return Ok(nout); } } Ok(nwrite) } fn detect(&mut self) -> io::Result<()> { let bom = try!(self.rdr.peek_bom()); self.decoder = bom.decoder(); Ok(()) } } impl<R: io::Read, B: AsMut<[u8]>> io::Read for DecodeReader<R, B> { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { if self.first { self.first = false; try!(self.detect()); } if self.decoder.is_none() { return self.rdr.read(buf); } // When decoding UTF-8, we need at least 4 bytes of space to guarantee // that we can decode at least one codepoint. If we don't have it, we // can either return `0` for the number of bytes read or return an // error. Since `0` would be interpreted as a possibly premature EOF, // we opt for an error. if buf.len() < 4 { return Err(io::Error::new( io::ErrorKind::Other, "DecodeReader: byte buffer must have length at least 4")); } self.transcode(buf) } } #[cfg(test)] mod tests { use std::io::Read; use encoding_rs::Encoding; use super::{Bom, BomPeeker, DecodeReader}; fn utf8(bytes: &[u8]) -> &str { ::std::str::from_utf8(bytes).unwrap() } fn read_to_string<R: Read>(mut rdr: R) -> String { let mut s = String::new(); rdr.read_to_string(&mut s).unwrap(); s } #[test] fn peeker_empty() { let buf = []; let mut peeker = BomPeeker::new(&buf[..]); assert_eq!(Bom { bytes: [0; 3], len: 0}, peeker.peek_bom().unwrap()); let mut tmp = [0; 100]; assert_eq!(0, peeker.read(&mut tmp).unwrap()); } #[test] fn peeker_one() { let buf = [1]; let mut peeker = BomPeeker::new(&buf[..]); assert_eq!( Bom { bytes: [1, 0, 0], len: 1}, peeker.peek_bom().unwrap()); let mut tmp = [0; 100]; assert_eq!(1, peeker.read(&mut tmp).unwrap()); assert_eq!(1, tmp[0]); assert_eq!(0, peeker.read(&mut tmp).unwrap()); } #[test] fn peeker_two() { let buf = [1, 2]; let mut peeker = BomPeeker::new(&buf[..]); assert_eq!( Bom { bytes: [1, 2, 0], len: 2}, peeker.peek_bom().unwrap()); let mut tmp = 
[0; 100]; assert_eq!(2, peeker.read(&mut tmp).unwrap()); assert_eq!(1, tmp[0]); assert_eq!(2, tmp[1]); assert_eq!(0, peeker.read(&mut tmp).unwrap()); } #[test] fn peeker_three() { let buf = [1, 2, 3]; let mut peeker = BomPeeker::new(&buf[..]); assert_eq!( Bom { bytes: [1, 2, 3], len: 3}, peeker.peek_bom().unwrap()); let mut tmp = [0; 100]; assert_eq!(3, peeker.read(&mut tmp).unwrap()); assert_eq!(1, tmp[0]); assert_eq!(2, tmp[1]); assert_eq!(3, tmp[2]); assert_eq!(0, peeker.read(&mut tmp).unwrap()); } #[test] fn peeker_four() { let buf = [1, 2, 3, 4]; let mut peeker = BomPeeker::new(&buf[..]); assert_eq!( Bom { bytes: [1, 2, 3], len: 3}, peeker.peek_bom().unwrap()); let mut tmp = [0; 100]; assert_eq!(3, peeker.read(&mut tmp).unwrap()); assert_eq!(1, tmp[0]); assert_eq!(2, tmp[1]); assert_eq!(3, tmp[2]); assert_eq!(1, peeker.read(&mut tmp).unwrap()); assert_eq!(4, tmp[0]); assert_eq!(0, peeker.read(&mut tmp).unwrap()); } #[test] fn peeker_one_at_a_time() { let buf = [1, 2, 3, 4]; let mut peeker = BomPeeker::new(&buf[..]); let mut tmp = [0; 1]; assert_eq!(0, peeker.read(&mut tmp[..0]).unwrap()); assert_eq!(0, tmp[0]); assert_eq!(1, peeker.read(&mut tmp).unwrap()); assert_eq!(1, tmp[0]); assert_eq!(1, peeker.read(&mut tmp).unwrap()); assert_eq!(2, tmp[0]); assert_eq!(1, peeker.read(&mut tmp).unwrap()); assert_eq!(3, tmp[0]); assert_eq!(1, peeker.read(&mut tmp).unwrap()); assert_eq!(4, tmp[0]); } // In cases where all we have is a bom, we expect the bytes to be // passed through unchanged. 
#[test] fn trans_utf16_bom() { let srcbuf = vec![0xFF, 0xFE]; let mut dstbuf = vec![0; 8 * (1<<10)]; let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None); let n = rdr.read(&mut dstbuf).unwrap(); assert_eq!(&*srcbuf, &dstbuf[..n]); let srcbuf = vec![0xFE, 0xFF]; let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None); let n = rdr.read(&mut dstbuf).unwrap(); assert_eq!(&*srcbuf, &dstbuf[..n]); let srcbuf = vec![0xEF, 0xBB, 0xBF]; let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None); let n = rdr.read(&mut dstbuf).unwrap(); assert_eq!(&*srcbuf, &dstbuf[..n]); } // Test basic UTF-16 decoding. #[test] fn trans_utf16_basic() { let srcbuf = vec![0xFF, 0xFE, 0x61, 0x00]; let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None); assert_eq!("a", read_to_string(&mut rdr)); let srcbuf = vec![0xFE, 0xFF, 0x00, 0x61]; let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None); assert_eq!("a", read_to_string(&mut rdr)); } // Test incomplete UTF-16 decoding. This ensures we see a replacement char // if the stream ends with an unpaired code unit. #[test] fn trans_utf16_incomplete() { let srcbuf = vec![0xFF, 0xFE, 0x61, 0x00, 0x00]; let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None); assert_eq!("a\u{FFFD}", read_to_string(&mut rdr)); } macro_rules! test_trans_simple { ($name:ident, $enc:expr, $srcbytes:expr, $dst:expr) => { #[test] fn $name() { let srcbuf = &$srcbytes[..]; let enc = Encoding::for_label($enc.as_bytes()); let mut rdr = DecodeReader::new( &*srcbuf, vec![0; 8 * (1<<10)], enc); assert_eq!($dst, read_to_string(&mut rdr)); } } } // This isn't exhaustive obviously, but it lets us test base level support. 
test_trans_simple!(trans_simple_auto, "does not exist", b"\xD0\x96", "Ж"); test_trans_simple!(trans_simple_utf8, "utf-8", b"\xD0\x96", "Ж"); test_trans_simple!(trans_simple_utf16le, "utf-16le", b"\x16\x04", "Ж"); test_trans_simple!(trans_simple_utf16be, "utf-16be", b"\x04\x16", "Ж"); test_trans_simple!(trans_simple_chinese, "chinese", b"\xA7\xA8", "Ж"); test_trans_simple!(trans_simple_korean, "korean", b"\xAC\xA8", "Ж"); test_trans_simple!(trans_simple_big5_hkscs, "big5-hkscs", b"\xC7\xFA", "Ж"); test_trans_simple!(trans_simple_gbk, "gbk", b"\xA7\xA8", "Ж"); test_trans_simple!(trans_simple_sjis, "sjis", b"\x84\x47", "Ж"); test_trans_simple!(trans_simple_eucjp, "euc-jp", b"\xA7\xA8", "Ж"); test_trans_simple!(trans_simple_latin1, "latin1", b"\xA9", "©"); } Stop aggressive inlining. It's not clear what exactly is happening here, but the Read implementation for text decoding appears a bit sensitive. Small pertubations in the code appear to have a nearly 100% impact on the overall speed of ripgrep when searching UTF-16 files. I haven't had the time to examine the generated code in detail, but `perf stat` seems to think that the instruction cache is performing a lot worse when the code slows down. This might mean that excessive inlining causes a different code structure that leads to less-than-optimal icache usage, but it's at best a guess. Explicitly disabling the inline for the cold path seems to help the optimizer figure out the right thing. #![allow(dead_code)] use std::cmp; use std::io::{self, Read}; use encoding_rs::{Decoder, Encoding, UTF_8}; /// A BOM is at least 2 bytes and at most 3 bytes. /// /// If fewer than 2 bytes are available to be read at the beginning of a /// reader, then a BOM is `None`. 
#[derive(Clone, Copy, Debug, Eq, PartialEq)] struct Bom { bytes: [u8; 3], len: usize, } impl Bom { fn as_slice(&self) -> &[u8] { &self.bytes[0..self.len] } fn decoder(&self) -> Option<Decoder> { let bom = self.as_slice(); if bom.len() < 3 { return None; } if let Some((enc, _)) = Encoding::for_bom(bom) { if enc != UTF_8 { return Some(enc.new_decoder_with_bom_removal()); } } None } } /// BomPeeker wraps `R` and satisfies the `io::Read` interface while also /// providing a peek at the BOM if one exists. Peeking at the BOM does not /// advance the reader. struct BomPeeker<R> { rdr: R, bom: Option<Bom>, nread: usize, } impl<R: io::Read> BomPeeker<R> { /// Create a new BomPeeker. /// /// The first three bytes can be read using the `peek_bom` method, but /// will not advance the reader. fn new(rdr: R) -> BomPeeker<R> { BomPeeker { rdr: rdr, bom: None, nread: 0 } } /// Peek at the first three bytes of the underlying reader. /// /// This does not advance the reader provided by `BomPeeker`. /// /// If the underlying reader does not have at least two bytes available, /// then `None` is returned. fn peek_bom(&mut self) -> io::Result<Bom> { if let Some(bom) = self.bom { return Ok(bom); } self.bom = Some(Bom { bytes: [0; 3], len: 0 }); let mut buf = [0u8; 3]; let bom_len = try!(read_full(&mut self.rdr, &mut buf)); self.bom = Some(Bom { bytes: buf, len: bom_len }); Ok(self.bom.unwrap()) } } impl<R: io::Read> io::Read for BomPeeker<R> { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { if self.nread < 3 { let bom = try!(self.peek_bom()); let bom = bom.as_slice(); if self.nread < bom.len() { let rest = &bom[self.nread..]; let len = cmp::min(buf.len(), rest.len()); buf[..len].copy_from_slice(&rest[..len]); self.nread += len; return Ok(len); } } let nread = try!(self.rdr.read(buf)); self.nread += nread; Ok(nread) } } /// Like io::Read::read_exact, except it never returns UnexpectedEof and /// instead returns the number of bytes read if EOF is seen before filling /// `buf`. 
fn read_full<R: io::Read>( mut rdr: R, mut buf: &mut [u8], ) -> io::Result<usize> { let mut nread = 0; while !buf.is_empty() { match rdr.read(buf) { Ok(0) => break, Ok(n) => { nread += n; let tmp = buf; buf = &mut tmp[n..]; } Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {} Err(e) => return Err(e), } } Ok(nread) } /// A reader that transcodes to UTF-8. The source encoding is determined by /// inspecting the BOM from the stream read from `R`, if one exists. If a /// UTF-16 BOM exists, then the source stream is trancoded to UTF-8 with /// invalid UTF-16 sequences translated to the Unicode replacement character. /// In all other cases, the underlying reader is passed through unchanged. /// /// `R` is the type of the underlying reader and `B` is the type of an internal /// buffer used to store the results of trancoding. /// /// Note that not all methods on `io::Read` work with this implementation. /// For example, the `bytes` adapter method attempts to read a single byte at /// a time, but this implementation requires a buffer of size at least `4`. If /// a buffer of size less than 4 is given, then an error is returned. pub struct DecodeReader<R, B> { /// The underlying reader, wrapped in a peeker for reading a BOM if one /// exists. rdr: BomPeeker<R>, /// The internal buffer to store transcoded bytes before they are read by /// callers. buf: B, /// The current position in `buf`. Subsequent reads start here. pos: usize, /// The number of transcoded bytes in `buf`. Subsequent reads end here. buflen: usize, /// Whether this is the first read or not (in which we inspect the BOM). first: bool, /// Whether a "last" read has occurred. After this point, EOF will always /// be returned. last: bool, /// The underlying text decoder derived from the BOM, if one exists. decoder: Option<Decoder>, } impl<R: io::Read, B: AsMut<[u8]>> DecodeReader<R, B> { /// Create a new transcoder that converts a source stream to valid UTF-8. 
/// /// If an encoding is specified, then it is used to transcode `rdr` to /// UTF-8. Otherwise, if no encoding is specified, and if a UTF-16 BOM is /// found, then the corresponding UTF-16 encoding is used to transcode /// `rdr` to UTF-8. In all other cases, `rdr` is assumed to be at least /// ASCII-compatible and passed through untouched. /// /// Errors in the encoding of `rdr` are handled with the Unicode /// replacement character. If no encoding of `rdr` is specified, then /// errors are not handled. pub fn new( rdr: R, buf: B, enc: Option<&'static Encoding>, ) -> DecodeReader<R, B> { DecodeReader { rdr: BomPeeker::new(rdr), buf: buf, buflen: 0, pos: 0, first: enc.is_none(), last: false, decoder: enc.map(|enc| enc.new_decoder_with_bom_removal()), } } /// Fill the internal buffer from the underlying reader. /// /// If there are unread bytes in the internal buffer, then we move them /// to the beginning of the internal buffer and fill the remainder. /// /// If the internal buffer is too small to read additional bytes, then an /// error is returned. #[inline(always)] // massive perf benefit (???) fn fill(&mut self) -> io::Result<()> { if self.pos < self.buflen { if self.buflen >= self.buf.as_mut().len() { return Err(io::Error::new( io::ErrorKind::Other, "DecodeReader: internal buffer exhausted")); } let newlen = self.buflen - self.pos; let mut tmp = Vec::with_capacity(newlen); tmp.extend_from_slice(&self.buf.as_mut()[self.pos..self.buflen]); self.buf.as_mut()[..newlen].copy_from_slice(&tmp); self.buflen = newlen; } else { self.buflen = 0; } self.pos = 0; self.buflen += try!(self.rdr.read(&mut self.buf.as_mut()[self.buflen..])); Ok(()) } /// Transcode the inner stream to UTF-8 in `buf`. This assumes that there /// is a decoder capable of transcoding the inner stream to UTF-8. This /// returns the number of bytes written to `buf`. /// /// When this function returns, exactly one of the following things will /// be true: /// /// 1. 
A non-zero number of bytes were written to `buf`.
    /// 2. The underlying reader reached EOF.
    /// 3. An error is returned: the internal buffer ran out of room.
    /// 4. An I/O error occurred.
    ///
    /// Note that `buf` must have at least 4 bytes of space.
    fn transcode(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        assert!(buf.len() >= 4);
        // Once EOF has been flushed through the decoder, always report EOF.
        if self.last {
            return Ok(0);
        }
        if self.pos >= self.buflen {
            try!(self.fill());
        }
        let mut nwrite = 0;
        loop {
            let (_, nin, nout, _) =
                self.decoder.as_mut().unwrap().decode_to_utf8(
                    &self.buf.as_mut()[self.pos..self.buflen], buf, false);
            self.pos += nin;
            nwrite += nout;
            // If we've written at least one byte to the caller-provided
            // buffer, then our mission is complete.
            if nwrite > 0 {
                break;
            }
            // Otherwise, we know that our internal buffer has insufficient
            // data to transcode at least one char, so we attempt to refill it.
            try!(self.fill());
            // Quit on EOF.
            if self.buflen == 0 {
                self.pos = 0;
                self.last = true;
                // Final call with `last = true` lets the decoder flush any
                // pending state (e.g. a trailing unpaired UTF-16 code unit
                // becomes a replacement character — see the
                // `trans_utf16_incomplete` test).
                let (_, _, nout, _) =
                    self.decoder.as_mut().unwrap().decode_to_utf8(
                        &[], buf, true);
                return Ok(nout);
            }
        }
        Ok(nwrite)
    }

    // Determine the source encoding by peeking at the BOM, if any.
    #[inline(never)] // impacts perf...
    fn detect(&mut self) -> io::Result<()> {
        let bom = try!(self.rdr.peek_bom());
        self.decoder = bom.decoder();
        Ok(())
    }
}

impl<R: io::Read, B: AsMut<[u8]>> io::Read for DecodeReader<R, B> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        if self.first {
            self.first = false;
            try!(self.detect());
        }
        // No decoder means the stream is passed through untouched.
        if self.decoder.is_none() {
            return self.rdr.read(buf);
        }
        // When decoding UTF-8, we need at least 4 bytes of space to guarantee
        // that we can decode at least one codepoint. If we don't have it, we
        // can either return `0` for the number of bytes read or return an
        // error. Since `0` would be interpreted as a possibly premature EOF,
        // we opt for an error.
if buf.len() < 4 {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                "DecodeReader: byte buffer must have length at least 4"));
        }
        self.transcode(buf)
    }
}

#[cfg(test)]
mod tests {
    use std::io::Read;
    use encoding_rs::Encoding;
    use super::{Bom, BomPeeker, DecodeReader};

    // Helper: assert the bytes are valid UTF-8 and view them as a &str.
    fn utf8(bytes: &[u8]) -> &str {
        ::std::str::from_utf8(bytes).unwrap()
    }

    // Helper: drain a reader into an owned String.
    fn read_to_string<R: Read>(mut rdr: R) -> String {
        let mut s = String::new();
        rdr.read_to_string(&mut s).unwrap();
        s
    }

    // An empty stream yields an empty (len 0) BOM and immediate EOF.
    #[test]
    fn peeker_empty() {
        let buf = [];
        let mut peeker = BomPeeker::new(&buf[..]);
        assert_eq!(Bom { bytes: [0; 3], len: 0}, peeker.peek_bom().unwrap());
        let mut tmp = [0; 100];
        assert_eq!(0, peeker.read(&mut tmp).unwrap());
    }

    // A one-byte stream: the partial BOM records the byte, and the byte is
    // still visible through `read`.
    #[test]
    fn peeker_one() {
        let buf = [1];
        let mut peeker = BomPeeker::new(&buf[..]);
        assert_eq!(
            Bom { bytes: [1, 0, 0], len: 1},
            peeker.peek_bom().unwrap());
        let mut tmp = [0; 100];
        assert_eq!(1, peeker.read(&mut tmp).unwrap());
        assert_eq!(1, tmp[0]);
        assert_eq!(0, peeker.read(&mut tmp).unwrap());
    }

    // Two bytes: same as above with a len-2 partial BOM.
    #[test]
    fn peeker_two() {
        let buf = [1, 2];
        let mut peeker = BomPeeker::new(&buf[..]);
        assert_eq!(
            Bom { bytes: [1, 2, 0], len: 2},
            peeker.peek_bom().unwrap());
        let mut tmp = [0; 100];
        assert_eq!(2, peeker.read(&mut tmp).unwrap());
        assert_eq!(1, tmp[0]);
        assert_eq!(2, tmp[1]);
        assert_eq!(0, peeker.read(&mut tmp).unwrap());
    }

    // Exactly three bytes: a full BOM buffer, fully replayed by `read`.
    #[test]
    fn peeker_three() {
        let buf = [1, 2, 3];
        let mut peeker = BomPeeker::new(&buf[..]);
        assert_eq!(
            Bom { bytes: [1, 2, 3], len: 3},
            peeker.peek_bom().unwrap());
        let mut tmp = [0; 100];
        assert_eq!(3, peeker.read(&mut tmp).unwrap());
        assert_eq!(1, tmp[0]);
        assert_eq!(2, tmp[1]);
        assert_eq!(3, tmp[2]);
        assert_eq!(0, peeker.read(&mut tmp).unwrap());
    }

    // Four bytes: the first read replays the 3 peeked bytes; a second read
    // continues with the rest of the underlying stream.
    #[test]
    fn peeker_four() {
        let buf = [1, 2, 3, 4];
        let mut peeker = BomPeeker::new(&buf[..]);
        assert_eq!(
            Bom { bytes: [1, 2, 3], len: 3},
            peeker.peek_bom().unwrap());
        let mut tmp = [0; 100];
        assert_eq!(3, peeker.read(&mut tmp).unwrap());
        assert_eq!(1, tmp[0]);
        assert_eq!(2, tmp[1]);
        assert_eq!(3, tmp[2]);
        assert_eq!(1,
peeker.read(&mut tmp).unwrap());
        assert_eq!(4, tmp[0]);
        assert_eq!(0, peeker.read(&mut tmp).unwrap());
    }

    // Reading one byte at a time still replays the peeked bytes in order,
    // and a zero-length read buffer yields zero bytes without advancing.
    #[test]
    fn peeker_one_at_a_time() {
        let buf = [1, 2, 3, 4];
        let mut peeker = BomPeeker::new(&buf[..]);
        let mut tmp = [0; 1];
        assert_eq!(0, peeker.read(&mut tmp[..0]).unwrap());
        assert_eq!(0, tmp[0]);
        assert_eq!(1, peeker.read(&mut tmp).unwrap());
        assert_eq!(1, tmp[0]);
        assert_eq!(1, peeker.read(&mut tmp).unwrap());
        assert_eq!(2, tmp[0]);
        assert_eq!(1, peeker.read(&mut tmp).unwrap());
        assert_eq!(3, tmp[0]);
        assert_eq!(1, peeker.read(&mut tmp).unwrap());
        assert_eq!(4, tmp[0]);
    }

    // In cases where all we have is a bom, we expect the bytes to be
    // passed through unchanged.
    #[test]
    fn trans_utf16_bom() {
        let srcbuf = vec![0xFF, 0xFE];
        let mut dstbuf = vec![0; 8 * (1<<10)];
        let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
        let n = rdr.read(&mut dstbuf).unwrap();
        assert_eq!(&*srcbuf, &dstbuf[..n]);

        let srcbuf = vec![0xFE, 0xFF];
        let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
        let n = rdr.read(&mut dstbuf).unwrap();
        assert_eq!(&*srcbuf, &dstbuf[..n]);

        let srcbuf = vec![0xEF, 0xBB, 0xBF];
        let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
        let n = rdr.read(&mut dstbuf).unwrap();
        assert_eq!(&*srcbuf, &dstbuf[..n]);
    }

    // Test basic UTF-16 decoding (both little- and big-endian BOMs).
    #[test]
    fn trans_utf16_basic() {
        let srcbuf = vec![0xFF, 0xFE, 0x61, 0x00];
        let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
        assert_eq!("a", read_to_string(&mut rdr));

        let srcbuf = vec![0xFE, 0xFF, 0x00, 0x61];
        let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
        assert_eq!("a", read_to_string(&mut rdr));
    }

    // Test incomplete UTF-16 decoding. This ensures we see a replacement char
    // if the stream ends with an unpaired code unit.
#[test] fn trans_utf16_incomplete() { let srcbuf = vec![0xFF, 0xFE, 0x61, 0x00, 0x00]; let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None); assert_eq!("a\u{FFFD}", read_to_string(&mut rdr)); } macro_rules! test_trans_simple { ($name:ident, $enc:expr, $srcbytes:expr, $dst:expr) => { #[test] fn $name() { let srcbuf = &$srcbytes[..]; let enc = Encoding::for_label($enc.as_bytes()); let mut rdr = DecodeReader::new( &*srcbuf, vec![0; 8 * (1<<10)], enc); assert_eq!($dst, read_to_string(&mut rdr)); } } } // This isn't exhaustive obviously, but it lets us test base level support. test_trans_simple!(trans_simple_auto, "does not exist", b"\xD0\x96", "Ж"); test_trans_simple!(trans_simple_utf8, "utf-8", b"\xD0\x96", "Ж"); test_trans_simple!(trans_simple_utf16le, "utf-16le", b"\x16\x04", "Ж"); test_trans_simple!(trans_simple_utf16be, "utf-16be", b"\x04\x16", "Ж"); test_trans_simple!(trans_simple_chinese, "chinese", b"\xA7\xA8", "Ж"); test_trans_simple!(trans_simple_korean, "korean", b"\xAC\xA8", "Ж"); test_trans_simple!(trans_simple_big5_hkscs, "big5-hkscs", b"\xC7\xFA", "Ж"); test_trans_simple!(trans_simple_gbk, "gbk", b"\xA7\xA8", "Ж"); test_trans_simple!(trans_simple_sjis, "sjis", b"\x84\x47", "Ж"); test_trans_simple!(trans_simple_eucjp, "euc-jp", b"\xA7\xA8", "Ж"); test_trans_simple!(trans_simple_latin1, "latin1", b"\xA9", "©"); }
// Copyright 2014 Jonathan Eyolfson use std::ptr; use raw; pub struct Display { ptr: *mut raw::wl_display } impl Display { pub fn connect_to_env_or_default() -> Display { unsafe { let ptr = raw::wl_display_connect(ptr::null()); assert!(!ptr.is_null(), "wl_display_connect failed"); Display { ptr: ptr } } } pub fn connect(name: &str) -> Display { unsafe { let ptr = raw::wl_display_connect(name.as_ptr() as *const i8); assert!(!ptr.is_null(), "wl_display_connect failed"); Display { ptr: ptr } } } pub fn roundtrip(&mut self) -> i32 { unsafe { let r = raw::wl_display_roundtrip(self.ptr); assert!(r != -1, "wl_display_roundtrip failed"); r } } pub fn read_events(&mut self) { unsafe { let r = raw::wl_display_read_events(self.ptr); assert!(r != -1, "wl_display_read_events failed"); } } pub fn prepare_read(&mut self) { unsafe { let r = raw::wl_display_prepare_read(self.ptr); assert!(r != -1, "wl_display_prepare_read failed"); } } pub fn cancel_read(&mut self) { unsafe { raw::wl_display_cancel_read(self.ptr); } } pub fn dispatch(&mut self) -> i32 { unsafe { let r = raw::wl_display_dispatch(self.ptr); assert!(r != -1); r } } pub fn flush(&mut self) -> i32 { unsafe { let r = raw::wl_display_flush(self.ptr); assert!(r != -1); r } } pub unsafe fn to_ptr(&mut self) -> *mut raw::wl_display { self.ptr } } impl Drop for Display { fn drop(&mut self) { unsafe { raw::wl_display_disconnect(self.ptr); } } } Display remaining assert messages added // Copyright 2014 Jonathan Eyolfson use std::ptr; use raw; pub struct Display { ptr: *mut raw::wl_display } impl Display { pub fn connect_to_env_or_default() -> Display { unsafe { let ptr = raw::wl_display_connect(ptr::null()); assert!(!ptr.is_null(), "wl_display_connect failed"); Display { ptr: ptr } } } pub fn connect(name: &str) -> Display { unsafe { let ptr = raw::wl_display_connect(name.as_ptr() as *const i8); assert!(!ptr.is_null(), "wl_display_connect failed"); Display { ptr: ptr } } } pub fn roundtrip(&mut self) -> i32 { unsafe { let r = 
raw::wl_display_roundtrip(self.ptr);
            assert!(r != -1, "wl_display_roundtrip failed");
            r
        }
    }
    // Read queued events from the display file descriptor (pairs with
    // `prepare_read`).
    pub fn read_events(&mut self) {
        unsafe {
            let r = raw::wl_display_read_events(self.ptr);
            assert!(r != -1, "wl_display_read_events failed");
        }
    }
    // Announce intent to read events; must be followed by `read_events`
    // or `cancel_read`.
    pub fn prepare_read(&mut self) {
        unsafe {
            let r = raw::wl_display_prepare_read(self.ptr);
            assert!(r != -1, "wl_display_prepare_read failed");
        }
    }
    // Abandon a previously announced `prepare_read`.
    pub fn cancel_read(&mut self) {
        unsafe {
            raw::wl_display_cancel_read(self.ptr);
        }
    }
    // Dispatch pending events, blocking if none are queued.
    pub fn dispatch(&mut self) -> i32 {
        unsafe {
            let r = raw::wl_display_dispatch(self.ptr);
            assert!(r != -1, "wl_display_dispatch failed");
            r
        }
    }
    // Flush buffered requests to the server.
    pub fn flush(&mut self) -> i32 {
        unsafe {
            let r = raw::wl_display_flush(self.ptr);
            assert!(r != -1, "wl_display_flush failed");
            r
        }
    }
    // Raw pointer escape hatch; the pointer must not outlive `self`.
    pub unsafe fn to_ptr(&mut self) -> *mut raw::wl_display {
        self.ptr
    }
}

impl Drop for Display {
    // Close the connection when the wrapper goes out of scope.
    fn drop(&mut self) {
        unsafe { raw::wl_display_disconnect(self.ptr); }
    }
}
use std::char;
use num::Complex;
use rustty::{Terminal, Cell};

// indexing is from the top of the cell
// Map up to two pixel rows (0..=3) onto a single braille character:
// `p1` selects a dot in the left braille column, `p2` in the right;
// `None` leaves that column empty.
fn pixel_nums_to_braille(p1: Option<u8>, p2: Option<u8>) -> char {
    // Each braille dot is one bit above the U+2800 base code point.
    let pixel_map = [[0x01, 0x08],
                     [0x02, 0x10],
                     [0x04, 0x20],
                     [0x40, 0x80]];
    let mut c = 0;
    if let Some(p) = p1 {
        c |= pixel_map[p as usize][0];
    }
    if let Some(p) = p2 {
        c |= pixel_map[p as usize][1];
    }
    char::from_u32((0x2800 + c) as u32).unwrap()
}

// Rotate the spectrum so the DC bin (index 0) lands in the middle,
// like numpy's fftshift (see the test_fft_shift_* tests).
fn fft_shift<T: Clone>(spec: &mut [T]) {
    let spec_copy = spec.to_owned();
    let (first_half, last_half) =
        spec_copy.split_at((spec_copy.len() + 1) / 2);
    let shifted_spec = last_half.iter().chain(first_half.iter());
    for (x, y) in spec.iter_mut().zip(shifted_spec) {
        *x = y.clone();
    }
}

// Reduce the complex spectrum to one magnitude per destination bin.
fn spectrum_to_bin_heights(spec: &[Complex<f32>], dest: &mut [f32]) {
    // subsample
    let mut last_idx = -1isize;
    for (i, x) in spec.iter().map(|x| x.norm()).enumerate() {
        if (i * dest.len() / spec.len()) as isize > last_idx {
            last_idx += 1;
            dest[last_idx as usize] = x;
        }
    }
    fft_shift(dest);
}

// Render the spectrum into the terminal using braille characters
// (2 pixel columns x 4 pixel rows per terminal cell).
pub fn draw_spectrum(term: &mut Terminal, spec: Vec<Complex<f32>>) {
    term.clear().unwrap();
    let (num_cols, num_rows) = term.size();
    let pixel_height = num_rows * 4;
    let pixel_width = num_cols * 2;
    // TODO what should this value be?
    let max_height = 500.0;
    let mut bins = vec![0.0; pixel_width];
    spectrum_to_bin_heights(&spec[..], &mut bins[..]);
    for col_idx in 0..num_cols {
        // height in float between 0 and 1.
        let h1 = bins[col_idx * 2] / max_height;
        let h2 = bins[col_idx * 2 + 1] / max_height;
        // The "pixel" height of each point.
let p1 = (h1 * pixel_height as f32).floor() as usize;
        let p2 = (h2 * pixel_height as f32).floor() as usize;
        // Clamp to the top pixel row.
        let p1 = if p1 >= pixel_height { pixel_height - 1} else { p1 };
        let p2 = if p2 >= pixel_height { pixel_height - 1} else { p2 };
        // Reverse it, since the terminal indexing is from the top
        let p1 = pixel_height - p1 - 1;
        let p2 = pixel_height - p2 - 1;
        // Terminal cell row containing each pixel (4 pixel rows per cell).
        let c1 = p1 / 4;
        let c2 = p2 / 4;
        if c1 == c2 {
            // Both points land in the same cell: one combined glyph.
            term[(col_idx, c1)] = Cell::with_char(
                pixel_nums_to_braille(Some((p1 % 4) as u8),
                                      Some((p2 % 4) as u8)));
        } else {
            term[(col_idx, c1)] = Cell::with_char(
                pixel_nums_to_braille(Some((p1 % 4) as u8), None));
            term[(col_idx, c2)] = Cell::with_char(
                pixel_nums_to_braille(None, Some((p2 % 4) as u8)));
        }
    }
}

#[cfg(test)]
mod tests {
    use super::{pixel_nums_to_braille, fft_shift};

    // The DC bin (index 0) should land in the middle after the shift.
    #[test]
    fn test_fft_shift_dc() {
        let len = 9;
        let mut spec = vec![0; len];
        spec[0] = 1;
        fft_shift(&mut spec[..]);
        assert_eq!(spec[len / 2], 1);
    }

    // Even-length shift rotates the second half to the front.
    #[test]
    fn test_fft_shift_even() {
        let mut before: Vec<usize> = (0..10).collect();
        let after = vec![5, 6, 7, 8, 9, 0, 1, 2, 3, 4];
        fft_shift(&mut before[..]);
        assert_eq!(before, after);
    }

    // Spot-check the dot-to-glyph mapping for single dots per column.
    #[test]
    fn test_pixel_nums() {
        assert_eq!(pixel_nums_to_braille(Some(0), Some(0)), '⠉');
        assert_eq!(pixel_nums_to_braille(Some(1), Some(2)), '⠢');
        assert_eq!(pixel_nums_to_braille(None, Some(3)), '⢀');
        assert_eq!(pixel_nums_to_braille(Some(2), None), '⠄');
    }
}
Filled in spectrum.
use std::char;
use std::cmp::max;
use num::Complex;
use rustty::{Terminal, Cell, Style, Attr};

// indexing is from the top of the cell
// Map two pixel heights onto one braille char, filling every dot from the
// given row down to the bottom of the cell (a solid column segment).
fn pixel_nums_to_braille(p1: Option<u8>, p2: Option<u8>) -> char {
    // Each braille dot is one bit above the U+2800 base code point.
    let pixel_map = [[0x01, 0x08],
                     [0x02, 0x10],
                     [0x04, 0x20],
                     [0x40, 0x80]];
    let mut c = 0;
    if let Some(p) = p1 {
        // Set dot `p` and every dot below it in the left column.
        for i in p..4 {
            c |= pixel_map[i as usize][0];
        }
    }
    if let Some(p) = p2 {
        // Same for the right column.
        for i in p..4 {
            c |= pixel_map[i as usize][1];
        }
    }
    char::from_u32((0x2800 + c) as u32).unwrap()
}

// Rotate the spectrum so the DC bin (index 0) lands in the middle,
// like numpy's fftshift (see the test_fft_shift_* tests).
fn fft_shift<T: Clone>(spec: &mut [T]) {
    let spec_copy = spec.to_owned();
    let (first_half, last_half) =
        spec_copy.split_at((spec_copy.len() + 1) / 2);
    let shifted_spec = last_half.iter().chain(first_half.iter());
    for (x, y) in spec.iter_mut().zip(shifted_spec) {
        *x = y.clone();
    }
}

// Reduce the complex spectrum to one magnitude per destination bin.
fn spectrum_to_bin_heights(spec: &[Complex<f32>], dest: &mut [f32]) {
    //TODO should be plotting in log scale
    // subsample
    let mut last_idx = -1isize;
    for (i, x) in spec.iter().map(|x| x.norm()).enumerate() {
        if (i * dest.len() / spec.len()) as isize > last_idx {
            last_idx += 1;
            dest[last_idx as usize] = x;
        }
    }
    //TODO unnecessary allocation
    fft_shift(dest);
}

// Wrap a glyph in a bold, default-colored terminal cell.
fn char_to_cell(c: char) -> Cell {
    Cell::new(c,
              Style::with_attr(Attr::Bold),
              Style::with_attr(Attr::Default))
}

// Draw one terminal column: two adjacent bins rendered as filled braille
// columns with heights `p1` (left) and `p2` (right), in pixels from the
// bottom of the screen.
fn draw_pixel_pair(term: &mut Terminal, col_idx: usize, p1: usize, p2: usize) {
    let max_pixel_height = 4 * term.rows();
    // clamp heights
    let p1 = if p1 >= max_pixel_height { max_pixel_height - 1} else { p1 };
    let p2 = if p2 >= max_pixel_height { max_pixel_height - 1} else { p2 };
    // Reverse it, since the terminal indexing is from the top
    let p1 = max_pixel_height - p1 - 1;
    let p2 = max_pixel_height - p2 - 1;
    // cell indices
    let c1 = p1 / 4;
    let c2 = p2 / 4;
    // Below both tops: cells fully filled in both braille columns.
    let full_cell_char = pixel_nums_to_braille(Some(0), Some(0));
    for row_idx in max(c1, c2)..term.rows() {
        term[(col_idx, row_idx)] = char_to_cell(full_cell_char);
    }
    // Cells where only the left column extends (left bar is taller);
    // this range is non-empty only when c1 < c2.
    let left_fill_cell_char = pixel_nums_to_braille(Some(0), None);
    for row_idx in c1..max(c1, c2) {
        term[(col_idx, row_idx)] =
char_to_cell(left_fill_cell_char);
    }
    // Cells where only the right column extends (right bar is taller).
    //
    // Bug fix: this previously reused the *left*-column glyph
    // (`pixel_nums_to_braille(Some(0), None)`), so the solo span of the
    // right bar was drawn in the wrong braille column. The right fill must
    // set dots in the right column: `(None, Some(0))`.
    let right_fill_cell_char = pixel_nums_to_braille(None, Some(0));
    for row_idx in c2..max(c1, c2) {
        term[(col_idx, row_idx)] = char_to_cell(right_fill_cell_char);
    }
    // Top-most cell(s): partial columns for the exact pixel heights.
    if c1 == c2 {
        term[(col_idx, c1)] = char_to_cell(
            pixel_nums_to_braille(Some((p1 % 4) as u8),
                                  Some((p2 % 4) as u8)));
    } else {
        term[(col_idx, c1)] = char_to_cell(
            pixel_nums_to_braille(Some((p1 % 4) as u8), None));
        term[(col_idx, c2)] = char_to_cell(
            pixel_nums_to_braille(None, Some((p2 % 4) as u8)));
    }
}

/// Render `spec` into `term` as a filled braille spectrum plot
/// (2 pixel columns x 4 pixel rows per terminal cell).
pub fn draw_spectrum(term: &mut Terminal, spec: Vec<Complex<f32>>) {
    term.clear().unwrap();
    let (num_cols, num_rows) = term.size();
    let pixel_height = num_rows * 4;
    let pixel_width = num_cols * 2;
    // TODO what should this value be?
    let max_height = 500.0;
    let mut bins = vec![0.0; pixel_width];
    spectrum_to_bin_heights(&spec[..], &mut bins[..]);
    for col_idx in 0..num_cols {
        // height in float between 0 and 1.
        let h1 = bins[col_idx * 2] / max_height;
        let h2 = bins[col_idx * 2 + 1] / max_height;
        // The "pixel" height of each point.
        let p1 = (h1 * pixel_height as f32).floor() as usize;
        let p2 = (h2 * pixel_height as f32).floor() as usize;
        draw_pixel_pair(term, col_idx, p1, p2);
    }
}

#[cfg(test)]
mod tests {
    use super::{pixel_nums_to_braille, fft_shift};

    // The DC bin (index 0) should land in the middle after the shift.
    #[test]
    fn test_fft_shift_dc() {
        let len = 9;
        let mut spec = vec![0; len];
        spec[0] = 1;
        fft_shift(&mut spec[..]);
        assert_eq!(spec[len / 2], 1);
    }

    // Even-length shift rotates the second half to the front.
    #[test]
    fn test_fft_shift_even() {
        let mut before: Vec<usize> = (0..10).collect();
        let after = vec![5, 6, 7, 8, 9, 0, 1, 2, 3, 4];
        fft_shift(&mut before[..]);
        assert_eq!(before, after);
    }

    // Filled variant: dot `p` plus everything below it is set.
    #[test]
    fn test_pixel_nums() {
        assert_eq!(pixel_nums_to_braille(Some(0), Some(0)), '⣿');
        assert_eq!(pixel_nums_to_braille(Some(1), Some(2)), '⣦');
        assert_eq!(pixel_nums_to_braille(None, Some(3)), '⢀');
        assert_eq!(pixel_nums_to_braille(Some(2), None), '⡄');
    }
}
use digest::Digest;
use sha2::{Sha512};
use curve25519::{GeP2, GeP3, ge_scalarmult_base, sc_reduce, sc_muladd, curve25519, Fe};
use util::{fixed_time_eq};
use std::ops::{Add, Sub, Mul};

/// Derive an Ed25519 keypair from a 32-byte `seed`.
///
/// Returns `(secret, public)` where `secret` is the 64-byte expanded
/// secret key laid out as `seed || public_key` (see the copies below)
/// and `public` is `A = a*B` with `a` the clamped SHA-512 hash of the
/// seed.
pub fn keypair(seed: &[u8]) -> ([u8; 64], [u8; 32]) {
    let mut secret: [u8; 64] = {
        let mut hash_output: [u8; 64] = [0; 64];
        let mut hasher = Sha512::new();
        hasher.input(seed);
        hasher.result(&mut hash_output);
        // Scalar clamping: clear the low 3 bits of byte 0, clear the top
        // two bits of byte 31, set bit 6 of byte 31.
        hash_output[0] &= 248;
        hash_output[31] &= 63;
        hash_output[31] |= 64;
        hash_output
    };
    let a = ge_scalarmult_base(&secret[0..32]);
    let public_key = a.to_bytes();
    // Overwrite the hash halves: secret = seed || public_key.
    for (dest, src) in (&mut secret[32..64]).iter_mut().zip(public_key.iter()) {
        *dest = *src;
    }
    for (dest, src) in (&mut secret[0..32]).iter_mut().zip(seed.iter()) {
        *dest = *src;
    }
    (secret, public_key)
}

/// Sign `message` with a 64-byte expanded secret key (`seed || public_key`
/// as produced by `keypair`). Returns the 64-byte signature `R || S`.
pub fn signature(message: &[u8], secret_key: &[u8]) -> [u8; 64] {
    let seed = &secret_key[0..32];
    let public_key = &secret_key[32..64];
    // az = clamped SHA-512(seed); the first 32 bytes are the scalar `a`.
    let az: [u8; 64] = {
        let mut hash_output: [u8; 64] = [0; 64];
        let mut hasher = Sha512::new();
        hasher.input(seed);
        hasher.result(&mut hash_output);
        hash_output[0] &= 248;
        hash_output[31] &= 63;
        hash_output[31] |= 64;
        hash_output
    };
    // Deterministic nonce: SHA-512(az[32..64] || message), reduced mod l.
    let nonce = {
        let mut hash_output: [u8; 64] = [0; 64];
        let mut hasher = Sha512::new();
        hasher.input(&az[32..64]);
        hasher.input(message);
        hasher.result(&mut hash_output);
        sc_reduce(&mut hash_output[0..64]);
        hash_output
    };
    let mut signature: [u8; 64] = [0; 64];
    // R = nonce * B
    let r: GeP3 = ge_scalarmult_base(&nonce[0..32]);
    for (result_byte, source_byte) in
        (&mut signature[0..32]).iter_mut().zip(r.to_bytes().iter()) {
        *result_byte = *source_byte;
    }
    // Temporarily place the public key in the S half so the hash below is
    // computed over R || A || message in one pass.
    for (result_byte, source_byte) in
        (&mut signature[32..64]).iter_mut().zip(public_key.iter()) {
        *result_byte = *source_byte;
    }
    {
        let mut hasher = Sha512::new();
        hasher.input(signature.as_ref());
        hasher.input(message);
        let mut hram: [u8; 64] = [0; 64];
        hasher.result(&mut hram);
        sc_reduce(&mut hram);
        // S = (hram * a + nonce) mod l, overwriting the public-key half.
        sc_muladd(&mut signature[32..64],
                  &hram[0..32], &az[0..32], &nonce[0..32]);
    }
    signature
}

// NOTE(review): despite the name, this returns true when s is NOT less
// than l (the loop below yields c == 1 exactly when s < l, and the
// function returns c == 0); callers reject the signature on `true`.
fn check_s_lt_l(s: &[u8]) -> bool {
    let l: [u8; 32]
= [
        // The group order l = 2^252 + 27742317777372353535851937790883648493,
        // stored little-endian to match the little-endian scalar `s`
        // produced by `sc_reduce`/`sc_muladd`.
        //
        // Bug fix: this table was previously stored most-significant byte
        // first, but the comparison loop below scans from index 31 (the most
        // significant byte of a little-endian scalar) downward, exactly like
        // the ref10/libsodium canonicity check whose `L` table is
        // little-endian. With the reversed table the loop compared
        // mismatched digit positions and effectively never rejected
        // non-canonical values s >= l.
        0xed, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58,
        0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10 ];
    // Constant-time byte-wise comparison, most significant byte first.
    // `n` stays 1 only while all bytes seen so far are equal, so only the
    // first (most significant) differing byte can set `c`.
    let mut c: u8 = 0;
    let mut n: u8 = 1;
    let mut i = 31;
    loop {
        // c becomes 1 iff s[i] < l[i] at the deciding position.
        c |= ((((s[i] as i32) - (l[i] as i32)) >> 8) as u8) & n;
        n &= (((((s[i] ^ l[i]) as i32)) - 1) >> 8) as u8;
        if i == 0 {
            break;
        } else {
            i -= 1;
        }
    }
    // c == 1 means s < l; so this returns true when s >= l (despite the
    // function's name) and the caller rejects the signature in that case.
    c == 0
}

/// Verify an Ed25519 `signature` (R || S) of `message` under `public_key`.
pub fn verify(message: &[u8], public_key: &[u8], signature: &[u8]) -> bool {
    // Reject non-canonical S (S >= l) to prevent signature malleability.
    if check_s_lt_l(&signature[32..64]) {
        return false;
    }
    let a = match GeP3::from_bytes_negate_vartime(public_key) {
        Some(g) => g,
        None => {
            return false;
        }
    };
    // Reject an all-zero public key.
    let mut d = 0;
    for pk_byte in public_key.iter() {
        d |= *pk_byte;
    }
    if d == 0 {
        return false;
    }
    // hash = SHA-512(R || A || message), reduced mod l.
    let mut hasher = Sha512::new();
    hasher.input(&signature[0..32]);
    hasher.input(public_key);
    hasher.input(message);
    let mut hash: [u8; 64] = [0; 64];
    hasher.result(&mut hash);
    sc_reduce(&mut hash);
    // Recompute R' = hash*(-A) + S*B (note A was decoded negated above)
    // and accept iff R' equals the transmitted R, compared in fixed time.
    let r = GeP2::double_scalarmult_vartime(hash.as_ref(), a, &signature[32..64]);
    let rcheck = r.to_bytes();
    fixed_time_eq(rcheck.as_ref(), &signature[0..32])
}

/// Diffie-Hellman exchange using Ed25519 keys: convert the Edwards-form
/// public key to its Montgomery u-coordinate, then run curve25519 with the
/// clamped hash of the seed half of `private_key`.
pub fn exchange(public_key: &[u8], private_key: &[u8]) -> [u8; 32] {
    let ed_y = Fe::from_bytes(&public_key);
    // Produce public key in Montgomery form.
    let mont_x = edwards_to_montgomery_x(ed_y);
    // Produce private key from seed component (bytes 0 to 32)
    // of the Ed25519 extended private key (64 bytes).
    let mut hasher = Sha512::new();
    hasher.input(&private_key[0..32]);
    let mut hash: [u8; 64] = [0; 64];
    hasher.result(&mut hash);
    // Clamp the hash such that it is a valid private key
    hash[0] &= 248;
    hash[31] &= 127;
    hash[31] |= 64;
    let shared_mont_x : [u8; 32] = curve25519(&hash, &mont_x.to_bytes()); // priv., pub.
shared_mont_x } fn edwards_to_montgomery_x(ed_y: Fe) -> Fe { let ed_z = Fe([1,0,0,0,0,0,0,0,0,0]); let temp_x = ed_z.add(ed_y); let temp_z = ed_z.sub(ed_y); let temp_z_inv = temp_z.invert(); let mont_x = temp_x.mul(temp_z_inv); mont_x } #[cfg(test)] mod tests { use ed25519::{keypair, signature, verify, exchange}; use curve25519::{curve25519_base, curve25519}; use digest::Digest; use sha2::{Sha512}; fn do_keypair_case(seed: [u8; 32], expected_secret: [u8; 64], expected_public: [u8; 32]) { let (actual_secret, actual_public) = keypair(seed.as_ref()); assert_eq!(actual_secret.to_vec(), expected_secret.to_vec()); assert_eq!(actual_public.to_vec(), expected_public.to_vec()); } #[test] fn keypair_cases() { do_keypair_case( [0x26, 0x27, 0xf6, 0x85, 0x97, 0x15, 0xad, 0x1d, 0xd2, 0x94, 0xdd, 0xc4, 0x76, 0x19, 0x39, 0x31, 0xf1, 0xad, 0xb5, 0x58, 0xf0, 0x93, 0x97, 0x32, 0x19, 0x2b, 0xd1, 0xc0, 0xfd, 0x16, 0x8e, 0x4e], [0x26, 0x27, 0xf6, 0x85, 0x97, 0x15, 0xad, 0x1d, 0xd2, 0x94, 0xdd, 0xc4, 0x76, 0x19, 0x39, 0x31, 0xf1, 0xad, 0xb5, 0x58, 0xf0, 0x93, 0x97, 0x32, 0x19, 0x2b, 0xd1, 0xc0, 0xfd, 0x16, 0x8e, 0x4e, 0x5d, 0x6d, 0x23, 0x6b, 0x52, 0xd1, 0x8e, 0x3a, 0xb6, 0xd6, 0x07, 0x2f, 0xb6, 0xe4, 0xc7, 0xd4, 0x6b, 0xd5, 0x9a, 0xd9, 0xcc, 0x19, 0x47, 0x26, 0x5f, 0x00, 0xb7, 0x20, 0xfa, 0x2c, 0x8f, 0x66], [0x5d, 0x6d, 0x23, 0x6b, 0x52, 0xd1, 0x8e, 0x3a, 0xb6, 0xd6, 0x07, 0x2f, 0xb6, 0xe4, 0xc7, 0xd4, 0x6b, 0xd5, 0x9a, 0xd9, 0xcc, 0x19, 0x47, 0x26, 0x5f, 0x00, 0xb7, 0x20, 0xfa, 0x2c, 0x8f, 0x66]); do_keypair_case( [0x29, 0x23, 0xbe, 0x84, 0xe1, 0x6c, 0xd6, 0xae, 0x52, 0x90, 0x49, 0xf1, 0xf1, 0xbb, 0xe9, 0xeb, 0xb3, 0xa6, 0xdb, 0x3c, 0x87, 0x0c, 0x3e, 0x99, 0x24, 0x5e, 0x0d, 0x1c, 0x06, 0xb7, 0x47, 0xde], [0x29, 0x23, 0xbe, 0x84, 0xe1, 0x6c, 0xd6, 0xae, 0x52, 0x90, 0x49, 0xf1, 0xf1, 0xbb, 0xe9, 0xeb, 0xb3, 0xa6, 0xdb, 0x3c, 0x87, 0x0c, 0x3e, 0x99, 0x24, 0x5e, 0x0d, 0x1c, 0x06, 0xb7, 0x47, 0xde, 0x5d, 0x83, 0x31, 0x26, 0x56, 0x0c, 0xb1, 0x9a, 0x14, 0x19, 0x37, 0x27, 0x78, 0x96, 0xf0, 
0xfd, 0x43, 0x7b, 0xa6, 0x80, 0x1e, 0xb2, 0x10, 0xac, 0x4c, 0x39, 0xd9, 0x00, 0x72, 0xd7, 0x0d, 0xa8], [0x5d, 0x83, 0x31, 0x26, 0x56, 0x0c, 0xb1, 0x9a, 0x14, 0x19, 0x37, 0x27, 0x78, 0x96, 0xf0, 0xfd, 0x43, 0x7b, 0xa6, 0x80, 0x1e, 0xb2, 0x10, 0xac, 0x4c, 0x39, 0xd9, 0x00, 0x72, 0xd7, 0x0d, 0xa8]); } #[test] fn keypair_matches_mont() { let seed = [0x26, 0x27, 0xf6, 0x85, 0x97, 0x15, 0xad, 0x1d, 0xd2, 0x94, 0xdd, 0xc4, 0x76, 0x19, 0x39, 0x31, 0xf1, 0xad, 0xb5, 0x58, 0xf0, 0x93, 0x97, 0x32, 0x19, 0x2b, 0xd1, 0xc0, 0xfd, 0x16, 0x8e, 0x4e]; let (ed_private, ed_public) = keypair(seed.as_ref()); let mut hasher = Sha512::new(); hasher.input(&ed_private[0..32]); let mut hash: [u8; 64] = [0; 64]; hasher.result(&mut hash); hash[0] &= 248; hash[31] &= 127; hash[31] |= 64; let cv_public = curve25519_base(&hash); let edx_ss = exchange(&ed_public, &ed_private); let cv_ss = curve25519(&hash, &cv_public); assert_eq!(edx_ss.to_vec(), cv_ss.to_vec()); } fn do_sign_verify_case(seed: [u8; 32], message: &[u8], expected_signature: [u8; 64]) { let (secret_key, public_key) = keypair(seed.as_ref()); let mut actual_signature = signature(message, secret_key.as_ref()); assert_eq!(expected_signature.to_vec(), actual_signature.to_vec()); assert!(verify(message, public_key.as_ref(), actual_signature.as_ref())); for &(index, flip) in [(0, 1), (31, 0x80), (20, 0xff)].iter() { actual_signature[index] ^= flip; assert!(!verify(message, public_key.as_ref(), actual_signature.as_ref())); actual_signature[index] ^= flip; } let mut public_key_corrupt = public_key; public_key_corrupt[0] ^= 1; assert!(!verify(message, public_key_corrupt.as_ref(), actual_signature.as_ref())); } #[test] fn sign_verify_cases() { do_sign_verify_case( [0x2d, 0x20, 0x86, 0x83, 0x2c, 0xc2, 0xfe, 0x3f, 0xd1, 0x8c, 0xb5, 0x1d, 0x6c, 0x5e, 0x99, 0xa5, 0x75, 0x9f, 0x02, 0x21, 0x1f, 0x85, 0xe5, 0xff, 0x2f, 0x90, 0x4a, 0x78, 0x0f, 0x58, 0x00, 0x6f], [0x89, 0x8f, 0x9c, 0x4b, 0x2c, 0x6e, 0xe9, 0xe2, 0x28, 0x76, 0x1c, 0xa5, 0x08, 0x97, 0xb7, 
0x1f, 0xfe, 0xca, 0x1c, 0x35, 0x28, 0x46, 0xf5, 0xfe, 0x13, 0xf7, 0xd3, 0xd5, 0x7e, 0x2c, 0x15, 0xac, 0x60, 0x90, 0x0c, 0xa3, 0x2c, 0x5b, 0x5d, 0xd9, 0x53, 0xc9, 0xa6, 0x81, 0x0a, 0xcc, 0x64, 0x39, 0x4f, 0xfd, 0x14, 0x98, 0x26, 0xd9, 0x98, 0x06, 0x29, 0x2a, 0xdd, 0xd1, 0x3f, 0xc3, 0xbb, 0x7d, 0xac, 0x70, 0x1c, 0x5b, 0x4a, 0x2d, 0x61, 0x5d, 0x15, 0x96, 0x01, 0x28, 0xed, 0x9f, 0x73, 0x6b, 0x98, 0x85, 0x4f, 0x6f, 0x07, 0x05, 0xb0, 0xf0, 0xda, 0xcb, 0xdc, 0x2c, 0x26, 0x2d, 0x27, 0x39, 0x75, 0x19, 0x14, 0x9b, 0x0e, 0x4c, 0xbe, 0x16, 0x77, 0xc5, 0x76, 0xc1, 0x39, 0x7a, 0xae, 0x5c, 0xe3, 0x49, 0x16, 0xe3, 0x51, 0x31, 0x04, 0x63, 0x2e, 0xc2, 0x19, 0x0d, 0xb8, 0xd2, 0x22, 0x89, 0xc3, 0x72, 0x3c, 0x8d, 0x01, 0x21, 0x3c, 0xad, 0x80, 0x3f, 0x4d, 0x75, 0x74, 0xc4, 0xdb, 0xb5, 0x37, 0x31, 0xb0, 0x1c, 0x8e, 0xc7, 0x5d, 0x08, 0x2e, 0xf7, 0xdc, 0x9d, 0x7f, 0x1b, 0x73, 0x15, 0x9f, 0x63, 0xdb, 0x56, 0xaa, 0x12, 0xa2, 0xca, 0x39, 0xea, 0xce, 0x6b, 0x28, 0xe4, 0xc3, 0x1d, 0x9d, 0x25, 0x67, 0x41, 0x45, 0x2e, 0x83, 0x87, 0xe1, 0x53, 0x6d, 0x03, 0x02, 0x6e, 0xe4, 0x84, 0x10, 0xd4, 0x3b, 0x21, 0x91, 0x88, 0xba, 0x14, 0xa8, 0xaf].as_ref(), [0x91, 0x20, 0x91, 0x66, 0x1e, 0xed, 0x18, 0xa4, 0x03, 0x4b, 0xc7, 0xdb, 0x4b, 0xd6, 0x0f, 0xe2, 0xde, 0xeb, 0xf3, 0xff, 0x3b, 0x6b, 0x99, 0x8d, 0xae, 0x20, 0x94, 0xb6, 0x09, 0x86, 0x5c, 0x20, 0x19, 0xec, 0x67, 0x22, 0xbf, 0xdc, 0x87, 0xbd, 0xa5, 0x40, 0x91, 0x92, 0x2e, 0x11, 0xe3, 0x93, 0xf5, 0xfd, 0xce, 0xea, 0x3e, 0x09, 0x1f, 0x2e, 0xe6, 0xbc, 0x62, 0xdf, 0x94, 0x8e, 0x99, 0x09] ); do_sign_verify_case( [0x33, 0x19, 0x17, 0x82, 0xc1, 0x70, 0x4f, 0x60, 0xd0, 0x84, 0x8d, 0x75, 0x62, 0xa2, 0xfa, 0x19, 0xf9, 0x92, 0x4f, 0xea, 0x4e, 0x77, 0x33, 0xcd, 0x45, 0xf6, 0xc3, 0x2f, 0x21, 0x9a, 0x72, 0x91], [0x77, 0x13, 0x43, 0x5a, 0x0e, 0x34, 0x6f, 0x67, 0x71, 0xae, 0x5a, 0xde, 0xa8, 0x7a, 0xe7, 0xa4, 0x52, 0xc6, 0x5d, 0x74, 0x8f, 0x48, 0x69, 0xd3, 0x1e, 0xd3, 0x67, 0x47, 0xc3, 0x28, 0xdd, 0xc4, 0xec, 0x0e, 0x48, 0x67, 0x93, 0xa5, 0x1c, 0x67, 0x66, 0xf7, 0x06, 0x48, 
0x26, 0xd0, 0x74, 0x51, 0x4d, 0xd0, 0x57, 0x41, 0xf3, 0xbe, 0x27, 0x3e, 0xf2, 0x1f, 0x28, 0x0e, 0x49, 0x07, 0xed, 0x89, 0xbe, 0x30, 0x1a, 0x4e, 0xc8, 0x49, 0x6e, 0xb6, 0xab, 0x90, 0x00, 0x06, 0xe5, 0xa3, 0xc8, 0xe9, 0xc9, 0x93, 0x62, 0x1d, 0x6a, 0x3b, 0x0f, 0x6c, 0xba, 0xd0, 0xfd, 0xde, 0xf3, 0xb9, 0xc8, 0x2d].as_ref(), [0x4b, 0x8d, 0x9b, 0x1e, 0xca, 0x54, 0x00, 0xea, 0xc6, 0xf5, 0xcc, 0x0c, 0x94, 0x39, 0x63, 0x00, 0x52, 0xf7, 0x34, 0xce, 0x45, 0x3e, 0x94, 0x26, 0xf3, 0x19, 0xdd, 0x96, 0x03, 0xb6, 0xae, 0xae, 0xb9, 0xd2, 0x3a, 0x5f, 0x93, 0xf0, 0x6a, 0x46, 0x00, 0x18, 0xf0, 0x69, 0xdf, 0x19, 0x44, 0x48, 0xf5, 0x60, 0x51, 0xab, 0x9e, 0x6b, 0xfa, 0xeb, 0x64, 0x10, 0x16, 0xf7, 0xa9, 0x0b, 0xe2, 0x0c] ); } } Make the l modulus a global static l is a core modulus of ed25519 and is therefore typically declared as a global static variable. This also helps with performance, as keeping it local to a stack frame could lead to a lot of useless copying. use digest::Digest; use sha2::{Sha512}; use curve25519::{GeP2, GeP3, ge_scalarmult_base, sc_reduce, sc_muladd, curve25519, Fe}; use util::{fixed_time_eq}; use std::ops::{Add, Sub, Mul}; static L: [u8; 32] = [ 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0xde, 0xf9, 0xde, 0xa2, 0xf7, 0x9c, 0xd6, 0x58, 0x12, 0x63, 0x1a, 0x5c, 0xf5, 0xd3, 0xed ]; pub fn keypair(seed: &[u8]) -> ([u8; 64], [u8; 32]) { let mut secret: [u8; 64] = { let mut hash_output: [u8; 64] = [0; 64]; let mut hasher = Sha512::new(); hasher.input(seed); hasher.result(&mut hash_output); hash_output[0] &= 248; hash_output[31] &= 63; hash_output[31] |= 64; hash_output }; let a = ge_scalarmult_base(&secret[0..32]); let public_key = a.to_bytes(); for (dest, src) in (&mut secret[32..64]).iter_mut().zip(public_key.iter()) { *dest = *src; } for (dest, src) in (&mut secret[0..32]).iter_mut().zip(seed.iter()) { *dest = *src; } (secret, public_key) } pub fn signature(message: &[u8], secret_key: &[u8]) -> [u8; 64] { let 
seed = &secret_key[0..32];
    let public_key = &secret_key[32..64];
    // az = clamped SHA-512(seed); the first 32 bytes are the scalar `a`.
    let az: [u8; 64] = {
        let mut hash_output: [u8; 64] = [0; 64];
        let mut hasher = Sha512::new();
        hasher.input(seed);
        hasher.result(&mut hash_output);
        // Scalar clamping, as in `keypair`.
        hash_output[0] &= 248;
        hash_output[31] &= 63;
        hash_output[31] |= 64;
        hash_output
    };
    // Deterministic nonce: SHA-512(az[32..64] || message), reduced mod L.
    let nonce = {
        let mut hash_output: [u8; 64] = [0; 64];
        let mut hasher = Sha512::new();
        hasher.input(&az[32..64]);
        hasher.input(message);
        hasher.result(&mut hash_output);
        sc_reduce(&mut hash_output[0..64]);
        hash_output
    };
    let mut signature: [u8; 64] = [0; 64];
    // R = nonce * B
    let r: GeP3 = ge_scalarmult_base(&nonce[0..32]);
    for (result_byte, source_byte) in
        (&mut signature[0..32]).iter_mut().zip(r.to_bytes().iter()) {
        *result_byte = *source_byte;
    }
    // Temporarily place the public key in the S half so the hash below is
    // computed over R || A || message in one pass.
    for (result_byte, source_byte) in
        (&mut signature[32..64]).iter_mut().zip(public_key.iter()) {
        *result_byte = *source_byte;
    }
    {
        let mut hasher = Sha512::new();
        hasher.input(signature.as_ref());
        hasher.input(message);
        let mut hram: [u8; 64] = [0; 64];
        hasher.result(&mut hram);
        sc_reduce(&mut hram);
        // S = (hram * a + nonce) mod L, overwriting the public-key half.
        sc_muladd(&mut signature[32..64],
                  &hram[0..32], &az[0..32], &nonce[0..32]);
    }
    signature
}

// NOTE(review): despite the name, this returns true when s is NOT less
// than L (the loop yields c == 1 exactly when the byte-wise comparison
// finds s < L, and the function returns c == 0); callers reject the
// signature on `true`.
fn check_s_lt_l(s: &[u8]) -> bool {
    // Constant-time byte-wise comparison against the global modulus `L`.
    // `n` stays 1 only while all bytes seen so far are equal, so only the
    // first differing byte in scan order can set `c`.
    let mut c: u8 = 0;
    let mut n: u8 = 1;
    let mut i = 31;
    loop {
        c |= ((((s[i] as i32) - (L[i] as i32)) >> 8) as u8) & n;
        n &= (((((s[i] ^ L[i]) as i32)) - 1) >> 8) as u8;
        if i == 0 {
            break;
        } else {
            i -= 1;
        }
    }
    c == 0
}

/// Verify an Ed25519 `signature` (R || S) of `message` under `public_key`.
pub fn verify(message: &[u8], public_key: &[u8], signature: &[u8]) -> bool {
    // Reject non-canonical S to prevent signature malleability.
    if check_s_lt_l(&signature[32..64]) {
        return false;
    }
    let a = match GeP3::from_bytes_negate_vartime(public_key) {
        Some(g) => g,
        None => {
            return false;
        }
    };
    // Reject an all-zero public key.
    let mut d = 0;
    for pk_byte in public_key.iter() {
        d |= *pk_byte;
    }
    if d == 0 {
        return false;
    }
    // hash = SHA-512(R || A || message), reduced mod L.
    let mut hasher = Sha512::new();
    hasher.input(&signature[0..32]);
    hasher.input(public_key);
    hasher.input(message);
    let mut hash: [u8; 64] = [0; 64];
    hasher.result(&mut hash);
    sc_reduce(&mut hash);
    // Recompute R' = hash*(-A) + S*B (A was decoded negated above) and
    // compare against the transmitted R below.
    let r = GeP2::double_scalarmult_vartime(hash.as_ref(), a, &signature[32..64]);
    let rcheck =
r.to_bytes(); fixed_time_eq(rcheck.as_ref(), &signature[0..32]) } pub fn exchange(public_key: &[u8], private_key: &[u8]) -> [u8; 32] { let ed_y = Fe::from_bytes(&public_key); // Produce public key in Montgomery form. let mont_x = edwards_to_montgomery_x(ed_y); // Produce private key from seed component (bytes 0 to 32) // of the Ed25519 extended private key (64 bytes). let mut hasher = Sha512::new(); hasher.input(&private_key[0..32]); let mut hash: [u8; 64] = [0; 64]; hasher.result(&mut hash); // Clamp the hash such that it is a valid private key hash[0] &= 248; hash[31] &= 127; hash[31] |= 64; let shared_mont_x : [u8; 32] = curve25519(&hash, &mont_x.to_bytes()); // priv., pub. shared_mont_x } fn edwards_to_montgomery_x(ed_y: Fe) -> Fe { let ed_z = Fe([1,0,0,0,0,0,0,0,0,0]); let temp_x = ed_z.add(ed_y); let temp_z = ed_z.sub(ed_y); let temp_z_inv = temp_z.invert(); let mont_x = temp_x.mul(temp_z_inv); mont_x } #[cfg(test)] mod tests { use ed25519::{keypair, signature, verify, exchange}; use curve25519::{curve25519_base, curve25519}; use digest::Digest; use sha2::{Sha512}; fn do_keypair_case(seed: [u8; 32], expected_secret: [u8; 64], expected_public: [u8; 32]) { let (actual_secret, actual_public) = keypair(seed.as_ref()); assert_eq!(actual_secret.to_vec(), expected_secret.to_vec()); assert_eq!(actual_public.to_vec(), expected_public.to_vec()); } #[test] fn keypair_cases() { do_keypair_case( [0x26, 0x27, 0xf6, 0x85, 0x97, 0x15, 0xad, 0x1d, 0xd2, 0x94, 0xdd, 0xc4, 0x76, 0x19, 0x39, 0x31, 0xf1, 0xad, 0xb5, 0x58, 0xf0, 0x93, 0x97, 0x32, 0x19, 0x2b, 0xd1, 0xc0, 0xfd, 0x16, 0x8e, 0x4e], [0x26, 0x27, 0xf6, 0x85, 0x97, 0x15, 0xad, 0x1d, 0xd2, 0x94, 0xdd, 0xc4, 0x76, 0x19, 0x39, 0x31, 0xf1, 0xad, 0xb5, 0x58, 0xf0, 0x93, 0x97, 0x32, 0x19, 0x2b, 0xd1, 0xc0, 0xfd, 0x16, 0x8e, 0x4e, 0x5d, 0x6d, 0x23, 0x6b, 0x52, 0xd1, 0x8e, 0x3a, 0xb6, 0xd6, 0x07, 0x2f, 0xb6, 0xe4, 0xc7, 0xd4, 0x6b, 0xd5, 0x9a, 0xd9, 0xcc, 0x19, 0x47, 0x26, 0x5f, 0x00, 0xb7, 0x20, 0xfa, 0x2c, 0x8f, 0x66], [0x5d, 
0x6d, 0x23, 0x6b, 0x52, 0xd1, 0x8e, 0x3a, 0xb6, 0xd6, 0x07, 0x2f, 0xb6, 0xe4, 0xc7, 0xd4, 0x6b, 0xd5, 0x9a, 0xd9, 0xcc, 0x19, 0x47, 0x26, 0x5f, 0x00, 0xb7, 0x20, 0xfa, 0x2c, 0x8f, 0x66]); do_keypair_case( [0x29, 0x23, 0xbe, 0x84, 0xe1, 0x6c, 0xd6, 0xae, 0x52, 0x90, 0x49, 0xf1, 0xf1, 0xbb, 0xe9, 0xeb, 0xb3, 0xa6, 0xdb, 0x3c, 0x87, 0x0c, 0x3e, 0x99, 0x24, 0x5e, 0x0d, 0x1c, 0x06, 0xb7, 0x47, 0xde], [0x29, 0x23, 0xbe, 0x84, 0xe1, 0x6c, 0xd6, 0xae, 0x52, 0x90, 0x49, 0xf1, 0xf1, 0xbb, 0xe9, 0xeb, 0xb3, 0xa6, 0xdb, 0x3c, 0x87, 0x0c, 0x3e, 0x99, 0x24, 0x5e, 0x0d, 0x1c, 0x06, 0xb7, 0x47, 0xde, 0x5d, 0x83, 0x31, 0x26, 0x56, 0x0c, 0xb1, 0x9a, 0x14, 0x19, 0x37, 0x27, 0x78, 0x96, 0xf0, 0xfd, 0x43, 0x7b, 0xa6, 0x80, 0x1e, 0xb2, 0x10, 0xac, 0x4c, 0x39, 0xd9, 0x00, 0x72, 0xd7, 0x0d, 0xa8], [0x5d, 0x83, 0x31, 0x26, 0x56, 0x0c, 0xb1, 0x9a, 0x14, 0x19, 0x37, 0x27, 0x78, 0x96, 0xf0, 0xfd, 0x43, 0x7b, 0xa6, 0x80, 0x1e, 0xb2, 0x10, 0xac, 0x4c, 0x39, 0xd9, 0x00, 0x72, 0xd7, 0x0d, 0xa8]); } #[test] fn keypair_matches_mont() { let seed = [0x26, 0x27, 0xf6, 0x85, 0x97, 0x15, 0xad, 0x1d, 0xd2, 0x94, 0xdd, 0xc4, 0x76, 0x19, 0x39, 0x31, 0xf1, 0xad, 0xb5, 0x58, 0xf0, 0x93, 0x97, 0x32, 0x19, 0x2b, 0xd1, 0xc0, 0xfd, 0x16, 0x8e, 0x4e]; let (ed_private, ed_public) = keypair(seed.as_ref()); let mut hasher = Sha512::new(); hasher.input(&ed_private[0..32]); let mut hash: [u8; 64] = [0; 64]; hasher.result(&mut hash); hash[0] &= 248; hash[31] &= 127; hash[31] |= 64; let cv_public = curve25519_base(&hash); let edx_ss = exchange(&ed_public, &ed_private); let cv_ss = curve25519(&hash, &cv_public); assert_eq!(edx_ss.to_vec(), cv_ss.to_vec()); } fn do_sign_verify_case(seed: [u8; 32], message: &[u8], expected_signature: [u8; 64]) { let (secret_key, public_key) = keypair(seed.as_ref()); let mut actual_signature = signature(message, secret_key.as_ref()); assert_eq!(expected_signature.to_vec(), actual_signature.to_vec()); assert!(verify(message, public_key.as_ref(), actual_signature.as_ref())); for &(index, 
flip) in [(0, 1), (31, 0x80), (20, 0xff)].iter() { actual_signature[index] ^= flip; assert!(!verify(message, public_key.as_ref(), actual_signature.as_ref())); actual_signature[index] ^= flip; } let mut public_key_corrupt = public_key; public_key_corrupt[0] ^= 1; assert!(!verify(message, public_key_corrupt.as_ref(), actual_signature.as_ref())); } #[test] fn sign_verify_cases() { do_sign_verify_case( [0x2d, 0x20, 0x86, 0x83, 0x2c, 0xc2, 0xfe, 0x3f, 0xd1, 0x8c, 0xb5, 0x1d, 0x6c, 0x5e, 0x99, 0xa5, 0x75, 0x9f, 0x02, 0x21, 0x1f, 0x85, 0xe5, 0xff, 0x2f, 0x90, 0x4a, 0x78, 0x0f, 0x58, 0x00, 0x6f], [0x89, 0x8f, 0x9c, 0x4b, 0x2c, 0x6e, 0xe9, 0xe2, 0x28, 0x76, 0x1c, 0xa5, 0x08, 0x97, 0xb7, 0x1f, 0xfe, 0xca, 0x1c, 0x35, 0x28, 0x46, 0xf5, 0xfe, 0x13, 0xf7, 0xd3, 0xd5, 0x7e, 0x2c, 0x15, 0xac, 0x60, 0x90, 0x0c, 0xa3, 0x2c, 0x5b, 0x5d, 0xd9, 0x53, 0xc9, 0xa6, 0x81, 0x0a, 0xcc, 0x64, 0x39, 0x4f, 0xfd, 0x14, 0x98, 0x26, 0xd9, 0x98, 0x06, 0x29, 0x2a, 0xdd, 0xd1, 0x3f, 0xc3, 0xbb, 0x7d, 0xac, 0x70, 0x1c, 0x5b, 0x4a, 0x2d, 0x61, 0x5d, 0x15, 0x96, 0x01, 0x28, 0xed, 0x9f, 0x73, 0x6b, 0x98, 0x85, 0x4f, 0x6f, 0x07, 0x05, 0xb0, 0xf0, 0xda, 0xcb, 0xdc, 0x2c, 0x26, 0x2d, 0x27, 0x39, 0x75, 0x19, 0x14, 0x9b, 0x0e, 0x4c, 0xbe, 0x16, 0x77, 0xc5, 0x76, 0xc1, 0x39, 0x7a, 0xae, 0x5c, 0xe3, 0x49, 0x16, 0xe3, 0x51, 0x31, 0x04, 0x63, 0x2e, 0xc2, 0x19, 0x0d, 0xb8, 0xd2, 0x22, 0x89, 0xc3, 0x72, 0x3c, 0x8d, 0x01, 0x21, 0x3c, 0xad, 0x80, 0x3f, 0x4d, 0x75, 0x74, 0xc4, 0xdb, 0xb5, 0x37, 0x31, 0xb0, 0x1c, 0x8e, 0xc7, 0x5d, 0x08, 0x2e, 0xf7, 0xdc, 0x9d, 0x7f, 0x1b, 0x73, 0x15, 0x9f, 0x63, 0xdb, 0x56, 0xaa, 0x12, 0xa2, 0xca, 0x39, 0xea, 0xce, 0x6b, 0x28, 0xe4, 0xc3, 0x1d, 0x9d, 0x25, 0x67, 0x41, 0x45, 0x2e, 0x83, 0x87, 0xe1, 0x53, 0x6d, 0x03, 0x02, 0x6e, 0xe4, 0x84, 0x10, 0xd4, 0x3b, 0x21, 0x91, 0x88, 0xba, 0x14, 0xa8, 0xaf].as_ref(), [0x91, 0x20, 0x91, 0x66, 0x1e, 0xed, 0x18, 0xa4, 0x03, 0x4b, 0xc7, 0xdb, 0x4b, 0xd6, 0x0f, 0xe2, 0xde, 0xeb, 0xf3, 0xff, 0x3b, 0x6b, 0x99, 0x8d, 0xae, 0x20, 0x94, 0xb6, 0x09, 0x86, 
0x5c, 0x20, 0x19, 0xec, 0x67, 0x22, 0xbf, 0xdc, 0x87, 0xbd, 0xa5, 0x40, 0x91, 0x92, 0x2e, 0x11, 0xe3, 0x93, 0xf5, 0xfd, 0xce, 0xea, 0x3e, 0x09, 0x1f, 0x2e, 0xe6, 0xbc, 0x62, 0xdf, 0x94, 0x8e, 0x99, 0x09] ); do_sign_verify_case( [0x33, 0x19, 0x17, 0x82, 0xc1, 0x70, 0x4f, 0x60, 0xd0, 0x84, 0x8d, 0x75, 0x62, 0xa2, 0xfa, 0x19, 0xf9, 0x92, 0x4f, 0xea, 0x4e, 0x77, 0x33, 0xcd, 0x45, 0xf6, 0xc3, 0x2f, 0x21, 0x9a, 0x72, 0x91], [0x77, 0x13, 0x43, 0x5a, 0x0e, 0x34, 0x6f, 0x67, 0x71, 0xae, 0x5a, 0xde, 0xa8, 0x7a, 0xe7, 0xa4, 0x52, 0xc6, 0x5d, 0x74, 0x8f, 0x48, 0x69, 0xd3, 0x1e, 0xd3, 0x67, 0x47, 0xc3, 0x28, 0xdd, 0xc4, 0xec, 0x0e, 0x48, 0x67, 0x93, 0xa5, 0x1c, 0x67, 0x66, 0xf7, 0x06, 0x48, 0x26, 0xd0, 0x74, 0x51, 0x4d, 0xd0, 0x57, 0x41, 0xf3, 0xbe, 0x27, 0x3e, 0xf2, 0x1f, 0x28, 0x0e, 0x49, 0x07, 0xed, 0x89, 0xbe, 0x30, 0x1a, 0x4e, 0xc8, 0x49, 0x6e, 0xb6, 0xab, 0x90, 0x00, 0x06, 0xe5, 0xa3, 0xc8, 0xe9, 0xc9, 0x93, 0x62, 0x1d, 0x6a, 0x3b, 0x0f, 0x6c, 0xba, 0xd0, 0xfd, 0xde, 0xf3, 0xb9, 0xc8, 0x2d].as_ref(), [0x4b, 0x8d, 0x9b, 0x1e, 0xca, 0x54, 0x00, 0xea, 0xc6, 0xf5, 0xcc, 0x0c, 0x94, 0x39, 0x63, 0x00, 0x52, 0xf7, 0x34, 0xce, 0x45, 0x3e, 0x94, 0x26, 0xf3, 0x19, 0xdd, 0x96, 0x03, 0xb6, 0xae, 0xae, 0xb9, 0xd2, 0x3a, 0x5f, 0x93, 0xf0, 0x6a, 0x46, 0x00, 0x18, 0xf0, 0x69, 0xdf, 0x19, 0x44, 0x48, 0xf5, 0x60, 0x51, 0xab, 0x9e, 0x6b, 0xfa, 0xeb, 0x64, 0x10, 0x16, 0xf7, 0xa9, 0x0b, 0xe2, 0x0c] ); } }
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // TODO: add tests use strings::string_buffer::StringBuffer; use std::fs::{self, File}; use std::io::{self, Write, Read, stdout, BufWriter}; use config::{NewlineStyle, Config, WriteMode}; use rustfmt_diff::{make_diff, print_diff, Mismatch}; use checkstyle::{output_header, output_footer, output_checkstyle_file}; // A map of the files of a crate, with their new content pub type FileMap = Vec<FileRecord>; pub type FileRecord = (String, StringBuffer); // Append a newline to the end of each file. pub fn append_newline(s: &mut StringBuffer) { s.push_str("\n"); } pub fn write_all_files<T>(file_map: &FileMap, out: &mut T, config: &Config) -> Result<(), io::Error> where T: Write { output_header(out, config.write_mode).ok(); for &(ref filename, ref text) in file_map { try!(write_file(text, filename, out, config)); } output_footer(out, config.write_mode).ok(); Ok(()) } // Prints all newlines either as `\n` or as `\r\n`. pub fn write_system_newlines<T>(writer: T, text: &StringBuffer, config: &Config) -> Result<(), io::Error> where T: Write { // Buffer output, since we're writing a since char at a time. 
let mut writer = BufWriter::new(writer); let style = if config.newline_style == NewlineStyle::Native { if cfg!(windows) { NewlineStyle::Windows } else { NewlineStyle::Unix } } else { config.newline_style }; match style { NewlineStyle::Unix => write!(writer, "{}", text), NewlineStyle::Windows => { for (c, _) in text.chars() { match c { '\n' => try!(write!(writer, "\r\n")), '\r' => continue, c => try!(write!(writer, "{}", c)), } } Ok(()) } NewlineStyle::Native => unreachable!(), } } pub fn write_file<T>(text: &StringBuffer, filename: &str, out: &mut T, config: &Config) -> Result<bool, io::Error> where T: Write { fn source_and_formatted_text(text: &StringBuffer, filename: &str, config: &Config) -> Result<(String, String), io::Error> { let mut f = try!(File::open(filename)); let mut ori_text = String::new(); try!(f.read_to_string(&mut ori_text)); let mut v = Vec::new(); try!(write_system_newlines(&mut v, text, config)); let fmt_text = String::from_utf8(v).unwrap(); Ok((ori_text, fmt_text)) } fn create_diff(filename: &str, text: &StringBuffer, config: &Config) -> Result<Vec<Mismatch>, io::Error> { let (ori, fmt) = try!(source_and_formatted_text(text, filename, config)); Ok(make_diff(&ori, &fmt, 3)) } match config.write_mode { WriteMode::Replace => { if let Ok((ori, fmt)) = source_and_formatted_text(text, filename, config) { if fmt != ori { // Do a little dance to make writing safer - write to a temp file // rename the original to a .bk, then rename the temp file to the // original. let tmp_name = filename.to_owned() + ".tmp"; let bk_name = filename.to_owned() + ".bk"; { // Write text to temp file let tmp_file = try!(File::create(&tmp_name)); try!(write_system_newlines(tmp_file, text, config)); } try!(fs::rename(filename, bk_name)); try!(fs::rename(tmp_name, filename)); } } } WriteMode::Overwrite => { // Write text directly over original file. 
let file = try!(File::create(filename)); try!(write_system_newlines(file, text, config)); } WriteMode::Plain => { let stdout = stdout(); let stdout = stdout.lock(); try!(write_system_newlines(stdout, text, config)); } WriteMode::Display | WriteMode::Coverage => { println!("{}:\n", filename); let stdout = stdout(); let stdout = stdout.lock(); try!(write_system_newlines(stdout, text, config)); } WriteMode::Diff => { if let Ok((ori, fmt)) = source_and_formatted_text(text, filename, config) { let mismatch = make_diff(&ori, &fmt, 3); let has_diff = !mismatch.is_empty(); print_diff(mismatch, |line_num| format!("Diff in {} at line {}:", filename, line_num)); return Ok(has_diff); } } WriteMode::Checkstyle => { let diff = try!(create_diff(filename, text, config)); try!(output_checkstyle_file(out, filename, diff)); } } // when we are not in diff mode, don't indicate differing files Ok(false) } Write to the supplied buffer if one is supplied to format_input // Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // TODO: add tests use strings::string_buffer::StringBuffer; use std::fs::{self, File}; use std::io::{self, Write, Read, sBufWriter}; use config::{NewlineStyle, Config, WriteMode}; use rustfmt_diff::{make_diff, print_diff, Mismatch}; use checkstyle::{output_header, output_footer, output_checkstyle_file}; // A map of the files of a crate, with their new content pub type FileMap = Vec<FileRecord>; pub type FileRecord = (String, StringBuffer); // Append a newline to the end of each file. 
pub fn append_newline(s: &mut StringBuffer) { s.push_str("\n"); } pub fn write_all_files<T>(file_map: &FileMap, out: &mut T, config: &Config) -> Result<(), io::Error> where T: Write { output_header(out, config.write_mode).ok(); for &(ref filename, ref text) in file_map { try!(write_file(text, filename, out, config)); } output_footer(out, config.write_mode).ok(); Ok(()) } // Prints all newlines either as `\n` or as `\r\n`. pub fn write_system_newlines<T>(writer: T, text: &StringBuffer, config: &Config) -> Result<(), io::Error> where T: Write { // Buffer output, since we're writing a since char at a time. let mut writer = BufWriter::new(writer); let style = if config.newline_style == NewlineStyle::Native { if cfg!(windows) { NewlineStyle::Windows } else { NewlineStyle::Unix } } else { config.newline_style }; match style { NewlineStyle::Unix => write!(writer, "{}", text), NewlineStyle::Windows => { for (c, _) in text.chars() { match c { '\n' => try!(write!(writer, "\r\n")), '\r' => continue, c => try!(write!(writer, "{}", c)), } } Ok(()) } NewlineStyle::Native => unreachable!(), } } pub fn write_file<T>(text: &StringBuffer, filename: &str, out: &mut T, config: &Config) -> Result<bool, io::Error> where T: Write { fn source_and_formatted_text(text: &StringBuffer, filename: &str, config: &Config) -> Result<(String, String), io::Error> { let mut f = try!(File::open(filename)); let mut ori_text = String::new(); try!(f.read_to_string(&mut ori_text)); let mut v = Vec::new(); try!(write_system_newlines(&mut v, text, config)); let fmt_text = String::from_utf8(v).unwrap(); Ok((ori_text, fmt_text)) } fn create_diff(filename: &str, text: &StringBuffer, config: &Config) -> Result<Vec<Mismatch>, io::Error> { let (ori, fmt) = try!(source_and_formatted_text(text, filename, config)); Ok(make_diff(&ori, &fmt, 3)) } match config.write_mode { WriteMode::Replace => { if let Ok((ori, fmt)) = source_and_formatted_text(text, filename, config) { if fmt != ori { // Do a little dance to make 
writing safer - write to a temp file // rename the original to a .bk, then rename the temp file to the // original. let tmp_name = filename.to_owned() + ".tmp"; let bk_name = filename.to_owned() + ".bk"; { // Write text to temp file let tmp_file = try!(File::create(&tmp_name)); try!(write_system_newlines(tmp_file, text, config)); } try!(fs::rename(filename, bk_name)); try!(fs::rename(tmp_name, filename)); } } } WriteMode::Overwrite => { // Write text directly over original file. let file = try!(File::create(filename)); try!(write_system_newlines(file, text, config)); } WriteMode::Plain => { try!(write_system_newlines(out, text, config)); } WriteMode::Display | WriteMode::Coverage => { println!("{}:\n", filename); try!(write_system_newlines(out, text, config)); } WriteMode::Diff => { if let Ok((ori, fmt)) = source_and_formatted_text(text, filename, config) { let mismatch = make_diff(&ori, &fmt, 3); let has_diff = !mismatch.is_empty(); print_diff(mismatch, |line_num| format!("Diff in {} at line {}:", filename, line_num)); return Ok(has_diff); } } WriteMode::Checkstyle => { let diff = try!(create_diff(filename, text, config)); try!(output_checkstyle_file(out, filename, diff)); } } // when we are not in diff mode, don't indicate differing files Ok(false) }
use std::io; use std::net::{Ipv4Addr, SocketAddrV4}; use std::fmt; use std; use rand::distributions::IndependentSample; use hyper; use xmltree; use rand; use soap; /// Errors that can occur when sending the request to the gateway. #[derive(Debug)] pub enum RequestError { /// Http/Hyper error HttpError(hyper::Error), /// IO Error IoError(io::Error), /// The response from the gateway could not be parsed. InvalidResponse(String), /// The gateway returned an unhandled error code and description. ErrorCode(u16, String), } /// Errors returned by `Gateway::get_external_ip` #[derive(Debug)] pub enum GetExternalIpError { /// The client is not authorized to perform the operation. ActionNotAuthorized, /// Some other error occured performing the request. RequestError(RequestError), } /// Errors returned by `Gateway::remove_port` #[derive(Debug)] pub enum RemovePortError { /// The client is not authorized to perform the operation. ActionNotAuthorized, /// No such port mapping. NoSuchPortMapping, /// Some other error occured performing the request. RequestError(RequestError), } /// Errors returned by `Gateway::add_any_port` and `Gateway::get_any_address` #[derive(Debug)] pub enum AddAnyPortError { /// The client is not authorized to perform the operation. ActionNotAuthorized, /// Can not add a mapping for local port 0. InternalPortZeroInvalid, /// The gateway does not have any free ports. NoPortsAvailable, /// The gateway can only map internal ports to same-numbered external ports /// and this external port is in use. ExternalPortInUse, /// The gateway only supports permanent leases (ie. a `lease_duration` of 0). OnlyPermanentLeasesSupported, /// The description was too long for the gateway to handle. DescriptionTooLong, /// Some other error occured performing the request. RequestError(RequestError), } /// Errors returned by `Gateway::add_port` #[derive(Debug)] pub enum AddPortError { /// The client is not authorized to perform the operation. 
ActionNotAuthorized, /// Can not add a mapping for local port 0. InternalPortZeroInvalid, /// External port number 0 (any port) is considered invalid by the gateway. ExternalPortZeroInvalid, /// The requested mapping conflicts with a mapping assigned to another client. PortInUse, /// The gateway requires that the requested internal and external ports are the same. SamePortValuesRequired, /// The gateway only supports permanent leases (ie. a `lease_duration` of 0). OnlyPermanentLeasesSupported, /// The description was too long for the gateway to handle. DescriptionTooLong, /// Some other error occured performing the request. RequestError(RequestError), } impl From<io::Error> for RequestError { fn from(err: io::Error) -> RequestError { RequestError::IoError(err) } } impl From<soap::Error> for RequestError { fn from(err: soap::Error) -> RequestError { match err { soap::Error::HttpError(e) => RequestError::HttpError(e), soap::Error::IoError(e) => RequestError::IoError(e), } } } impl fmt::Display for RequestError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { RequestError::HttpError(ref e) => write!(f, "HTTP error. {}", e), RequestError::InvalidResponse(ref e) => write!(f, "Invalid response from gateway: {}", e), RequestError::IoError(ref e) => write!(f, "IO error. {}", e), RequestError::ErrorCode(n, ref e) => write!(f, "Gateway response error {}: {}", n, e), } } } impl std::error::Error for RequestError { fn cause(&self) -> Option<&std::error::Error> { match *self { RequestError::HttpError(ref e) => Some(e), RequestError::InvalidResponse(..) => None, RequestError::IoError(ref e) => Some(e), RequestError::ErrorCode(..) => None, } } fn description(&self) -> &str { match *self { RequestError::HttpError(..) => "Http error", RequestError::InvalidResponse(..) => "Invalid response", RequestError::IoError(..) 
=> "IO error", RequestError::ErrorCode(_, ref e) => &e[..], } } } impl fmt::Display for GetExternalIpError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { GetExternalIpError::ActionNotAuthorized => write!(f, "The client is not authorized to remove the port"), GetExternalIpError::RequestError(ref e) => write!(f, "Request Error. {}", e), } } } impl std::error::Error for GetExternalIpError { fn cause(&self) -> Option<&std::error::Error> { None } fn description(&self) -> &str { match *self { GetExternalIpError::ActionNotAuthorized => "The client is not authorized to remove the port", GetExternalIpError::RequestError(..) => "Request error", } } } impl fmt::Display for RemovePortError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { RemovePortError::ActionNotAuthorized => write!(f, "The client is not authorized to remove the port"), RemovePortError::NoSuchPortMapping => write!(f, "The port was not mapped"), RemovePortError::RequestError(ref e) => write!(f, "Request error. {}", e), } } } impl std::error::Error for RemovePortError { fn cause(&self) -> Option<&std::error::Error> { None } fn description(&self) -> &str { match *self { RemovePortError::ActionNotAuthorized => "The client is not authorized to remove the port", RemovePortError::NoSuchPortMapping => "The port was not mapped", RemovePortError::RequestError(..) => "Request error", } } } impl fmt::Display for AddAnyPortError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { AddAnyPortError::ActionNotAuthorized => write!(f, "The client is not authorized to remove the port"), AddAnyPortError::InternalPortZeroInvalid => write!(f, "Can not add a mapping for local port 0"), AddAnyPortError::NoPortsAvailable => write!(f, "The gateway does not have any free ports"), AddAnyPortError::OnlyPermanentLeasesSupported => write!(f, "The gateway only supports permanent leases (ie. 
a `lease_duration` of 0),"), AddAnyPortError::ExternalPortInUse => write!(f, "The gateway can only map internal ports to same-numbered external ports and this external port is in use."), AddAnyPortError::DescriptionTooLong => write!(f, "The description was too long for the gateway to handle."), AddAnyPortError::RequestError(ref e) => write!(f, "Request error. {}", e), } } } impl std::error::Error for AddAnyPortError { fn cause(&self) -> Option<&std::error::Error> { None } fn description(&self) -> &str { match *self { AddAnyPortError::ActionNotAuthorized => "The client is not authorized to remove the port", AddAnyPortError::InternalPortZeroInvalid => "Can not add a mapping for local port 0.", AddAnyPortError::NoPortsAvailable => "The gateway does not have any free ports", AddAnyPortError::OnlyPermanentLeasesSupported => "The gateway only supports permanent leases (ie. a `lease_duration` of 0),", AddAnyPortError::ExternalPortInUse => "The gateway can only map internal ports to same-numbered external ports and this external port is in use.", AddAnyPortError::DescriptionTooLong => "The description was too long for the gateway to handle.", AddAnyPortError::RequestError(..) => "Request error", } } } impl fmt::Display for AddPortError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { AddPortError::ActionNotAuthorized => write!(f, "The client is not authorized to map this port."), AddPortError::InternalPortZeroInvalid => write!(f, "Can not add a mapping for local port 0"), AddPortError::ExternalPortZeroInvalid => write!(f, "External port number 0 (any port) is considered invalid by the gateway."), AddPortError::PortInUse => write!(f, "The requested mapping conflicts with a mapping assigned to another client."), AddPortError::SamePortValuesRequired => write!(f, "The gateway requires that the requested internal and external ports are the same."), AddPortError::OnlyPermanentLeasesSupported => write!(f, "The gateway only supports permanent leases (ie. 
a `lease_duration` of 0),"), AddPortError::DescriptionTooLong => write!(f, "The description was too long for the gateway to handle."), AddPortError::RequestError(ref e) => write!(f, "Request error. {}", e), } } } impl std::error::Error for AddPortError { fn cause(&self) -> Option<&std::error::Error> { None } fn description(&self) -> &str { match *self { AddPortError::ActionNotAuthorized => "The client is not authorized to map this port.", AddPortError::InternalPortZeroInvalid => "Can not add a mapping for local port 0", AddPortError::ExternalPortZeroInvalid => "External port number 0 (any port) is considered invalid by the gateway.", AddPortError::PortInUse => "The requested mapping conflicts with a mapping assigned to another client.", AddPortError::SamePortValuesRequired => "The gateway requires that the requested internal and external ports are the same.", AddPortError::OnlyPermanentLeasesSupported => "The gateway only supports permanent leases (ie. a `lease_duration` of 0),", AddPortError::DescriptionTooLong => "The description was too long for the gateway to handle.", AddPortError::RequestError(..) => "Request error", } } } /// Represents the protocols available for port mapping. #[derive(Debug,Clone,Copy,PartialEq)] pub enum PortMappingProtocol { /// TCP protocol TCP, /// UDP protocol UDP, } impl fmt::Display for PortMappingProtocol { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", match *self { PortMappingProtocol::TCP => "TCP", PortMappingProtocol::UDP => "UDP", }) } } /// This structure represents a gateway found by the search functions. 
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct Gateway {
    /// Socket address of the gateway
    pub addr: SocketAddrV4,
    /// Control url of the device
    pub control_url: String,
}

impl Gateway {
    // Send a SOAP request to the gateway's control URL and extract either the
    // success element named `ok` from the response body, or a structured error.
    //
    // `header` is the SOAPAction header value, `body` the SOAP envelope.
    // Returns `(raw_response_text, ok_element)` on success. On failure,
    // distinguishes a gateway-reported `UPnPError` (errorCode/errorDescription
    // -> `RequestError::ErrorCode`) from an unparseable reply
    // (`RequestError::InvalidResponse`, carrying the raw text).
    //
    // NOTE(review): the request URL is built via `format!("{}", self)`, so this
    // relies on a `Display` impl for `Gateway` defined elsewhere in the file —
    // presumably "http://{addr}{control_url}"; confirm against that impl.
    fn perform_request(&self, header: &str, body: &str, ok: &str)
                       -> Result<(String, xmltree::Element), RequestError> {
        let url = format!("{}", self);
        let text = try!(soap::send(&url, soap::Action::new(header), body));
        let mut xml = match xmltree::Element::parse(text.as_bytes()) {
            Ok(xml) => xml,
            Err(..) => return Err(RequestError::InvalidResponse(text)),
        };
        let mut body = match xml.get_mut_child("Body") {
            Some(body) => body,
            None => return Err(RequestError::InvalidResponse(text)),
        };
        // Success path: the expected response element is present in the Body.
        if let Some(ok) = body.take_child(ok) {
            return Ok((text, ok))
        }
        // Failure path: look for a SOAP Fault carrying a UPnPError detail.
        let upnp_error = match body.get_child("Fault")
                                   .and_then(|e| e.get_child("detail"))
                                   .and_then(|e| e.get_child("UPnPError")) {
            Some(upnp_error) => upnp_error,
            None => return Err(RequestError::InvalidResponse(text)),
        };
        // Both errorCode and errorDescription must be present and the code
        // must parse as u16; otherwise the response is considered invalid.
        match (upnp_error.get_child("errorCode"), upnp_error.get_child("errorDescription")) {
            (Some(e), Some(d)) => match (e.text.as_ref(), d.text.as_ref()) {
                (Some(et), Some(dt)) => {
                    match et.parse::<u16>() {
                        Ok(en) => Err(RequestError::ErrorCode(en, From::from(&dt[..]))),
                        Err(..) => Err(RequestError::InvalidResponse(text)),
                    }
                },
                _ => Err(RequestError::InvalidResponse(text)),
            },
            _ => Err(RequestError::InvalidResponse(text)),
        }
    }

    /// Get the external IP address of the gateway.
    pub fn get_external_ip(&self) -> Result<Ipv4Addr, GetExternalIpError> {
        // Content of the get external ip SOAPAction request header.
        let header = "\"urn:schemas-upnp-org:service:WANIPConnection:1#GetExternalIPAddress\"";
        let body = "<?xml version=\"1.0\"?> <SOAP-ENV:Envelope SOAP-ENV:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\" xmlns:SOAP-ENV=\"http://schemas.xmlsoap.org/soap/envelope/\"> <SOAP-ENV:Body> <m:GetExternalIPAddress xmlns:m=\"urn:schemas-upnp-org:service:WANIPConnection:1\"> </m:GetExternalIPAddress> </SOAP-ENV:Body> </SOAP-ENV:Envelope>";
        match self.perform_request(header, body, "GetExternalIPAddressResponse") {
            Ok((text, response)) => {
                // Extract and parse NewExternalIPAddress as an IPv4 address.
                match response.get_child("NewExternalIPAddress")
                              .and_then(|e| e.text.as_ref())
                              .and_then(|t| t.parse::<Ipv4Addr>().ok()) {
                    Some(ipv4_addr) => Ok(ipv4_addr),
                    None => Err(GetExternalIpError::RequestError(RequestError::InvalidResponse(text))),
                }
            },
            // UPnP error 606 = "Action not authorized".
            Err(RequestError::ErrorCode(606, _)) => Err(GetExternalIpError::ActionNotAuthorized),
            Err(e) => Err(GetExternalIpError::RequestError(e)),
        }
    }

    /// Get an external socket address with our external ip and any port. This is a convenience
    /// function that calls `get_external_ip` followed by `add_any_port`
    ///
    /// The local_addr is the address where the traffic is sent to.
    /// The lease_duration parameter is in seconds. A value of 0 is infinite.
    ///
    /// # Returns
    ///
    /// The external address that was mapped on success. Otherwise an error.
    pub fn get_any_address(&self,
                           protocol: PortMappingProtocol,
                           local_addr: SocketAddrV4,
                           lease_duration: u32,
                           description: &str)
                           -> Result<SocketAddrV4, AddAnyPortError> {
        // Map get_external_ip's error space into AddAnyPortError.
        let external_ip = match self.get_external_ip() {
            Ok(ip) => ip,
            Err(GetExternalIpError::ActionNotAuthorized)
                => return Err(AddAnyPortError::ActionNotAuthorized),
            Err(GetExternalIpError::RequestError(e)) => return Err(AddAnyPortError::RequestError(e)),
        };
        let external_port = try!(self.add_any_port(protocol, local_addr, lease_duration, description));
        Ok(SocketAddrV4::new(external_ip, external_port))
    }

    /// Add a port mapping with any external port.
/// /// The local_addr is the address where the traffic is sent to. /// The lease_duration parameter is in seconds. A value of 0 is infinite. /// /// # Returns /// /// The external port that was mapped on success. Otherwise an error. pub fn add_any_port(&self, protocol: PortMappingProtocol, local_addr: SocketAddrV4, lease_duration: u32, description: &str) -> Result<u16, AddAnyPortError> { // This function first attempts to call AddAnyPortMapping on the IGD with a random port // number. If that fails due to the method being unknown it attempts to call AddPortMapping // instead with a random port number. If that fails due to ConflictInMappingEntry it retrys // with another port up to a maximum of 20 times. If it fails due to SamePortValuesRequired // it retrys once with the same port values. if local_addr.port() == 0 { return Err(AddAnyPortError::InternalPortZeroInvalid) } let port_range = rand::distributions::Range::new(32768u16, 65535u16); let mut rng = rand::thread_rng(); let external_port = port_range.ind_sample(&mut rng); let header = "\"urn:schemas-upnp-org:service:WANIPConnection:1#AddAnyPortMapping\""; let body = format!("<?xml version=\"1.0\"?> <s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\"> <s:Body> <u:AddAnyPortMapping xmlns:u=\"urn:schemas-upnp-org:service:WANIPConnection:1\"> <NewProtocol>{}</NewProtocol> <NewExternalPort>{}</NewExternalPort> <NewInternalClient>{}</NewInternalClient> <NewInternalPort>{}</NewInternalPort> <NewLeaseDuration>{}</NewLeaseDuration> <NewPortMappingDescription>{}</NewPortMappingDescription> <NewEnabled>1</NewEnabled> <NewRemoteHost></NewRemoteHost> </u:AddPortMapping> </s:Body> </s:Envelope> ", protocol, external_port, local_addr.ip(), local_addr.port(), lease_duration, description); // First, attempt to call the AddAnyPortMapping method. 
match self.perform_request(header, &*body, "AddAnyPortMappingResponse") { Ok((text, response)) => { match response.get_child("NewReservedPort") .and_then(|e| e.text.as_ref()) .and_then(|t| t.parse::<u16>().ok()) { Some(port) => Ok(port), None => Err(AddAnyPortError::RequestError(RequestError::InvalidResponse(text))), } } // The router doesn't know the AddAnyPortMapping method. Try using AddPortMapping // instead. Err(RequestError::ErrorCode(401, _)) => { // Try a bunch of random ports. for _attempt in 0..20 { let external_port = port_range.ind_sample(&mut rng); match self.add_port_mapping(protocol, external_port, local_addr, lease_duration, description) { Ok(()) => return Ok(external_port), Err(RequestError::ErrorCode(605, _)) => return Err(AddAnyPortError::DescriptionTooLong), Err(RequestError::ErrorCode(606, _)) => return Err(AddAnyPortError::ActionNotAuthorized), // That port is in use. Try another. Err(RequestError::ErrorCode(718, _)) => continue, // The router requires that internal and external ports are the same. Err(RequestError::ErrorCode(724, _)) => { return match self.add_port_mapping(protocol, local_addr.port(), local_addr, lease_duration, description) { Ok(()) => Ok(local_addr.port()), Err(RequestError::ErrorCode(606, _)) => Err(AddAnyPortError::ActionNotAuthorized), Err(RequestError::ErrorCode(718, _)) => Err(AddAnyPortError::ExternalPortInUse), Err(RequestError::ErrorCode(725, _)) => Err(AddAnyPortError::OnlyPermanentLeasesSupported), Err(e) => Err(AddAnyPortError::RequestError(e)), } }, Err(RequestError::ErrorCode(725, _)) => return Err(AddAnyPortError::OnlyPermanentLeasesSupported), Err(e) => return Err(AddAnyPortError::RequestError(e)), } } // The only way we can get here is if the router kept returning 718 (port in use) // for all the ports we tried. 
Err(AddAnyPortError::NoPortsAvailable) }, Err(RequestError::ErrorCode(605, _)) => Err(AddAnyPortError::DescriptionTooLong), Err(RequestError::ErrorCode(606, _)) => Err(AddAnyPortError::ActionNotAuthorized), Err(RequestError::ErrorCode(728, _)) => Err(AddAnyPortError::NoPortsAvailable), Err(e) => Err(AddAnyPortError::RequestError(e)), } } fn add_port_mapping(&self, protocol: PortMappingProtocol, external_port: u16, local_addr: SocketAddrV4, lease_duration: u32, description: &str) -> Result<(), RequestError> { let header = "\"urn:schemas-upnp-org:service:WANIPConnection:1#AddPortMapping\""; let body = format!("<?xml version=\"1.0\"?> <s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\"> <s:Body> <u:AddPortMapping xmlns:u=\"urn:schemas-upnp-org:service:WANIPConnection:1\"> <NewProtocol>{}</NewProtocol> <NewExternalPort>{}</NewExternalPort> <NewInternalClient>{}</NewInternalClient> <NewInternalPort>{}</NewInternalPort> <NewLeaseDuration>{}</NewLeaseDuration> <NewPortMappingDescription>{}</NewPortMappingDescription> <NewEnabled>1</NewEnabled> <NewRemoteHost></NewRemoteHost> </u:AddPortMapping> </s:Body> </s:Envelope> ", protocol, external_port, local_addr.ip(), local_addr.port(), lease_duration, description); try!(self.perform_request(header, &*body, "AddPortMappingResponse")); Ok(()) } /// Add a port mapping. /// /// The local_addr is the address where the traffic is sent to. /// The lease_duration parameter is in seconds. A value of 0 is infinite. 
pub fn add_port(&self, protocol: PortMappingProtocol, external_port: u16, local_addr: SocketAddrV4, lease_duration: u32, description: &str) -> Result<(), AddPortError> { if external_port == 0 { return Err(AddPortError::ExternalPortZeroInvalid); } if local_addr.port() == 0 { return Err(AddPortError::InternalPortZeroInvalid); } match self.add_port_mapping(protocol, external_port, local_addr, lease_duration, description) { Ok(()) => Ok(()), Err(RequestError::ErrorCode(605, _)) => Err(AddPortError::DescriptionTooLong), Err(RequestError::ErrorCode(606, _)) => Err(AddPortError::ActionNotAuthorized), Err(RequestError::ErrorCode(718, _)) => Err(AddPortError::PortInUse), Err(RequestError::ErrorCode(724, _)) => Err(AddPortError::SamePortValuesRequired), Err(RequestError::ErrorCode(725, _)) => Err(AddPortError::OnlyPermanentLeasesSupported), Err(e) => Err(AddPortError::RequestError(e)), } } /// Remove a port mapping. pub fn remove_port(&self, protocol: PortMappingProtocol, external_port: u16) -> Result<(), RemovePortError> { let header = "\"urn:schemas-upnp-org:service:WANIPConnection:1#DeletePortMapping\""; let body = format!("<?xml version=\"1.0\"?> <s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\"> <s:Body> <u:DeletePortMapping xmlns:u=\"urn:schemas-upnp-org:service:WANIPConnection:1\"> <NewProtocol>{}</NewProtocol> <NewExternalPort>{}</NewExternalPort> <NewRemoteHost></NewRemoteHost> </u:DeletePortMapping> </s:Body> </s:Envelope> ", protocol, external_port); match self.perform_request(header, &*body, "DeletePortMappingResponse") { Ok(..) 
=> Ok(()), Err(RequestError::ErrorCode(606, _)) => Err(RemovePortError::ActionNotAuthorized), Err(RequestError::ErrorCode(714, _)) => Err(RemovePortError::NoSuchPortMapping), Err(e) => Err(RemovePortError::RequestError(e)), } } } impl fmt::Display for Gateway { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "http://{}{}", self.addr, self.control_url) } } Add [derive(Clone)] to Gateway use std::io; use std::net::{Ipv4Addr, SocketAddrV4}; use std::fmt; use std; use rand::distributions::IndependentSample; use hyper; use xmltree; use rand; use soap; /// Errors that can occur when sending the request to the gateway. #[derive(Debug)] pub enum RequestError { /// Http/Hyper error HttpError(hyper::Error), /// IO Error IoError(io::Error), /// The response from the gateway could not be parsed. InvalidResponse(String), /// The gateway returned an unhandled error code and description. ErrorCode(u16, String), } /// Errors returned by `Gateway::get_external_ip` #[derive(Debug)] pub enum GetExternalIpError { /// The client is not authorized to perform the operation. ActionNotAuthorized, /// Some other error occured performing the request. RequestError(RequestError), } /// Errors returned by `Gateway::remove_port` #[derive(Debug)] pub enum RemovePortError { /// The client is not authorized to perform the operation. ActionNotAuthorized, /// No such port mapping. NoSuchPortMapping, /// Some other error occured performing the request. RequestError(RequestError), } /// Errors returned by `Gateway::add_any_port` and `Gateway::get_any_address` #[derive(Debug)] pub enum AddAnyPortError { /// The client is not authorized to perform the operation. ActionNotAuthorized, /// Can not add a mapping for local port 0. InternalPortZeroInvalid, /// The gateway does not have any free ports. NoPortsAvailable, /// The gateway can only map internal ports to same-numbered external ports /// and this external port is in use. 
ExternalPortInUse, /// The gateway only supports permanent leases (ie. a `lease_duration` of 0). OnlyPermanentLeasesSupported, /// The description was too long for the gateway to handle. DescriptionTooLong, /// Some other error occured performing the request. RequestError(RequestError), } /// Errors returned by `Gateway::add_port` #[derive(Debug)] pub enum AddPortError { /// The client is not authorized to perform the operation. ActionNotAuthorized, /// Can not add a mapping for local port 0. InternalPortZeroInvalid, /// External port number 0 (any port) is considered invalid by the gateway. ExternalPortZeroInvalid, /// The requested mapping conflicts with a mapping assigned to another client. PortInUse, /// The gateway requires that the requested internal and external ports are the same. SamePortValuesRequired, /// The gateway only supports permanent leases (ie. a `lease_duration` of 0). OnlyPermanentLeasesSupported, /// The description was too long for the gateway to handle. DescriptionTooLong, /// Some other error occured performing the request. RequestError(RequestError), } impl From<io::Error> for RequestError { fn from(err: io::Error) -> RequestError { RequestError::IoError(err) } } impl From<soap::Error> for RequestError { fn from(err: soap::Error) -> RequestError { match err { soap::Error::HttpError(e) => RequestError::HttpError(e), soap::Error::IoError(e) => RequestError::IoError(e), } } } impl fmt::Display for RequestError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { RequestError::HttpError(ref e) => write!(f, "HTTP error. {}", e), RequestError::InvalidResponse(ref e) => write!(f, "Invalid response from gateway: {}", e), RequestError::IoError(ref e) => write!(f, "IO error. 
{}", e), RequestError::ErrorCode(n, ref e) => write!(f, "Gateway response error {}: {}", n, e), } } } impl std::error::Error for RequestError { fn cause(&self) -> Option<&std::error::Error> { match *self { RequestError::HttpError(ref e) => Some(e), RequestError::InvalidResponse(..) => None, RequestError::IoError(ref e) => Some(e), RequestError::ErrorCode(..) => None, } } fn description(&self) -> &str { match *self { RequestError::HttpError(..) => "Http error", RequestError::InvalidResponse(..) => "Invalid response", RequestError::IoError(..) => "IO error", RequestError::ErrorCode(_, ref e) => &e[..], } } } impl fmt::Display for GetExternalIpError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { GetExternalIpError::ActionNotAuthorized => write!(f, "The client is not authorized to remove the port"), GetExternalIpError::RequestError(ref e) => write!(f, "Request Error. {}", e), } } } impl std::error::Error for GetExternalIpError { fn cause(&self) -> Option<&std::error::Error> { None } fn description(&self) -> &str { match *self { GetExternalIpError::ActionNotAuthorized => "The client is not authorized to remove the port", GetExternalIpError::RequestError(..) => "Request error", } } } impl fmt::Display for RemovePortError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { RemovePortError::ActionNotAuthorized => write!(f, "The client is not authorized to remove the port"), RemovePortError::NoSuchPortMapping => write!(f, "The port was not mapped"), RemovePortError::RequestError(ref e) => write!(f, "Request error. {}", e), } } } impl std::error::Error for RemovePortError { fn cause(&self) -> Option<&std::error::Error> { None } fn description(&self) -> &str { match *self { RemovePortError::ActionNotAuthorized => "The client is not authorized to remove the port", RemovePortError::NoSuchPortMapping => "The port was not mapped", RemovePortError::RequestError(..) 
=> "Request error", } } } impl fmt::Display for AddAnyPortError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { AddAnyPortError::ActionNotAuthorized => write!(f, "The client is not authorized to remove the port"), AddAnyPortError::InternalPortZeroInvalid => write!(f, "Can not add a mapping for local port 0"), AddAnyPortError::NoPortsAvailable => write!(f, "The gateway does not have any free ports"), AddAnyPortError::OnlyPermanentLeasesSupported => write!(f, "The gateway only supports permanent leases (ie. a `lease_duration` of 0),"), AddAnyPortError::ExternalPortInUse => write!(f, "The gateway can only map internal ports to same-numbered external ports and this external port is in use."), AddAnyPortError::DescriptionTooLong => write!(f, "The description was too long for the gateway to handle."), AddAnyPortError::RequestError(ref e) => write!(f, "Request error. {}", e), } } } impl std::error::Error for AddAnyPortError { fn cause(&self) -> Option<&std::error::Error> { None } fn description(&self) -> &str { match *self { AddAnyPortError::ActionNotAuthorized => "The client is not authorized to remove the port", AddAnyPortError::InternalPortZeroInvalid => "Can not add a mapping for local port 0.", AddAnyPortError::NoPortsAvailable => "The gateway does not have any free ports", AddAnyPortError::OnlyPermanentLeasesSupported => "The gateway only supports permanent leases (ie. a `lease_duration` of 0),", AddAnyPortError::ExternalPortInUse => "The gateway can only map internal ports to same-numbered external ports and this external port is in use.", AddAnyPortError::DescriptionTooLong => "The description was too long for the gateway to handle.", AddAnyPortError::RequestError(..) 
=> "Request error", } } } impl fmt::Display for AddPortError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { AddPortError::ActionNotAuthorized => write!(f, "The client is not authorized to map this port."), AddPortError::InternalPortZeroInvalid => write!(f, "Can not add a mapping for local port 0"), AddPortError::ExternalPortZeroInvalid => write!(f, "External port number 0 (any port) is considered invalid by the gateway."), AddPortError::PortInUse => write!(f, "The requested mapping conflicts with a mapping assigned to another client."), AddPortError::SamePortValuesRequired => write!(f, "The gateway requires that the requested internal and external ports are the same."), AddPortError::OnlyPermanentLeasesSupported => write!(f, "The gateway only supports permanent leases (ie. a `lease_duration` of 0),"), AddPortError::DescriptionTooLong => write!(f, "The description was too long for the gateway to handle."), AddPortError::RequestError(ref e) => write!(f, "Request error. {}", e), } } } impl std::error::Error for AddPortError { fn cause(&self) -> Option<&std::error::Error> { None } fn description(&self) -> &str { match *self { AddPortError::ActionNotAuthorized => "The client is not authorized to map this port.", AddPortError::InternalPortZeroInvalid => "Can not add a mapping for local port 0", AddPortError::ExternalPortZeroInvalid => "External port number 0 (any port) is considered invalid by the gateway.", AddPortError::PortInUse => "The requested mapping conflicts with a mapping assigned to another client.", AddPortError::SamePortValuesRequired => "The gateway requires that the requested internal and external ports are the same.", AddPortError::OnlyPermanentLeasesSupported => "The gateway only supports permanent leases (ie. a `lease_duration` of 0),", AddPortError::DescriptionTooLong => "The description was too long for the gateway to handle.", AddPortError::RequestError(..) 
=> "Request error", } } } /// Represents the protocols available for port mapping. #[derive(Debug,Clone,Copy,PartialEq)] pub enum PortMappingProtocol { /// TCP protocol TCP, /// UDP protocol UDP, } impl fmt::Display for PortMappingProtocol { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", match *self { PortMappingProtocol::TCP => "TCP", PortMappingProtocol::UDP => "UDP", }) } } /// This structure represents a gateway found by the search functions. #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct Gateway { /// Socket address of the gateway pub addr: SocketAddrV4, /// Control url of the device pub control_url: String, } impl Gateway { fn perform_request(&self, header: &str, body: &str, ok: &str) -> Result<(String, xmltree::Element), RequestError> { let url = format!("{}", self); let text = try!(soap::send(&url, soap::Action::new(header), body)); let mut xml = match xmltree::Element::parse(text.as_bytes()) { Ok(xml) => xml, Err(..) => return Err(RequestError::InvalidResponse(text)), }; let mut body = match xml.get_mut_child("Body") { Some(body) => body, None => return Err(RequestError::InvalidResponse(text)), }; if let Some(ok) = body.take_child(ok) { return Ok((text, ok)) } let upnp_error = match body.get_child("Fault") .and_then(|e| e.get_child("detail")) .and_then(|e| e.get_child("UPnPError")) { Some(upnp_error) => upnp_error, None => return Err(RequestError::InvalidResponse(text)), }; match (upnp_error.get_child("errorCode"), upnp_error.get_child("errorDescription")) { (Some(e), Some(d)) => match (e.text.as_ref(), d.text.as_ref()) { (Some(et), Some(dt)) => { match et.parse::<u16>() { Ok(en) => Err(RequestError::ErrorCode(en, From::from(&dt[..]))), Err(..) => Err(RequestError::InvalidResponse(text)), } }, _ => Err(RequestError::InvalidResponse(text)), }, _ => Err(RequestError::InvalidResponse(text)), } } /// Get the external IP address of the gateway. 
pub fn get_external_ip(&self) -> Result<Ipv4Addr, GetExternalIpError> { // Content of the get external ip SOAPAction request header. let header = "\"urn:schemas-upnp-org:service:WANIPConnection:1#GetExternalIPAddress\""; let body = "<?xml version=\"1.0\"?> <SOAP-ENV:Envelope SOAP-ENV:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\" xmlns:SOAP-ENV=\"http://schemas.xmlsoap.org/soap/envelope/\"> <SOAP-ENV:Body> <m:GetExternalIPAddress xmlns:m=\"urn:schemas-upnp-org:service:WANIPConnection:1\"> </m:GetExternalIPAddress> </SOAP-ENV:Body> </SOAP-ENV:Envelope>"; match self.perform_request(header, body, "GetExternalIPAddressResponse") { Ok((text, response)) => { match response.get_child("NewExternalIPAddress") .and_then(|e| e.text.as_ref()) .and_then(|t| t.parse::<Ipv4Addr>().ok()) { Some(ipv4_addr) => Ok(ipv4_addr), None => Err(GetExternalIpError::RequestError(RequestError::InvalidResponse(text))), } }, Err(RequestError::ErrorCode(606, _)) => Err(GetExternalIpError::ActionNotAuthorized), Err(e) => Err(GetExternalIpError::RequestError(e)), } } /// Get an external socket address with our external ip and any port. This is a convenience /// function that calls `get_external_ip` followed by `add_any_port` /// /// The local_addr is the address where the traffic is sent to. /// The lease_duration parameter is in seconds. A value of 0 is infinite. /// /// # Returns /// /// The external address that was mapped on success. Otherwise an error. 
pub fn get_any_address(&self, protocol: PortMappingProtocol, local_addr: SocketAddrV4, lease_duration: u32, description: &str) -> Result<SocketAddrV4, AddAnyPortError> { let external_ip = match self.get_external_ip() { Ok(ip) => ip, Err(GetExternalIpError::ActionNotAuthorized) => return Err(AddAnyPortError::ActionNotAuthorized), Err(GetExternalIpError::RequestError(e)) => return Err(AddAnyPortError::RequestError(e)), }; let external_port = try!(self.add_any_port(protocol, local_addr, lease_duration, description)); Ok(SocketAddrV4::new(external_ip, external_port)) } /// Add a port mapping.with any external port. /// /// The local_addr is the address where the traffic is sent to. /// The lease_duration parameter is in seconds. A value of 0 is infinite. /// /// # Returns /// /// The external port that was mapped on success. Otherwise an error. pub fn add_any_port(&self, protocol: PortMappingProtocol, local_addr: SocketAddrV4, lease_duration: u32, description: &str) -> Result<u16, AddAnyPortError> { // This function first attempts to call AddAnyPortMapping on the IGD with a random port // number. If that fails due to the method being unknown it attempts to call AddPortMapping // instead with a random port number. If that fails due to ConflictInMappingEntry it retrys // with another port up to a maximum of 20 times. If it fails due to SamePortValuesRequired // it retrys once with the same port values. 
if local_addr.port() == 0 { return Err(AddAnyPortError::InternalPortZeroInvalid) } let port_range = rand::distributions::Range::new(32768u16, 65535u16); let mut rng = rand::thread_rng(); let external_port = port_range.ind_sample(&mut rng); let header = "\"urn:schemas-upnp-org:service:WANIPConnection:1#AddAnyPortMapping\""; let body = format!("<?xml version=\"1.0\"?> <s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\"> <s:Body> <u:AddAnyPortMapping xmlns:u=\"urn:schemas-upnp-org:service:WANIPConnection:1\"> <NewProtocol>{}</NewProtocol> <NewExternalPort>{}</NewExternalPort> <NewInternalClient>{}</NewInternalClient> <NewInternalPort>{}</NewInternalPort> <NewLeaseDuration>{}</NewLeaseDuration> <NewPortMappingDescription>{}</NewPortMappingDescription> <NewEnabled>1</NewEnabled> <NewRemoteHost></NewRemoteHost> </u:AddPortMapping> </s:Body> </s:Envelope> ", protocol, external_port, local_addr.ip(), local_addr.port(), lease_duration, description); // First, attempt to call the AddAnyPortMapping method. match self.perform_request(header, &*body, "AddAnyPortMappingResponse") { Ok((text, response)) => { match response.get_child("NewReservedPort") .and_then(|e| e.text.as_ref()) .and_then(|t| t.parse::<u16>().ok()) { Some(port) => Ok(port), None => Err(AddAnyPortError::RequestError(RequestError::InvalidResponse(text))), } } // The router doesn't know the AddAnyPortMapping method. Try using AddPortMapping // instead. Err(RequestError::ErrorCode(401, _)) => { // Try a bunch of random ports. for _attempt in 0..20 { let external_port = port_range.ind_sample(&mut rng); match self.add_port_mapping(protocol, external_port, local_addr, lease_duration, description) { Ok(()) => return Ok(external_port), Err(RequestError::ErrorCode(605, _)) => return Err(AddAnyPortError::DescriptionTooLong), Err(RequestError::ErrorCode(606, _)) => return Err(AddAnyPortError::ActionNotAuthorized), // That port is in use. 
Try another. Err(RequestError::ErrorCode(718, _)) => continue, // The router requires that internal and external ports are the same. Err(RequestError::ErrorCode(724, _)) => { return match self.add_port_mapping(protocol, local_addr.port(), local_addr, lease_duration, description) { Ok(()) => Ok(local_addr.port()), Err(RequestError::ErrorCode(606, _)) => Err(AddAnyPortError::ActionNotAuthorized), Err(RequestError::ErrorCode(718, _)) => Err(AddAnyPortError::ExternalPortInUse), Err(RequestError::ErrorCode(725, _)) => Err(AddAnyPortError::OnlyPermanentLeasesSupported), Err(e) => Err(AddAnyPortError::RequestError(e)), } }, Err(RequestError::ErrorCode(725, _)) => return Err(AddAnyPortError::OnlyPermanentLeasesSupported), Err(e) => return Err(AddAnyPortError::RequestError(e)), } } // The only way we can get here is if the router kept returning 718 (port in use) // for all the ports we tried. Err(AddAnyPortError::NoPortsAvailable) }, Err(RequestError::ErrorCode(605, _)) => Err(AddAnyPortError::DescriptionTooLong), Err(RequestError::ErrorCode(606, _)) => Err(AddAnyPortError::ActionNotAuthorized), Err(RequestError::ErrorCode(728, _)) => Err(AddAnyPortError::NoPortsAvailable), Err(e) => Err(AddAnyPortError::RequestError(e)), } } fn add_port_mapping(&self, protocol: PortMappingProtocol, external_port: u16, local_addr: SocketAddrV4, lease_duration: u32, description: &str) -> Result<(), RequestError> { let header = "\"urn:schemas-upnp-org:service:WANIPConnection:1#AddPortMapping\""; let body = format!("<?xml version=\"1.0\"?> <s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\"> <s:Body> <u:AddPortMapping xmlns:u=\"urn:schemas-upnp-org:service:WANIPConnection:1\"> <NewProtocol>{}</NewProtocol> <NewExternalPort>{}</NewExternalPort> <NewInternalClient>{}</NewInternalClient> <NewInternalPort>{}</NewInternalPort> <NewLeaseDuration>{}</NewLeaseDuration> <NewPortMappingDescription>{}</NewPortMappingDescription> 
<NewEnabled>1</NewEnabled> <NewRemoteHost></NewRemoteHost> </u:AddPortMapping> </s:Body> </s:Envelope> ", protocol, external_port, local_addr.ip(), local_addr.port(), lease_duration, description); try!(self.perform_request(header, &*body, "AddPortMappingResponse")); Ok(()) } /// Add a port mapping. /// /// The local_addr is the address where the traffic is sent to. /// The lease_duration parameter is in seconds. A value of 0 is infinite. pub fn add_port(&self, protocol: PortMappingProtocol, external_port: u16, local_addr: SocketAddrV4, lease_duration: u32, description: &str) -> Result<(), AddPortError> { if external_port == 0 { return Err(AddPortError::ExternalPortZeroInvalid); } if local_addr.port() == 0 { return Err(AddPortError::InternalPortZeroInvalid); } match self.add_port_mapping(protocol, external_port, local_addr, lease_duration, description) { Ok(()) => Ok(()), Err(RequestError::ErrorCode(605, _)) => Err(AddPortError::DescriptionTooLong), Err(RequestError::ErrorCode(606, _)) => Err(AddPortError::ActionNotAuthorized), Err(RequestError::ErrorCode(718, _)) => Err(AddPortError::PortInUse), Err(RequestError::ErrorCode(724, _)) => Err(AddPortError::SamePortValuesRequired), Err(RequestError::ErrorCode(725, _)) => Err(AddPortError::OnlyPermanentLeasesSupported), Err(e) => Err(AddPortError::RequestError(e)), } } /// Remove a port mapping. 
pub fn remove_port(&self, protocol: PortMappingProtocol, external_port: u16) -> Result<(), RemovePortError> { let header = "\"urn:schemas-upnp-org:service:WANIPConnection:1#DeletePortMapping\""; let body = format!("<?xml version=\"1.0\"?> <s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\"> <s:Body> <u:DeletePortMapping xmlns:u=\"urn:schemas-upnp-org:service:WANIPConnection:1\"> <NewProtocol>{}</NewProtocol> <NewExternalPort>{}</NewExternalPort> <NewRemoteHost></NewRemoteHost> </u:DeletePortMapping> </s:Body> </s:Envelope> ", protocol, external_port); match self.perform_request(header, &*body, "DeletePortMappingResponse") { Ok(..) => Ok(()), Err(RequestError::ErrorCode(606, _)) => Err(RemovePortError::ActionNotAuthorized), Err(RequestError::ErrorCode(714, _)) => Err(RemovePortError::NoSuchPortMapping), Err(e) => Err(RemovePortError::RequestError(e)), } } } impl fmt::Display for Gateway { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "http://{}{}", self.addr, self.control_url) } }
use color::Color; use tile::Tile; use ::Axis; #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum BoundState { // TODO change name OutOfBound, Tile(Tile), } #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct Alignement(pub BoundState, pub usize, pub BoundState); pub const ALIGN_HORIZONTAL: usize = 0; pub const ALIGN_DIAGONAL_UP: usize = 1; pub const ALIGN_VERTICAL: usize = 2; pub const ALIGN_DIAGONAL_DOWN: usize = 3; // TODO make types for clarity (grid, return) pub fn horizontal_alignement(grid: &[[Tile; ::GRID_LEN]; ::GRID_LEN], pos: Axis) -> Alignement { let tile = grid[pos.x][pos.y].expect(&format!("Tile at {:?} is empty!", pos)); let mut alignement = Alignement(BoundState::OutOfBound, 0, BoundState::OutOfBound); for y in (0...pos.y).rev() { match grid[pos.x][y] { Some(c) if c == tile => alignement.1 += 1, tile => { alignement.0 = BoundState::Tile(tile); break }, } } for y in pos.y + 1..::GRID_LEN { match grid[pos.x][y] { Some(c) if c == tile => alignement.1 += 1, tile => { alignement.2 = BoundState::Tile(tile); break }, } } alignement } pub fn diagonal_up_alignement(grid: &[[Tile; ::GRID_LEN]; ::GRID_LEN], pos: Axis) -> Alignement { let tile = grid[pos.x][pos.y].expect(&format!("Tile at {:?} is empty!", pos)); let mut alignement = Alignement(BoundState::OutOfBound, 0, BoundState::OutOfBound); let Axis { mut x, mut y } = pos; while x < ::GRID_LEN && y < ::GRID_LEN { // x will underflow to usize::max() match grid[x][y] { Some(c) if c == tile => alignement.1 += 1, tile => { alignement.0 = BoundState::Tile(tile); break }, } x += 1; y = y.wrapping_sub(1); } let Axis { mut x, mut y } = pos; x = x.wrapping_sub(1); y += 1; while x < ::GRID_LEN && y < ::GRID_LEN { match grid[x][y] { Some(c) if c == tile => alignement.1 += 1, tile => { alignement.2 = BoundState::Tile(tile); break }, } x = x.wrapping_sub(1); y += 1; } alignement } pub fn vertical_alignement(grid: &[[Tile; ::GRID_LEN]; ::GRID_LEN], pos: Axis) -> Alignement { let tile = 
grid[pos.x][pos.y].expect(&format!("Tile at {:?} is empty!", pos)); let mut alignement = Alignement(BoundState::OutOfBound, 0, BoundState::OutOfBound); for x in (0...pos.x).rev() { match grid[x][pos.y] { Some(c) if c == tile => alignement.1 += 1, tile => { alignement.0 = BoundState::Tile(tile); break }, } } for x in pos.x + 1..::GRID_LEN { match grid[x][pos.y] { Some(c) if c == tile => alignement.1 += 1, tile => { alignement.2 = BoundState::Tile(tile); break }, } } alignement } pub fn diagonal_down_alignement(grid: &[[Tile; ::GRID_LEN]; ::GRID_LEN], pos: Axis) -> Alignement { let tile = grid[pos.x][pos.y].expect(&format!("Tile at {:?} is empty!", pos)); let mut alignement = Alignement(BoundState::OutOfBound, 0, BoundState::OutOfBound); let Axis { mut x, mut y } = pos; while x < ::GRID_LEN && y < ::GRID_LEN { // x and y will overflow to usize::max() match grid[x][y] { Some(c) if c == tile => alignement.1 += 1, tile => { alignement.0 = BoundState::Tile(tile); break }, } x = x.wrapping_sub(1); y = y.wrapping_sub(1); } let Axis { mut x, mut y } = pos; x += 1; y += 1; while x < ::GRID_LEN && y < ::GRID_LEN { match grid[x][y] { Some(c) if c == tile => alignement.1 += 1, tile => { alignement.2 = BoundState::Tile(tile); break }, } x += 1; y += 1; } alignement } /// returns a list of alignements with the tile at `pos` position in Clockwise /// (e.g. top_to_bot, top_right_to_bot_left, right_to_left, bot_right_to_top_left) /// a None value means no alignement (e.g. 
less than 2 stones) pub fn list_alignements(grid: &[[Tile; ::GRID_LEN]; ::GRID_LEN], pos: Axis) -> [Option<Alignement>; 4] { let mut alignements = [None; 4]; alignements[ALIGN_HORIZONTAL] = match horizontal_alignement(grid, pos) { Alignement(_, 0, _) => unreachable!("horizontal_alignement cannot count zero tiles!"), Alignement(_, 1, _) => None, x => Some(x), }; alignements[ALIGN_DIAGONAL_UP] = match diagonal_up_alignement(grid, pos) { Alignement(_, 0, _) => unreachable!("diagonal_up_alignement cannot count zero tiles!"), Alignement(_, 1, _) => None, x => Some(x), }; alignements[ALIGN_VERTICAL] = match vertical_alignement(grid, pos) { Alignement(_, 0, _) => unreachable!("vertical_alignement cannot count zero tiles!"), Alignement(_, 1, _) => None, x => Some(x), }; alignements[ALIGN_DIAGONAL_DOWN] = match diagonal_down_alignement(grid, pos) { Alignement(_, 0, _) => unreachable!("diagonal_down_alignement cannot count zero tiles!"), Alignement(_, 1, _) => None, x => Some(x), }; alignements } #[cfg(test)] mod tests { use test::Bencher; use functions::alignements::*; use ::Axis; use color::Color; #[bench] fn alignements_horizontal_forward(bencher: &mut Bencher) { let b = Some(Color::Black); let n = None; let grid = [[b, b, b, b, b, b, b, b, b, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, 
n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::OutOfBound, 9, BoundState::Tile(n)); bencher.iter(|| assert_eq!(horizontal_alignement(&grid, Axis { x: 0, y: 0 }), alignement) ); } #[bench] fn alignements_horizontal_backward(bencher: &mut Bencher) { let b = Some(Color::Black); let n = None; let grid = [[b, b, b, b, b, b, n, b, b, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::OutOfBound, 6, BoundState::Tile(n)); bencher.iter(|| assert_eq!(horizontal_alignement(&grid, Axis { x: 0, y: 5 }), alignement) ); } #[bench] fn 
alignements_horizontal_backward_and_forward(bencher: &mut Bencher) { let b = Some(Color::Black); let n = None; let grid = [[b, b, b, b, b, b, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::OutOfBound, 6, BoundState::Tile(n)); bencher.iter(|| assert_eq!(horizontal_alignement(&grid, Axis { x: 0, y: 3 }), alignement) ); } #[bench] fn alignements_diagonal_up_forward(bencher: &mut Bencher) { let b = Some(Color::Black); let w = Some(Color::White); let n = None; let grid = [[n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, b, n, n, 
n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, w, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::Tile(w), 6, BoundState::Tile(n)); bencher.iter(|| assert_eq!(diagonal_up_alignement(&grid, Axis { x: 8, y: 2 }), alignement) ); } #[bench] fn alignements_diagonal_up_backward(bencher: &mut Bencher) { let b = Some(Color::Black); let w = Some(Color::White); let n = None; let grid = [[n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, w, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, 
n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::Tile(w), 6, BoundState::Tile(n)); bencher.iter(|| assert_eq!(diagonal_up_alignement(&grid, Axis { x: 3, y: 7 }), alignement) ); } #[bench] fn alignements_diagonal_up_backward_and_forward(bencher: &mut Bencher) { let b = Some(Color::Black); let w = Some(Color::White); let n = None; let grid = [[n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, w, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::Tile(n), 6, BoundState::Tile(w)); bencher.iter(|| assert_eq!(diagonal_up_alignement(&grid, Axis { x: 5, y: 5 }), alignement) ); } #[bench] fn alignements_vertical_forward(bencher: &mut Bencher) { let b = Some(Color::Black); let w = Some(Color::White); let n = None; let grid = [[n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, 
n, n, n, n, n, n, n, n, n], [n, n, n, n, w, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, w, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::Tile(w), 6, BoundState::Tile(w)); bencher.iter(|| assert_eq!(vertical_alignement(&grid, Axis { x: 3, y: 4 }), alignement) ); } #[bench] fn alignements_vertical_backward(bencher: &mut Bencher) { let b = Some(Color::Black); let w = Some(Color::White); let n = None; let grid = [[n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, w, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, w, n, n, n, n, n, n, n, n, n, n, n, n, n, n], 
[n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::Tile(w), 6, BoundState::Tile(n)); bencher.iter(|| assert_eq!(vertical_alignement(&grid, Axis { x: 8, y: 4 }), alignement) ); } #[bench] fn alignements_vertical_backward_and_forward(bencher: &mut Bencher) { let b = Some(Color::Black); let w = Some(Color::White); let n = None; let grid = [[n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, w, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, w, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::Tile(w), 6, BoundState::Tile(w)); 
bencher.iter(|| assert_eq!(vertical_alignement(&grid, Axis { x: 6, y: 4 }), alignement) ); } #[bench] fn alignements_diagonal_down_forward(bencher: &mut Bencher) { let b = Some(Color::Black); let w = Some(Color::White); let n = None; let grid = [[n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, w, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::Tile(w), 6, BoundState::Tile(n)); bencher.iter(|| assert_eq!(diagonal_down_alignement(&grid, Axis { x: 2, y: 2 }), alignement) ); } #[bench] fn alignements_diagonal_down_backward(bencher: &mut Bencher) { let b = Some(Color::Black); let w = Some(Color::White); let n = None; let grid = [[n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, 
n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, w, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::Tile(n), 6, BoundState::Tile(w)); bencher.iter(|| assert_eq!(diagonal_down_alignement(&grid, Axis { x: 7, y: 7 }), alignement) ); } #[bench] fn alignements_diagonal_down_backward_and_forward(bencher: &mut Bencher) { let b = Some(Color::Black); let w = Some(Color::White); let n = None; let grid = [[n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, w, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, 
n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::Tile(n), 6, BoundState::Tile(w)); bencher.iter(|| assert_eq!(diagonal_down_alignement(&grid, Axis { x: 5, y: 5 }), alignement) ); } #[bench] fn alignements_list_all(bencher: &mut Bencher) { let b = Some(Color::Black); let w = Some(Color::White); let n = None; let grid = [[n, n, n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n], [n, w, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n], [n, n, b, n, b, n, b, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, b, b, b, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, b, b, b, b, w, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, b, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let mut alignements = [None; 4]; alignements[ALIGN_HORIZONTAL] = Some(Alignement(BoundState::Tile(n), 4, BoundState::Tile(w))); alignements[ALIGN_DIAGONAL_UP] = Some(Alignement(BoundState::Tile(n), 7, BoundState::OutOfBound)); alignements[ALIGN_VERTICAL] = 
Some(Alignement(BoundState::Tile(n), 3, BoundState::Tile(n))); alignements[ALIGN_DIAGONAL_DOWN] = Some(Alignement(BoundState::Tile(w), 4, BoundState::Tile(n))); bencher.iter(|| assert_eq!(list_alignements(&grid, Axis { x: 4, y: 4 }), alignements) ); } #[bench] fn alignements_list_all_too_small(bencher: &mut Bencher) { let b = Some(Color::Black); let w = Some(Color::White); let n = None; let grid = [[n, n, n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n], [n, w, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n], [n, n, b, n, b, n, b, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, b, b, b, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, w, b, w, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, b, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let mut alignements = [None; 4]; alignements[ALIGN_HORIZONTAL] = None; alignements[ALIGN_DIAGONAL_UP] = Some(Alignement(BoundState::Tile(n), 7, BoundState::OutOfBound)); alignements[ALIGN_VERTICAL] = Some(Alignement(BoundState::Tile(n), 3, BoundState::Tile(n))); alignements[ALIGN_DIAGONAL_DOWN] = Some(Alignement(BoundState::Tile(w), 4, BoundState::Tile(n))); bencher.iter(|| assert_eq!(list_alignements(&grid, Axis { x: 4, y: 4 }), alignements) ); } } alignements: Remove 
ALIGN_ before alignement constant indexes use color::Color; use tile::Tile; use ::Axis; #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum BoundState { // TODO change name OutOfBound, Tile(Tile), } #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct Alignement(pub BoundState, pub usize, pub BoundState); pub const HORIZONTAL: usize = 0; pub const DIAGONAL_UP: usize = 1; pub const VERTICAL: usize = 2; pub const DIAGONAL_DOWN: usize = 3; // TODO make types for clarity (grid, return) pub fn horizontal_alignement(grid: &[[Tile; ::GRID_LEN]; ::GRID_LEN], pos: Axis) -> Alignement { let tile = grid[pos.x][pos.y].expect(&format!("Tile at {:?} is empty!", pos)); let mut alignement = Alignement(BoundState::OutOfBound, 0, BoundState::OutOfBound); for y in (0...pos.y).rev() { match grid[pos.x][y] { Some(c) if c == tile => alignement.1 += 1, tile => { alignement.0 = BoundState::Tile(tile); break }, } } for y in pos.y + 1..::GRID_LEN { match grid[pos.x][y] { Some(c) if c == tile => alignement.1 += 1, tile => { alignement.2 = BoundState::Tile(tile); break }, } } alignement } pub fn diagonal_up_alignement(grid: &[[Tile; ::GRID_LEN]; ::GRID_LEN], pos: Axis) -> Alignement { let tile = grid[pos.x][pos.y].expect(&format!("Tile at {:?} is empty!", pos)); let mut alignement = Alignement(BoundState::OutOfBound, 0, BoundState::OutOfBound); let Axis { mut x, mut y } = pos; while x < ::GRID_LEN && y < ::GRID_LEN { // x will underflow to usize::max() match grid[x][y] { Some(c) if c == tile => alignement.1 += 1, tile => { alignement.0 = BoundState::Tile(tile); break }, } x += 1; y = y.wrapping_sub(1); } let Axis { mut x, mut y } = pos; x = x.wrapping_sub(1); y += 1; while x < ::GRID_LEN && y < ::GRID_LEN { match grid[x][y] { Some(c) if c == tile => alignement.1 += 1, tile => { alignement.2 = BoundState::Tile(tile); break }, } x = x.wrapping_sub(1); y += 1; } alignement } pub fn vertical_alignement(grid: &[[Tile; ::GRID_LEN]; ::GRID_LEN], pos: Axis) -> Alignement { let tile = 
grid[pos.x][pos.y].expect(&format!("Tile at {:?} is empty!", pos)); let mut alignement = Alignement(BoundState::OutOfBound, 0, BoundState::OutOfBound); for x in (0...pos.x).rev() { match grid[x][pos.y] { Some(c) if c == tile => alignement.1 += 1, tile => { alignement.0 = BoundState::Tile(tile); break }, } } for x in pos.x + 1..::GRID_LEN { match grid[x][pos.y] { Some(c) if c == tile => alignement.1 += 1, tile => { alignement.2 = BoundState::Tile(tile); break }, } } alignement } pub fn diagonal_down_alignement(grid: &[[Tile; ::GRID_LEN]; ::GRID_LEN], pos: Axis) -> Alignement { let tile = grid[pos.x][pos.y].expect(&format!("Tile at {:?} is empty!", pos)); let mut alignement = Alignement(BoundState::OutOfBound, 0, BoundState::OutOfBound); let Axis { mut x, mut y } = pos; while x < ::GRID_LEN && y < ::GRID_LEN { // x and y will overflow to usize::max() match grid[x][y] { Some(c) if c == tile => alignement.1 += 1, tile => { alignement.0 = BoundState::Tile(tile); break }, } x = x.wrapping_sub(1); y = y.wrapping_sub(1); } let Axis { mut x, mut y } = pos; x += 1; y += 1; while x < ::GRID_LEN && y < ::GRID_LEN { match grid[x][y] { Some(c) if c == tile => alignement.1 += 1, tile => { alignement.2 = BoundState::Tile(tile); break }, } x += 1; y += 1; } alignement } /// returns a list of alignements with the tile at `pos` position in Clockwise /// (e.g. top_to_bot, top_right_to_bot_left, right_to_left, bot_right_to_top_left) /// a None value means no alignement (e.g. 
less than 2 stones) pub fn list_alignements(grid: &[[Tile; ::GRID_LEN]; ::GRID_LEN], pos: Axis) -> [Option<Alignement>; 4] { let mut alignements = [None; 4]; alignements[HORIZONTAL] = match horizontal_alignement(grid, pos) { Alignement(_, 0, _) => unreachable!("horizontal_alignement cannot count zero tiles!"), Alignement(_, 1, _) => None, x => Some(x), }; alignements[DIAGONAL_UP] = match diagonal_up_alignement(grid, pos) { Alignement(_, 0, _) => unreachable!("diagonal_up_alignement cannot count zero tiles!"), Alignement(_, 1, _) => None, x => Some(x), }; alignements[VERTICAL] = match vertical_alignement(grid, pos) { Alignement(_, 0, _) => unreachable!("vertical_alignement cannot count zero tiles!"), Alignement(_, 1, _) => None, x => Some(x), }; alignements[DIAGONAL_DOWN] = match diagonal_down_alignement(grid, pos) { Alignement(_, 0, _) => unreachable!("diagonal_down_alignement cannot count zero tiles!"), Alignement(_, 1, _) => None, x => Some(x), }; alignements } #[cfg(test)] mod tests { use test::Bencher; use functions::alignements::*; use ::Axis; use color::Color; #[bench] fn alignements_horizontal_forward(bencher: &mut Bencher) { let b = Some(Color::Black); let n = None; let grid = [[b, b, b, b, b, b, b, b, b, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, 
n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::OutOfBound, 9, BoundState::Tile(n)); bencher.iter(|| assert_eq!(horizontal_alignement(&grid, Axis { x: 0, y: 0 }), alignement) ); } #[bench] fn alignements_horizontal_backward(bencher: &mut Bencher) { let b = Some(Color::Black); let n = None; let grid = [[b, b, b, b, b, b, n, b, b, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::OutOfBound, 6, BoundState::Tile(n)); bencher.iter(|| assert_eq!(horizontal_alignement(&grid, Axis { x: 0, y: 5 }), alignement) ); } #[bench] fn alignements_horizontal_backward_and_forward(bencher: &mut Bencher) { 
let b = Some(Color::Black); let n = None; let grid = [[b, b, b, b, b, b, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::OutOfBound, 6, BoundState::Tile(n)); bencher.iter(|| assert_eq!(horizontal_alignement(&grid, Axis { x: 0, y: 3 }), alignement) ); } #[bench] fn alignements_diagonal_up_forward(bencher: &mut Bencher) { let b = Some(Color::Black); let w = Some(Color::White); let n = None; let grid = [[n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, b, n, n, n, n, n, n, 
n, n, n, n, n, n, n, n, n, n], [n, w, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::Tile(w), 6, BoundState::Tile(n)); bencher.iter(|| assert_eq!(diagonal_up_alignement(&grid, Axis { x: 8, y: 2 }), alignement) ); } #[bench] fn alignements_diagonal_up_backward(bencher: &mut Bencher) { let b = Some(Color::Black); let w = Some(Color::White); let n = None; let grid = [[n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, w, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, 
n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::Tile(w), 6, BoundState::Tile(n)); bencher.iter(|| assert_eq!(diagonal_up_alignement(&grid, Axis { x: 3, y: 7 }), alignement) ); } #[bench] fn alignements_diagonal_up_backward_and_forward(bencher: &mut Bencher) { let b = Some(Color::Black); let w = Some(Color::White); let n = None; let grid = [[n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, w, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::Tile(n), 6, BoundState::Tile(w)); bencher.iter(|| assert_eq!(diagonal_up_alignement(&grid, Axis { x: 5, y: 5 }), alignement) ); } #[bench] fn alignements_vertical_forward(bencher: &mut Bencher) { let b = Some(Color::Black); let w = Some(Color::White); let n = None; let grid = [[n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, w, n, n, n, n, n, n, n, n, 
n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, w, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::Tile(w), 6, BoundState::Tile(w)); bencher.iter(|| assert_eq!(vertical_alignement(&grid, Axis { x: 3, y: 4 }), alignement) ); } #[bench] fn alignements_vertical_backward(bencher: &mut Bencher) { let b = Some(Color::Black); let w = Some(Color::White); let n = None; let grid = [[n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, w, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, w, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, 
n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::Tile(w), 6, BoundState::Tile(n)); bencher.iter(|| assert_eq!(vertical_alignement(&grid, Axis { x: 8, y: 4 }), alignement) ); } #[bench] fn alignements_vertical_backward_and_forward(bencher: &mut Bencher) { let b = Some(Color::Black); let w = Some(Color::White); let n = None; let grid = [[n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, w, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, w, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::Tile(w), 6, BoundState::Tile(w)); bencher.iter(|| assert_eq!(vertical_alignement(&grid, Axis { x: 6, y: 4 
}), alignement) ); } #[bench] fn alignements_diagonal_down_forward(bencher: &mut Bencher) { let b = Some(Color::Black); let w = Some(Color::White); let n = None; let grid = [[n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, w, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::Tile(w), 6, BoundState::Tile(n)); bencher.iter(|| assert_eq!(diagonal_down_alignement(&grid, Axis { x: 2, y: 2 }), alignement) ); } #[bench] fn alignements_diagonal_down_backward(bencher: &mut Bencher) { let b = Some(Color::Black); let w = Some(Color::White); let n = None; let grid = [[n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, b, 
n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, w, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::Tile(n), 6, BoundState::Tile(w)); bencher.iter(|| assert_eq!(diagonal_down_alignement(&grid, Axis { x: 7, y: 7 }), alignement) ); } #[bench] fn alignements_diagonal_down_backward_and_forward(bencher: &mut Bencher) { let b = Some(Color::Black); let w = Some(Color::White); let n = None; let grid = [[n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, w, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, 
n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let alignement = Alignement(BoundState::Tile(n), 6, BoundState::Tile(w)); bencher.iter(|| assert_eq!(diagonal_down_alignement(&grid, Axis { x: 5, y: 5 }), alignement) ); } #[bench] fn alignements_list_all(bencher: &mut Bencher) { let b = Some(Color::Black); let w = Some(Color::White); let n = None; let grid = [[n, n, n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n], [n, w, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n], [n, n, b, n, b, n, b, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, b, b, b, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, b, b, b, b, w, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, b, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let mut alignements = [None; 4]; alignements[HORIZONTAL] = Some(Alignement(BoundState::Tile(n), 4, BoundState::Tile(w))); alignements[DIAGONAL_UP] = Some(Alignement(BoundState::Tile(n), 7, BoundState::OutOfBound)); alignements[VERTICAL] = Some(Alignement(BoundState::Tile(n), 3, BoundState::Tile(n))); alignements[DIAGONAL_DOWN] = 
Some(Alignement(BoundState::Tile(w), 4, BoundState::Tile(n))); bencher.iter(|| assert_eq!(list_alignements(&grid, Axis { x: 4, y: 4 }), alignements) ); } #[bench] fn alignements_list_all_too_small(bencher: &mut Bencher) { let b = Some(Color::Black); let w = Some(Color::White); let n = None; let grid = [[n, n, n, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n], [n, w, n, n, n, n, n, b, n, n, n, n, n, n, n, n, n, n, n], [n, n, b, n, b, n, b, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, b, b, b, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, w, b, w, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, b, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, b, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n], [n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]; let mut alignements = [None; 4]; alignements[HORIZONTAL] = None; alignements[DIAGONAL_UP] = Some(Alignement(BoundState::Tile(n), 7, BoundState::OutOfBound)); alignements[VERTICAL] = Some(Alignement(BoundState::Tile(n), 3, BoundState::Tile(n))); alignements[DIAGONAL_DOWN] = Some(Alignement(BoundState::Tile(w), 4, BoundState::Tile(n))); bencher.iter(|| assert_eq!(list_alignements(&grid, Axis { x: 4, y: 4 }), alignements) ); } }
use std::fmt; use std::collections::HashMap; use px8; /// Emulated screen width in pixels pub const SCREEN_WIDTH: usize = px8::SCREEN_WIDTH; /// Emulated screen height in ixels pub const SCREEN_HEIGHT: usize = px8::SCREEN_HEIGHT; /// Screen texture size in bytes pub const SCREEN_SIZE: usize = SCREEN_WIDTH * SCREEN_HEIGHT; pub const GLYPH : [[u16; 2]; 95] = [ [0x0000, 0x0000], // space [0x0000, 0x1700], // ! [0x0003, 0x0003], // " [0x001f, 0x0a1f], // # [0x000d, 0x1f0b], // $ [0x0013, 0x0419], // % [0x0018, 0x171f], // & [0x0000, 0x0102], // ' [0x0000, 0x211e], // ( [0x001e, 0x2100], // ) [0x0015, 0x0e15], // * [0x0004, 0x0e04], // + [0x0000, 0x1020], // , [0x0004, 0x0404], // - [0x0000, 0x2000], // . [0x0001, 0x1e20], // / [0x003e, 0x223e], // 0 [0x0020, 0x3e22], // 1 [0x002e, 0x2a3a], // 2 [0x003e, 0x2a22], // 3 [0x003e, 0x080e], // 4 [0x003a, 0x2a2e], // 5 [0x0038, 0x283e], // 6 [0x003e, 0x0202], // 7 [0x003e, 0x2a3e], // 8 [0x003e, 0x0a0e], // 9 [0x0000, 0x0000], // : [0x0000, 0x1700], // ; [0x0010, 0x0e01], // < [0x0003, 0x0003], // = [0x001f, 0x0a1f], // > [0x000d, 0x1f0b], // ? 
[0x0013, 0x0419], // @ [0x1e09, 0x091e], // A [0x0a15, 0x151f], // B [0x0a11, 0x110e], // C [0x0e11, 0x111f], // D [0x1115, 0x151f], // E [0x0105, 0x051f], // F [0x0c15, 0x110e], // G [0x1f08, 0x081f], // H [0x1111, 0x1f11], // I [0x010f, 0x1108], // J [0x110a, 0x041f], // K [0x1010, 0x101f], // L [0x1f07, 0x071f], // M [0x1f04, 0x021f], // N [0x0e11, 0x110e], // O [0x0609, 0x091f], // P [0x1619, 0x110e], // Q [0x0609, 0x091f], // R [0x0915, 0x1512], // S [0x0101, 0x1f01], // T [0x0f10, 0x100f], // U [0x0304, 0x081f], // V [0x1f18, 0x181f], // W [0x1b04, 0x041b], // X [0x0304, 0x1c03], // Y [0x1315, 0x1519], // Z [0x0000, 0x0000], // [ [0x0010, 0x0e01], // \ [0x0000, 0x1700], // ] [0x0010, 0x0e01], // ^ [0x0003, 0x0003], // _ [0x001f, 0x0a1f], // ` [0x001c, 0x1408], // a [0x0008, 0x141f], // b [0x0014, 0x1408], // c [0x001f, 0x1408], // d [0x0014, 0x140c], // e [0x0005, 0x1e04], // f [0x003c, 0x5458], // g [0x0018, 0x041f], // h [0x0000, 0x1d00], // i [0x0000, 0x1d20], // j [0x0014, 0x081f], // k [0x0000, 0x100f], // l [0x001c, 0x0c1c], // m [0x0018, 0x041c], // n [0x0008, 0x1408], // o [0x0018, 0x147c], // p [0x007c, 0x140c], // q [0x0004, 0x0418], // r [0x0004, 0x1c10], // s [0x0014, 0x0e04], // t [0x001c, 0x100c], // u [0x000c, 0x180c], // v [0x001c, 0x181c], // w [0x0014, 0x0814], // x [0x003c, 0x505c], // y [0x0010, 0x1c04], // z [0x000d, 0x1f0b], // { [0x000d, 0x1f0b], // | [0x0013, 0x0419], // } [0x0013, 0x0419], // ~ ]; use nalgebra::{U2, U3, Rotation2, Dynamic, Matrix, MatrixArray, MatrixVec}; type DMatrixu32 = Matrix<u32, Dynamic, Dynamic, MatrixVec<u32, Dynamic, Dynamic>>; #[derive(Clone)] pub struct DynSprite { pub data: DMatrixu32, } impl DynSprite { pub fn new(data: Vec<u32>, width: u32, height: u32) -> DynSprite { let mut d_mat = DMatrixu32::from_element(height as usize, width as usize, 0); let mut idx = 0; debug!("WIDTH {:?} HEIGHT {:?} -> {:?} {:?}", width, height, d_mat.ncols(), d_mat.nrows()); for i in 0..width { for j in 0..height { 
d_mat[(i+j*width) as usize] = *data.get(idx).unwrap(); idx += 1; } } DynSprite { data: d_mat.clone(), } } pub fn new_from_matrix(d_mat: DMatrixu32) -> DynSprite { DynSprite { data: d_mat, } } pub fn flip_x(&mut self) -> DMatrixu32 { let mut r_mat = self.data.clone(); let n_cols = r_mat.ncols(); let n_rows = r_mat.nrows(); for i in 0..n_cols/2 { for j in 0..n_rows { let tmp = r_mat[(i + j * n_cols) as usize]; r_mat[(i + j * n_cols) as usize] = r_mat[((n_cols - (i+1)) + j * n_cols) as usize]; r_mat[((n_cols - (i+1)) + j * n_cols) as usize] = tmp; } } return r_mat; } pub fn flip_y(&mut self) -> DMatrixu32 { let mut r_mat = self.data.clone(); let n_cols = r_mat.ncols(); let n_rows = r_mat.nrows(); for i in 0..n_rows/2 { for j in 0..n_cols { let tmp = r_mat[(j + i * n_cols) as usize]; r_mat[(j + i * n_cols) as usize] = r_mat[(j + (n_rows - (i+1)) * n_cols) as usize]; r_mat[(j + (n_rows - (i+1)) * n_cols) as usize] = tmp; } } return r_mat; } } impl fmt::Debug for DynSprite { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut data_matrix = String::new(); data_matrix.push('\n'); for j in 0..self.data.nrows() { for i in 0..self.data.ncols() { data_matrix.push_str(format!("{:?} ", self.data[(i+j*self.data.ncols()) as usize]).as_str()); } data_matrix.push('\n'); } write!(f, "{}", data_matrix) } } #[derive(Clone)] pub struct Sprite { pub data: Vec<u8>, } impl Sprite { pub fn new(d: [u8; 8 * 8]) -> Sprite { let mut v = Vec::new(); v.extend(d.iter().cloned()); Sprite { data: v } } pub fn set_data(&mut self, idx: usize, col: u8) { self.data[idx] = col; } pub fn get_data(&mut self) -> String { let mut data = String::new(); for c in self.data.clone() { data.push_str(&format!("{:?}", c)); } return data; } pub fn get_line(&mut self, line: u32) -> String { let mut data = String::new(); let mut data_clone = self.data.clone(); let data_line: Vec<_> = data_clone.drain((line*8) as usize..(line*8+8)as usize).collect(); for c in data_line.clone() { data.push_str(&format!("{:x}", 
c)); } return data; } pub fn horizontal_reflection(&mut self) -> [u8; 64] { let mut ret: [u8; 64] = self.to_u8_64_array(); for i in 0..4 { for j in 0..8 { let tmp = ret[(i + j * 8) as usize]; ret[(i + j * 8) as usize] = ret[((8 - (i+1)) + j * 8) as usize]; ret[((8 - (i+1)) + j * 8) as usize] = tmp; } } return ret; } pub fn vertical_reflection(&mut self) -> [u8; 64] { let mut ret: [u8; 64] = self.to_u8_64_array(); for i in 0..4 { for j in 0..8 { let tmp = ret[(j + i * 8) as usize]; ret[(j + i * 8) as usize] = ret[(j + (8 - (i+1)) * 8) as usize]; ret[(j + (8 - (i+1)) * 8) as usize] = tmp; } } return ret; } pub fn flip_x(&mut self) -> Sprite { return Sprite::new(self.horizontal_reflection()); } pub fn flip_y(&mut self) -> Sprite { return Sprite::new(self.vertical_reflection()); } pub fn to_u8_64_array(&mut self) -> [u8;64] { let mut arr = [0u8;64]; for (place, element) in arr.iter_mut().zip(self.data.iter()) { *place = *element; } arr } } impl fmt::Debug for Sprite { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut data_matrix = String::new(); data_matrix.push('\n'); for i in 0..8 { data_matrix.push_str(format!("{:?}", &self.data[i*8..i*8+8]).as_str()); data_matrix.push('\n'); } write!(f, "{}", data_matrix) } } // Screen scaling #[derive(Copy, Clone)] pub enum Scale { Scale1x, Scale2x, Scale3x, Scale4x, Scale5x, Scale6x, Scale8x, Scale10x, } impl Scale { pub fn factor(self) -> usize { match self { Scale::Scale1x => 1, Scale::Scale2x => 2, Scale::Scale3x => 3, Scale::Scale4x => 4, Scale::Scale5x => 5, Scale::Scale6x => 6, Scale::Scale8x => 8, Scale::Scale10x => 10, } } } pub struct Camera { pub x: i32, pub y: i32, } impl Camera { pub fn new() -> Camera { Camera {x: 0, y: 0} } } pub struct Clipping { pub x: u32, pub y: u32, pub w: u32, pub h: u32, pub clipped: bool, } impl Clipping { pub fn new() -> Clipping { Clipping {x: 0, y: 0, w: 0, h: 0, clipped: false} } } pub struct Screen { pub back_buffer: Box<px8::ScreenBuffer>, pub saved_back_buffer: 
Box<px8::ScreenBuffer>, pub sprites: Vec<Sprite>, pub dyn_sprites: Vec<DynSprite>, pub map: [[u32; 32]; px8::SCREEN_WIDTH], pub transparency: HashMap<u32, u8>, pub colors: HashMap<u32, u32>, pub camera: Camera, pub color: u32, pub clipping: Clipping, } unsafe impl Send for Screen {} unsafe impl Sync for Screen {} impl Screen { pub fn new() -> Screen { Screen { back_buffer: Box::new(px8::SCREEN_EMPTY), saved_back_buffer: Box::new(px8::SCREEN_EMPTY), sprites: Vec::new(), dyn_sprites: Vec::new(), map: [[0; 32]; px8::SCREEN_WIDTH], transparency: HashMap::new(), colors: HashMap::new(), color: 0, camera: Camera::new(), clipping: Clipping::new(), } } pub fn init(&mut self) { self._reset_colors(); self._reset_transparency(); } pub fn _reset_transparency(&mut self) { self.transparency.clear(); self.transparency.insert(0, 1); } pub fn _reset_colors(&mut self) { self.colors.clear(); } pub fn save(&mut self) { for i in 0..px8::SCREEN_PIXELS { self.saved_back_buffer[i] = self.back_buffer[i]; } } pub fn restore(&mut self) { for i in 0..px8::SCREEN_PIXELS { self.back_buffer[i] = self.saved_back_buffer[i]; } } pub fn _find_color(&mut self, col: i32) -> u32 { // no specified color if col == -1 { return self.color; } return col as u32; } pub fn camera(&mut self, x: i32, y: i32) { if x == -1 && y == -1 { self.camera.x = 0; self.camera.y = 0; } else { self.camera.x = x; self.camera.y = y; } } pub fn set_sprites(&mut self, sprites: Vec<Sprite>) { self.sprites = sprites; } pub fn set_map(&mut self, map: [[u32; 32]; px8::SCREEN_WIDTH]) { self.map = map; } pub fn putpixel_(&mut self, x: i32, y: i32, col: u32) { // Camera let x = (x as i32 - self.camera.x) as usize; let y = (y as i32 - self.camera.y) as usize; let mut col = col; if x >= SCREEN_WIDTH || y >= SCREEN_HEIGHT { return; } // Clipped if self.clipping.clipped { let x = x as u32; let y = y as u32; if !(x >= self.clipping.x && x <= self.clipping.x + self.clipping.w) { return; } if !(y >= self.clipping.y && y <= self.clipping.y + 
self.clipping.h) { return; } } match self.colors.get(&col) { Some(&value) => col = value, None => (), } // col = self.colors[col as usize]; self.back_buffer[x + y * SCREEN_WIDTH] = col; } pub fn color(&mut self, col: i32) { if col != -1 { self.color = col as u32; } } pub fn putpixel(&mut self, x: i32, y: i32, col: u32) { return self.putpixel_(x, y, col); } pub fn getpixel(&mut self, x: usize, y: usize) -> u32 { if x >= SCREEN_WIDTH || y >= SCREEN_HEIGHT { return 0; } return self.back_buffer[x + y * SCREEN_WIDTH] as u32; } pub fn pget(&mut self, x: u32, y: u32) -> u32 { let col = self.getpixel(x as usize, y as usize); return col; } pub fn pset(&mut self, x: i32, y: i32, col: i32) { let color = self._find_color(col); self.putpixel_(x, y, color); } pub fn sget(&mut self, x: u32, y: u32) -> u8 { let idx_sprite = (x / 8) + 16 * (y / 8); let sprite = &self.sprites[idx_sprite as usize]; return *sprite.data.get(((x % 8) + (y % 8) * 8) as usize).unwrap(); } pub fn sset(&mut self, x: u32, y: u32, col: i32) { let col = self._find_color(col); let idx_sprite = (x / 8) + 16 * (y / 8); let ref mut sprite = self.sprites[idx_sprite as usize]; sprite.set_data(((x % 8) + (y % 8) * 8) as usize, col as u8); } pub fn cls(&mut self) { for x in 0..SCREEN_WIDTH { for y in 0..SCREEN_HEIGHT { self.putpixel(x as i32, y as i32, 0); } } } pub fn print(&mut self, string: String, x: i32, y: i32, col: i32) { let mut x = x; let y = y; for k in 0..string.len() { let value = string.as_bytes()[k] as usize; let data; if value >= 32 && value <= 126 { data = GLYPH[value - 32]; } else { /* Unknown char, replace by a space */ data = [0x0000, 0x0000]; } let mut idx = 1; let mut idx_1 = 0; for i in 0..32 { if (data[idx] & (0x1 << idx_1)) != 0 { self.pset(x, y + i % 8, col) } idx_1 += 1; if i % 8 == 7 { x = x + 1; } if i == 15 { idx = 0; idx_1 = 0; } } } } pub fn line(&mut self, x0: i32, y0: i32, x1: i32, y1: i32, col: i32) { debug!("LINE {:?} {:?} {:?} {:?} {:?}", x0, y0, x1, y1, col); let color = 
self._find_color(col); let (mut x0, mut y0) = (x0, y0); let (x1, y1) = (x1, y1); let dx = (x1 - x0).abs(); let sx = if x0 < x1 { 1 } else { -1 }; let dy: i32 = -1 * (y1 - y0).abs(); let sy: i32 = if y0 < y1 { 1 } else { -1 }; let mut err: i32 = dx + dy; /* error value e_xy */ loop { self.putpixel(x0, y0, color); if x0 == x1 && y0 == y1 { break; } let e2 = 2 * err; if e2 >= dy { err += dy; x0 += sx; } /* e_xy+e_x > 0 */ if e2 <= dx { err += dx; y0 += sy; } /* e_xy+e_y < 0 */ } } pub fn hline(&mut self, x1: i32, x2: i32, y: i32, col: i32) { self.line(x1, y, x2, y, col); } pub fn rect(&mut self, x0: i32, y0: i32, x1: i32, y1: i32, col: i32) { self.line(x0, y0, x0, y1, col); self.line(x0, y0, x1, y0, col); self.line(x0, y1, x1, y1, col); self.line(x1, y0, x1, y1, col); } pub fn rectfill(&mut self, x0: i32, y0: i32, x1: i32, y1: i32, col: i32) { self.line(x0, y0, x0, y1, col); self.line(x0, y0, x1, y0, col); self.line(x0, y1, x1, y1, col); self.line(x1, y0, x1, y1, col); for y in y0..y1 { self.line(x0, y, x1, y, col) } } pub fn square(&mut self, x0: i32, y0: i32, h: i32, col: i32) { self.rect(x0, y0, x0 + h, y0 + h, col); } pub fn squarefill(&mut self, x0: i32, y0: i32, h: i32, col: i32) { self.rectfill(x0, y0, x0 + h, y0 + h, col); } pub fn circ(&mut self, x: i32, y: i32, r: i32, col: i32) { if r <= 0 { return; } let col = self._find_color(col); let mut h: i32; let mut i: i32; let mut j: i32; let mut k: i32; let mut oh: i32 = 0xFFFF; let mut oi: i32 = 0xFFFF; let mut ix: i32; let mut iy: i32; let rx: i32 = r as i32; let ry: i32 = r as i32; let mut xmj: i32; let mut xpj: i32; let mut ymi: i32; let mut ypi: i32; let mut xmk: i32; let mut xpk: i32; let mut ymh: i32; let mut yph: i32; ix = 0; iy = ry * 64; h = (ix + 32) >> 6; i = (iy + 32) >> 6; j = (h * rx) / ry; k = (i * rx) / ry; while i > h { if ((oi != i) && (oh != i)) || ((oh != h) && (oi != h) && (i != h)) { xmj = x - j; xpj = x + j; if i > 0 { ypi = y + i; ymi = y - i; self.putpixel(xmj, ypi, col); 
self.putpixel(xpj, ypi, col); self.putpixel(xmj, ymi, col); self.putpixel(xpj, ymi, col); } else { self.putpixel(xmj, y, col); self.putpixel(xpj, y, col); } oi = i; xmk = x - k; xpk = x + k; if h > 0 { yph = y + h; ymh = y - h; self.putpixel(xmk, yph, col); self.putpixel(xpk, yph, col); self.putpixel(xmk, ymh, col); self.putpixel(xpk, ymh, col); } else { self.putpixel(xmk, y, col); self.putpixel(xpk, y, col); } oh = h; } ix = ix + iy / ry; iy = iy - ix / ry; h = (ix + 32) >> 6; i = (iy + 32) >> 6; j = (h * rx) / ry; k = (i * rx) / ry; } } pub fn circfill(&mut self, x: i32, y: i32, r: i32, col: i32) { if r <= 0 { return; } let mut h: i32; let mut i: i32; let mut j: i32; let mut k: i32; let mut oh: i32 = 0xFFFF; let mut oi: i32 = 0xFFFF; let mut ix: i32; let mut iy: i32; let rx: i32 = r as i32; let ry: i32 = r as i32; let mut xmj: i32; let mut xpj: i32; let mut xmk: i32; let mut xpk: i32; ix = 0; iy = ry * 64; h = (ix + 32) >> 6; i = (iy + 32) >> 6; j = (h * rx) / ry; k = (i * rx) / ry; while i > h { if (oi != i) && (oh != i) { xmj = x - j; xpj = x + j; if i > 0 { self.hline(xmj, xpj, (y + i), col); self.hline(xmj, xpj, (y - i), col); } else { self.hline(xmj, xpj, y, col); } oi = i; } if (oh != h) && (oi != h) && (i != h) { xmk = x - k; xpk = x + k; if h > 0 { self.hline(xmk, xpk, (y + h), col); self.hline(xmk, xpk, (y - h), col); } else { self.hline(xmk, xpk, y, col); } oh = h; } ix = ix + iy / ry; iy = iy - ix / ry; h = (ix + 32) >> 6; i = (iy + 32) >> 6; j = (h * rx) / ry; k = (i * rx) / ry; } } pub fn clip(&mut self, x: i32, y: i32, w: i32, h: i32) { // reset if x == -1 && y == -1 && w == -1 && h == -1 { self.clipping.clipped = false; } // invalid clipping value if x == -1 || y == -1 || w == -1 || h == -1 { return; } if x < 0 || y < 0 || w < 0 || h < 0 { return; } self.clipping.x = x as u32; self.clipping.y = y as u32; self.clipping.w = w as u32; self.clipping.h = h as u32; self.clipping.clipped = true; } pub fn trigon(&mut self, x1: i32, y1: i32, x2: i32, y2: 
i32, x3: i32, y3: i32, col: i32) { let mut vx = Vec::new(); let mut vy = Vec::new(); vx.push(x1); vx.push(x2); vx.push(x3); vy.push(y1); vy.push(y2); vy.push(y3); self.polygon(vx.clone(), vy.clone(), col); } pub fn polygon(&mut self, vx: Vec<i32>, vy: Vec<i32>, col: i32) { if vx.len() < 3 || vy.len() < 3 { return; } if vx.len() != vy.len() { return; } let mut idx = 0; while idx < vx.len() - 1 { self.line(vx[idx], vy[idx], vx[idx + 1], vy[idx + 1], col); idx += 1; } self.line(*vx.get(idx).unwrap(), *vy.get(idx).unwrap(), *vx.get(0).unwrap(), *vy.get(0).unwrap(), col); } pub fn spr(&mut self, n: u32, x: i32, y: i32, w: u32, h: u32, flip_x: bool, flip_y: bool) { let sprites_number = w * h; debug!("PRINT SPRITE = {:?} x:{:?} y:{:?} n:{:?} w:{:?} h:{:?} flip_x:{:?} flip_y:{:?}", sprites_number, x, y, n, w, h, flip_x, flip_y); let mut idx_w = 0; let mut orig_x = x; let mut orig_y = y; for i in 0..sprites_number { let mut sprite = self.sprites[(n + i) as usize].clone(); if flip_x { sprite = sprite.flip_x(); } if flip_y { sprite = sprite.flip_y(); } let mut new_x = orig_x % SCREEN_WIDTH as i32; let mut new_y = orig_y; debug!("SPRITE = {:?} x:{:?} y:{:?} {:?}", (n + i) as usize, new_x, new_y, sprite); let mut index = 0; for c in &sprite.data { if !self.is_transparent(*c as u32) { self.putpixel_(new_x, new_y, *c as u32); } index = index + 1; if index != 0 && index % 8 == 0 { new_y = new_y + 1; new_x = orig_x % SCREEN_WIDTH as i32; } else { new_x = new_x + 1; } } idx_w += 1; orig_x += 8; if idx_w == w { orig_y += 8; idx_w = 0; orig_x = 0; } } } pub fn spr_dyn(&mut self, id: u32, x: i32, y: i32, flip_x: bool, flip_y: bool) { info!("SPR DYN {:?}: {:?} {:?}", id, x, y); if id as usize >= self.dyn_sprites.len() { return } let mut sprite = self.dyn_sprites[id as usize].clone(); if flip_x { sprite = DynSprite::new_from_matrix(sprite.flip_x()); } if flip_y { sprite = DynSprite::new_from_matrix(sprite.flip_y()); } for j in 0..sprite.data.nrows() { for i in 0..sprite.data.ncols() { 
self.putpixel_(i as i32 + x, j as i32 + y, sprite.data[(i+j*sprite.data.ncols()) as usize]); } } } pub fn spr_dyn_load(&mut self, data: Vec<u8>, width: u32, height: u32) -> i32 { debug!("Load dynamic sprite {:?} {:?}", width, height); let mut idx = 0; let mut v:Vec<u32> = Vec::new(); while idx < data.len() { let r = *data.get(idx).unwrap(); let g = *data.get(idx+1).unwrap(); let b = *data.get(idx+2).unwrap(); v.push(px8::PALETTE.lock().unwrap().add_color(r, g, b)); idx += 3; } let s = DynSprite::new(v, width, height); self.dyn_sprites.push(s.clone()); (self.dyn_sprites.len() as i32) - 1 } pub fn map(&mut self, cel_x: u32, cel_y: u32, sx: i32, sy: i32, cel_w: u32, cel_h: u32) { let mut idx_x: i32 = 0; let mut idx_y: i32 = 0; let mut cel_w = cel_w; if cel_w > SCREEN_WIDTH as u32 { cel_w = SCREEN_WIDTH as u32; } let mut cel_h = cel_h; if cel_h > 32 { cel_h = 32; } debug!("cel_x {:?} cel_y {:?} sx {:?} sy {:?} cel_w {:?} cel_h {:?}", cel_x, cel_y, sx, sy, cel_w, cel_h); while idx_y < cel_h as i32 { idx_x = 0; while idx_x < cel_w as i32 { let orig_x = sx + 8 * idx_x; let mut new_x = orig_x; let mut new_y = sy + 8 * idx_y; if new_x > SCREEN_WIDTH as i32 || new_y > SCREEN_HEIGHT as i32 { break } let mut map_x = cel_x as i32 + idx_x; let mut map_y = cel_y as i32 + idx_y; let idx_sprite = self.map[map_x as usize][map_y as usize]; let sprite = self.sprites[idx_sprite as usize].clone(); let mut index = 0; for c in &sprite.data { if ! 
self.is_transparent(*c as u32) { self.putpixel_(new_x, new_y, *c as u32); } index = index + 1; if index > 0 && index % 8 == 0 { new_y = new_y + 1; new_x = orig_x; } else { new_x = new_x + 1; } } idx_x += 1; } idx_y += 1; } } pub fn mget(&mut self, x: u32, y: u32) -> u32 { if x as usize > px8::SCREEN_WIDTH || y as usize > px8::SCREEN_WIDTH { return 0; } let value = self.map[x as usize][y as usize]; return value; } pub fn mset(&mut self, x: u32, y: u32, v: u32) { if x as usize > px8::SCREEN_WIDTH || y as usize > px8::SCREEN_WIDTH { return; } self.map[x as usize][y as usize] = v; } pub fn sspr(&mut self, sx: u32, sy: u32, sw: u32, sh: u32, dx: i32, dy: i32, dw: u32, dh: u32, flip_x: bool, flip_y: bool) { let mut v = Vec::new(); for x in sx..sx+sw { for y in sy..sy+sh { v.push(self.sget(x, y)); } } let mut x2; let mut y2; let w1 = sw; let w2 = dw; let h1 = sh; let h2 = dh; let mut x_ratio; let mut y_ratio; let mut ret = Vec::with_capacity((w2 * h2) as usize); x_ratio = ((w1 << 16)/w2) + 1; y_ratio = ((h1 << 16)/h2) + 1; for i in 0..h2 { for j in 0..w2 { x2 = (j * x_ratio)>>16; y2 = (i * y_ratio)>>16; ret.insert((i*w2+j) as usize, *v.get((y2*w1+x2) as usize).unwrap()); } } if flip_x { for i in 0..w2/2 { for j in 0..h2 { let tmp = ret[(i + j * w2) as usize]; ret[(i + j * w2) as usize] = ret[((w2 - (i+1)) + j * w2) as usize]; ret[((w2 - (i+1)) + j * w2) as usize] = tmp; } } } if flip_y { for i in 0..h2/2 { for j in 0..w2 { let tmp = ret[(j + i * w2) as usize]; ret[(j + i * w2) as usize] = ret[(j + (h2 - (i+1)) * w2) as usize]; ret[(j + (h2 - (i+1)) * w2) as usize] = tmp; } } } let mut idx = 0; for i in 0..h2 { for j in 0..w2 { let d:u8 = *ret.get(idx).unwrap(); idx += 1; if d != 0 { if ! 
self.is_transparent(d as u32) { self.putpixel_(i as i32 + dx, j as i32 + dy, d as u32); } //if self.transparency[d as usize] == 0 { // self.putpixel_(i as i32 + dx, j as i32 + dy, px8::Color::from_u8(d)); // } } } } } pub fn is_transparent(&mut self, value: u32) -> bool { match self.transparency.get(&(value as u32)) { Some(&1) => { return true; }, Some(&_) => (), None => (), } return false; } pub fn pal(&mut self, c0: i32, c1: i32) { if c0 < 0 || c1 < 0 { self._reset_colors(); } else { self.colors.insert(c0 as u32, c1 as u32); } } pub fn palt(&mut self, c: i32, t: bool) { if c == -1 { self._reset_transparency(); } else { self.transparency.insert(c as u32, t as u8); } } } Add transparency for dynamic sprite use std::fmt; use std::collections::HashMap; use px8; /// Emulated screen width in pixels pub const SCREEN_WIDTH: usize = px8::SCREEN_WIDTH; /// Emulated screen height in ixels pub const SCREEN_HEIGHT: usize = px8::SCREEN_HEIGHT; /// Screen texture size in bytes pub const SCREEN_SIZE: usize = SCREEN_WIDTH * SCREEN_HEIGHT; pub const GLYPH : [[u16; 2]; 95] = [ [0x0000, 0x0000], // space [0x0000, 0x1700], // ! [0x0003, 0x0003], // " [0x001f, 0x0a1f], // # [0x000d, 0x1f0b], // $ [0x0013, 0x0419], // % [0x0018, 0x171f], // & [0x0000, 0x0102], // ' [0x0000, 0x211e], // ( [0x001e, 0x2100], // ) [0x0015, 0x0e15], // * [0x0004, 0x0e04], // + [0x0000, 0x1020], // , [0x0004, 0x0404], // - [0x0000, 0x2000], // . [0x0001, 0x1e20], // / [0x003e, 0x223e], // 0 [0x0020, 0x3e22], // 1 [0x002e, 0x2a3a], // 2 [0x003e, 0x2a22], // 3 [0x003e, 0x080e], // 4 [0x003a, 0x2a2e], // 5 [0x0038, 0x283e], // 6 [0x003e, 0x0202], // 7 [0x003e, 0x2a3e], // 8 [0x003e, 0x0a0e], // 9 [0x0000, 0x0000], // : [0x0000, 0x1700], // ; [0x0010, 0x0e01], // < [0x0003, 0x0003], // = [0x001f, 0x0a1f], // > [0x000d, 0x1f0b], // ? 
[0x0013, 0x0419], // @ [0x1e09, 0x091e], // A [0x0a15, 0x151f], // B [0x0a11, 0x110e], // C [0x0e11, 0x111f], // D [0x1115, 0x151f], // E [0x0105, 0x051f], // F [0x0c15, 0x110e], // G [0x1f08, 0x081f], // H [0x1111, 0x1f11], // I [0x010f, 0x1108], // J [0x110a, 0x041f], // K [0x1010, 0x101f], // L [0x1f07, 0x071f], // M [0x1f04, 0x021f], // N [0x0e11, 0x110e], // O [0x0609, 0x091f], // P [0x1619, 0x110e], // Q [0x0609, 0x091f], // R [0x0915, 0x1512], // S [0x0101, 0x1f01], // T [0x0f10, 0x100f], // U [0x0304, 0x081f], // V [0x1f18, 0x181f], // W [0x1b04, 0x041b], // X [0x0304, 0x1c03], // Y [0x1315, 0x1519], // Z [0x0000, 0x0000], // [ [0x0010, 0x0e01], // \ [0x0000, 0x1700], // ] [0x0010, 0x0e01], // ^ [0x0003, 0x0003], // _ [0x001f, 0x0a1f], // ` [0x001c, 0x1408], // a [0x0008, 0x141f], // b [0x0014, 0x1408], // c [0x001f, 0x1408], // d [0x0014, 0x140c], // e [0x0005, 0x1e04], // f [0x003c, 0x5458], // g [0x0018, 0x041f], // h [0x0000, 0x1d00], // i [0x0000, 0x1d20], // j [0x0014, 0x081f], // k [0x0000, 0x100f], // l [0x001c, 0x0c1c], // m [0x0018, 0x041c], // n [0x0008, 0x1408], // o [0x0018, 0x147c], // p [0x007c, 0x140c], // q [0x0004, 0x0418], // r [0x0004, 0x1c10], // s [0x0014, 0x0e04], // t [0x001c, 0x100c], // u [0x000c, 0x180c], // v [0x001c, 0x181c], // w [0x0014, 0x0814], // x [0x003c, 0x505c], // y [0x0010, 0x1c04], // z [0x000d, 0x1f0b], // { [0x000d, 0x1f0b], // | [0x0013, 0x0419], // } [0x0013, 0x0419], // ~ ]; use nalgebra::{U2, U3, Rotation2, Dynamic, Matrix, MatrixArray, MatrixVec}; type DMatrixu32 = Matrix<u32, Dynamic, Dynamic, MatrixVec<u32, Dynamic, Dynamic>>; #[derive(Clone)] pub struct DynSprite { pub data: DMatrixu32, } impl DynSprite { pub fn new(data: Vec<u32>, width: u32, height: u32) -> DynSprite { let mut d_mat = DMatrixu32::from_element(height as usize, width as usize, 0); let mut idx = 0; debug!("WIDTH {:?} HEIGHT {:?} -> {:?} {:?}", width, height, d_mat.ncols(), d_mat.nrows()); for i in 0..width { for j in 0..height { 
d_mat[(i+j*width) as usize] = *data.get(idx).unwrap(); idx += 1; } } DynSprite { data: d_mat.clone(), } } pub fn new_from_matrix(d_mat: DMatrixu32) -> DynSprite { DynSprite { data: d_mat, } } pub fn flip_x(&mut self) -> DMatrixu32 { let mut r_mat = self.data.clone(); let n_cols = r_mat.ncols(); let n_rows = r_mat.nrows(); for i in 0..n_cols/2 { for j in 0..n_rows { let tmp = r_mat[(i + j * n_cols) as usize]; r_mat[(i + j * n_cols) as usize] = r_mat[((n_cols - (i+1)) + j * n_cols) as usize]; r_mat[((n_cols - (i+1)) + j * n_cols) as usize] = tmp; } } return r_mat; } pub fn flip_y(&mut self) -> DMatrixu32 { let mut r_mat = self.data.clone(); let n_cols = r_mat.ncols(); let n_rows = r_mat.nrows(); for i in 0..n_rows/2 { for j in 0..n_cols { let tmp = r_mat[(j + i * n_cols) as usize]; r_mat[(j + i * n_cols) as usize] = r_mat[(j + (n_rows - (i+1)) * n_cols) as usize]; r_mat[(j + (n_rows - (i+1)) * n_cols) as usize] = tmp; } } return r_mat; } } impl fmt::Debug for DynSprite { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut data_matrix = String::new(); data_matrix.push('\n'); for j in 0..self.data.nrows() { for i in 0..self.data.ncols() { data_matrix.push_str(format!("{:?} ", self.data[(i+j*self.data.ncols()) as usize]).as_str()); } data_matrix.push('\n'); } write!(f, "{}", data_matrix) } } #[derive(Clone)] pub struct Sprite { pub data: Vec<u8>, } impl Sprite { pub fn new(d: [u8; 8 * 8]) -> Sprite { let mut v = Vec::new(); v.extend(d.iter().cloned()); Sprite { data: v } } pub fn set_data(&mut self, idx: usize, col: u8) { self.data[idx] = col; } pub fn get_data(&mut self) -> String { let mut data = String::new(); for c in self.data.clone() { data.push_str(&format!("{:?}", c)); } return data; } pub fn get_line(&mut self, line: u32) -> String { let mut data = String::new(); let mut data_clone = self.data.clone(); let data_line: Vec<_> = data_clone.drain((line*8) as usize..(line*8+8)as usize).collect(); for c in data_line.clone() { data.push_str(&format!("{:x}", 
c)); } return data; } pub fn horizontal_reflection(&mut self) -> [u8; 64] { let mut ret: [u8; 64] = self.to_u8_64_array(); for i in 0..4 { for j in 0..8 { let tmp = ret[(i + j * 8) as usize]; ret[(i + j * 8) as usize] = ret[((8 - (i+1)) + j * 8) as usize]; ret[((8 - (i+1)) + j * 8) as usize] = tmp; } } return ret; } pub fn vertical_reflection(&mut self) -> [u8; 64] { let mut ret: [u8; 64] = self.to_u8_64_array(); for i in 0..4 { for j in 0..8 { let tmp = ret[(j + i * 8) as usize]; ret[(j + i * 8) as usize] = ret[(j + (8 - (i+1)) * 8) as usize]; ret[(j + (8 - (i+1)) * 8) as usize] = tmp; } } return ret; } pub fn flip_x(&mut self) -> Sprite { return Sprite::new(self.horizontal_reflection()); } pub fn flip_y(&mut self) -> Sprite { return Sprite::new(self.vertical_reflection()); } pub fn to_u8_64_array(&mut self) -> [u8;64] { let mut arr = [0u8;64]; for (place, element) in arr.iter_mut().zip(self.data.iter()) { *place = *element; } arr } } impl fmt::Debug for Sprite { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut data_matrix = String::new(); data_matrix.push('\n'); for i in 0..8 { data_matrix.push_str(format!("{:?}", &self.data[i*8..i*8+8]).as_str()); data_matrix.push('\n'); } write!(f, "{}", data_matrix) } } // Screen scaling #[derive(Copy, Clone)] pub enum Scale { Scale1x, Scale2x, Scale3x, Scale4x, Scale5x, Scale6x, Scale8x, Scale10x, } impl Scale { pub fn factor(self) -> usize { match self { Scale::Scale1x => 1, Scale::Scale2x => 2, Scale::Scale3x => 3, Scale::Scale4x => 4, Scale::Scale5x => 5, Scale::Scale6x => 6, Scale::Scale8x => 8, Scale::Scale10x => 10, } } } pub struct Camera { pub x: i32, pub y: i32, } impl Camera { pub fn new() -> Camera { Camera {x: 0, y: 0} } } pub struct Clipping { pub x: u32, pub y: u32, pub w: u32, pub h: u32, pub clipped: bool, } impl Clipping { pub fn new() -> Clipping { Clipping {x: 0, y: 0, w: 0, h: 0, clipped: false} } } pub struct Screen { pub back_buffer: Box<px8::ScreenBuffer>, pub saved_back_buffer: 
Box<px8::ScreenBuffer>, pub sprites: Vec<Sprite>, pub dyn_sprites: Vec<DynSprite>, pub map: [[u32; 32]; px8::SCREEN_WIDTH], pub transparency: HashMap<u32, u8>, pub colors: HashMap<u32, u32>, pub camera: Camera, pub color: u32, pub clipping: Clipping, } unsafe impl Send for Screen {} unsafe impl Sync for Screen {} impl Screen { pub fn new() -> Screen { Screen { back_buffer: Box::new(px8::SCREEN_EMPTY), saved_back_buffer: Box::new(px8::SCREEN_EMPTY), sprites: Vec::new(), dyn_sprites: Vec::new(), map: [[0; 32]; px8::SCREEN_WIDTH], transparency: HashMap::new(), colors: HashMap::new(), color: 0, camera: Camera::new(), clipping: Clipping::new(), } } pub fn init(&mut self) { self._reset_colors(); self._reset_transparency(); } pub fn _reset_transparency(&mut self) { self.transparency.clear(); self.transparency.insert(0, 1); } pub fn _reset_colors(&mut self) { self.colors.clear(); } pub fn save(&mut self) { for i in 0..px8::SCREEN_PIXELS { self.saved_back_buffer[i] = self.back_buffer[i]; } } pub fn restore(&mut self) { for i in 0..px8::SCREEN_PIXELS { self.back_buffer[i] = self.saved_back_buffer[i]; } } pub fn _find_color(&mut self, col: i32) -> u32 { // no specified color if col == -1 { return self.color; } return col as u32; } pub fn camera(&mut self, x: i32, y: i32) { if x == -1 && y == -1 { self.camera.x = 0; self.camera.y = 0; } else { self.camera.x = x; self.camera.y = y; } } pub fn set_sprites(&mut self, sprites: Vec<Sprite>) { self.sprites = sprites; } pub fn set_map(&mut self, map: [[u32; 32]; px8::SCREEN_WIDTH]) { self.map = map; } pub fn putpixel_(&mut self, x: i32, y: i32, col: u32) { // Camera let x = (x as i32 - self.camera.x) as usize; let y = (y as i32 - self.camera.y) as usize; let mut col = col; if x >= SCREEN_WIDTH || y >= SCREEN_HEIGHT { return; } // Clipped if self.clipping.clipped { let x = x as u32; let y = y as u32; if !(x >= self.clipping.x && x <= self.clipping.x + self.clipping.w) { return; } if !(y >= self.clipping.y && y <= self.clipping.y + 
self.clipping.h) { return; } } match self.colors.get(&col) { Some(&value) => col = value, None => (), } // col = self.colors[col as usize]; self.back_buffer[x + y * SCREEN_WIDTH] = col; } pub fn color(&mut self, col: i32) { if col != -1 { self.color = col as u32; } } pub fn putpixel(&mut self, x: i32, y: i32, col: u32) { return self.putpixel_(x, y, col); } pub fn getpixel(&mut self, x: usize, y: usize) -> u32 { if x >= SCREEN_WIDTH || y >= SCREEN_HEIGHT { return 0; } return self.back_buffer[x + y * SCREEN_WIDTH] as u32; } pub fn pget(&mut self, x: u32, y: u32) -> u32 { let col = self.getpixel(x as usize, y as usize); return col; } pub fn pset(&mut self, x: i32, y: i32, col: i32) { let color = self._find_color(col); self.putpixel_(x, y, color); } pub fn sget(&mut self, x: u32, y: u32) -> u8 { let idx_sprite = (x / 8) + 16 * (y / 8); let sprite = &self.sprites[idx_sprite as usize]; return *sprite.data.get(((x % 8) + (y % 8) * 8) as usize).unwrap(); } pub fn sset(&mut self, x: u32, y: u32, col: i32) { let col = self._find_color(col); let idx_sprite = (x / 8) + 16 * (y / 8); let ref mut sprite = self.sprites[idx_sprite as usize]; sprite.set_data(((x % 8) + (y % 8) * 8) as usize, col as u8); } pub fn cls(&mut self) { for x in 0..SCREEN_WIDTH { for y in 0..SCREEN_HEIGHT { self.putpixel(x as i32, y as i32, 0); } } } pub fn print(&mut self, string: String, x: i32, y: i32, col: i32) { let mut x = x; let y = y; for k in 0..string.len() { let value = string.as_bytes()[k] as usize; let data; if value >= 32 && value <= 126 { data = GLYPH[value - 32]; } else { /* Unknown char, replace by a space */ data = [0x0000, 0x0000]; } let mut idx = 1; let mut idx_1 = 0; for i in 0..32 { if (data[idx] & (0x1 << idx_1)) != 0 { self.pset(x, y + i % 8, col) } idx_1 += 1; if i % 8 == 7 { x = x + 1; } if i == 15 { idx = 0; idx_1 = 0; } } } } pub fn line(&mut self, x0: i32, y0: i32, x1: i32, y1: i32, col: i32) { debug!("LINE {:?} {:?} {:?} {:?} {:?}", x0, y0, x1, y1, col); let color = 
self._find_color(col); let (mut x0, mut y0) = (x0, y0); let (x1, y1) = (x1, y1); let dx = (x1 - x0).abs(); let sx = if x0 < x1 { 1 } else { -1 }; let dy: i32 = -1 * (y1 - y0).abs(); let sy: i32 = if y0 < y1 { 1 } else { -1 }; let mut err: i32 = dx + dy; /* error value e_xy */ loop { self.putpixel(x0, y0, color); if x0 == x1 && y0 == y1 { break; } let e2 = 2 * err; if e2 >= dy { err += dy; x0 += sx; } /* e_xy+e_x > 0 */ if e2 <= dx { err += dx; y0 += sy; } /* e_xy+e_y < 0 */ } } pub fn hline(&mut self, x1: i32, x2: i32, y: i32, col: i32) { self.line(x1, y, x2, y, col); } pub fn rect(&mut self, x0: i32, y0: i32, x1: i32, y1: i32, col: i32) { self.line(x0, y0, x0, y1, col); self.line(x0, y0, x1, y0, col); self.line(x0, y1, x1, y1, col); self.line(x1, y0, x1, y1, col); } pub fn rectfill(&mut self, x0: i32, y0: i32, x1: i32, y1: i32, col: i32) { self.line(x0, y0, x0, y1, col); self.line(x0, y0, x1, y0, col); self.line(x0, y1, x1, y1, col); self.line(x1, y0, x1, y1, col); for y in y0..y1 { self.line(x0, y, x1, y, col) } } pub fn square(&mut self, x0: i32, y0: i32, h: i32, col: i32) { self.rect(x0, y0, x0 + h, y0 + h, col); } pub fn squarefill(&mut self, x0: i32, y0: i32, h: i32, col: i32) { self.rectfill(x0, y0, x0 + h, y0 + h, col); } pub fn circ(&mut self, x: i32, y: i32, r: i32, col: i32) { if r <= 0 { return; } let col = self._find_color(col); let mut h: i32; let mut i: i32; let mut j: i32; let mut k: i32; let mut oh: i32 = 0xFFFF; let mut oi: i32 = 0xFFFF; let mut ix: i32; let mut iy: i32; let rx: i32 = r as i32; let ry: i32 = r as i32; let mut xmj: i32; let mut xpj: i32; let mut ymi: i32; let mut ypi: i32; let mut xmk: i32; let mut xpk: i32; let mut ymh: i32; let mut yph: i32; ix = 0; iy = ry * 64; h = (ix + 32) >> 6; i = (iy + 32) >> 6; j = (h * rx) / ry; k = (i * rx) / ry; while i > h { if ((oi != i) && (oh != i)) || ((oh != h) && (oi != h) && (i != h)) { xmj = x - j; xpj = x + j; if i > 0 { ypi = y + i; ymi = y - i; self.putpixel(xmj, ypi, col); 
self.putpixel(xpj, ypi, col); self.putpixel(xmj, ymi, col); self.putpixel(xpj, ymi, col); } else { self.putpixel(xmj, y, col); self.putpixel(xpj, y, col); } oi = i; xmk = x - k; xpk = x + k; if h > 0 { yph = y + h; ymh = y - h; self.putpixel(xmk, yph, col); self.putpixel(xpk, yph, col); self.putpixel(xmk, ymh, col); self.putpixel(xpk, ymh, col); } else { self.putpixel(xmk, y, col); self.putpixel(xpk, y, col); } oh = h; } ix = ix + iy / ry; iy = iy - ix / ry; h = (ix + 32) >> 6; i = (iy + 32) >> 6; j = (h * rx) / ry; k = (i * rx) / ry; } } pub fn circfill(&mut self, x: i32, y: i32, r: i32, col: i32) { if r <= 0 { return; } let mut h: i32; let mut i: i32; let mut j: i32; let mut k: i32; let mut oh: i32 = 0xFFFF; let mut oi: i32 = 0xFFFF; let mut ix: i32; let mut iy: i32; let rx: i32 = r as i32; let ry: i32 = r as i32; let mut xmj: i32; let mut xpj: i32; let mut xmk: i32; let mut xpk: i32; ix = 0; iy = ry * 64; h = (ix + 32) >> 6; i = (iy + 32) >> 6; j = (h * rx) / ry; k = (i * rx) / ry; while i > h { if (oi != i) && (oh != i) { xmj = x - j; xpj = x + j; if i > 0 { self.hline(xmj, xpj, (y + i), col); self.hline(xmj, xpj, (y - i), col); } else { self.hline(xmj, xpj, y, col); } oi = i; } if (oh != h) && (oi != h) && (i != h) { xmk = x - k; xpk = x + k; if h > 0 { self.hline(xmk, xpk, (y + h), col); self.hline(xmk, xpk, (y - h), col); } else { self.hline(xmk, xpk, y, col); } oh = h; } ix = ix + iy / ry; iy = iy - ix / ry; h = (ix + 32) >> 6; i = (iy + 32) >> 6; j = (h * rx) / ry; k = (i * rx) / ry; } } pub fn clip(&mut self, x: i32, y: i32, w: i32, h: i32) { // reset if x == -1 && y == -1 && w == -1 && h == -1 { self.clipping.clipped = false; } // invalid clipping value if x == -1 || y == -1 || w == -1 || h == -1 { return; } if x < 0 || y < 0 || w < 0 || h < 0 { return; } self.clipping.x = x as u32; self.clipping.y = y as u32; self.clipping.w = w as u32; self.clipping.h = h as u32; self.clipping.clipped = true; } pub fn trigon(&mut self, x1: i32, y1: i32, x2: i32, y2: 
i32, x3: i32, y3: i32, col: i32) { let mut vx = Vec::new(); let mut vy = Vec::new(); vx.push(x1); vx.push(x2); vx.push(x3); vy.push(y1); vy.push(y2); vy.push(y3); self.polygon(vx.clone(), vy.clone(), col); } pub fn polygon(&mut self, vx: Vec<i32>, vy: Vec<i32>, col: i32) { if vx.len() < 3 || vy.len() < 3 { return; } if vx.len() != vy.len() { return; } let mut idx = 0; while idx < vx.len() - 1 { self.line(vx[idx], vy[idx], vx[idx + 1], vy[idx + 1], col); idx += 1; } self.line(*vx.get(idx).unwrap(), *vy.get(idx).unwrap(), *vx.get(0).unwrap(), *vy.get(0).unwrap(), col); } pub fn spr(&mut self, n: u32, x: i32, y: i32, w: u32, h: u32, flip_x: bool, flip_y: bool) { let sprites_number = w * h; debug!("PRINT SPRITE = {:?} x:{:?} y:{:?} n:{:?} w:{:?} h:{:?} flip_x:{:?} flip_y:{:?}", sprites_number, x, y, n, w, h, flip_x, flip_y); let mut idx_w = 0; let mut orig_x = x; let mut orig_y = y; for i in 0..sprites_number { let mut sprite = self.sprites[(n + i) as usize].clone(); if flip_x { sprite = sprite.flip_x(); } if flip_y { sprite = sprite.flip_y(); } let mut new_x = orig_x % SCREEN_WIDTH as i32; let mut new_y = orig_y; debug!("SPRITE = {:?} x:{:?} y:{:?} {:?}", (n + i) as usize, new_x, new_y, sprite); let mut index = 0; for c in &sprite.data { if !self.is_transparent(*c as u32) { self.putpixel_(new_x, new_y, *c as u32); } index = index + 1; if index != 0 && index % 8 == 0 { new_y = new_y + 1; new_x = orig_x % SCREEN_WIDTH as i32; } else { new_x = new_x + 1; } } idx_w += 1; orig_x += 8; if idx_w == w { orig_y += 8; idx_w = 0; orig_x = 0; } } } pub fn spr_dyn(&mut self, id: u32, x: i32, y: i32, flip_x: bool, flip_y: bool) { debug!("SPR DYN {:?}: {:?} {:?}", id, x, y); if id as usize >= self.dyn_sprites.len() { return } let mut sprite = self.dyn_sprites[id as usize].clone(); if flip_x { sprite = DynSprite::new_from_matrix(sprite.flip_x()); } if flip_y { sprite = DynSprite::new_from_matrix(sprite.flip_y()); } for j in 0..sprite.data.nrows() { for i in 0..sprite.data.ncols() { 
let c = sprite.data[(i + j * sprite.data.ncols()) as usize]; if !self.is_transparent(c) { self.putpixel_(i as i32 + x, j as i32 + y, c); } } } } pub fn spr_dyn_load(&mut self, data: Vec<u8>, width: u32, height: u32) -> i32 { debug!("Load dynamic sprite {:?} {:?}", width, height); let mut idx = 0; let mut v:Vec<u32> = Vec::new(); while idx < data.len() { let r = *data.get(idx).unwrap(); let g = *data.get(idx+1).unwrap(); let b = *data.get(idx+2).unwrap(); v.push(px8::PALETTE.lock().unwrap().add_color(r, g, b)); idx += 3; } let s = DynSprite::new(v, width, height); self.dyn_sprites.push(s.clone()); (self.dyn_sprites.len() as i32) - 1 } pub fn map(&mut self, cel_x: u32, cel_y: u32, sx: i32, sy: i32, cel_w: u32, cel_h: u32) { let mut idx_x: i32 = 0; let mut idx_y: i32 = 0; let mut cel_w = cel_w; if cel_w > SCREEN_WIDTH as u32 { cel_w = SCREEN_WIDTH as u32; } let mut cel_h = cel_h; if cel_h > 32 { cel_h = 32; } debug!("cel_x {:?} cel_y {:?} sx {:?} sy {:?} cel_w {:?} cel_h {:?}", cel_x, cel_y, sx, sy, cel_w, cel_h); while idx_y < cel_h as i32 { idx_x = 0; while idx_x < cel_w as i32 { let orig_x = sx + 8 * idx_x; let mut new_x = orig_x; let mut new_y = sy + 8 * idx_y; if new_x > SCREEN_WIDTH as i32 || new_y > SCREEN_HEIGHT as i32 { break } let mut map_x = cel_x as i32 + idx_x; let mut map_y = cel_y as i32 + idx_y; let idx_sprite = self.map[map_x as usize][map_y as usize]; let sprite = self.sprites[idx_sprite as usize].clone(); let mut index = 0; for c in &sprite.data { if ! 
self.is_transparent(*c as u32) { self.putpixel_(new_x, new_y, *c as u32); } index = index + 1; if index > 0 && index % 8 == 0 { new_y = new_y + 1; new_x = orig_x; } else { new_x = new_x + 1; } } idx_x += 1; } idx_y += 1; } } pub fn mget(&mut self, x: u32, y: u32) -> u32 { if x as usize > px8::SCREEN_WIDTH || y as usize > px8::SCREEN_WIDTH { return 0; } let value = self.map[x as usize][y as usize]; return value; } pub fn mset(&mut self, x: u32, y: u32, v: u32) { if x as usize > px8::SCREEN_WIDTH || y as usize > px8::SCREEN_WIDTH { return; } self.map[x as usize][y as usize] = v; } pub fn sspr(&mut self, sx: u32, sy: u32, sw: u32, sh: u32, dx: i32, dy: i32, dw: u32, dh: u32, flip_x: bool, flip_y: bool) { let mut v = Vec::new(); for x in sx..sx+sw { for y in sy..sy+sh { v.push(self.sget(x, y)); } } let mut x2; let mut y2; let w1 = sw; let w2 = dw; let h1 = sh; let h2 = dh; let mut x_ratio; let mut y_ratio; let mut ret = Vec::with_capacity((w2 * h2) as usize); x_ratio = ((w1 << 16)/w2) + 1; y_ratio = ((h1 << 16)/h2) + 1; for i in 0..h2 { for j in 0..w2 { x2 = (j * x_ratio)>>16; y2 = (i * y_ratio)>>16; ret.insert((i*w2+j) as usize, *v.get((y2*w1+x2) as usize).unwrap()); } } if flip_x { for i in 0..w2/2 { for j in 0..h2 { let tmp = ret[(i + j * w2) as usize]; ret[(i + j * w2) as usize] = ret[((w2 - (i+1)) + j * w2) as usize]; ret[((w2 - (i+1)) + j * w2) as usize] = tmp; } } } if flip_y { for i in 0..h2/2 { for j in 0..w2 { let tmp = ret[(j + i * w2) as usize]; ret[(j + i * w2) as usize] = ret[(j + (h2 - (i+1)) * w2) as usize]; ret[(j + (h2 - (i+1)) * w2) as usize] = tmp; } } } let mut idx = 0; for i in 0..h2 { for j in 0..w2 { let d:u8 = *ret.get(idx).unwrap(); idx += 1; if d != 0 { if ! 
self.is_transparent(d as u32) { self.putpixel_(i as i32 + dx, j as i32 + dy, d as u32); } //if self.transparency[d as usize] == 0 { // self.putpixel_(i as i32 + dx, j as i32 + dy, px8::Color::from_u8(d)); // } } } } } pub fn is_transparent(&mut self, value: u32) -> bool { match self.transparency.get(&(value as u32)) { Some(&1) => { return true; }, Some(&_) => (), None => (), } return false; } pub fn pal(&mut self, c0: i32, c1: i32) { if c0 < 0 || c1 < 0 { self._reset_colors(); } else { self.colors.insert(c0 as u32, c1 as u32); } } pub fn palt(&mut self, c: i32, t: bool) { if c == -1 { self._reset_transparency(); } else { self.transparency.insert(c as u32, t as u8); } } }
// Zinc, the bare metal stack for rust. // Copyright 2014 Vladimir "farcaller" Pouzanov <farcaller@gmail.com> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /*! HAL provides abstractions for specific MCU hardware. Each peripheral in `hal` has a `xxxConf` struct that can be defined statically, and each such struct has a `setup()` method that configures the hardware (returning the object to interact with it where applicable). */ pub mod lpc11xx; #[cfg(feature = "mcu_lpc17xx")] pub mod lpc17xx; #[cfg(feature = "mcu_stm32f4")] pub mod stm32f4; #[cfg(feature = "mcu_stm32l1")] pub mod stm32l1; #[cfg(feature = "mcu_k20")] pub mod k20; #[cfg(feature = "mcu_tiva_c")] pub mod tiva_c; #[cfg(any(feature = "cpu_cortex-m3", feature = "cpu_cortex-m4"))] mod cortex_common; #[cfg(feature = "cpu_cortex-m3")] pub mod cortex_m3; #[cfg(feature = "cpu_cortex-m4")] pub mod cortex_m4; pub mod mem_init; pub mod pin; pub mod spi; pub mod stack; pub mod timer; pub mod uart; #[cfg(target_os = "none")] pub mod isr; Cleaned up layout of hal/mod.rs so feature guards are more obvious // Zinc, the bare metal stack for rust. // Copyright 2014 Vladimir "farcaller" Pouzanov <farcaller@gmail.com> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /*! HAL provides abstractions for specific MCU hardware. Each peripheral in `hal` has a `xxxConf` struct that can be defined statically, and each such struct has a `setup()` method that configures the hardware (returning the object to interact with it where applicable). */ pub mod lpc11xx; #[cfg(feature = "mcu_lpc17xx")] pub mod lpc17xx; #[cfg(feature = "mcu_stm32f4")] pub mod stm32f4; #[cfg(feature = "mcu_stm32l1")] pub mod stm32l1; #[cfg(feature = "mcu_k20")] pub mod k20; #[cfg(feature = "mcu_tiva_c")] pub mod tiva_c; #[cfg(any(feature = "cpu_cortex-m0", feature = "cpu_cortex-m3", feature = "cpu_cortex-m4"))] mod cortex_common; #[cfg(feature = "cpu_cortex-m3")] pub mod cortex_m3; #[cfg(feature = "cpu_cortex-m4")] pub mod cortex_m4; pub mod mem_init; pub mod pin; pub mod spi; pub mod stack; pub mod timer; pub mod uart; #[cfg(target_os = "none")] pub mod isr;
//! Define trait for Hermite matrices use ndarray::{Ix2, Array, RcArray}; use lapack::c::Layout; use super::matrix::{Matrix, MFloat}; use super::square::SquareMatrix; use super::error::LinalgError; use super::impls::eigh::ImplEigh; use super::impls::cholesky::ImplCholesky; pub trait HMFloat: ImplEigh + ImplCholesky + MFloat {} impl<A: ImplEigh + ImplCholesky + MFloat> HMFloat for A {} /// Methods for Hermite matrix pub trait HermiteMatrix: SquareMatrix + Matrix { /// eigenvalue decomposition fn eigh(self) -> Result<(Self::Vector, Self), LinalgError>; /// symmetric square root of Hermite matrix fn ssqrt(self) -> Result<Self, LinalgError>; /// Cholesky factorization fn cholesky(self) -> Result<Self, LinalgError>; /// calc determinant using Cholesky factorization fn deth(self) -> Result<Self::Scalar, LinalgError>; } impl<A: HMFloat> HermiteMatrix for Array<A, Ix2> { fn eigh(self) -> Result<(Self::Vector, Self), LinalgError> { self.check_square()?; let layout = self.layout()?; let (rows, cols) = self.size(); let (w, a) = ImplEigh::eigh(layout, rows, self.into_raw_vec())?; let ea = Array::from_vec(w); let va = match layout { Layout::ColumnMajor => Array::from_vec(a).into_shape((rows, cols)).unwrap().reversed_axes(), Layout::RowMajor => Array::from_vec(a).into_shape((rows, cols)).unwrap(), }; Ok((ea, va)) } fn ssqrt(self) -> Result<Self, LinalgError> { let (n, _) = self.size(); let (e, v) = self.eigh()?; let mut res = Array::zeros((n, n)); for i in 0..n { for j in 0..n { res[(i, j)] = e[i].sqrt() * v[(j, i)]; } } Ok(v.dot(&res)) } fn cholesky(self) -> Result<Self, LinalgError> { self.check_square()?; let (n, _) = self.size(); let layout = self.layout()?; let a = ImplCholesky::cholesky(layout, n, self.into_raw_vec())?; let mut c = match layout { Layout::RowMajor => Array::from_vec(a).into_shape((n, n)).unwrap(), Layout::ColumnMajor => Array::from_vec(a).into_shape((n, n)).unwrap().reversed_axes(), }; for ((i, j), val) in c.indexed_iter_mut() { if i > j { *val = A::zero(); 
} } Ok(c) } fn deth(self) -> Result<Self::Scalar, LinalgError> { let (n, _) = self.size(); let c = self.cholesky()?; let rt = (0..n).map(|i| c[(i, i)]).fold(A::one(), |det, c| det * c); Ok(rt * rt) } } impl<A: HMFloat> HermiteMatrix for RcArray<A, Ix2> { fn eigh(self) -> Result<(Self::Vector, Self), LinalgError> { let (e, v) = self.into_owned().eigh()?; Ok((e.into_shared(), v.into_shared())) } fn ssqrt(self) -> Result<Self, LinalgError> { let s = self.into_owned().ssqrt()?; Ok(s.into_shared()) } fn cholesky(self) -> Result<Self, LinalgError> { let s = self.into_owned().cholesky()?; Ok(s.into_shared()) } fn deth(self) -> Result<Self::Scalar, LinalgError> { self.into_owned().deth() } } Remove old hermite codes
//
// SOS: the Stupid Operating System
// by Hawk Weisman (hi@hawkweisman.me)
//
// Copyright (c) 2015 Hawk Weisman
// Released under the terms of the MIT license. See `LICENSE` in the root
// directory of this repository for more information.
//
use ::arch::vga;

use core::ptr::Unique;
use core::mem;
use core::fmt::{Write, Result};
use core::str::MatchIndices;

use spin::Mutex;

// The ESC byte that introduces an ANSI escape sequence.
const ANSI_ESCAPE: &'static str = "\x1b";

/// State of the VGA text console: a pointer to the memory-mapped VGA buffer,
/// the cursor position (`x` = column, `y` = row), and the current palette.
pub struct Terminal { buffer: Unique<vga::Buffer>
                    , x: usize
                    , y: usize
                    , colors: vga::Palette
                    }

// Pull the next byte out of an escape-sequence iterator, panicking if the
// sequence ends early.
macro_rules! next_ansi_byte {
    ($b:expr) => { $b.next().expect("Unterminated ANSI escape sequence!") }
}

impl Terminal {

    /// Borrow the VGA buffer mutably from the raw `Unique` pointer.
    #[inline]
    fn buffer(&mut self) -> &mut vga::Buffer {
        unsafe { self.buffer.get_mut() }
    }

    /// Set the color palette used for writing subsequent characters.
    pub fn set_colors(&mut self, bg: vga::Color, fg: vga::Color) -> &mut Self {
        self.colors = vga::Palette::new(bg,fg);
        self
    }

    /// Scroll the terminal up one row by swapping each row with its
    /// successor, then zeroing the last row.
    fn scroll(&mut self) {
        let mut rows = self.buffer()
                           .iter_mut();
        let mut next = rows.next()
                           .unwrap();
        while let Some(thing) = rows.next() {
            mem::swap(next, thing);
            next = thing;
        }
        // empty last line
        unsafe { *next = mem::zeroed() }
    }

    /// Clear the terminal by zeroing the entire buffer.
    pub fn clear(&mut self) -> &mut Self {
        unsafe {
            *(self.buffer()) = mem::zeroed();
        }
        self
    }

    /// Write one byte at the cursor position with the current palette,
    /// advancing the cursor, wrapping lines, and scrolling as needed.
    pub fn write_byte(&mut self, byte: u8) -> &mut Self {
        if byte == b'\n' {
            self.x = 0;
            self.y += 1;
        } else {
            // set character at position
            self.buffer()[self.y][self.x]
                = vga::Char { ascii: byte
                            , colors: self.colors };
            self.x += 1;
            // check for line wrapping
            if self.x >= vga::X_MAX {
                self.x = 0;
                self.y += 1;
            }
        }
        // check for scrolling
        if self.y >= vga::Y_MAX {
            self.scroll();
            self.y = vga::Y_MAX - 1;
        }
        self
    }

    // Interpret a single ANSI color escape sequence (`\x1b[3Nm` foreground,
    // `\x1b[4Nm` background). `n - 48` maps the ASCII digit to its value
    // (b'0' == 48).
    // NOTE(review): this takes `&self` but calls `set_foreground`/
    // `set_background` — presumably `vga::Palette` uses interior mutability;
    // confirm against the `vga` module.
    fn handle_ansi_escape(&self, escape_code: &str) -> Result {
        match escape_code[4..].as_bytes() {
            [b'3', n @ u8, b'm'] => {
                unsafe {
                    self.colors
                        .set_foreground(mem::transmute(n - 48));
                }
                Ok(())
            }
          , [b'4', n @ u8, b'm'] => {
                unsafe {
                    self.colors
                        .set_background(mem::transmute(n - 48));
                }
                Ok(())
            }
          , _ => unimplemented!()
        }
        // let escape_seq: &str = bytes.take_while(|b| b != b'm')
        //                             .collect::<&str>();
        // match escape_seq {
        //     [b'3', n] => unsafe {
        //         self.colors.set_foreground(mem::transmute(n - 48))
        //     }
        // }
        // while let Some(byte) = bytes.next() {
        //     match *byte {
        //         // we've recieved an ANSI escape sequence.
        //         // this basically enters a mediocre FSM for matching ANSI
        //         // control codes.
        //         0x1b => match *next_ansi_byte!(bytes) {
        //             // handle multi-char ANSI escapes
        //             b'[' => match *next_ansi_byte!(bytes) {
        //                 // foreground color code
        //                 fg @ 30 ... 37 => {
        //                     if !(*next_ansi_byte!(bytes) == b'm') {
        //                         unsafe {
        //                             let color: vga::Color
        //                                 = mem::transmute(fg - 30);
        //                             self.colors
        //                                 .set_foreground(color);
        //                         }
        //
        //                     }
        //                 }
        //                 // background color code
        //                 , 40 ... 47 => {
        //
        //                 }
        //                 , _ => unimplemented!()
        //             }
        //             , _ => unimplemented!()
        //         }
        //         // otherwise, treat the byte as a normal ASCII char
        //         , b => { self.write_byte(b); }
        //     }
        // }
    }
}

/// Iterator that chunks a string into alternating runs of plain text and
/// ANSI escape sequences.
/// NOTE(review): `string` is stored but never read, and `in_escape` is
/// initialized to `false` but never toggled here — TODO confirm whether the
/// escape/text state tracking is intentionally incomplete.
struct AnsiEscapeIter<'a> { string: &'a str
                          , curr_slice: &'a str
                          , in_escape: bool
                          }

impl<'a> AnsiEscapeIter<'a> {
    pub fn new(s: &'a str) -> Self {
        AnsiEscapeIter { string: s
                       , curr_slice: s
                       , in_escape: false
                       }
    }
}

impl<'a> Iterator for AnsiEscapeIter<'a> {
    type Item = &'a str;

    fn next(&mut self) -> Option<Self::Item> {
        if self.curr_slice.len() == 0 {
            // if the remaining string is empty, we just return `None`
            None
        } else {
            // otherwise, find the next index to chunk on.
            let maybe_idx = if self.in_escape {
                // if we're in an escape code, we split the chunk at the
                // index of the next 'm' character, adding 1 so that the
                // 'm' is in the escape code chunk.
                self.curr_slice.find('m')
                    .map(|idx| idx + 1)
            } else {
                // otherwise, split at the next ANSI escape sequence
                self.curr_slice.find(ANSI_ESCAPE)
            };
            // if we found another index to chunk on, map over that index;
            // otherwise, we just yield the rest of the string
            maybe_idx.map_or(
                Some(self.curr_slice) // remainder (if no index to chunk on)
              , |idx| { // otherwise, chunk along that index...
                    let (chunk, next_slice) = self.curr_slice
                                                  .split_at(idx);
                    self.curr_slice = next_slice; // update current chunk
                    Some(chunk) // return the chunk
                })
        }
    }
}

impl Write for Terminal {

    /// Write a string, interpreting any embedded ANSI color escapes.
    fn write_str(&mut self, s: &str) -> Result {
        if s.contains(ANSI_ESCAPE) {
            // if the segment contains an ANSI escape, construct an iterator
            // over each chunk containing either an escape sequence or text
            for segment in AnsiEscapeIter::new(s) {
                if segment.starts_with(ANSI_ESCAPE) {
                    // if the current segment is an ANSI escape code,
                    // try to handle the escape and fail if it is malformed
                    try!(self.handle_ansi_escape(segment))
                } else {
                    // otherwise, just write each chunk in the string.
                    for byte in segment.as_bytes() {
                        self.write_byte(*byte);
                    }
                }
            }
        } else {
            // otherwise, if there are no ANSI escape codes,
            // we can just write each byte in the string.
            for byte in s.as_bytes() {
                self.write_byte(*byte);
            }
        }
        Ok(())
    }
}

/// The system's VGA terminal
// 0xB8000 is the standard physical address of the VGA text-mode buffer.
pub static CONSOLE: Mutex<Terminal>
    = Mutex::new(Terminal { colors: vga::Palette::new( vga::Color::LightGreen
                                                     , vga::Color::Black )
                          , x: 0
                          , y: 0
                          , buffer: unsafe { Unique::new(0xB8000 as *mut _) },
                          });
[term.rs] Additional documentation
//
// SOS: the Stupid Operating System
// by Hawk Weisman (hi@hawkweisman.me)
//
// Copyright (c) 2015 Hawk Weisman
// Released under the terms of the MIT license. See `LICENSE` in the root
// directory of this repository for more information.
//
use ::arch::vga;

use core::ptr::Unique;
use core::mem;
use core::fmt::{Write, Result};
use core::str::MatchIndices;

use spin::Mutex;

// The ESC byte that introduces an ANSI escape sequence.
const ANSI_ESCAPE: &'static str = "\x1b";

/// State of the VGA text console: a pointer to the memory-mapped VGA buffer,
/// the cursor position (`x` = column, `y` = row), and the current palette.
pub struct Terminal { buffer: Unique<vga::Buffer>
                    , x: usize
                    , y: usize
                    , colors: vga::Palette
                    }

// Pull the next byte out of an escape-sequence iterator, panicking if the
// sequence ends early.
macro_rules! next_ansi_byte {
    ($b:expr) => { $b.next().expect("Unterminated ANSI escape sequence!") }
}

impl Terminal {

    /// Borrow the VGA buffer mutably from the raw `Unique` pointer.
    #[inline]
    fn buffer(&mut self) -> &mut vga::Buffer {
        unsafe { self.buffer.get_mut() }
    }

    /// Set the color palette used for writing subsequent characters.
    pub fn set_colors(&mut self, bg: vga::Color, fg: vga::Color) -> &mut Self {
        self.colors = vga::Palette::new(bg,fg);
        self
    }

    /// Scrolls the terminal one row.
    fn scroll(&mut self) {
        // construct an iterator over the whole buffer
        let mut rows = self.buffer()
                           .iter_mut();
        // the current row in the buffer
        let mut current = rows.next()
                              .unwrap();
        while let Some(next) = rows.next() {
            // while there are rows remaining in the iterator, swap the
            // next row with the current one (moving it back by one)
            mem::swap(current, next);
            // and advance our pointer to the current row.
            current = next;
        }
        // empty the last line in the buffer
        unsafe { *current = mem::zeroed() }
    }

    /// Clear the terminal
    pub fn clear(&mut self) -> &mut Self {
        // to clear the terminal, we just zero out the whole buffer.
        unsafe {
            *(self.buffer()) = mem::zeroed();
        }
        self
    }

    /// Write the given byte to the terminal, and advance the cursor position.
    pub fn write_byte(&mut self, byte: u8) -> &mut Self {
        if byte == b'\n' {
            // if the byte is a newline, we just advance to the next line
            // and reset the column position.
            self.x = 0;
            self.y += 1;
        } else {
            // otherwise, it's a regular character, so we just set the
            // byte at the current position in the buffer to that
            // character (with the current color palette)
            self.buffer()[self.y][self.x]
                = vga::Char { ascii: byte
                            , colors: self.colors };
            // and advance our column position by one
            self.x += 1;
            if self.x >= vga::X_MAX {
                // if we've reached the end of the line, advance to the next
                self.x = 0;
                self.y += 1;
            }
        }
        if self.y >= vga::Y_MAX {
            // if we've reached the bottom of the terminal, scroll.
            self.scroll();
            self.y = vga::Y_MAX - 1;
        }
        self
    }

    /// Interpret a single ANSI color escape sequence.
    ///
    /// `n - 48` maps the ASCII digit to its numeric value (b'0' == 48),
    /// which is then transmuted into a `vga::Color`.
    /// NOTE(review): this takes `&self` but calls `set_foreground`/
    /// `set_background` — presumably `vga::Palette` uses interior
    /// mutability; confirm against the `vga` module.
    fn handle_ansi_escape(&self, escape_code: &str) -> Result {
        match escape_code.as_bytes() {
            // `\x1b[3Nm` sets the foreground color to N.
            [0x1b, b'[', b'3', n, b'm'] => {
                unsafe {
                    self.colors
                        .set_foreground(mem::transmute(n - 48));
                }
                Ok(())
            }
            // `\x1b[4Nm` sets the background color to N
          , [0x1b, b'[', b'4', n, b'm'] => {
                unsafe {
                    self.colors
                        .set_background(mem::transmute(n - 48));
                }
                Ok(())
            }
          , _ => unimplemented!()
        }
        // let escape_seq: &str = bytes.take_while(|b| b != b'm')
        //                             .collect::<&str>();
        // match escape_seq {
        //     [b'3', n] => unsafe {
        //         self.colors.set_foreground(mem::transmute(n - 48))
        //     }
        // }
        // while let Some(byte) = bytes.next() {
        //     match *byte {
        //         // we've recieved an ANSI escape sequence.
        //         // this basically enters a mediocre FSM for matching ANSI
        //         // control codes.
        //         0x1b => match *next_ansi_byte!(bytes) {
        //             // handle multi-char ANSI escapes
        //             b'[' => match *next_ansi_byte!(bytes) {
        //                 // foreground color code
        //                 fg @ 30 ... 37 => {
        //                     if !(*next_ansi_byte!(bytes) == b'm') {
        //                         unsafe {
        //                             let color: vga::Color
        //                                 = mem::transmute(fg - 30);
        //                             self.colors
        //                                 .set_foreground(color);
        //                         }
        //
        //                     }
        //                 }
        //                 // background color code
        //                 , 40 ... 47 => {
        //
        //                 }
        //                 , _ => unimplemented!()
        //             }
        //             , _ => unimplemented!()
        //         }
        //         // otherwise, treat the byte as a normal ASCII char
        //         , b => { self.write_byte(b); }
        //     }
        // }
    }
}

/// Iterator that chunks a string into alternating runs of plain text and
/// ANSI escape sequences.
/// NOTE(review): `string` is stored but never read, and `in_escape` is
/// initialized to `false` but never toggled here — TODO confirm whether the
/// escape/text state tracking is intentionally incomplete.
struct AnsiEscapeIter<'a> { string: &'a str
                          , curr_slice: &'a str
                          , in_escape: bool
                          }

impl<'a> AnsiEscapeIter<'a> {
    pub fn new(s: &'a str) -> Self {
        AnsiEscapeIter { string: s
                       , curr_slice: s
                       , in_escape: false
                       }
    }
}

impl<'a> Iterator for AnsiEscapeIter<'a> {
    type Item = &'a str;

    fn next(&mut self) -> Option<Self::Item> {
        if self.curr_slice.len() == 0 {
            // if the remaining string is empty, we just return `None`
            None
        } else {
            // otherwise, find the next index to chunk on.
            let maybe_idx = if self.in_escape {
                // if we're in an escape code, we split the chunk at the
                // index of the next 'm' character, adding 1 so that the
                // 'm' is in the escape code chunk.
                self.curr_slice.find('m')
                    .map(|idx| idx + 1)
            } else {
                // otherwise, split at the next ANSI escape sequence
                self.curr_slice.find(ANSI_ESCAPE)
            };
            // if we found another index to chunk on, map over that index;
            // otherwise, we just yield the rest of the string
            maybe_idx.map_or(
                Some(self.curr_slice) // remainder (if no index to chunk on)
              , |idx| { // otherwise, chunk along that index...
                    let (chunk, next_slice) = self.curr_slice
                                                  .split_at(idx);
                    self.curr_slice = next_slice; // update current chunk
                    Some(chunk) // return the chunk
                })
        }
    }
}

impl Write for Terminal {

    /// Write a string, interpreting any embedded ANSI color escapes.
    fn write_str(&mut self, s: &str) -> Result {
        if s.contains(ANSI_ESCAPE) {
            // if the segment contains an ANSI escape, construct an iterator
            // over each chunk containing either an escape sequence or text
            for segment in AnsiEscapeIter::new(s) {
                if segment.starts_with(ANSI_ESCAPE) {
                    // if the current segment is an ANSI escape code,
                    // try to handle the escape and fail if it is malformed
                    try!(self.handle_ansi_escape(segment))
                } else {
                    // otherwise, just write each chunk in the string.
                    for byte in segment.as_bytes() {
                        self.write_byte(*byte);
                    }
                }
            }
        } else {
            // otherwise, if there are no ANSI escape codes,
            // we can just write each byte in the string.
            for byte in s.as_bytes() {
                self.write_byte(*byte);
            }
        }
        Ok(())
    }
}

/// The system's global VGA terminal
// 0xB8000 is the standard physical address of the VGA text-mode buffer.
pub static CONSOLE: Mutex<Terminal>
    = Mutex::new(Terminal { colors: vga::Palette::new( vga::Color::LightGreen
                                                     , vga::Color::Black )
                          , x: 0
                          , y: 0
                          , buffer: unsafe { Unique::new(0xB8000 as *mut _) },
                          });
use std::cmp;

use petgraph::graph::NodeIndex;
use rustwlc::{WlcView, Geometry, Point, Size, ResizeEdge};

use super::super::{LayoutTree, TreeError};
use super::super::commands::CommandResult;
use super::super::core::container::{Container, ContainerType, ContainerErr, Layout, Handle};
use super::borders;
use ::layout::core::borders::Borders;
use ::render::Renderable;
use ::debug_enabled;

use uuid::Uuid;

/// Errors specific to float/ground layout operations.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum LayoutErr {
    /// The node behind the UUID was asked to ground when it was already grounded.
    AlreadyGrounded(Uuid),
    /// The node behind the UUID was asked to float when it was already floating.
    AlreadyFloating(Uuid)
}

impl LayoutTree {
    /// Given the index of some container in the tree, lays out the children of
    /// that container based on what type of container it is and how big of an
    /// area is allocated for it and its children.
    pub fn layout(&mut self, node_ix: NodeIndex) {
        match self.tree[node_ix].get_type() {
            // Root: lay out every output independently.
            ContainerType::Root => {
                for output_ix in self.tree.children_of(node_ix) {
                    self.layout(output_ix);
                }
            }
            // Output: resize the background, then constrain each workspace to
            // the output's geometry and draw fullscreen apps on top.
            ContainerType::Output => {
                let geometry;
                {
                    let container = &mut self.tree[node_ix];
                    geometry = container.get_geometry()
                        .expect("Output had no geometry");
                    let actual_geometry = container.get_actual_geometry()
                        .expect("Output had no actual geometry");
                    match *container {
                        Container::Output { ref mut background, .. } => {
                            // update the background size
                            if let Some(background) = *background {
                                background.set_geometry(ResizeEdge::empty(), actual_geometry)
                            }
                        }
                        _ => unreachable!()
                    }
                }
                let mut fullscreen_apps = Vec::new();
                for workspace_ix in self.tree.children_of(node_ix) {
                    self.layout_helper(workspace_ix, geometry, &mut fullscreen_apps);
                }
                self.layout_fullscreen_apps(fullscreen_apps);
            }
            ContainerType::Workspace => {
                // get geometry from the parent output
                let output_ix = self.tree.ancestor_of_type(node_ix, ContainerType::Output)
                    .expect("Workspace had no output parent");
                let output_geometry = self.tree[output_ix].get_geometry()
                    .expect("Could not get output geometry");
                let mut fullscreen_apps = Vec::new();
                self.layout_helper(node_ix, output_geometry, &mut fullscreen_apps);
                self.layout_fullscreen_apps(fullscreen_apps)
            }
            // Container: constrain children to this container's actual geometry.
            ContainerType::Container => {
                let geometry = self.tree[node_ix].get_actual_geometry()
                    .expect("Could not get actual container geometry");
                // TODO Fake vector that doesn't allocate for this case?
                let mut fullscreen_apps = Vec::new();
                self.layout_helper(node_ix, geometry, &mut fullscreen_apps);
            }
            // View: a view cannot be laid out on its own; re-lay out its parent.
            ContainerType::View => {
                let parent_ix = self.tree.parent_of(node_ix)
                    .expect("View had no parent");
                self.layout(parent_ix);
            }
        }
        self.validate();
    }

    /// Helper function to layout a container. The geometry is the constraint geometry,
    /// the container tries to lay itself out within the confines defined by the constraint.
    /// Generally, this should not be used directly and layout should be used.
    fn layout_helper(&mut self, node_ix: NodeIndex, mut geometry: Geometry,
                     fullscreen_apps: &mut Vec<NodeIndex>) {
        // Fullscreen nodes are collected (not laid out here); the caller
        // places them above everything via `layout_fullscreen_apps`.
        if self.tree[node_ix].fullscreen() {
            fullscreen_apps.push(node_ix);
        }
        match self.tree[node_ix].get_type() {
            ContainerType::Root => {
                warn!("Ignoring geometry constraint ({:#?}), \
                       deferring to each output's constraints", geometry);
                for child_ix in self.tree.children_of(node_ix) {
                    self.layout(child_ix);
                }
            },
            ContainerType::Output => {
                self.tree[node_ix].set_geometry(ResizeEdge::empty(), geometry);
                for child_ix in self.tree.children_of(node_ix) {
                    self.layout_helper(child_ix, geometry, fullscreen_apps);
                }
            }
            ContainerType::Workspace => {
                self.tree[node_ix].set_geometry(ResizeEdge::empty(), geometry);
                for child_ix in self.tree.grounded_children(node_ix) {
                    self.layout_helper(child_ix, geometry, fullscreen_apps);
                }
                // place floating children above everything else
                let root_ix = self.tree.children_of(node_ix)[0];
                for child_ix in self.tree.floating_children(root_ix) {
                    // TODO Propogate error
                    self.place_floating(child_ix, fullscreen_apps).ok();
                }
            },
            ContainerType::Container => {
                // Update the geometry so that borders are included when tiling.
                geometry = self.update_container_geo_for_borders(node_ix, geometry)
                    .expect("Could not update container geo for tiling");
                let layout = match self.tree[node_ix] {
                    Container::Container { layout, .. } => layout,
                    _ => unreachable!()
                };
                match layout {
                    // Horizontal tiling: children share the width proportionally
                    // to their current widths (see `calculate_scale`).
                    Layout::Horizontal => {
                        let children = self.tree.grounded_children(node_ix);
                        let children_len = children.len();
                        let mut scale = LayoutTree::calculate_scale(children.iter().map(|child_ix| {
                            let c_geometry = self.tree[*child_ix].get_geometry()
                                .expect("Child had no geometry");
                            c_geometry.size.w as f32
                        }).collect(), geometry.size.w as f32);
                        // scale <= 0.1 means there is effectively no width to
                        // distribute; skip tiling entirely in that case.
                        if scale > 0.1 {
                            scale = geometry.size.w as f32 / scale;
                            let new_size_f = |child_size: Size, sub_geometry: Geometry| {
                                let width = if child_size.w > 0 {
                                    child_size.w as f32
                                } else {
                                    // If the width would become zero, just make it the average size of the container.
                                    // e.g, if container was width 500 w/ 2 children, this view would have a width of 250
                                    geometry.size.w as f32 / children_len.checked_sub(1).unwrap_or(1) as f32
                                };
                                Size {
                                    w: ((width) * scale) as u32,
                                    h: sub_geometry.size.h
                                }
                            };
                            let remaining_size_f = |sub_geometry: Geometry, cur_geometry: Geometry| {
                                let remaining_width =
                                    cur_geometry.origin.x + cur_geometry.size.w as i32
                                    - sub_geometry.origin.x;
                                Size {
                                    w: remaining_width as u32,
                                    h: sub_geometry.size.h
                                }
                            };
                            let new_point_f = |new_size: Size, sub_geometry: Geometry| {
                                Point {
                                    x: sub_geometry.origin.x + new_size.w as i32,
                                    y: sub_geometry.origin.y
                                }
                            };
                            self.generic_tile(node_ix, geometry, children.as_slice(),
                                              new_size_f, remaining_size_f, new_point_f,
                                              fullscreen_apps);
                            self.add_gaps(node_ix)
                                .expect("Couldn't add gaps to horizontal container");
                            // TODO Propogate error
                            self.draw_borders_rec(children).ok();
                        }
                    }
                    // Vertical tiling: mirror image of the horizontal case,
                    // distributing height instead of width.
                    Layout::Vertical => {
                        let children = self.tree.grounded_children(node_ix);
                        let children_len = children.len();
                        let mut scale = LayoutTree::calculate_scale(children.iter().map(|child_ix| {
                            let c_geometry = self.tree[*child_ix].get_geometry()
                                .expect("Child had no geometry");
                            c_geometry.size.h as f32
                        }).collect(), geometry.size.h as f32);
                        if scale > 0.1 {
                            scale = geometry.size.h as f32 / scale;
                            let new_size_f = |child_size: Size, sub_geometry: Geometry| {
                                let height = if child_size.h > 0 {
                                    child_size.h as f32
                                } else {
                                    // If the height would become zero, just make it the average size of the container.
                                    // e.g, if container was height 500 w/ 2 children, this view would have a height of 250
                                    geometry.size.h as f32 / children_len.checked_sub(1).unwrap_or(1) as f32
                                };
                                Size {
                                    w: sub_geometry.size.w,
                                    h: ((height) * scale) as u32
                                }
                            };
                            let remaining_size_f = |sub_geometry: Geometry, cur_geometry: Geometry| {
                                let remaining_height =
                                    cur_geometry.origin.y + cur_geometry.size.h as i32
                                    - sub_geometry.origin.y;
                                Size {
                                    w: sub_geometry.size.w,
                                    h: remaining_height as u32
                                }
                            };
                            let new_point_f = |new_size: Size, sub_geometry: Geometry| {
                                Point {
                                    x: sub_geometry.origin.x,
                                    y: sub_geometry.origin.y + new_size.h as i32
                                }
                            };
                            self.generic_tile(node_ix, geometry, children.as_slice(),
                                              new_size_f, remaining_size_f, new_point_f,
                                              fullscreen_apps);
                            self.add_gaps(node_ix)
                                .expect("Couldn't add gaps to vertical container");
                            // TODO Propogate error
                            self.draw_borders_rec(children).ok();
                        }
                    },
                    // Tabbed/Stacked: only the active child is laid out and
                    // made visible; all siblings are hidden.
                    Layout::Tabbed | Layout::Stacked => {
                        let mut children = self.tree.grounded_children(node_ix);
                        if children.len() == 0 {
                            return;
                        }
                        children.push(node_ix);
                        let c_geometry = self.tree[node_ix].get_geometry()
                            .expect("Container had no geometry");
                        if let Some(visible_child) = self.tree.next_active_node(node_ix) {
                            self.layout_helper(visible_child, c_geometry, fullscreen_apps);
                            let workspace_ix = self.tree.ancestor_of_type(
                                node_ix, ContainerType::Workspace)
                                .expect("Node did not have a workspace as an ancestor");
                            // Set visibilty if on active workspace
                            if self.tree.on_path(workspace_ix) {
                                // set all the children invisible
                                self.set_container_visibility(node_ix, false);
                                // set the focused child to be visible
                                self.set_container_visibility(visible_child, true);
                            }
                        }
                        // TODO Propogate error
                        self.draw_borders_rec(children).ok();
                    },
                }
            }
            ContainerType::View => {
                self.tree[node_ix].set_geometry(ResizeEdge::empty(), geometry);
                self.update_view_geo_for_borders(node_ix)
                    .expect("Couldn't add border gaps to horizontal container");
            }
        }
        self.validate();
    }

    /// Attempts to set the node behind the id to be floating.
    ///
    /// This removes the container from its parent and makes its new parent-
    /// the workspace it resides in.
    ///
    /// The view will have a geometry of 1/2 the height/width, and set right in the
    /// middle of the screen.
    ///
    /// This will change the active container, but **not** the active path,
    /// it will remain pointing at the previous parent container.
    pub fn float_container(&mut self, id: Uuid) -> CommandResult {
        let node_ix = try!(self.tree.lookup_id(id).ok_or(TreeError::NodeNotFound(id)));
        if self.tree.is_root_container(node_ix) {
            return Err(TreeError::InvalidOperationOnRootContainer(id))
        }
        if self.tree[node_ix].floating() {
            warn!("Trying to float an already floating container");
            return Err(TreeError::Layout(LayoutErr::AlreadyFloating(id)));
        }
        let output_ix = try!(self.tree.ancestor_of_type(node_ix, ContainerType::Output)
                             .map_err(|err| TreeError::PetGraph(err)));
        let output_size = match self.tree[output_ix] {
            Container::Output { handle, .. } => {
                handle.get_resolution().expect("Output had no resolution")
            },
            _ => unreachable!()
        };
        {
            let container = &mut self.tree[node_ix];
            try!(container.set_floating(true)
                 .map_err(|_| TreeError::UuidWrongType(id, vec!(ContainerType::View,
                                                                ContainerType::Container))));
            // Half the output size, centered: origin = size/2 - size/4.
            let new_geometry = Geometry {
                size: Size { h: output_size.h / 2
                           , w: output_size.w / 2 },
                origin: Point { x: (output_size.w / 2 - output_size.w / 4) as i32
                              , y: (output_size.h / 2 - output_size.h / 4) as i32 }
            };
            match container.get_type() {
                ContainerType::View | ContainerType::Container => {
                    container.set_geometry(ResizeEdge::empty(), new_geometry);
                },
                _ => return Err(TreeError::UuidWrongType(id, vec!(ContainerType::View,
                                                                  ContainerType::Container)))
            }
            container.resize_borders(new_geometry);
            container.draw_borders()?;
        }
        let root_ix = self.tree.root_ix();
        let root_c_ix = try!(self.tree.follow_path_until(root_ix, ContainerType::Container)
                             .map_err(|_| TreeError::NoActiveContainer));
        let parent_ix = self.tree.parent_of(node_ix)
            .expect("View had no parent node!");
        // Re-parent the node under the workspace's root container.
        try!(self.tree.move_into(node_ix, root_c_ix)
             .map_err(|err| TreeError::PetGraph(err)));
        self.tree.set_ancestor_paths_active(node_ix);
        // Clean up the old parent if this was its last child.
        if self.tree.can_remove_empty_parent(parent_ix) {
            try!(self.remove_view_or_container(parent_ix));
        }
        let parent_ix = self.tree.parent_of(root_c_ix).unwrap();
        self.layout(parent_ix);
        Ok(())
    }

    /// Re-attaches the floating node behind the id to the tiled tree:
    /// clears its floating flag, moves it next to the active container,
    /// normalizes its size against its new siblings, and re-lays out.
    pub fn ground_container(&mut self, id: Uuid) -> CommandResult {
        let floating_ix = try!(self.tree.lookup_id(id).ok_or(TreeError::NodeNotFound(id)));
        if !self.tree[floating_ix].floating() {
            warn!("Trying to ground an already grounded container");
            return Err(TreeError::Layout(LayoutErr::AlreadyGrounded(id)));
        }
        let root_ix = self.tree.root_ix();
        let mut node_ix = self.tree.follow_path(root_ix);
        // If view, need to make it a sibling
        if self.tree[node_ix].get_type() == ContainerType::View {
            node_ix = try!(self.tree.parent_of(node_ix)
                           .map_err(|err| TreeError::PetGraph(err)));
        }
        {
            let container = &mut self.tree[floating_ix];
            try!(container.set_floating(false)
                 .map_err(|_| TreeError::UuidWrongType(id, vec!(ContainerType::View,
                                                                ContainerType::Container))));
        }
        try!(self.tree.move_into(floating_ix, node_ix)
             .map_err(|err| TreeError::PetGraph(err)));
        self.normalize_container(node_ix);
        let root_ix = self.tree.root_ix();
        let root_c_ix = try!(self.tree.follow_path_until(root_ix, ContainerType::Container)
                             .map_err(|_| TreeError::NoActiveContainer));
        let parent_ix = self.tree.parent_of(root_c_ix).unwrap();
        self.layout(parent_ix);
        Ok(())
    }

    /// If the node is floating, places it at its reported position, above all
    /// other nodes.
    fn place_floating(&mut self, node_ix: NodeIndex,
                      fullscreen_apps: &mut Vec<NodeIndex>) -> CommandResult {
        // Fullscreen floaters are deferred to `layout_fullscreen_apps`.
        if self.tree[node_ix].fullscreen() {
            fullscreen_apps.push(node_ix);
            return Ok(())
        }
        if !self.tree[node_ix].floating() {
            Err(ContainerErr::BadOperationOn(
                self.tree[node_ix].get_type(),
                "Tried to absolutely place a non-floating view!".into()))?
        }
        {
            let container = &mut self.tree[node_ix];
            match *container {
                // Floating sub-containers are not yet supported.
                Container::Container { .. } => {
                    unimplemented!()
                },
                Container::View { ref handle, .. } => {
                    handle.bring_to_front();
                },
                _ => unreachable!()
            }
            container.draw_borders()?;
        }
        // Recursively raise any floating children as well.
        for child_ix in self.tree.floating_children(node_ix) {
            self.place_floating(child_ix, fullscreen_apps)?;
        }
        Ok(())
    }

    /// Changes the layout of the active container to the given layout.
    /// If the active container is a view, a new container is added with the given
    /// layout type.
    pub fn toggle_active_layout(&mut self, new_layout: Layout) -> CommandResult {
        if let Some(active_ix) = self.active_container {
            let parent_ix = self.tree.parent_of(active_ix)
                .expect("Active container had no parent");
            // Root container: just switch its layout in place.
            if self.tree.is_root_container(active_ix) {
                self.set_layout(active_ix, new_layout);
                return Ok(())
            }
            // Only child: switch the parent's layout instead of nesting.
            if self.tree.grounded_children(parent_ix).len() == 1 {
                self.set_layout(parent_ix, new_layout);
                return Ok(())
            }
            let active_geometry = self.get_active_container()
                .expect("Could not get the active container")
                .get_geometry().expect("Active container had no geometry");
            let output_ix = self.tree.ancestor_of_type(active_ix, ContainerType::Output)?;
            let output = match self.tree[output_ix].get_handle()? {
                Handle::Output(handle) => handle,
                _ => unreachable!()
            };
            let borders = Borders::new(active_geometry, output)
                // TODO This will change when we get proper tabbed/stacked borders
                .map(|mut b| {
                    b.title = format!("{:?} container", new_layout);
                    b
                });
            // Wrap the active node in a fresh container with the new layout.
            let mut new_container = Container::new_container(active_geometry, borders);
            new_container.set_layout(new_layout).ok();
            self.add_container(new_container, active_ix)?;
            // add_container sets the active container to be the new container
            self.set_active_node(active_ix)?;
            let parent_ix = self.tree.parent_of(active_ix)?;
            self.layout(parent_ix);
        }
        self.validate();
        Ok(())
    }

    // Updates the tree's layout recursively starting from the active container.
    // If the active container is a view, it starts at the parent container.
    pub fn layout_active_of(&mut self, c_type: ContainerType) {
        if let Some(container_ix) = self.active_ix_of(c_type) {
            match c_type {
                ContainerType::Root |
                ContainerType::Output |
                ContainerType::Workspace => {
                    self.layout(container_ix);
                },
                ContainerType::Container => {
                    let mut fullscreen_apps = Vec::new();
                    let geometry = self.tree[container_ix].get_geometry()
                        .expect("Container didn't have a geometry");
                    self.layout_helper(container_ix, geometry, &mut fullscreen_apps);
                },
                ContainerType::View => {
                    warn!("Cannot simply update a view's geometry without {}",
                          "consulting container, updating it's parent");
                    // A view's geometry is decided by its parent container, so
                    // retry one level up.
                    self.layout_active_of(ContainerType::Container);
                }
            }
        } else {
            warn!("{:#?} did not have a parent of type {:?}, doing nothing!",
                  self, c_type);
        }
        self.validate();
    }

    /// Sets the active container to the given layout.
    ///
    /// If the container is a view, it sets the layout of its parent to the
    /// given layout.
    ///
    /// Automatically retiles the container whose layout was changed.
    pub fn set_active_layout(&mut self, new_layout: Layout) -> CommandResult {
        let mut node_ix = self.active_container
            .ok_or(TreeError::NoActiveContainer)?;
        if self.tree[node_ix].get_type() == ContainerType::View {
            node_ix = self.tree.parent_of(node_ix)
                .expect("View had no parent");
        }
        self.tree[node_ix].set_layout(new_layout)
            .map_err(TreeError::Container)?;
        self.validate();
        let workspace_ix = self.tree.ancestor_of_type(node_ix,
                                                      ContainerType::Workspace)?;
        self.layout(workspace_ix);
        Ok(())
    }

    /// Gets the active container and toggles it based on the following rules:
    /// * If horizontal, make it vertical
    /// * else, make it horizontal
    /// This method does *NOT* update the actual views geometry, that needs to be
    /// done separately by the caller
    pub fn toggle_cardinal_tiling(&mut self, id: Uuid) -> CommandResult {
        {
            // NOTE: This stupid mutable lookup can't be its own function, see:
            // https://www.reddit.com/r/rust/comments/55o54l/hey_rustaceans_got_an_easy_question_ask_here/d8pv5q9/?context=3
            let node_ix = try!(self.tree.lookup_id(id)
                               .ok_or(TreeError::NodeNotFound(id)));
            let container_t = self.tree[node_ix].get_type();
            // Views delegate to their parent container.
            if container_t == ContainerType::View {
                let parent_id = try!(self.parent_of(id)).get_id();
                return self.toggle_cardinal_tiling(parent_id)
            }
            let new_layout = match self.tree[node_ix].get_layout()? {
                Layout::Horizontal => Layout::Vertical,
                _ => Layout::Horizontal
            };
            self.set_layout(node_ix, new_layout)
        }
        self.validate();
        Ok(())
    }

    /// Calculates how much to scale on average for each value given.
    /// If the value is 0 (i.e the width or height of the container is 0),
    /// then it is calculated as max / children_values.len()
    fn calculate_scale(children_values: Vec<f32>, max: f32) -> f32 {
        let mut scale = 0.0;
        let len = children_values.len();
        for mut value in children_values {
            if value <= 0.0 {
                // substitute the average share for degenerate (zero-sized) children
                value = max / len.checked_sub(1).unwrap_or(1) as f32;
            }
            scale += value;
        }
        return scale;
    }

    /// Generic tiling driver shared by horizontal and vertical layouts.
    ///
    /// Walks `children` in order, computing each child's size
    /// (`new_size_f`), giving the last child whatever space remains
    /// (`remaining_size_f`), and advancing the running origin for the next
    /// child (`new_point_f`). Each child is then laid out recursively.
    fn generic_tile<SizeF, RemainF, PointF>
        (&mut self, node_ix: NodeIndex, geometry: Geometry, children: &[NodeIndex],
         new_size_f: SizeF, remaining_size_f: RemainF, new_point_f: PointF,
         fullscreen_apps: &mut Vec<NodeIndex>)
        where SizeF: Fn(Size, Geometry) -> Size,
              RemainF: Fn(Geometry, Geometry) -> Size,
              PointF: Fn(Size, Geometry) -> Point
    {
        let mut sub_geometry = geometry.clone();
        for (index, child_ix) in children.iter().enumerate() {
            let child_size = self.tree[*child_ix].get_geometry()
                .expect("Child had no geometry").size;
            let new_size = new_size_f(child_size, sub_geometry.clone());
            sub_geometry = Geometry {
                origin: sub_geometry.origin.clone(),
                size: new_size.clone()
            };
            // If last child, then just give it the remaining height
            if index == children.len() - 1 {
                let new_size = remaining_size_f(sub_geometry.clone(),
                                                self.tree[node_ix].get_geometry()
                                                .expect("Container had no geometry"));
                sub_geometry = Geometry {
                    origin: sub_geometry.origin,
                    size: new_size
                };
            }
            self.layout_helper(*child_ix, sub_geometry.clone(), fullscreen_apps);
            // Next sub container needs to start where this one ends
            let new_point = new_point_f(new_size.clone(), sub_geometry.clone());
            sub_geometry = Geometry {
                // lambda to calculate new point, given a new size
                // which is calculated in the function
                origin: new_point,
                size: new_size
            };
        }
        self.validate();
    }

    /// Sets the layout of the container at `node_ix`; warns and does nothing
    /// when the node is not a `Container`. Switching to a cardinal layout on
    /// the active workspace also makes the container's children visible
    /// (they may have been hidden by a tabbed/stacked layout).
    pub fn set_layout(&mut self, node_ix: NodeIndex, new_layout: Layout) {
        match self.tree[node_ix] {
            Container::Container { ref mut layout, .. } => {
                *layout = new_layout;
            },
            ref container => {
                warn!("Can not set layout on non-container {:#?}", container);
                return;
            }
        }
        if new_layout == Layout::Vertical || new_layout == Layout::Horizontal {
            let workspace_ix = self.tree.ancestor_of_type(
                node_ix, ContainerType::Workspace)
                .expect("Node did not have a workspace as an ancestor");
            if self.tree.on_path(workspace_ix) {
                self.set_container_visibility(node_ix, true)
            }
        }
    }

    /// Normalizes the geometry of a view to be the same size as it's siblings,
    /// based on the parent container's layout, at the 0 point of the parent container.
    /// Note this does not auto-tile, only modifies this one view.
    ///
    /// Useful if a container's children want to be evenly distributed, or a new view
    /// is being added.
    pub fn normalize_view(&mut self, view: WlcView) {
        if let Some(view_ix) = self.tree
            .descendant_with_handle(self.tree.root_ix(), view.into()) {
            self.normalize_container(view_ix);
        }
    }

    /// Normalizes the geometry of a view or a container of views so that
    /// the view is the same size as its siblings.
pub fn normalize_container(&mut self, node_ix: NodeIndex) { // if floating, do not normalize if self.tree[node_ix].floating() { if cfg!(debug_assertions) || !debug_enabled() { error!("Tried to normalize {:?}\n{:#?}", node_ix, self); panic!("Tried to normalize a floating view, are you sure you want to do that?") } else { warn!("Tried to normalize {:?}\n{:#?}", node_ix, self); return } } match self.tree[node_ix].get_type() { ContainerType::Container => { for child_ix in self.tree.grounded_children(node_ix) { self.normalize_container(child_ix) } }, ContainerType::View => { let parent_ix = self.tree.ancestor_of_type(node_ix, ContainerType::Container) .expect("View had no container parent"); let new_geometry: Geometry; let num_siblings = cmp::max(1, self.tree.grounded_children(parent_ix).len() .checked_sub(1).unwrap_or(0)) as u32; let parent_geometry = self.tree[parent_ix].get_actual_geometry() .expect("Parent container had no geometry"); match self.tree[parent_ix] { Container::Container { ref layout, .. } => { match *layout { Layout::Horizontal => { new_geometry = Geometry { origin: parent_geometry.origin.clone(), size: Size { w: parent_geometry.size.w / num_siblings, h: parent_geometry.size.h } }; } Layout::Vertical => { new_geometry = Geometry { origin: parent_geometry.origin.clone(), size: Size { w: parent_geometry.size.w, h: parent_geometry.size.h / num_siblings } }; }, Layout::Tabbed | Layout::Stacked => new_geometry = parent_geometry } }, _ => unreachable!() }; self.tree[node_ix].set_geometry(ResizeEdge::empty(), new_geometry); }, container => { error!("Tried to normalize a {:#?}", container); panic!("Can only normalize the view on a view or container") } } } /// Tiles these containers above all the other containers in its workspace. /// /// If multiple containers are in the same workspace, each one will be drawn /// on top of the other, with the last one being the one ultimately seen by the user. 
///
/// # Panic
/// This function will panic if the any of the containers are not a `View` or a `Container`
pub fn layout_fullscreen_apps(&mut self, containers: Vec<NodeIndex>) {
    for node_ix in containers {
        // Fullscreen means "fill the output", so size each node to its
        // output's actual geometry.
        let output_ix = self.tree.ancestor_of_type(node_ix, ContainerType::Output)
            .expect("Container did not have an output as an ancestor");
        let output_geometry = self.tree[output_ix].get_actual_geometry()
            .expect("Output did not have a geometry associated with it");
        // Sorry, this is an ugly borrow checker hack
        // Can't do self.layout() in Container::Container, borrowing mutably self mutably here.
        // (The match borrows `self.tree[node_ix]` mutably, so the recursive
        // `self.layout` call is deferred until after the borrow ends.)
        let maybe_node_ix = match self.tree[node_ix] {
            Container::View { handle, .. } => {
                handle.set_geometry(ResizeEdge::empty(), output_geometry);
                handle.bring_to_front();
                let views = handle.get_output().get_views();
                // TODO It would be nice to not have to iterate over
                // all the views just to do this.
                for view in views {
                    // make sure children render above fullscreen parent
                    if view.get_parent() == handle {
                        view.bring_to_front();
                    }
                }
                // A view needs no re-tiling pass afterwards.
                None
            },
            Container::Container { ref mut geometry, .. } => {
                *geometry = output_geometry;
                // Containers must be re-tiled so their children pick up the
                // new (fullscreen) geometry.
                Some(node_ix)
            },
            ref container => {
                error!("Expected a view or a container, got {:?}", container);
                panic!("Expected a View or a Container, got something else");
            }
        };
        if let Some(node_ix) = maybe_node_ix {
            self.layout(node_ix);
        }
    }
}

/// Adds gaps between all the views of the container at the `NodeIndex`
/// This does not recurse if a container is found.
///
/// If the `NodeIndex` doesn't point to a `Container`, an error is returned.
fn add_gaps(&mut self, node_ix: NodeIndex) -> CommandResult {
    let layout = match self.tree[node_ix] {
        Container::Container { layout, .. } => layout,
        _ => return Err(TreeError::UuidNotAssociatedWith(
            ContainerType::Container))
    };
    // Gap size is a global setting; zero means gaps are disabled and there
    // is nothing to do.
    let gap = Borders::gap_size();
    if gap == 0 {
        return Ok(())
    }
    let children = self.tree.grounded_children(node_ix);
    for (index, child_ix) in children.iter().enumerate() {
        let child = &mut self.tree[*child_ix];
        match *child {
            Container::View { handle, .. } => {
                let mut geometry = handle.get_geometry().unwrap();
                // Every view is shifted half a gap right/down; the shrink
                // below keeps it inside its allotted slot.
                geometry.origin.x += (gap / 2) as i32;
                geometry.origin.y += (gap / 2) as i32;
                // The last child along the layout axis gets an extra
                // half-gap shaved off so the trailing edge also shows a gap.
                if index == children.len() - 1 {
                    match layout {
                        Layout::Horizontal => {
                            geometry.size.w = geometry.size.w.saturating_sub(gap / 2)
                        },
                        Layout::Vertical => {
                            geometry.size.h = geometry.size.h.saturating_sub(gap / 2)
                        },
                        // TODO Gaps for tabbed/stacked
                        _ => {}
                    }
                }
                // Shrink along the layout axis by half a gap (the neighbour
                // contributes the other half) and across it by a full gap.
                // saturating_sub avoids underflow on tiny views.
                match layout {
                    Layout::Horizontal => {
                        geometry.size.w = geometry.size.w.saturating_sub(gap / 2);
                        geometry.size.h = geometry.size.h.saturating_sub(gap);
                    },
                    Layout::Vertical => {
                        geometry.size.w = geometry.size.w.saturating_sub(gap);
                        geometry.size.h = geometry.size.h.saturating_sub(gap / 2);
                    },
                    Layout::Tabbed | Layout::Stacked => {
                        /* Should not be gaps within a stacked / tabbed,
                         * because only one view is visible at a time. */
                    }
                }
                handle.set_geometry(ResizeEdge::empty(), geometry);
            },
            // Do nothing, will get in the next recursion cycle
            Container::Container { .. } => {continue},
            ref container => {
                error!("Iterating over a container, \
                        found non-view/containers!");
                error!("Found: {:#?}", container);
                panic!("Applying gaps, found a non-view/container")
            }
        }
    }
    Ok(())
}

/// Updates the geometry of the container, so that the borders are not
/// hidden by the container. E.g this ensures that the borders are treated
/// as part of the container for tiling/rendering purposes
///
/// Returns the updated geometry for the container on success.
/// That geometry should be used as the new constraint geometry for the
/// children containers.
fn update_container_geo_for_borders(&mut self, node_ix: NodeIndex, mut geometry: Geometry) -> Result<Geometry, TreeError> { let container = &mut self.tree[node_ix]; match *container { Container::Container { ref mut apparent_geometry, ref borders, .. } => { if borders.is_some() { let thickness = Borders::thickness(); let edge_thickness = thickness / 2; let title_size = Borders::title_bar_size(); geometry.origin.y += edge_thickness as i32; geometry.origin.y += (title_size / 2) as i32; geometry.size.h = geometry.size.h.saturating_sub(edge_thickness); geometry.size.h = geometry.size.h.saturating_sub(title_size / 2); } *apparent_geometry = geometry; }, ref container => { error!("Attempted to add borders to non-view"); error!("Found {:#?}", container); panic!("Applying gaps for borders, found non-view/container") } } Ok(geometry) } /// Updates the geometry of the view, so that the borders are not /// hidden by other views. E.g this ensures that the borders are treated /// as part of the container for tiling/rendering purposes fn update_view_geo_for_borders(&mut self, node_ix: NodeIndex) -> CommandResult { let container = &mut self.tree[node_ix]; let mut geometry = container.get_geometry() .expect("Container had no geometry"); match *container { Container::View { handle, .. 
} => { let thickness = Borders::thickness(); if thickness == 0 { return Ok(()) } let edge_thickness = (thickness / 2) as i32; let title_size = Borders::title_bar_size(); geometry.origin.x += edge_thickness; geometry.origin.y += edge_thickness; geometry.origin.y += title_size as i32; geometry.size.w = geometry.size.w.saturating_sub(thickness); geometry.size.h = geometry.size.h.saturating_sub(thickness); geometry.size.h = geometry.size.h.saturating_sub(title_size); handle.set_geometry(ResizeEdge::empty(), geometry); }, ref container => { error!("Attempted to add borders to non-view"); error!("Found {:#?}", container); panic!("Applying gaps for borders, found non-view/container") } } // Done to make the resizing on tiled works container.resize_borders(geometry); Ok(()) } /// Draws the borders recursively, down from the top to the bottom. fn draw_borders_rec(&mut self, mut children: Vec<NodeIndex>) -> CommandResult { while children.len() > 0 { let child_ix = children.pop().unwrap(); children.extend(self.tree.grounded_children(child_ix)); let parent_ix = self.tree.parent_of(child_ix) .expect("Node had no parent"); let children = self.tree.children_of(parent_ix); let index = children.iter().position(|&node_ix| node_ix == child_ix) .map(|num| (num + 1).to_string()); if Some(child_ix) != self.active_container { self.set_borders(child_ix, borders::Mode::Inactive)?; } else { match self.tree[parent_ix] { Container::Container { layout, ref mut borders, .. 
} => { if layout == Layout::Tabbed || layout == Layout::Stacked { borders.as_mut().map(|b| { b.set_title(format!("{:?} ({}/{})", layout, index.unwrap_or("?".into()), children.len() )); }); } }, _ => {} } self.set_borders(child_ix, borders::Mode::Active)?; } } Ok(()) } } #[cfg(test)] mod test { use super::super::super::LayoutTree; #[test] /// Ensure that calculate_scale is fair to all it's children fn calculate_scale_test() { assert_eq!(LayoutTree::calculate_scale(vec!(), 0.0), 0.0); assert_eq!(LayoutTree::calculate_scale(vec!(5.0, 5.0, 5.0, 5.0, 5.0, 5.0), 0.0), 30.0); assert_eq!(LayoutTree::calculate_scale(vec!(5.0, 5.0, 5.0, 5.0, -5.0, 0.0), 5.0), 22.0); } } Fixed tabbed/stacked not showing view title bar use std::cmp; use petgraph::graph::NodeIndex; use rustwlc::{WlcView, Geometry, Point, Size, ResizeEdge}; use super::super::{LayoutTree, TreeError}; use super::super::commands::CommandResult; use super::super::core::container::{Container, ContainerType, ContainerErr, Layout, Handle}; use super::borders; use ::layout::core::borders::Borders; use ::render::Renderable; use ::debug_enabled; use uuid::Uuid; #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum LayoutErr { /// The node behind the UUID was asked to ground when it was already grounded. AlreadyGrounded(Uuid), /// The node behind the UUID was asked to float when it was already floating. AlreadyFloating(Uuid) } impl LayoutTree { /// Given the index of some container in the tree, lays out the children of /// that container based on what type of container it is and how big of an /// area is allocated for it and its children. 
pub fn layout(&mut self, node_ix: NodeIndex) {
    match self.tree[node_ix].get_type() {
        // Root: lay out every output independently.
        ContainerType::Root => {
            for output_ix in self.tree.children_of(node_ix) {
                self.layout(output_ix);
            }
        }
        ContainerType::Output => {
            let geometry;
            {
                // Scoped mutable borrow of the output node: read its
                // geometry and resize its background, then release the
                // borrow so `self` can be used for the recursive calls.
                let container = &mut self.tree[node_ix];
                geometry = container.get_geometry()
                    .expect("Output had no geometry");
                let actual_geometry = container.get_actual_geometry()
                    .expect("Output had no actual geometry");
                match *container {
                    Container::Output { ref mut background, .. } => {
                        // update the background size
                        if let Some(background) = *background {
                            background.set_geometry(ResizeEdge::empty(), actual_geometry)
                        }
                    }
                    _ => unreachable!()
                }
            }
            // Fullscreen nodes are collected during the tiling pass and
            // laid out last so they render on top.
            let mut fullscreen_apps = Vec::new();
            for workspace_ix in self.tree.children_of(node_ix) {
                self.layout_helper(workspace_ix, geometry, &mut fullscreen_apps);
            }
            self.layout_fullscreen_apps(fullscreen_apps);
        }
        ContainerType::Workspace => {
            // get geometry from the parent output
            let output_ix = self.tree.ancestor_of_type(node_ix, ContainerType::Output)
                .expect("Workspace had no output parent");
            let output_geometry = self.tree[output_ix].get_geometry()
                .expect("Could not get output geometry");
            let mut fullscreen_apps = Vec::new();
            self.layout_helper(node_ix, output_geometry, &mut fullscreen_apps);
            self.layout_fullscreen_apps(fullscreen_apps)
        }
        ContainerType::Container => {
            // A container constrains its children to its own actual
            // geometry (which includes border adjustments).
            let geometry = self.tree[node_ix].get_actual_geometry()
                .expect("Could not get actual container geometry");
            // TODO Fake vector that doesn't allocate for this case?
            let mut fullscreen_apps = Vec::new();
            self.layout_helper(node_ix, geometry, &mut fullscreen_apps);
        }
        // A lone view can't re-tile itself; defer to its parent container.
        ContainerType::View => {
            let parent_ix = self.tree.parent_of(node_ix)
                .expect("View had no parent");
            self.layout(parent_ix);
        }
    }
    self.validate();
}

/// Helper function to layout a container. The geometry is the constraint geometry,
/// the container tries to lay itself out within the confines defined by the constraint.
/// Generally, this should not be used directly and layout should be used. fn layout_helper(&mut self, node_ix: NodeIndex, mut geometry: Geometry, fullscreen_apps: &mut Vec<NodeIndex>) { if self.tree[node_ix].fullscreen() { fullscreen_apps.push(node_ix); } match self.tree[node_ix].get_type() { ContainerType::Root => { warn!("Ignoring geometry constraint ({:#?}), \ deferring to each output's constraints", geometry); for child_ix in self.tree.children_of(node_ix) { self.layout(child_ix); } }, ContainerType::Output => { self.tree[node_ix].set_geometry(ResizeEdge::empty(), geometry); for child_ix in self.tree.children_of(node_ix) { self.layout_helper(child_ix, geometry, fullscreen_apps); } } ContainerType::Workspace => { self.tree[node_ix].set_geometry(ResizeEdge::empty(), geometry); for child_ix in self.tree.grounded_children(node_ix) { self.layout_helper(child_ix, geometry, fullscreen_apps); } // place floating children above everything else let root_ix = self.tree.children_of(node_ix)[0]; for child_ix in self.tree.floating_children(root_ix) { // TODO Propogate error self.place_floating(child_ix, fullscreen_apps).ok(); } }, ContainerType::Container => { // Update the geometry so that borders are included when tiling. geometry = self.update_container_geo_for_borders(node_ix, geometry) .expect("Could not update container geo for tiling"); let layout = match self.tree[node_ix] { Container::Container { layout, .. 
} => layout, _ => unreachable!() }; match layout { Layout::Horizontal => { let children = self.tree.grounded_children(node_ix); let children_len = children.len(); let mut scale = LayoutTree::calculate_scale(children.iter().map(|child_ix| { let c_geometry = self.tree[*child_ix].get_geometry() .expect("Child had no geometry"); c_geometry.size.w as f32 }).collect(), geometry.size.w as f32); if scale > 0.1 { scale = geometry.size.w as f32 / scale; let new_size_f = |child_size: Size, sub_geometry: Geometry| { let width = if child_size.w > 0 { child_size.w as f32 } else { // If the width would become zero, just make it the average size of the container. // e.g, if container was width 500 w/ 2 children, this view would have a width of 250 geometry.size.w as f32 / children_len.checked_sub(1).unwrap_or(1) as f32 }; Size { w: ((width) * scale) as u32, h: sub_geometry.size.h } }; let remaining_size_f = |sub_geometry: Geometry, cur_geometry: Geometry| { let remaining_width = cur_geometry.origin.x + cur_geometry.size.w as i32 - sub_geometry.origin.x; Size { w: remaining_width as u32, h: sub_geometry.size.h } }; let new_point_f = |new_size: Size, sub_geometry: Geometry| { Point { x: sub_geometry.origin.x + new_size.w as i32, y: sub_geometry.origin.y } }; self.generic_tile(node_ix, geometry, children.as_slice(), new_size_f, remaining_size_f, new_point_f, fullscreen_apps); self.add_gaps(node_ix) .expect("Couldn't add gaps to horizontal container"); // TODO Propogate error self.draw_borders_rec(children).ok(); } } Layout::Vertical => { let children = self.tree.grounded_children(node_ix); let children_len = children.len(); let mut scale = LayoutTree::calculate_scale(children.iter().map(|child_ix| { let c_geometry = self.tree[*child_ix].get_geometry() .expect("Child had no geometry"); c_geometry.size.h as f32 }).collect(), geometry.size.h as f32); if scale > 0.1 { scale = geometry.size.h as f32 / scale; let new_size_f = |child_size: Size, sub_geometry: Geometry| { let height = if 
child_size.h > 0 { child_size.h as f32 } else { // If the height would become zero, just make it the average size of the container. // e.g, if container was height 500 w/ 2 children, this view would have a height of 250 geometry.size.h as f32 / children_len.checked_sub(1).unwrap_or(1) as f32 }; Size { w: sub_geometry.size.w, h: ((height) * scale) as u32 } }; let remaining_size_f = |sub_geometry: Geometry, cur_geometry: Geometry| { let remaining_height = cur_geometry.origin.y + cur_geometry.size.h as i32 - sub_geometry.origin.y; Size { w: sub_geometry.size.w, h: remaining_height as u32 } }; let new_point_f = |new_size: Size, sub_geometry: Geometry| { Point { x: sub_geometry.origin.x, y: sub_geometry.origin.y + new_size.h as i32 } }; self.generic_tile(node_ix, geometry, children.as_slice(), new_size_f, remaining_size_f, new_point_f, fullscreen_apps); self.add_gaps(node_ix) .expect("Couldn't add gaps to vertical container"); // TODO Propogate error self.draw_borders_rec(children).ok(); } }, Layout::Tabbed | Layout::Stacked => { let mut children = self.tree.grounded_children(node_ix); if children.len() == 0 { return; } children.push(node_ix); let c_geometry = match self.tree[node_ix] { Container::Container { apparent_geometry, .. 
} => apparent_geometry, _ => unreachable!() }; if let Some(visible_child) = self.tree.next_active_node(node_ix) { self.layout_helper(visible_child, c_geometry, fullscreen_apps); let workspace_ix = self.tree.ancestor_of_type( node_ix, ContainerType::Workspace) .expect("Node did not have a workspace as an ancestor"); // Set visibilty if on active workspace if self.tree.on_path(workspace_ix) { // set all the children invisible self.set_container_visibility(node_ix, false); // set the focused child to be visible self.set_container_visibility(visible_child, true); } } // TODO Propogate error self.draw_borders_rec(children).ok(); }, } } ContainerType::View => { self.tree[node_ix].set_geometry(ResizeEdge::empty(), geometry); self.update_view_geo_for_borders(node_ix) .expect("Couldn't add border gaps to horizontal container"); } } self.validate(); } /// Attempts to set the node behind the id to be floating. /// /// This removes the container from its parent and makes its new parent- /// the workspace it resides in. /// /// The view will have a geometry of 1/2 the height/width, and set right in the /// middle of the screen. /// /// This will change the active container, but **not** the active path, /// it will remain pointing at the previous parent container. pub fn float_container(&mut self, id: Uuid) -> CommandResult { let node_ix = try!(self.tree.lookup_id(id).ok_or(TreeError::NodeNotFound(id))); if self.tree.is_root_container(node_ix) { return Err(TreeError::InvalidOperationOnRootContainer(id)) } if self.tree[node_ix].floating() { warn!("Trying to float an already floating container"); return Err(TreeError::Layout(LayoutErr::AlreadyFloating(id))); } let output_ix = try!(self.tree.ancestor_of_type(node_ix, ContainerType::Output) .map_err(|err| TreeError::PetGraph(err))); let output_size = match self.tree[output_ix] { Container::Output { handle, .. 
} => { handle.get_resolution().expect("Output had no resolution") }, _ => unreachable!() }; { let container = &mut self.tree[node_ix]; try!(container.set_floating(true) .map_err(|_| TreeError::UuidWrongType(id, vec!(ContainerType::View, ContainerType::Container)))); let new_geometry = Geometry { size: Size { h: output_size.h / 2, w: output_size.w / 2 }, origin: Point { x: (output_size.w / 2 - output_size.w / 4) as i32 , y: (output_size.h / 2 - output_size.h / 4) as i32 } }; match container.get_type() { ContainerType::View | ContainerType::Container => { container.set_geometry(ResizeEdge::empty(), new_geometry); }, _ => return Err(TreeError::UuidWrongType(id, vec!(ContainerType::View, ContainerType::Container))) } container.resize_borders(new_geometry); container.draw_borders()?; } let root_ix = self.tree.root_ix(); let root_c_ix = try!(self.tree.follow_path_until(root_ix, ContainerType::Container) .map_err(|_| TreeError::NoActiveContainer)); let parent_ix = self.tree.parent_of(node_ix) .expect("View had no parent node!"); try!(self.tree.move_into(node_ix, root_c_ix) .map_err(|err| TreeError::PetGraph(err))); self.tree.set_ancestor_paths_active(node_ix); if self.tree.can_remove_empty_parent(parent_ix) { try!(self.remove_view_or_container(parent_ix)); } let parent_ix = self.tree.parent_of(root_c_ix).unwrap(); self.layout(parent_ix); Ok(()) } pub fn ground_container(&mut self, id: Uuid) -> CommandResult { let floating_ix = try!(self.tree.lookup_id(id).ok_or(TreeError::NodeNotFound(id))); if !self.tree[floating_ix].floating() { warn!("Trying to ground an already grounded container"); return Err(TreeError::Layout(LayoutErr::AlreadyGrounded(id))); } let root_ix = self.tree.root_ix(); let mut node_ix = self.tree.follow_path(root_ix); // If view, need to make it a sibling if self.tree[node_ix].get_type() == ContainerType::View { node_ix = try!(self.tree.parent_of(node_ix) .map_err(|err| TreeError::PetGraph(err))); } { let container = &mut self.tree[floating_ix]; 
try!(container.set_floating(false) .map_err(|_| TreeError::UuidWrongType(id, vec!(ContainerType::View, ContainerType::Container)))); } try!(self.tree.move_into(floating_ix, node_ix) .map_err(|err| TreeError::PetGraph(err))); self.normalize_container(node_ix); let root_ix = self.tree.root_ix(); let root_c_ix = try!(self.tree.follow_path_until(root_ix, ContainerType::Container) .map_err(|_| TreeError::NoActiveContainer)); let parent_ix = self.tree.parent_of(root_c_ix).unwrap(); self.layout(parent_ix); Ok(()) } /// If the node is floating, places it at its reported position, above all /// other nodes. fn place_floating(&mut self, node_ix: NodeIndex, fullscreen_apps: &mut Vec<NodeIndex>) -> CommandResult { if self.tree[node_ix].fullscreen() { fullscreen_apps.push(node_ix); return Ok(()) } if !self.tree[node_ix].floating() { Err(ContainerErr::BadOperationOn( self.tree[node_ix].get_type(), "Tried to absolutely place a non-floating view!".into()))? } { let container = &mut self.tree[node_ix]; match *container { Container::Container { .. } => { unimplemented!() }, Container::View { ref handle, .. } => { handle.bring_to_front(); }, _ => unreachable!() } container.draw_borders()?; } for child_ix in self.tree.floating_children(node_ix) { self.place_floating(child_ix, fullscreen_apps)?; } Ok(()) } /// Changes the layout of the active container to the given layout. /// If the active container is a view, a new container is added with the given /// layout type. 
pub fn toggle_active_layout(&mut self, new_layout: Layout) -> CommandResult {
    if let Some(active_ix) = self.active_container {
        let parent_ix = self.tree.parent_of(active_ix)
            .expect("Active container had no parent");
        // The root container of a workspace can't be wrapped; just change
        // its layout in place.
        if self.tree.is_root_container(active_ix) {
            self.set_layout(active_ix, new_layout);
            return Ok(())
        }
        // If the active container is the parent's only grounded child,
        // wrapping it would be redundant — retag the parent instead.
        if self.tree.grounded_children(parent_ix).len() == 1 {
            self.set_layout(parent_ix, new_layout);
            return Ok(())
        }
        // Otherwise wrap the active container in a brand-new container
        // that starts with the same geometry and the requested layout.
        let active_geometry = self.get_active_container()
            .expect("Could not get the active container")
            .get_geometry().expect("Active container had no geometry");
        let output_ix = self.tree.ancestor_of_type(active_ix, ContainerType::Output)?;
        let output = match self.tree[output_ix].get_handle()? {
            Handle::Output(handle) => handle,
            _ => unreachable!()
        };
        let borders = Borders::new(active_geometry, output)
            // TODO This will change when we get proper tabbed/stacked borders
            .map(|mut b| {
                b.title = format!("{:?} container", new_layout);
                b
            });
        let mut new_container = Container::new_container(active_geometry, borders);
        // NOTE(review): the error is discarded with .ok() — presumably
        // set_layout cannot fail on a freshly created container; confirm.
        new_container.set_layout(new_layout).ok();
        self.add_container(new_container, active_ix)?;
        // add_container sets the active container to be the new container
        self.set_active_node(active_ix)?;
        // Re-tile from the new container's parent so siblings adjust.
        let parent_ix = self.tree.parent_of(active_ix)?;
        self.layout(parent_ix);
    }
    self.validate();
    Ok(())
}

// Updates the tree's layout recursively starting from the active container.
// If the active container is a view, it starts at the parent container.
pub fn layout_active_of(&mut self, c_type: ContainerType) { if let Some(container_ix) = self.active_ix_of(c_type) { match c_type { ContainerType::Root | ContainerType::Output | ContainerType::Workspace => { self.layout(container_ix); }, ContainerType::Container => { let mut fullscreen_apps = Vec::new(); let geometry = self.tree[container_ix].get_geometry() .expect("Container didn't have a geometry"); self.layout_helper(container_ix, geometry, &mut fullscreen_apps); }, ContainerType::View => { warn!("Cannot simply update a view's geometry without {}", "consulting container, updating it's parent"); self.layout_active_of(ContainerType::Container); } } } else { warn!("{:#?} did not have a parent of type {:?}, doing nothing!", self, c_type); } self.validate(); } /// Sets the active container to the given layout. /// /// If the container is a view, it sets the layout of its parent to the /// given layout. /// /// Automatically retiles the container whose layout was changed. pub fn set_active_layout(&mut self, new_layout: Layout) -> CommandResult { let mut node_ix = self.active_container .ok_or(TreeError::NoActiveContainer)?; if self.tree[node_ix].get_type() == ContainerType::View { node_ix = self.tree.parent_of(node_ix) .expect("View had no parent"); } self.tree[node_ix].set_layout(new_layout) .map_err(TreeError::Container)?; self.validate(); let workspace_ix = self.tree.ancestor_of_type(node_ix, ContainerType::Workspace)?; self.layout(workspace_ix); Ok(()) } /// Gets the active container and toggles it based on the following rules: /// * If horizontal, make it vertical /// * else, make it horizontal /// This method does *NOT* update the actual views geometry, that needs to be /// done separately by the caller pub fn toggle_cardinal_tiling(&mut self, id: Uuid) -> CommandResult { { // NOTE: This stupid mutable lookup can't be its own function, see: // https://www.reddit.com/r/rust/comments/55o54l/hey_rustaceans_got_an_easy_question_ask_here/d8pv5q9/?context=3 let node_ix 
= try!(self.tree.lookup_id(id) .ok_or(TreeError::NodeNotFound(id))); let container_t = self.tree[node_ix].get_type(); if container_t == ContainerType::View { let parent_id = try!(self.parent_of(id)).get_id(); return self.toggle_cardinal_tiling(parent_id) } let new_layout = match self.tree[node_ix].get_layout()? { Layout::Horizontal => Layout::Vertical, _ => Layout::Horizontal }; self.set_layout(node_ix, new_layout) } self.validate(); Ok(()) } /// Calculates how much to scale on average for each value given. /// If the value is 0 (i.e the width or height of the container is 0), /// then it is calculated as max / children_values.len() fn calculate_scale(children_values: Vec<f32>, max: f32) -> f32 { let mut scale = 0.0; let len = children_values.len(); for mut value in children_values { if value <= 0.0 { value = max / len.checked_sub(1).unwrap_or(1) as f32; } scale += value; } return scale; } fn generic_tile<SizeF, RemainF, PointF> (&mut self, node_ix: NodeIndex, geometry: Geometry, children: &[NodeIndex], new_size_f: SizeF, remaining_size_f: RemainF, new_point_f: PointF, fullscreen_apps: &mut Vec<NodeIndex>) where SizeF: Fn(Size, Geometry) -> Size, RemainF: Fn(Geometry, Geometry) -> Size, PointF: Fn(Size, Geometry) -> Point { let mut sub_geometry = geometry.clone(); for (index, child_ix) in children.iter().enumerate() { let child_size = self.tree[*child_ix].get_geometry() .expect("Child had no geometry").size; let new_size = new_size_f(child_size, sub_geometry.clone()); sub_geometry = Geometry { origin: sub_geometry.origin.clone(), size: new_size.clone() }; // If last child, then just give it the remaining height if index == children.len() - 1 { let new_size = remaining_size_f(sub_geometry.clone(), self.tree[node_ix].get_geometry() .expect("Container had no geometry")); sub_geometry = Geometry { origin: sub_geometry.origin, size: new_size }; } self.layout_helper(*child_ix, sub_geometry.clone(), fullscreen_apps); // Next sub container needs to start where this one 
ends let new_point = new_point_f(new_size.clone(), sub_geometry.clone()); sub_geometry = Geometry { // lambda to calculate new point, given a new size // which is calculated in the function origin: new_point, size: new_size }; } self.validate(); } pub fn set_layout(&mut self, node_ix: NodeIndex, new_layout: Layout) { match self.tree[node_ix] { Container::Container { ref mut layout, .. } => { *layout = new_layout; }, ref container => { warn!("Can not set layout on non-container {:#?}", container); return; } } if new_layout == Layout::Vertical || new_layout == Layout::Horizontal { let workspace_ix = self.tree.ancestor_of_type( node_ix, ContainerType::Workspace) .expect("Node did not have a workspace as an ancestor"); if self.tree.on_path(workspace_ix) { self.set_container_visibility(node_ix, true) } } } /// Normalizes the geometry of a view to be the same size as it's siblings, /// based on the parent container's layout, at the 0 point of the parent container. /// Note this does not auto-tile, only modifies this one view. /// /// Useful if a container's children want to be evenly distributed, or a new view /// is being added. pub fn normalize_view(&mut self, view: WlcView) { if let Some(view_ix) = self.tree.descendant_with_handle(self.tree.root_ix(), view.into()) { self.normalize_container(view_ix); } } /// Normalizes the geometry of a view or a container of views so that /// the view is the same size as its siblings. 
pub fn normalize_container(&mut self, node_ix: NodeIndex) { // if floating, do not normalize if self.tree[node_ix].floating() { if cfg!(debug_assertions) || !debug_enabled() { error!("Tried to normalize {:?}\n{:#?}", node_ix, self); panic!("Tried to normalize a floating view, are you sure you want to do that?") } else { warn!("Tried to normalize {:?}\n{:#?}", node_ix, self); return } } match self.tree[node_ix].get_type() { ContainerType::Container => { for child_ix in self.tree.grounded_children(node_ix) { self.normalize_container(child_ix) } }, ContainerType::View => { let parent_ix = self.tree.ancestor_of_type(node_ix, ContainerType::Container) .expect("View had no container parent"); let new_geometry: Geometry; let num_siblings = cmp::max(1, self.tree.grounded_children(parent_ix).len() .checked_sub(1).unwrap_or(0)) as u32; let parent_geometry = self.tree[parent_ix].get_actual_geometry() .expect("Parent container had no geometry"); match self.tree[parent_ix] { Container::Container { ref layout, .. } => { match *layout { Layout::Horizontal => { new_geometry = Geometry { origin: parent_geometry.origin.clone(), size: Size { w: parent_geometry.size.w / num_siblings, h: parent_geometry.size.h } }; } Layout::Vertical => { new_geometry = Geometry { origin: parent_geometry.origin.clone(), size: Size { w: parent_geometry.size.w, h: parent_geometry.size.h / num_siblings } }; }, Layout::Tabbed | Layout::Stacked => new_geometry = parent_geometry } }, _ => unreachable!() }; self.tree[node_ix].set_geometry(ResizeEdge::empty(), new_geometry); }, container => { error!("Tried to normalize a {:#?}", container); panic!("Can only normalize the view on a view or container") } } } /// Tiles these containers above all the other containers in its workspace. /// /// If multiple containers are in the same workspace, each one will be drawn /// on top of the other, with the last one being the one ultimately seen by the user. 
/// /// # Panic /// This function will panic if the any of the containers are not a `View` or a `Container` pub fn layout_fullscreen_apps(&mut self, containers: Vec<NodeIndex>) { for node_ix in containers { let output_ix = self.tree.ancestor_of_type(node_ix, ContainerType::Output) .expect("Container did not have an output as an ancestor"); let output_geometry = self.tree[output_ix].get_actual_geometry() .expect("Output did not have a geometry associated with it"); // Sorry, this is an ugly borrow checker hack // Can't do self.layout() in Container::Container, borrowing mutably self mutably here. let maybe_node_ix = match self.tree[node_ix] { Container::View { handle, .. } => { handle.set_geometry(ResizeEdge::empty(), output_geometry); handle.bring_to_front(); let views = handle.get_output().get_views(); // TODO It would be nice to not have to iterate over // all the views just to do this. for view in views { // make sure children render above fullscreen parent if view.get_parent() == handle { view.bring_to_front(); } } None }, Container::Container { ref mut geometry, .. } => { *geometry = output_geometry; Some(node_ix) }, ref container => { error!("Expected a view or a container, got {:?}", container); panic!("Expected a View or a Container, got something else"); } }; if let Some(node_ix) = maybe_node_ix { self.layout(node_ix); } } } /// Adds gaps between all the views of the container at the `NodeIndex` /// This does not recurse if a container is found. /// /// If the `NodeIndex` doesn't point to a `Container`, an error is returned. fn add_gaps(&mut self, node_ix: NodeIndex) -> CommandResult { let layout = match self.tree[node_ix] { Container::Container { layout, .. 
} => layout, _ => return Err(TreeError::UuidNotAssociatedWith( ContainerType::Container)) }; let gap = Borders::gap_size(); if gap == 0 { return Ok(()) } let children = self.tree.grounded_children(node_ix); for (index, child_ix) in children.iter().enumerate() { let child = &mut self.tree[*child_ix]; match *child { Container::View { handle, .. } => { let mut geometry = handle.get_geometry().unwrap(); geometry.origin.x += (gap / 2) as i32; geometry.origin.y += (gap / 2) as i32; if index == children.len() - 1 { match layout { Layout::Horizontal => { geometry.size.w = geometry.size.w.saturating_sub(gap / 2) }, Layout::Vertical => { geometry.size.h = geometry.size.h.saturating_sub(gap / 2) }, // TODO Gaps for tabbed/stacked _ => {} } } match layout { Layout::Horizontal => { geometry.size.w = geometry.size.w.saturating_sub(gap / 2); geometry.size.h = geometry.size.h.saturating_sub(gap); }, Layout::Vertical => { geometry.size.w = geometry.size.w.saturating_sub(gap); geometry.size.h = geometry.size.h.saturating_sub(gap / 2); }, Layout::Tabbed | Layout::Stacked => { /* Should not be gaps within a stacked / tabbed, * because only one view is visible at a time. */ } } handle.set_geometry(ResizeEdge::empty(), geometry); }, // Do nothing, will get in the next recursion cycle Container::Container { .. } => {continue}, ref container => { error!("Iterating over a container, \ found non-view/containers!"); error!("Found: {:#?}", container); panic!("Applying gaps, found a non-view/container") } } } Ok(()) } /// Updates the geometry of the container, so that the borders are not /// hidden by the container. E.g this ensures that the borders are treated /// as part of the container for tiling/rendering purposes /// /// Returns the updated geometry for the container on success. /// That geometry should be used as the new constraint geometry for the /// children containers. 
fn update_container_geo_for_borders(&mut self, node_ix: NodeIndex, mut geometry: Geometry)
                                    -> Result<Geometry, TreeError> {
    let container = &mut self.tree[node_ix];
    match *container {
        Container::Container { ref mut apparent_geometry, ref borders, .. } => {
            // Only shrink the constraint when this container actually draws borders.
            if borders.is_some() {
                let thickness = Borders::thickness();
                let edge_thickness = thickness / 2;
                let title_size = Borders::title_bar_size();
                // Reserve room at the top for the border edge and half the title bar.
                geometry.origin.y += edge_thickness as i32;
                geometry.origin.y += (title_size / 2) as i32;
                geometry.size.h = geometry.size.h.saturating_sub(edge_thickness);
                geometry.size.h = geometry.size.h.saturating_sub(title_size / 2);
            }
            *apparent_geometry = geometry;
        },
        ref container => {
            error!("Attempted to add borders to non-view");
            error!("Found {:#?}", container);
            panic!("Applying gaps for borders, found non-view/container")
        }
    }
    Ok(geometry)
}

/// Updates the geometry of the view, so that the borders are not
/// hidden by other views. E.g this ensures that the borders are treated
/// as part of the container for tiling/rendering purposes
fn update_view_geo_for_borders(&mut self, node_ix: NodeIndex) -> CommandResult {
    let container = &mut self.tree[node_ix];
    let mut geometry = container.get_geometry()
        .expect("Container had no geometry");
    match *container {
        Container::View { handle, .. } => {
            let thickness = Borders::thickness();
            // Borders disabled: nothing to reserve (note: skips resize_borders below).
            if thickness == 0 {
                return Ok(())
            }
            let edge_thickness = (thickness / 2) as i32;
            let title_size = Borders::title_bar_size();
            // Inset the view so border edges and title bar stay visible.
            geometry.origin.x += edge_thickness;
            geometry.origin.y += edge_thickness;
            geometry.origin.y += title_size as i32;
            geometry.size.w = geometry.size.w.saturating_sub(thickness);
            geometry.size.h = geometry.size.h.saturating_sub(thickness);
            geometry.size.h = geometry.size.h.saturating_sub(title_size);
            handle.set_geometry(ResizeEdge::empty(), geometry);
        },
        ref container => {
            error!("Attempted to add borders to non-view");
            error!("Found {:#?}", container);
            panic!("Applying gaps for borders, found non-view/container")
        }
    }
    // Done to make the resizing on tiled works
    container.resize_borders(geometry);
    Ok(())
}

/// Draws the borders recursively, down from the top to the bottom.
fn draw_borders_rec(&mut self, mut children: Vec<NodeIndex>) -> CommandResult {
    // Worklist traversal: pop a node, queue its grounded children.
    while let Some(child_ix) = children.pop() {
        children.extend(self.tree.grounded_children(child_ix));
        let parent_ix = self.tree.parent_of(child_ix)
            .expect("Node had no parent");
        let siblings = self.tree.children_of(parent_ix);
        // 1-based position of this node among its siblings, for the title bar.
        let index = siblings.iter().position(|&node_ix| node_ix == child_ix)
            .map(|num| (num + 1).to_string());
        if Some(child_ix) != self.active_container {
            self.set_borders(child_ix, borders::Mode::Inactive)?;
        } else {
            // Active node inside a tabbed/stacked parent: show "Layout (i/n)".
            match self.tree[parent_ix] {
                Container::Container { layout, ref mut borders, .. } => {
                    if layout == Layout::Tabbed || layout == Layout::Stacked {
                        borders.as_mut().map(|b| {
                            b.set_title(format!("{:?} ({}/{})",
                                                layout,
                                                index.unwrap_or("?".into()),
                                                siblings.len()
                            ));
                        });
                    }
                },
                _ => {}
            }
            self.set_borders(child_ix, borders::Mode::Active)?;
        }
    }
    Ok(())
}
}

#[cfg(test)]
mod test {
    use super::super::super::LayoutTree;

    #[test]
    /// Ensure that calculate_scale is fair to all it's children
    fn calculate_scale_test() {
        assert_eq!(LayoutTree::calculate_scale(vec![], 0.0), 0.0);
        assert_eq!(LayoutTree::calculate_scale(vec![5.0, 5.0, 5.0, 5.0, 5.0, 5.0], 0.0), 30.0);
        assert_eq!(LayoutTree::calculate_scale(vec![5.0, 5.0, 5.0, 5.0, -5.0, 0.0], 5.0), 22.0);
    }
}
use std::cmp;
use petgraph::graph::NodeIndex;
use rustwlc::{WlcView, Geometry, Point, Size, ResizeEdge};
use super::super::{LayoutTree, TreeError};
use super::super::commands::CommandResult;
use super::super::core::container::{Container, ContainerType, ContainerErr, Layout, Handle};
use ::layout::core::borders::Borders;
use ::render::Renderable;
use ::debug_enabled;
use uuid::Uuid;

#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum LayoutErr {
    /// The node behind the UUID was asked to ground when it was already grounded.
    AlreadyGrounded(Uuid),
    /// The node behind the UUID was asked to float when it was already floating.
    AlreadyFloating(Uuid)
}

impl LayoutTree {
    /// Given the index of some container in the tree, lays out the children of
    /// that container based on what type of container it is and how big of an
    /// area is allocated for it and its children.
    pub fn layout(&mut self, node_ix: NodeIndex) {
        match self.tree[node_ix].get_type() {
            // Root: simply recurse into each output.
            ContainerType::Root => {
                for output_ix in self.tree.children_of(node_ix) {
                    self.layout(output_ix);
                }
            }
            ContainerType::Output => {
                let geometry;
                {
                    let container = &mut self.tree[node_ix];
                    geometry = container.get_geometry()
                        .expect("Output had no geometry");
                    let actual_geometry = container.get_actual_geometry()
                        .expect("Output had no actual geometry");
                    match *container {
                        Container::Output { ref mut background, .. } => {
                            // update the background size
                            if let Some(background) = *background {
                                background.set_geometry(ResizeEdge::empty(), actual_geometry)
                            }
                        }
                        _ => unreachable!()
                    }
                }
                // Lay out every workspace; fullscreen apps are tiled last so
                // they end up above everything else.
                let mut fullscreen_apps = Vec::new();
                for workspace_ix in self.tree.children_of(node_ix) {
                    self.layout_helper(workspace_ix, geometry, &mut fullscreen_apps);
                }
                self.layout_fullscreen_apps(fullscreen_apps);
            }
            ContainerType::Workspace => {
                // get geometry from the parent output
                let output_ix = self.tree.ancestor_of_type(node_ix, ContainerType::Output)
                    .expect("Workspace had no output parent");
                let output_geometry = self.tree[output_ix].get_geometry()
                    .expect("Could not get output geometry");
                let mut fullscreen_apps = Vec::new();
                self.layout_helper(node_ix, output_geometry, &mut fullscreen_apps);
                self.layout_fullscreen_apps(fullscreen_apps)
            }
            ContainerType::Container => {
                let geometry = match self.tree[node_ix] {
                    Container::Container { geometry, .. } => geometry,
                    _ => unreachable!()
                };
                // TODO Fake vector that doesn't allocate for this case?
                let mut fullscreen_apps = Vec::new();
                self.layout_helper(node_ix, geometry, &mut fullscreen_apps);
            }
            // Views are not laid out individually; defer to the parent.
            ContainerType::View => {
                let parent_ix = self.tree.parent_of(node_ix)
                    .expect("View had no parent");
                self.layout(parent_ix);
            }
        }
        self.validate();
    }

    /// Helper function to layout a container. The geometry is the constraint geometry,
    /// the container tries to lay itself out within the confines defined by the constraint.
    /// Generally, this should not be used directly and layout should be used.
fn layout_helper(&mut self, node_ix: NodeIndex, mut geometry: Geometry, fullscreen_apps: &mut Vec<NodeIndex>) { if self.tree[node_ix].fullscreen() { fullscreen_apps.push(node_ix); } match self.tree[node_ix].get_type() { ContainerType::Root => { warn!("Ignoring geometry constraint ({:#?}), \ deferring to each output's constraints", geometry); for child_ix in self.tree.children_of(node_ix) { self.layout(child_ix); } }, ContainerType::Output => { self.tree[node_ix].set_geometry(ResizeEdge::empty(), geometry); for child_ix in self.tree.children_of(node_ix) { self.layout_helper(child_ix, geometry, fullscreen_apps); } } ContainerType::Workspace => { self.tree[node_ix].set_geometry(ResizeEdge::empty(), geometry); for child_ix in self.tree.grounded_children(node_ix) { self.layout_helper(child_ix, geometry, fullscreen_apps); } // place floating children above everything else let root_ix = self.tree.children_of(node_ix)[0]; for child_ix in self.tree.floating_children(root_ix) { // TODO Propogate error self.place_floating(child_ix, fullscreen_apps).ok(); } }, ContainerType::Container => { // Update the geometry so that borders are included when tiling. geometry = self.update_container_geo_for_borders(node_ix, geometry) .expect("Could not update container geo for tiling"); let layout = match self.tree[node_ix] { Container::Container { layout, .. 
} => layout, _ => unreachable!() }; match layout { Layout::Horizontal => { let children = self.tree.grounded_children(node_ix); let children_len = children.len(); let mut scale = LayoutTree::calculate_scale(children.iter().map(|child_ix| { let c_geometry = self.tree[*child_ix].get_geometry() .expect("Child had no geometry"); c_geometry.size.w as f32 }).collect(), geometry.size.w as f32); if scale > 0.1 { scale = geometry.size.w as f32 / scale; let new_size_f = |child_size: Size, sub_geometry: Geometry| { let width = if child_size.w > 0 { child_size.w as f32 } else { // If the width would become zero, just make it the average size of the container. // e.g, if container was width 500 w/ 2 children, this view would have a width of 250 geometry.size.w as f32 / children_len.checked_sub(1).unwrap_or(1) as f32 }; Size { w: ((width) * scale) as u32, h: sub_geometry.size.h } }; let remaining_size_f = |sub_geometry: Geometry, cur_geometry: Geometry| { let remaining_width = cur_geometry.origin.x + cur_geometry.size.w as i32 - sub_geometry.origin.x; Size { w: remaining_width as u32, h: sub_geometry.size.h } }; let new_point_f = |new_size: Size, sub_geometry: Geometry| { Point { x: sub_geometry.origin.x + new_size.w as i32, y: sub_geometry.origin.y } }; self.generic_tile(node_ix, geometry, children.as_slice(), new_size_f, remaining_size_f, new_point_f, fullscreen_apps); self.add_gaps(node_ix) .expect("Couldn't add gaps to horizontal container"); // TODO Propogate error self.draw_borders_rec(children).ok(); } } Layout::Vertical => { let children = self.tree.grounded_children(node_ix); let children_len = children.len(); let mut scale = LayoutTree::calculate_scale(children.iter().map(|child_ix| { let c_geometry = self.tree[*child_ix].get_geometry() .expect("Child had no geometry"); c_geometry.size.h as f32 }).collect(), geometry.size.h as f32); if scale > 0.1 { scale = geometry.size.h as f32 / scale; let new_size_f = |child_size: Size, sub_geometry: Geometry| { let height = if 
child_size.h > 0 { child_size.h as f32 } else { // If the height would become zero, just make it the average size of the container. // e.g, if container was height 500 w/ 2 children, this view would have a height of 250 geometry.size.h as f32 / children_len.checked_sub(1).unwrap_or(1) as f32 }; Size { w: sub_geometry.size.w, h: ((height) * scale) as u32 } }; let remaining_size_f = |sub_geometry: Geometry, cur_geometry: Geometry| { let remaining_height = cur_geometry.origin.y + cur_geometry.size.h as i32 - sub_geometry.origin.y; Size { w: sub_geometry.size.w, h: remaining_height as u32 } }; let new_point_f = |new_size: Size, sub_geometry: Geometry| { Point { x: sub_geometry.origin.x, y: sub_geometry.origin.y + new_size.h as i32 } }; self.generic_tile(node_ix, geometry, children.as_slice(), new_size_f, remaining_size_f, new_point_f, fullscreen_apps); self.add_gaps(node_ix) .expect("Couldn't add gaps to vertical container"); // TODO Propogate error self.draw_borders_rec(children).ok(); } }, Layout::Tabbed | Layout::Stacked => { let mut children = self.tree.grounded_children(node_ix); if children.len() == 0 { return; } children.push(node_ix); let c_geometry = self.tree[node_ix].get_geometry() .expect("Container had no geometry"); if let Some(visible_child) = self.tree.next_active_node(node_ix) { self.layout_helper(visible_child, c_geometry, fullscreen_apps); // set all the children invisible self.set_container_visibility(node_ix, false); // set the focused child to be visible self.set_container_visibility(visible_child, true); } // TODO Propogate error self.draw_borders_rec(children).ok(); }, } } ContainerType::View => { self.tree[node_ix].set_geometry(ResizeEdge::empty(), geometry); self.update_view_geo_for_borders(node_ix) .expect("Couldn't add border gaps to horizontal container"); } } self.validate(); } /// Attempts to set the node behind the id to be floating. 
/// /// This removes the container from its parent and makes its new parent- /// the workspace it resides in. /// /// The view will have a geometry of 1/2 the height/width, and set right in the /// middle of the screen. /// /// This will change the active container, but **not** the active path, /// it will remain pointing at the previous parent container. pub fn float_container(&mut self, id: Uuid) -> CommandResult { let node_ix = try!(self.tree.lookup_id(id).ok_or(TreeError::NodeNotFound(id))); if self.tree.is_root_container(node_ix) { return Err(TreeError::InvalidOperationOnRootContainer(id)) } if self.tree[node_ix].floating() { warn!("Trying to float an already floating container"); return Err(TreeError::Layout(LayoutErr::AlreadyFloating(id))); } let output_ix = try!(self.tree.ancestor_of_type(node_ix, ContainerType::Output) .map_err(|err| TreeError::PetGraph(err))); let output_size = match self.tree[output_ix] { Container::Output { handle, .. } => { handle.get_resolution().expect("Output had no resolution") }, _ => unreachable!() }; { let container = &mut self.tree[node_ix]; try!(container.set_floating(true) .map_err(|_| TreeError::UuidWrongType(id, vec!(ContainerType::View, ContainerType::Container)))); let new_geometry = Geometry { size: Size { h: output_size.h / 2, w: output_size.w / 2 }, origin: Point { x: (output_size.w / 2 - output_size.w / 4) as i32 , y: (output_size.h / 2 - output_size.h / 4) as i32 } }; match container.get_type() { ContainerType::View | ContainerType::Container => { container.set_geometry(ResizeEdge::empty(), new_geometry); }, _ => return Err(TreeError::UuidWrongType(id, vec!(ContainerType::View, ContainerType::Container))) } container.resize_borders(new_geometry); container.draw_borders()?; } let root_ix = self.tree.root_ix(); let root_c_ix = try!(self.tree.follow_path_until(root_ix, ContainerType::Container) .map_err(|_| TreeError::NoActiveContainer)); let parent_ix = self.tree.parent_of(node_ix) .expect("View had no parent node!"); 
try!(self.tree.move_into(node_ix, root_c_ix) .map_err(|err| TreeError::PetGraph(err))); self.tree.set_ancestor_paths_active(node_ix); if self.tree.can_remove_empty_parent(parent_ix) { try!(self.remove_view_or_container(parent_ix)); } let parent_ix = self.tree.parent_of(root_c_ix).unwrap(); self.layout(parent_ix); Ok(()) } pub fn ground_container(&mut self, id: Uuid) -> CommandResult { let floating_ix = try!(self.tree.lookup_id(id).ok_or(TreeError::NodeNotFound(id))); if !self.tree[floating_ix].floating() { warn!("Trying to ground an already grounded container"); return Err(TreeError::Layout(LayoutErr::AlreadyGrounded(id))); } let root_ix = self.tree.root_ix(); let mut node_ix = self.tree.follow_path(root_ix); // If view, need to make it a sibling if self.tree[node_ix].get_type() == ContainerType::View { node_ix = try!(self.tree.parent_of(node_ix) .map_err(|err| TreeError::PetGraph(err))); } { let container = &mut self.tree[floating_ix]; try!(container.set_floating(false) .map_err(|_| TreeError::UuidWrongType(id, vec!(ContainerType::View, ContainerType::Container)))); } try!(self.tree.move_into(floating_ix, node_ix) .map_err(|err| TreeError::PetGraph(err))); self.normalize_container(node_ix); let root_ix = self.tree.root_ix(); let root_c_ix = try!(self.tree.follow_path_until(root_ix, ContainerType::Container) .map_err(|_| TreeError::NoActiveContainer)); let parent_ix = self.tree.parent_of(root_c_ix).unwrap(); self.layout(parent_ix); Ok(()) } /// If the node is floating, places it at its reported position, above all /// other nodes. fn place_floating(&mut self, node_ix: NodeIndex, fullscreen_apps: &mut Vec<NodeIndex>) -> CommandResult { if self.tree[node_ix].fullscreen() { fullscreen_apps.push(node_ix); return Ok(()) } if !self.tree[node_ix].floating() { Err(ContainerErr::BadOperationOn( self.tree[node_ix].get_type(), "Tried to absolutely place a non-floating view!".into()))? } { let container = &mut self.tree[node_ix]; match *container { Container::Container { .. 
} => { unimplemented!() }, Container::View { ref handle, .. } => { handle.bring_to_front(); }, _ => unreachable!() } container.draw_borders()?; } for child_ix in self.tree.floating_children(node_ix) { self.place_floating(child_ix, fullscreen_apps)?; } Ok(()) } /// Changes the layout of the active container to the given layout. /// If the active container is a view, a new container is added with the given /// layout type. pub fn toggle_active_layout(&mut self, new_layout: Layout) -> CommandResult { if let Some(active_ix) = self.active_container { let parent_ix = self.tree.parent_of(active_ix) .expect("Active container had no parent"); if self.tree.is_root_container(active_ix) { self.set_layout(active_ix, new_layout); return Ok(()) } if self.tree.grounded_children(parent_ix).len() == 1 { self.set_layout(parent_ix, new_layout); return Ok(()) } let active_geometry = self.get_active_container() .expect("Could not get the active container") .get_geometry().expect("Active container had no geometry"); let output_ix = self.tree.ancestor_of_type(active_ix, ContainerType::Output)?; let output = match self.tree[output_ix].get_handle()? { Handle::Output(handle) => handle, _ => unreachable!() }; let borders = Borders::new(active_geometry, output) // TODO This will change when we get proper tabbed/stacked borders .map(|mut b| { b.title = format!("{:?} container", new_layout); b }); let mut new_container = Container::new_container(active_geometry, borders); new_container.set_layout(new_layout).ok(); self.add_container(new_container, active_ix)?; // add_container sets the active container to be the new container self.set_active_node(active_ix)?; let parent_ix = self.tree.parent_of(active_ix)?; self.layout(parent_ix); } self.validate(); Ok(()) } // Updates the tree's layout recursively starting from the active container. // If the active container is a view, it starts at the parent container. 
pub fn layout_active_of(&mut self, c_type: ContainerType) { if let Some(container_ix) = self.active_ix_of(c_type) { match c_type { ContainerType::Root | ContainerType::Output | ContainerType::Workspace => { self.layout(container_ix); }, ContainerType::Container => { let mut fullscreen_apps = Vec::new(); let geometry = self.tree[container_ix].get_geometry() .expect("Container didn't have a geometry"); self.layout_helper(container_ix, geometry, &mut fullscreen_apps); }, ContainerType::View => { warn!("Cannot simply update a view's geometry without {}", "consulting container, updating it's parent"); self.layout_active_of(ContainerType::Container); } } } else { warn!("{:#?} did not have a parent of type {:?}, doing nothing!", self, c_type); } self.validate(); } /// Sets the active container to the given layout. /// /// If the container is a view, it sets the layout of its parent to the /// given layout. /// /// Automatically retiles the container whose layout was changed. pub fn set_active_layout(&mut self, new_layout: Layout) -> CommandResult { let mut node_ix = self.active_container .ok_or(TreeError::NoActiveContainer)?; if self.tree[node_ix].get_type() == ContainerType::View { node_ix = self.tree.parent_of(node_ix) .expect("View had no parent"); } self.tree[node_ix].set_layout(new_layout) .map_err(TreeError::Container)?; self.validate(); let parent_ix = self.tree.parent_of(node_ix)?; self.layout(parent_ix); Ok(()) } /// Gets the active container and toggles it based on the following rules: /// * If horizontal, make it vertical /// * else, make it horizontal /// This method does *NOT* update the actual views geometry, that needs to be /// done separately by the caller pub fn toggle_cardinal_tiling(&mut self, id: Uuid) -> CommandResult { { // NOTE: This stupid mutable lookup can't be its own function, see: // https://www.reddit.com/r/rust/comments/55o54l/hey_rustaceans_got_an_easy_question_ask_here/d8pv5q9/?context=3 let node_ix = try!(self.tree.lookup_id(id) 
.ok_or(TreeError::NodeNotFound(id))); let container_t = self.tree[node_ix].get_type(); if container_t == ContainerType::View { let parent_id = try!(self.parent_of(id)).get_id(); return self.toggle_cardinal_tiling(parent_id) } let container = &mut self.tree[node_ix]; let new_layout = match container.get_layout()? { Layout::Horizontal => Layout::Vertical, _ => Layout::Horizontal }; container.set_layout(new_layout)?; } self.validate(); Ok(()) } /// Calculates how much to scale on average for each value given. /// If the value is 0 (i.e the width or height of the container is 0), /// then it is calculated as max / children_values.len() fn calculate_scale(children_values: Vec<f32>, max: f32) -> f32 { let mut scale = 0.0; let len = children_values.len(); for mut value in children_values { if value <= 0.0 { value = max / len.checked_sub(1).unwrap_or(1) as f32; } scale += value; } return scale; } fn generic_tile<SizeF, RemainF, PointF> (&mut self, node_ix: NodeIndex, geometry: Geometry, children: &[NodeIndex], new_size_f: SizeF, remaining_size_f: RemainF, new_point_f: PointF, fullscreen_apps: &mut Vec<NodeIndex>) where SizeF: Fn(Size, Geometry) -> Size, RemainF: Fn(Geometry, Geometry) -> Size, PointF: Fn(Size, Geometry) -> Point { let mut sub_geometry = geometry.clone(); for (index, child_ix) in children.iter().enumerate() { let child_size: Size; { let child = &mut self.tree[*child_ix]; child.set_visibility(true); child_size = child.get_geometry() .expect("Child had no geometry").size; } let new_size = new_size_f(child_size, sub_geometry.clone()); sub_geometry = Geometry { origin: sub_geometry.origin.clone(), size: new_size.clone() }; // If last child, then just give it the remaining height if index == children.len() - 1 { let new_size = remaining_size_f(sub_geometry.clone(), self.tree[node_ix].get_geometry() .expect("Container had no geometry")); sub_geometry = Geometry { origin: sub_geometry.origin, size: new_size }; } self.layout_helper(*child_ix, sub_geometry.clone(), 
fullscreen_apps); // Next sub container needs to start where this one ends let new_point = new_point_f(new_size.clone(), sub_geometry.clone()); sub_geometry = Geometry { // lambda to calculate new point, given a new size // which is calculated in the function origin: new_point, size: new_size }; } self.validate(); } pub fn set_layout(&mut self, node_ix: NodeIndex, new_layout: Layout) { match self.tree[node_ix] { Container::Container { ref mut layout, .. } => { *layout = new_layout; }, ref container => { warn!("Can not set layout on non-container {:#?}", container); return; } } } /// Normalizes the geometry of a view to be the same size as it's siblings, /// based on the parent container's layout, at the 0 point of the parent container. /// Note this does not auto-tile, only modifies this one view. /// /// Useful if a container's children want to be evenly distributed, or a new view /// is being added. pub fn normalize_view(&mut self, view: WlcView) { if let Some(view_ix) = self.tree.descendant_with_handle(self.tree.root_ix(), view.into()) { self.normalize_container(view_ix); } } /// Normalizes the geometry of a view or a container of views so that /// the view is the same size as its siblings. 
pub fn normalize_container(&mut self, node_ix: NodeIndex) { // if floating, do not normalize if self.tree[node_ix].floating() { if cfg!(debug_assertions) || !debug_enabled() { error!("Tried to normalize {:?}\n{:#?}", node_ix, self); panic!("Tried to normalize a floating view, are you sure you want to do that?") } else { warn!("Tried to normalize {:?}\n{:#?}", node_ix, self); return } } match self.tree[node_ix].get_type() { ContainerType::Container => { for child_ix in self.tree.grounded_children(node_ix) { self.normalize_container(child_ix) } }, ContainerType::View => { let parent_ix = self.tree.ancestor_of_type(node_ix, ContainerType::Container) .expect("View had no container parent"); let new_geometry: Geometry; let num_siblings = cmp::max(1, self.tree.grounded_children(parent_ix).len() .checked_sub(1).unwrap_or(0)) as u32; let parent_geometry = self.tree[parent_ix].get_geometry() .expect("Parent container had no geometry"); match self.tree[parent_ix] { Container::Container { ref layout, .. } => { match *layout { Layout::Horizontal => { new_geometry = Geometry { origin: parent_geometry.origin.clone(), size: Size { w: parent_geometry.size.w / num_siblings, h: parent_geometry.size.h } }; } Layout::Vertical => { new_geometry = Geometry { origin: parent_geometry.origin.clone(), size: Size { w: parent_geometry.size.w, h: parent_geometry.size.h / num_siblings } }; }, Layout::Tabbed | Layout::Stacked => new_geometry = parent_geometry } }, _ => unreachable!() }; self.tree[node_ix].set_geometry(ResizeEdge::empty(), new_geometry); }, container => { error!("Tried to normalize a {:#?}", container); panic!("Can only normalize the view on a view or container") } } } /// Tiles these containers above all the other containers in its workspace. /// /// If multiple containers are in the same workspace, each one will be drawn /// on top of the other, with the last one being the one ultimately seen by the user. 
/// /// # Panic /// This function will panic if the any of the containers are not a `View` or a `Container` pub fn layout_fullscreen_apps(&mut self, containers: Vec<NodeIndex>) { for node_ix in containers { let output_ix = self.tree.ancestor_of_type(node_ix, ContainerType::Output) .expect("Container did not have an output as an ancestor"); let output_geometry = self.tree[output_ix].get_actual_geometry() .expect("Output did not have a geometry associated with it"); // Sorry, this is an ugly borrow checker hack // Can't do self.layout() in Container::Container, borrowing mutably self mutably here. let maybe_node_ix = match self.tree[node_ix] { Container::View { handle, .. } => { handle.set_geometry(ResizeEdge::empty(), output_geometry); handle.bring_to_front(); let views = handle.get_output().get_views(); // TODO It would be nice to not have to iterate over // all the views just to do this. for view in views { // make sure children render above fullscreen parent if view.get_parent() == handle { view.bring_to_front(); } } None }, Container::Container { ref mut geometry, .. } => { *geometry = output_geometry; Some(node_ix) }, ref container => { error!("Expected a view or a container, got {:?}", container); panic!("Expected a View or a Container, got something else"); } }; if let Some(node_ix) = maybe_node_ix { self.layout(node_ix); } } } /// Adds gaps between all the views of the container at the `NodeIndex` /// This does not recurse if a container is found. /// /// If the `NodeIndex` doesn't point to a `Container`, an error is returned. fn add_gaps(&mut self, node_ix: NodeIndex) -> CommandResult { let layout = match self.tree[node_ix] { Container::Container { layout, .. 
} => layout, _ => return Err(TreeError::UuidNotAssociatedWith( ContainerType::Container)) }; let gap = Borders::gap_size(); if gap == 0 { return Ok(()) } let children = self.tree.grounded_children(node_ix); for (index, child_ix) in children.iter().enumerate() { let child = &mut self.tree[*child_ix]; match *child { Container::View { handle, .. } => { let mut geometry = handle.get_geometry().unwrap(); geometry.origin.x += (gap / 2) as i32; geometry.origin.y += (gap / 2) as i32; if index == children.len() - 1 { match layout { Layout::Horizontal => { geometry.size.w = geometry.size.w.saturating_sub(gap / 2) }, Layout::Vertical => { geometry.size.h = geometry.size.h.saturating_sub(gap / 2) }, // TODO Gaps for tabbed/stacked _ => {} } } match layout { Layout::Horizontal => { geometry.size.w = geometry.size.w.saturating_sub(gap / 2); geometry.size.h = geometry.size.h.saturating_sub(gap); }, Layout::Vertical => { geometry.size.w = geometry.size.w.saturating_sub(gap); geometry.size.h = geometry.size.h.saturating_sub(gap / 2); }, Layout::Tabbed | Layout::Stacked => { /* Should not be gaps within a stacked / tabbed, * because only one view is visible at a time. */ } } handle.set_geometry(ResizeEdge::empty(), geometry); }, // Do nothing, will get in the next recursion cycle Container::Container { .. } => {continue}, ref container => { error!("Iterating over a container, \ found non-view/containers!"); error!("Found: {:#?}", container); panic!("Applying gaps, found a non-view/container") } } } Ok(()) } /// Updates the geometry of the container, so that the borders are not /// hidden by the container. E.g this ensures that the borders are treated /// as part of the container for tiling/rendering purposes /// /// Returns the updated geometry for the container on success. /// That geometry should be used as the new constraint geometry for the /// children containers. 
fn update_container_geo_for_borders(&mut self, node_ix: NodeIndex, mut geometry: Geometry) -> Result<Geometry, TreeError> { let container = &mut self.tree[node_ix]; match *container { Container::Container { geometry: ref mut c_geometry, ref borders, .. } => { if borders.is_some() { let thickness = Borders::thickness(); let edge_thickness = thickness / 2; let title_size = Borders::title_bar_size(); geometry.origin.y += edge_thickness as i32; geometry.origin.y += (title_size / 2) as i32; geometry.size.h = geometry.size.h.saturating_sub(edge_thickness); geometry.size.h = geometry.size.h.saturating_sub(title_size / 2); } *c_geometry = geometry; }, ref container => { error!("Attempted to add borders to non-view"); error!("Found {:#?}", container); panic!("Applying gaps for borders, found non-view/container") } } Ok(geometry) } /// Updates the geometry of the view, so that the borders are not /// hidden by other views. E.g this ensures that the borders are treated /// as part of the container for tiling/rendering purposes fn update_view_geo_for_borders(&mut self, node_ix: NodeIndex) -> CommandResult { let container = &mut self.tree[node_ix]; let mut geometry = container.get_geometry() .expect("Container had no geometry"); match *container { Container::View { handle, .. 
} => { let borders = Borders::thickness(); if borders == 0 { return Ok(()) } let edge_thickness = (borders / 2) as i32; let title_size = Borders::title_bar_size(); geometry.origin.x += edge_thickness; geometry.origin.y += edge_thickness; geometry.origin.y += title_size as i32; geometry.size.w = geometry.size.w.saturating_sub(borders); geometry.size.h = geometry.size.h.saturating_sub(borders); geometry.size.h = geometry.size.h.saturating_sub(title_size); handle.set_geometry(ResizeEdge::empty(), geometry); }, ref container => { error!("Attempted to add borders to non-view"); error!("Found {:#?}", container); panic!("Applying gaps for borders, found non-view/container") } } // Done to make the resizing on tiled works container.resize_borders(geometry); Ok(()) } /// Draws the borders recursively, down from the top to the bottom. fn draw_borders_rec(&mut self, mut children: Vec<NodeIndex>) -> CommandResult { while children.len() > 0 { let child_ix = children.pop().unwrap(); children.extend(self.tree.grounded_children(child_ix)); let parent_ix = self.tree.parent_of(child_ix) .expect("Node had no parent"); let children = self.tree.children_of(parent_ix); let index = children.iter().position(|&node_ix| node_ix == child_ix) .map(|num| (num + 1).to_string()); let container; if Some(child_ix) != self.active_container { // TODO Just unpaint conditonally all up the tree // This should be the same for the parent drawing below // in the else case if !self.tree.on_path(parent_ix) { let parent_container = &mut self.tree[parent_ix]; parent_container.clear_border_color()?; parent_container.draw_borders()?; } container = &mut self.tree[child_ix]; container.clear_border_color()?; } else { match self.tree[parent_ix] { Container::Container { layout, ref mut borders, .. 
} => { if layout == Layout::Tabbed || layout == Layout::Stacked { borders.as_mut().map(|b| { b.set_title(format!("{:?} ({}/{})", layout, index.unwrap_or("?".into()), children.len() )); }); } }, _ => {} } { let parent_container = &mut self.tree[parent_ix]; parent_container.active_border_color()?; parent_container.draw_borders()?; } container = &mut self.tree[child_ix]; container.active_border_color()?; } container.draw_borders()?; } Ok(()) } } #[cfg(test)] mod test { use super::super::super::LayoutTree; #[test] /// Ensure that calculate_scale is fair to all it's children fn calculate_scale_test() { assert_eq!(LayoutTree::calculate_scale(vec!(), 0.0), 0.0); assert_eq!(LayoutTree::calculate_scale(vec!(5.0, 5.0, 5.0, 5.0, 5.0, 5.0), 0.0), 30.0); assert_eq!(LayoutTree::calculate_scale(vec!(5.0, 5.0, 5.0, 5.0, -5.0, 0.0), 5.0), 22.0); } } Quick graphical fix for tabbed/stack layout bug tabbed/stack layout bug: whenever an action happens that causes a re-tile, the tabbed stacked containers start shrinking. That really is annoying, because common operations (like spawning or destroying windows in a tab/stack container) cause it to resize, which disrupts that container AND all of it's neighbors. use std::cmp; use petgraph::graph::NodeIndex; use rustwlc::{WlcView, Geometry, Point, Size, ResizeEdge}; use super::super::{LayoutTree, TreeError}; use super::super::commands::CommandResult; use super::super::core::container::{Container, ContainerType, ContainerErr, Layout, Handle}; use ::layout::core::borders::Borders; use ::render::Renderable; use ::debug_enabled; use uuid::Uuid; #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum LayoutErr { /// The node behind the UUID was asked to ground when it was already grounded. AlreadyGrounded(Uuid), /// The node behind the UUID was asked to float when it was already floating. 
AlreadyFloating(Uuid) } impl LayoutTree { /// Given the index of some container in the tree, lays out the children of /// that container based on what type of container it is and how big of an /// area is allocated for it and its children. pub fn layout(&mut self, node_ix: NodeIndex) { match self.tree[node_ix].get_type() { ContainerType::Root => { for output_ix in self.tree.children_of(node_ix) { self.layout(output_ix); } } ContainerType::Output => { let geometry; { let container = &mut self.tree[node_ix]; geometry = container.get_geometry() .expect("Output had no geometry"); let actual_geometry = container.get_actual_geometry() .expect("Output had no actual geometry"); match *container { Container::Output { ref mut background, .. } => { // update the background size if let Some(background) = *background { background.set_geometry(ResizeEdge::empty(), actual_geometry) } } _ => unreachable!() } } let mut fullscreen_apps = Vec::new(); for workspace_ix in self.tree.children_of(node_ix) { self.layout_helper(workspace_ix, geometry, &mut fullscreen_apps); } self.layout_fullscreen_apps(fullscreen_apps); } ContainerType::Workspace => { // get geometry from the parent output let output_ix = self.tree.ancestor_of_type(node_ix, ContainerType::Output) .expect("Workspace had no output parent"); let output_geometry = self.tree[output_ix].get_geometry() .expect("Could not get output geometry"); let mut fullscreen_apps = Vec::new(); self.layout_helper(node_ix, output_geometry, &mut fullscreen_apps); self.layout_fullscreen_apps(fullscreen_apps) } ContainerType::Container => { let geometry = match self.tree[node_ix] { Container::Container { geometry, .. } => geometry, _ => unreachable!() }; // TODO Fake vector that doesn't allocate for this case? 
let mut fullscreen_apps = Vec::new(); self.layout_helper(node_ix, geometry, &mut fullscreen_apps); } ContainerType::View => { let parent_ix = self.tree.parent_of(node_ix) .expect("View had no parent"); self.layout(parent_ix); } } self.validate(); } /// Helper function to layout a container. The geometry is the constraint geometry, /// the container tries to lay itself out within the confines defined by the constraint. /// Generally, this should not be used directly and layout should be used. fn layout_helper(&mut self, node_ix: NodeIndex, mut geometry: Geometry, fullscreen_apps: &mut Vec<NodeIndex>) { if self.tree[node_ix].fullscreen() { fullscreen_apps.push(node_ix); } match self.tree[node_ix].get_type() { ContainerType::Root => { warn!("Ignoring geometry constraint ({:#?}), \ deferring to each output's constraints", geometry); for child_ix in self.tree.children_of(node_ix) { self.layout(child_ix); } }, ContainerType::Output => { self.tree[node_ix].set_geometry(ResizeEdge::empty(), geometry); for child_ix in self.tree.children_of(node_ix) { self.layout_helper(child_ix, geometry, fullscreen_apps); } } ContainerType::Workspace => { self.tree[node_ix].set_geometry(ResizeEdge::empty(), geometry); for child_ix in self.tree.grounded_children(node_ix) { self.layout_helper(child_ix, geometry, fullscreen_apps); } // place floating children above everything else let root_ix = self.tree.children_of(node_ix)[0]; for child_ix in self.tree.floating_children(root_ix) { // TODO Propogate error self.place_floating(child_ix, fullscreen_apps).ok(); } }, ContainerType::Container => { // Update the geometry so that borders are included when tiling. geometry = self.update_container_geo_for_borders(node_ix, geometry) .expect("Could not update container geo for tiling"); let layout = match self.tree[node_ix] { Container::Container { layout, .. 
} => layout, _ => unreachable!() }; match layout { Layout::Horizontal => { let children = self.tree.grounded_children(node_ix); let children_len = children.len(); let mut scale = LayoutTree::calculate_scale(children.iter().map(|child_ix| { let c_geometry = self.tree[*child_ix].get_geometry() .expect("Child had no geometry"); c_geometry.size.w as f32 }).collect(), geometry.size.w as f32); if scale > 0.1 { scale = geometry.size.w as f32 / scale; let new_size_f = |child_size: Size, sub_geometry: Geometry| { let width = if child_size.w > 0 { child_size.w as f32 } else { // If the width would become zero, just make it the average size of the container. // e.g, if container was width 500 w/ 2 children, this view would have a width of 250 geometry.size.w as f32 / children_len.checked_sub(1).unwrap_or(1) as f32 }; Size { w: ((width) * scale) as u32, h: sub_geometry.size.h } }; let remaining_size_f = |sub_geometry: Geometry, cur_geometry: Geometry| { let remaining_width = cur_geometry.origin.x + cur_geometry.size.w as i32 - sub_geometry.origin.x; Size { w: remaining_width as u32, h: sub_geometry.size.h } }; let new_point_f = |new_size: Size, sub_geometry: Geometry| { Point { x: sub_geometry.origin.x + new_size.w as i32, y: sub_geometry.origin.y } }; self.generic_tile(node_ix, geometry, children.as_slice(), new_size_f, remaining_size_f, new_point_f, fullscreen_apps); self.add_gaps(node_ix) .expect("Couldn't add gaps to horizontal container"); // TODO Propogate error self.draw_borders_rec(children).ok(); } } Layout::Vertical => { let children = self.tree.grounded_children(node_ix); let children_len = children.len(); let mut scale = LayoutTree::calculate_scale(children.iter().map(|child_ix| { let c_geometry = self.tree[*child_ix].get_geometry() .expect("Child had no geometry"); c_geometry.size.h as f32 }).collect(), geometry.size.h as f32); if scale > 0.1 { scale = geometry.size.h as f32 / scale; let new_size_f = |child_size: Size, sub_geometry: Geometry| { let height = if 
child_size.h > 0 { child_size.h as f32 } else { // If the height would become zero, just make it the average size of the container. // e.g, if container was height 500 w/ 2 children, this view would have a height of 250 geometry.size.h as f32 / children_len.checked_sub(1).unwrap_or(1) as f32 }; Size { w: sub_geometry.size.w, h: ((height) * scale) as u32 } }; let remaining_size_f = |sub_geometry: Geometry, cur_geometry: Geometry| { let remaining_height = cur_geometry.origin.y + cur_geometry.size.h as i32 - sub_geometry.origin.y; Size { w: sub_geometry.size.w, h: remaining_height as u32 } }; let new_point_f = |new_size: Size, sub_geometry: Geometry| { Point { x: sub_geometry.origin.x, y: sub_geometry.origin.y + new_size.h as i32 } }; self.generic_tile(node_ix, geometry, children.as_slice(), new_size_f, remaining_size_f, new_point_f, fullscreen_apps); self.add_gaps(node_ix) .expect("Couldn't add gaps to vertical container"); // TODO Propogate error self.draw_borders_rec(children).ok(); } }, Layout::Tabbed | Layout::Stacked => { let mut children = self.tree.grounded_children(node_ix); if children.len() == 0 { return; } children.push(node_ix); let c_geometry = self.tree[node_ix].get_geometry() .expect("Container had no geometry"); if let Some(visible_child) = self.tree.next_active_node(node_ix) { self.layout_helper(visible_child, c_geometry, fullscreen_apps); // set all the children invisible self.set_container_visibility(node_ix, false); // set the focused child to be visible self.set_container_visibility(visible_child, true); } // TODO Propogate error self.draw_borders_rec(children).ok(); }, } } ContainerType::View => { self.tree[node_ix].set_geometry(ResizeEdge::empty(), geometry); self.update_view_geo_for_borders(node_ix) .expect("Couldn't add border gaps to horizontal container"); } } self.validate(); } /// Attempts to set the node behind the id to be floating. 
/// /// This removes the container from its parent and makes its new parent- /// the workspace it resides in. /// /// The view will have a geometry of 1/2 the height/width, and set right in the /// middle of the screen. /// /// This will change the active container, but **not** the active path, /// it will remain pointing at the previous parent container. pub fn float_container(&mut self, id: Uuid) -> CommandResult { let node_ix = try!(self.tree.lookup_id(id).ok_or(TreeError::NodeNotFound(id))); if self.tree.is_root_container(node_ix) { return Err(TreeError::InvalidOperationOnRootContainer(id)) } if self.tree[node_ix].floating() { warn!("Trying to float an already floating container"); return Err(TreeError::Layout(LayoutErr::AlreadyFloating(id))); } let output_ix = try!(self.tree.ancestor_of_type(node_ix, ContainerType::Output) .map_err(|err| TreeError::PetGraph(err))); let output_size = match self.tree[output_ix] { Container::Output { handle, .. } => { handle.get_resolution().expect("Output had no resolution") }, _ => unreachable!() }; { let container = &mut self.tree[node_ix]; try!(container.set_floating(true) .map_err(|_| TreeError::UuidWrongType(id, vec!(ContainerType::View, ContainerType::Container)))); let new_geometry = Geometry { size: Size { h: output_size.h / 2, w: output_size.w / 2 }, origin: Point { x: (output_size.w / 2 - output_size.w / 4) as i32 , y: (output_size.h / 2 - output_size.h / 4) as i32 } }; match container.get_type() { ContainerType::View | ContainerType::Container => { container.set_geometry(ResizeEdge::empty(), new_geometry); }, _ => return Err(TreeError::UuidWrongType(id, vec!(ContainerType::View, ContainerType::Container))) } container.resize_borders(new_geometry); container.draw_borders()?; } let root_ix = self.tree.root_ix(); let root_c_ix = try!(self.tree.follow_path_until(root_ix, ContainerType::Container) .map_err(|_| TreeError::NoActiveContainer)); let parent_ix = self.tree.parent_of(node_ix) .expect("View had no parent node!"); 
try!(self.tree.move_into(node_ix, root_c_ix) .map_err(|err| TreeError::PetGraph(err))); self.tree.set_ancestor_paths_active(node_ix); if self.tree.can_remove_empty_parent(parent_ix) { try!(self.remove_view_or_container(parent_ix)); } let parent_ix = self.tree.parent_of(root_c_ix).unwrap(); self.layout(parent_ix); Ok(()) } pub fn ground_container(&mut self, id: Uuid) -> CommandResult { let floating_ix = try!(self.tree.lookup_id(id).ok_or(TreeError::NodeNotFound(id))); if !self.tree[floating_ix].floating() { warn!("Trying to ground an already grounded container"); return Err(TreeError::Layout(LayoutErr::AlreadyGrounded(id))); } let root_ix = self.tree.root_ix(); let mut node_ix = self.tree.follow_path(root_ix); // If view, need to make it a sibling if self.tree[node_ix].get_type() == ContainerType::View { node_ix = try!(self.tree.parent_of(node_ix) .map_err(|err| TreeError::PetGraph(err))); } { let container = &mut self.tree[floating_ix]; try!(container.set_floating(false) .map_err(|_| TreeError::UuidWrongType(id, vec!(ContainerType::View, ContainerType::Container)))); } try!(self.tree.move_into(floating_ix, node_ix) .map_err(|err| TreeError::PetGraph(err))); self.normalize_container(node_ix); let root_ix = self.tree.root_ix(); let root_c_ix = try!(self.tree.follow_path_until(root_ix, ContainerType::Container) .map_err(|_| TreeError::NoActiveContainer)); let parent_ix = self.tree.parent_of(root_c_ix).unwrap(); self.layout(parent_ix); Ok(()) } /// If the node is floating, places it at its reported position, above all /// other nodes. fn place_floating(&mut self, node_ix: NodeIndex, fullscreen_apps: &mut Vec<NodeIndex>) -> CommandResult { if self.tree[node_ix].fullscreen() { fullscreen_apps.push(node_ix); return Ok(()) } if !self.tree[node_ix].floating() { Err(ContainerErr::BadOperationOn( self.tree[node_ix].get_type(), "Tried to absolutely place a non-floating view!".into()))? } { let container = &mut self.tree[node_ix]; match *container { Container::Container { .. 
} => { unimplemented!() }, Container::View { ref handle, .. } => { handle.bring_to_front(); }, _ => unreachable!() } container.draw_borders()?; } for child_ix in self.tree.floating_children(node_ix) { self.place_floating(child_ix, fullscreen_apps)?; } Ok(()) } /// Changes the layout of the active container to the given layout. /// If the active container is a view, a new container is added with the given /// layout type. pub fn toggle_active_layout(&mut self, new_layout: Layout) -> CommandResult { if let Some(active_ix) = self.active_container { let parent_ix = self.tree.parent_of(active_ix) .expect("Active container had no parent"); if self.tree.is_root_container(active_ix) { self.set_layout(active_ix, new_layout); return Ok(()) } if self.tree.grounded_children(parent_ix).len() == 1 { self.set_layout(parent_ix, new_layout); return Ok(()) } let active_geometry = self.get_active_container() .expect("Could not get the active container") .get_geometry().expect("Active container had no geometry"); let output_ix = self.tree.ancestor_of_type(active_ix, ContainerType::Output)?; let output = match self.tree[output_ix].get_handle()? { Handle::Output(handle) => handle, _ => unreachable!() }; let borders = Borders::new(active_geometry, output) // TODO This will change when we get proper tabbed/stacked borders .map(|mut b| { b.title = format!("{:?} container", new_layout); b }); let mut new_container = Container::new_container(active_geometry, borders); new_container.set_layout(new_layout).ok(); self.add_container(new_container, active_ix)?; // add_container sets the active container to be the new container self.set_active_node(active_ix)?; let parent_ix = self.tree.parent_of(active_ix)?; self.layout(parent_ix); } self.validate(); Ok(()) } // Updates the tree's layout recursively starting from the active container. // If the active container is a view, it starts at the parent container. 
    pub fn layout_active_of(&mut self, c_type: ContainerType) {
        if let Some(container_ix) = self.active_ix_of(c_type) {
            match c_type {
                // Root/output/workspace re-tile from their own constraints.
                ContainerType::Root |
                ContainerType::Output |
                ContainerType::Workspace => {
                    self.layout(container_ix);
                },
                ContainerType::Container => {
                    // TODO Fake vector that doesn't allocate for this case?
                    let mut fullscreen_apps = Vec::new();
                    let geometry = self.tree[container_ix].get_geometry()
                        .expect("Container didn't have a geometry");
                    self.layout_helper(container_ix, geometry, &mut fullscreen_apps);
                },
                ContainerType::View => {
                    // A lone view cannot be re-tiled sensibly; defer to its
                    // parent container instead.
                    warn!("Cannot simply update a view's geometry without {}",
                          "consulting container, updating it's parent");
                    self.layout_active_of(ContainerType::Container);
                }
            }
        } else {
            warn!("{:#?} did not have a parent of type {:?}, doing nothing!",
                  self, c_type);
        }
        self.validate();
    }

    /// Sets the active container to the given layout.
    ///
    /// If the container is a view, it sets the layout of its parent to the
    /// given layout.
    ///
    /// Automatically retiles the container whose layout was changed.
    pub fn set_active_layout(&mut self, new_layout: Layout) -> CommandResult {
        let mut node_ix = self.active_container
            .ok_or(TreeError::NoActiveContainer)?;
        if self.tree[node_ix].get_type() == ContainerType::View {
            node_ix = self.tree.parent_of(node_ix)
                .expect("View had no parent");
        }
        self.tree[node_ix].set_layout(new_layout)
            .map_err(TreeError::Container)?;
        self.validate();
        // Re-tile the whole enclosing workspace so siblings adjust too.
        let workspace_ix = self.tree.ancestor_of_type(node_ix,
                                                      ContainerType::Workspace)?;
        self.layout(workspace_ix);
        Ok(())
    }

    /// Gets the active container and toggles it based on the following rules:
    /// * If horizontal, make it vertical
    /// * else, make it horizontal
    /// This method does *NOT* update the actual views geometry, that needs to be
    /// done separately by the caller
    pub fn toggle_cardinal_tiling(&mut self, id: Uuid) -> CommandResult {
        {
            // NOTE: This stupid mutable lookup can't be its own function, see:
            // https://www.reddit.com/r/rust/comments/55o54l/hey_rustaceans_got_an_easy_question_ask_here/d8pv5q9/?context=3
            let node_ix = try!(self.tree.lookup_id(id)
                               .ok_or(TreeError::NodeNotFound(id)));
            let container_t = self.tree[node_ix].get_type();
            if container_t == ContainerType::View {
                // Views have no layout of their own; toggle the parent.
                let parent_id = try!(self.parent_of(id)).get_id();
                return self.toggle_cardinal_tiling(parent_id)
            }
            let container = &mut self.tree[node_ix];
            // Horizontal becomes vertical; anything else (vertical, tabbed,
            // stacked) becomes horizontal.
            let new_layout = match container.get_layout()? {
                Layout::Horizontal => Layout::Vertical,
                _ => Layout::Horizontal
            };
            container.set_layout(new_layout)?;
        }
        self.validate();
        Ok(())
    }

    /// Calculates how much to scale on average for each value given.
    /// If the value is 0 (i.e the width or height of the container is 0),
    /// then it is calculated as max / children_values.len()
    ///
    /// NOTE(review): the code actually divides by `len - 1` (saturating to 1),
    /// not `len`, and the unit test below pins that behavior — the doc line
    /// above overstates; confirm before "fixing" either one.
    fn calculate_scale(children_values: Vec<f32>, max: f32) -> f32 {
        let mut scale = 0.0;
        let len = children_values.len();
        for mut value in children_values {
            // Non-positive sizes get a substitute share of `max`.
            if value <= 0.0 {
                value = max / len.checked_sub(1).unwrap_or(1) as f32;
            }
            scale += value;
        }
        return scale;
    }

    /// Tiles `children` along one axis inside `geometry`. The three closures
    /// parameterize the axis: `new_size_f` computes a child's size,
    /// `remaining_size_f` computes the last child's leftover span, and
    /// `new_point_f` computes where the next child starts.
    fn generic_tile<SizeF, RemainF, PointF>
        (&mut self,
         node_ix: NodeIndex, geometry: Geometry, children: &[NodeIndex],
         new_size_f: SizeF, remaining_size_f: RemainF, new_point_f: PointF,
         fullscreen_apps: &mut Vec<NodeIndex>)
        where SizeF: Fn(Size, Geometry) -> Size,
              RemainF: Fn(Geometry, Geometry) -> Size,
              PointF: Fn(Size, Geometry) -> Point
    {
        let mut sub_geometry = geometry.clone();
        for (index, child_ix) in children.iter().enumerate() {
            let child_size: Size;
            {
                let child = &mut self.tree[*child_ix];
                child.set_visibility(true);
                child_size = child.get_geometry()
                    .expect("Child had no geometry").size;
            }
            let new_size = new_size_f(child_size, sub_geometry.clone());
            sub_geometry = Geometry {
                origin: sub_geometry.origin.clone(),
                size: new_size.clone()
            };
            // If last child, then just give it the remaining height
            if index == children.len() - 1 {
                let new_size = remaining_size_f(sub_geometry.clone(),
                                                self.tree[node_ix].get_geometry()
                                                .expect("Container had no geometry"));
                sub_geometry = Geometry {
                    origin: sub_geometry.origin,
                    size: new_size
                };
            }
            self.layout_helper(*child_ix, sub_geometry.clone(), fullscreen_apps);
            // Next sub container needs to start where this one ends
            let new_point = new_point_f(new_size.clone(), sub_geometry.clone());
            sub_geometry = Geometry {
                // lambda to calculate new point, given a new size
                // which is calculated in the function
                origin: new_point,
                size: new_size
            };
        }
        self.validate();
    }

    /// Sets the layout variant on a `Container` node; warns and does nothing
    /// on any other node type.
    pub fn set_layout(&mut self, node_ix: NodeIndex, new_layout: Layout) {
        match self.tree[node_ix] {
            Container::Container { ref mut layout, .. } => {
                *layout = new_layout;
            },
            ref container => {
                warn!("Can not set layout on non-container {:#?}", container);
                return;
            }
        }
    }

    /// Normalizes the geometry of a view to be the same size as its siblings,
    /// based on the parent container's layout, at the 0 point of the parent container.
    /// Note this does not auto-tile, only modifies this one view.
    ///
    /// Useful if a container's children want to be evenly distributed, or a new view
    /// is being added.
    pub fn normalize_view(&mut self, view: WlcView) {
        if let Some(view_ix) = self.tree.descendant_with_handle(self.tree.root_ix(),
                                                                view.into()) {
            self.normalize_container(view_ix);
        }
    }

    /// Normalizes the geometry of a view or a container of views so that
    /// the view is the same size as its siblings.
    pub fn normalize_container(&mut self, node_ix: NodeIndex) {
        // if floating, do not normalize
        if self.tree[node_ix].floating() {
            // Normalizing a floating node is a logic error: panic loudly in
            // debug builds, but only warn and bail out otherwise.
            if cfg!(debug_assertions) || !debug_enabled() {
                error!("Tried to normalize {:?}\n{:#?}", node_ix, self);
                panic!("Tried to normalize a floating view, are you sure you want to do that?")
            } else {
                warn!("Tried to normalize {:?}\n{:#?}", node_ix, self);
                return
            }
        }
        match self.tree[node_ix].get_type() {
            ContainerType::Container => {
                // Recurse into each grounded (non-floating) child.
                for child_ix in self.tree.grounded_children(node_ix) {
                    self.normalize_container(child_ix)
                }
            },
            ContainerType::View => {
                let parent_ix = self.tree.ancestor_of_type(node_ix,
                                                           ContainerType::Container)
                    .expect("View had no container parent");
                let new_geometry: Geometry;
                // Share count used to split the parent's space, clamped to at
                // least 1 to avoid a division by zero below.
                // NOTE(review): this is len() - 1, i.e. one child is excluded
                // from the count — presumably the view being normalized;
                // confirm against callers before changing.
                let num_siblings = cmp::max(1,
                                            self.tree.grounded_children(parent_ix).len()
                                            .checked_sub(1).unwrap_or(0)) as u32;
                let parent_geometry = self.tree[parent_ix].get_geometry()
                    .expect("Parent container had no geometry");
                match self.tree[parent_ix] {
                    Container::Container { ref layout, .. } => {
                        match *layout {
                            Layout::Horizontal => {
                                // Horizontal: split the width, keep the height.
                                new_geometry = Geometry {
                                    origin: parent_geometry.origin.clone(),
                                    size: Size {
                                        w: parent_geometry.size.w / num_siblings,
                                        h: parent_geometry.size.h
                                    }
                                };
                            }
                            Layout::Vertical => {
                                // Vertical: split the height, keep the width.
                                new_geometry = Geometry {
                                    origin: parent_geometry.origin.clone(),
                                    size: Size {
                                        w: parent_geometry.size.w,
                                        h: parent_geometry.size.h / num_siblings
                                    }
                                };
                            },
                            // Tabbed/stacked children each fill the parent,
                            // since only one of them is visible at a time.
                            Layout::Tabbed | Layout::Stacked =>
                                new_geometry = parent_geometry
                        }
                    },
                    _ => unreachable!()
                };
                self.tree[node_ix].set_geometry(ResizeEdge::empty(), new_geometry);
            },
            container => {
                error!("Tried to normalize a {:#?}", container);
                panic!("Can only normalize the view on a view or container")
            }
        }
    }

    /// Tiles these containers above all the other containers in its workspace.
    ///
    /// If multiple containers are in the same workspace, each one will be drawn
    /// on top of the other, with the last one being the one ultimately seen by the user.
/// /// # Panic /// This function will panic if the any of the containers are not a `View` or a `Container` pub fn layout_fullscreen_apps(&mut self, containers: Vec<NodeIndex>) { for node_ix in containers { let output_ix = self.tree.ancestor_of_type(node_ix, ContainerType::Output) .expect("Container did not have an output as an ancestor"); let output_geometry = self.tree[output_ix].get_actual_geometry() .expect("Output did not have a geometry associated with it"); // Sorry, this is an ugly borrow checker hack // Can't do self.layout() in Container::Container, borrowing mutably self mutably here. let maybe_node_ix = match self.tree[node_ix] { Container::View { handle, .. } => { handle.set_geometry(ResizeEdge::empty(), output_geometry); handle.bring_to_front(); let views = handle.get_output().get_views(); // TODO It would be nice to not have to iterate over // all the views just to do this. for view in views { // make sure children render above fullscreen parent if view.get_parent() == handle { view.bring_to_front(); } } None }, Container::Container { ref mut geometry, .. } => { *geometry = output_geometry; Some(node_ix) }, ref container => { error!("Expected a view or a container, got {:?}", container); panic!("Expected a View or a Container, got something else"); } }; if let Some(node_ix) = maybe_node_ix { self.layout(node_ix); } } } /// Adds gaps between all the views of the container at the `NodeIndex` /// This does not recurse if a container is found. /// /// If the `NodeIndex` doesn't point to a `Container`, an error is returned. fn add_gaps(&mut self, node_ix: NodeIndex) -> CommandResult { let layout = match self.tree[node_ix] { Container::Container { layout, .. 
} => layout, _ => return Err(TreeError::UuidNotAssociatedWith( ContainerType::Container)) }; let gap = Borders::gap_size(); if gap == 0 { return Ok(()) } let children = self.tree.grounded_children(node_ix); for (index, child_ix) in children.iter().enumerate() { let child = &mut self.tree[*child_ix]; match *child { Container::View { handle, .. } => { let mut geometry = handle.get_geometry().unwrap(); geometry.origin.x += (gap / 2) as i32; geometry.origin.y += (gap / 2) as i32; if index == children.len() - 1 { match layout { Layout::Horizontal => { geometry.size.w = geometry.size.w.saturating_sub(gap / 2) }, Layout::Vertical => { geometry.size.h = geometry.size.h.saturating_sub(gap / 2) }, // TODO Gaps for tabbed/stacked _ => {} } } match layout { Layout::Horizontal => { geometry.size.w = geometry.size.w.saturating_sub(gap / 2); geometry.size.h = geometry.size.h.saturating_sub(gap); }, Layout::Vertical => { geometry.size.w = geometry.size.w.saturating_sub(gap); geometry.size.h = geometry.size.h.saturating_sub(gap / 2); }, Layout::Tabbed | Layout::Stacked => { /* Should not be gaps within a stacked / tabbed, * because only one view is visible at a time. */ } } handle.set_geometry(ResizeEdge::empty(), geometry); }, // Do nothing, will get in the next recursion cycle Container::Container { .. } => {continue}, ref container => { error!("Iterating over a container, \ found non-view/containers!"); error!("Found: {:#?}", container); panic!("Applying gaps, found a non-view/container") } } } Ok(()) } /// Updates the geometry of the container, so that the borders are not /// hidden by the container. E.g this ensures that the borders are treated /// as part of the container for tiling/rendering purposes /// /// Returns the updated geometry for the container on success. /// That geometry should be used as the new constraint geometry for the /// children containers. 
    fn update_container_geo_for_borders(&mut self, node_ix: NodeIndex,
                                        mut geometry: Geometry)
                                        -> Result<Geometry, TreeError> {
        let container = &mut self.tree[node_ix];
        match *container {
            Container::Container { geometry: ref mut c_geometry,
                                   ref borders, .. } => {
                if borders.is_some() {
                    // Reserve room at the top of the constraint geometry for
                    // half the border thickness and half the title bar, so
                    // children tiled inside don't cover the container border.
                    let thickness = Borders::thickness();
                    let edge_thickness = thickness / 2;
                    let title_size = Borders::title_bar_size();
                    geometry.origin.y += edge_thickness as i32;
                    geometry.origin.y += (title_size / 2) as i32;
                    geometry.size.h = geometry.size.h.saturating_sub(edge_thickness);
                    geometry.size.h = geometry.size.h.saturating_sub(title_size / 2);
                }
                // Record the (possibly shrunken) geometry on the container.
                *c_geometry = geometry;
            },
            ref container => {
                error!("Attempted to add borders to non-view");
                error!("Found {:#?}", container);
                panic!("Applying gaps for borders, found non-view/container")
            }
        }
        Ok(geometry)
    }

    /// Updates the geometry of the view, so that the borders are not
    /// hidden by other views. E.g this ensures that the borders are treated
    /// as part of the container for tiling/rendering purposes
    fn update_view_geo_for_borders(&mut self, node_ix: NodeIndex) -> CommandResult {
        let container = &mut self.tree[node_ix];
        let mut geometry = container.get_geometry()
            .expect("Container had no geometry");
        match *container {
            Container::View { handle, .. } => {
                let borders = Borders::thickness();
                // No borders configured: nothing to reserve space for.
                if borders == 0 { return Ok(()) }
                let edge_thickness = (borders / 2) as i32;
                let title_size = Borders::title_bar_size();
                // Shift the view right/down and shrink it so the border and
                // title bar drawn around it remain visible.
                geometry.origin.x += edge_thickness;
                geometry.origin.y += edge_thickness;
                geometry.origin.y += title_size as i32;
                geometry.size.w = geometry.size.w.saturating_sub(borders);
                geometry.size.h = geometry.size.h.saturating_sub(borders);
                geometry.size.h = geometry.size.h.saturating_sub(title_size);
                handle.set_geometry(ResizeEdge::empty(), geometry);
            },
            ref container => {
                error!("Attempted to add borders to non-view");
                error!("Found {:#?}", container);
                panic!("Applying gaps for borders, found non-view/container")
            }
        }
        // Resize the border drawables too, so resizing while tiled works.
        container.resize_borders(geometry);
        Ok(())
    }

    /// Draws the borders recursively, down from the top to the bottom.
    fn draw_borders_rec(&mut self, mut children: Vec<NodeIndex>) -> CommandResult {
        // Worklist traversal: pop one node, enqueue its grounded children,
        // repeat until the whole subtree has been drawn.
        while children.len() > 0 {
            let child_ix = children.pop().unwrap();
            children.extend(self.tree.grounded_children(child_ix));
            let parent_ix = self.tree.parent_of(child_ix)
                .expect("Node had no parent");
            // Shadows the worklist: these are the siblings of `child_ix`,
            // used to compute its 1-based position for tab/stack titles.
            let children = self.tree.children_of(parent_ix);
            let index = children.iter().position(|&node_ix| node_ix == child_ix)
                .map(|num| (num + 1).to_string());
            let container;
            if Some(child_ix) != self.active_container {
                // TODO Just unpaint conditionally all up the tree
                // This should be the same for the parent drawing below
                // in the else case
                if !self.tree.on_path(parent_ix) {
                    let parent_container = &mut self.tree[parent_ix];
                    parent_container.clear_border_color()?;
                    parent_container.draw_borders()?;
                }
                container = &mut self.tree[child_ix];
                container.clear_border_color()?;
            } else {
                // Active child: refresh the tab/stack title on the parent's
                // border (e.g. "Tabbed (2/3)") and paint parent and child
                // with the active border color.
                match self.tree[parent_ix] {
                    Container::Container { layout, ref mut borders, .. } => {
                        if layout == Layout::Tabbed || layout == Layout::Stacked {
                            borders.as_mut().map(|b| {
                                b.set_title(format!("{:?} ({}/{})",
                                                    layout,
                                                    index.unwrap_or("?".into()),
                                                    children.len()
                                ));
                            });
                        }
                    },
                    _ => {}
                }
                {
                    let parent_container = &mut self.tree[parent_ix];
                    parent_container.active_border_color()?;
                    parent_container.draw_borders()?;
                }
                container = &mut self.tree[child_ix];
                container.active_border_color()?;
            }
            container.draw_borders()?;
        }
        Ok(())
    }
}

#[cfg(test)]
mod test {
    use super::super::super::LayoutTree;

    #[test]
    /// Ensure that calculate_scale is fair to all its children
    fn calculate_scale_test() {
        assert_eq!(LayoutTree::calculate_scale(vec!(), 0.0), 0.0);
        assert_eq!(LayoutTree::calculate_scale(vec!(5.0, 5.0, 5.0, 5.0, 5.0, 5.0),
                                               0.0), 30.0);
        assert_eq!(LayoutTree::calculate_scale(vec!(5.0, 5.0, 5.0, 5.0, -5.0, 0.0),
                                               5.0), 22.0);
    }
}
/// KISS-style pseudo-random number generator combining three simple
/// generators: a linear congruential generator (`x`), an xorshift-like
/// generator (`y`, using rotations) and a multiply-with-carry generator
/// (`z` with carry `c`).
///
/// Not cryptographically secure. State is kept as `u64` so the constants
/// below fit on every target (`4294584393` overflows a 32-bit `usize`);
/// on 64-bit targets the output sequence is unchanged.
pub struct KissRng {
    x: u64, // LCG state
    y: u64, // xorshift state
    z: u64, // MWC state
    c: u64, // MWC carry
}

impl KissRng {
    /// Creates a generator seeded from the current wall-clock time, so
    /// consecutive program runs produce different sequences.
    ///
    /// Uses `std::time` instead of the deprecated pre-1.0 `time` crate's
    /// `precise_time_ns`, removing the external dependency.
    pub fn new() -> KissRng {
        // Nanoseconds since the Unix epoch; falls back to 0 rather than
        // panicking if the system clock is set before 1970.
        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_secs()
                      .wrapping_mul(1_000_000_000)
                      .wrapping_add(d.subsec_nanos() as u64))
            .unwrap_or(0);
        KissRng {
            x: 123456789u64.wrapping_add(now),
            y: 987654321u64.wrapping_add(now),
            z: 43219876u64.wrapping_add(now),
            c: 6543217u64.wrapping_add(now),
        }
    }

    /// Advances all three sub-generators and returns their combined output.
    pub fn next(&mut self) -> usize {
        // Linear congruential step.
        self.x = 314527869u64.wrapping_mul(self.x).wrapping_add(1234567);
        // Xorshift step (rotations instead of plain shifts, as in the
        // original implementation).
        self.y ^= self.y.rotate_left(5);
        self.y ^= self.y.rotate_right(7);
        self.y ^= self.y.rotate_left(22);
        // Multiply-with-carry step; the carry is the rotated high half.
        self.z = 4294584393u64.wrapping_mul(self.z).wrapping_add(self.c);
        self.c = self.z.rotate_right(32);
        self.x.wrapping_add(self.y).wrapping_add(self.z) as usize
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn kissrng_test_for_overflow() {
        // A million steps must complete without arithmetic-overflow panics.
        let mut rng = KissRng::new();
        for _ in 0..1000000 {
            rng.next();
        }
    }
}
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! A module for working with borrowed data.

#![stable(feature = "rust1", since = "1.0.0")]

use core::clone::Clone;
use core::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd};
use core::convert::AsRef;
use core::hash::{Hash, Hasher};
use core::marker::Sized;
use core::ops::Deref;
use core::option::Option;

// Crate-local imports: this module is built inside the std facade.
use fmt;
use alloc::{rc, arc};

use self::Cow::*;

/// A trait for borrowing data.
///
/// In general, there may be several ways to "borrow" a piece of data.  The
/// typical ways of borrowing a type `T` are `&T` (a shared borrow) and `&mut T`
/// (a mutable borrow). But types like `Vec<T>` provide additional kinds of
/// borrows: the borrowed slices `&[T]` and `&mut [T]`.
///
/// When writing generic code, it is often desirable to abstract over all ways
/// of borrowing data from a given type. That is the role of the `Borrow`
/// trait: if `T: Borrow<U>`, then `&U` can be borrowed from `&T`. A given
/// type can be borrowed as multiple different types. In particular, `Vec<T>:
/// Borrow<Vec<T>>` and `Vec<T>: Borrow<[T]>`.
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Borrow<Borrowed: ?Sized> {
    /// Immutably borrow from an owned value.
    #[stable(feature = "rust1", since = "1.0.0")]
    fn borrow(&self) -> &Borrowed;
}

/// A trait for mutably borrowing data.
///
/// Similar to `Borrow`, but for mutable borrows.
#[stable(feature = "rust1", since = "1.0.0")]
pub trait BorrowMut<Borrowed: ?Sized> : Borrow<Borrowed> {
    /// Mutably borrow from an owned value.
    #[stable(feature = "rust1", since = "1.0.0")]
    fn borrow_mut(&mut self) -> &mut Borrowed;
}

// Identity: every type can be borrowed as itself.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Borrow<T> for T {
    fn borrow(&self) -> &T { self }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> BorrowMut<T> for T {
    fn borrow_mut(&mut self) -> &mut T { self }
}

// References borrow as the referent, by dereferencing.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> Borrow<T> for &'a T {
    fn borrow(&self) -> &T { &**self }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> Borrow<T> for &'a mut T {
    fn borrow(&self) -> &T { &**self }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> BorrowMut<T> for &'a mut T {
    fn borrow_mut(&mut self) -> &mut T { &mut **self }
}

// Shared-ownership pointers borrow (immutably) as their pointee.
impl<T> Borrow<T> for rc::Rc<T> {
    fn borrow(&self) -> &T { &**self }
}

impl<T> Borrow<T> for arc::Arc<T> {
    fn borrow(&self) -> &T { &**self }
}

// A Cow borrows as `B` regardless of which variant it currently holds.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, B: ?Sized> Borrow<B> for Cow<'a, B> where B: ToOwned,
                                                   <B as ToOwned>::Owned: 'a {
    fn borrow(&self) -> &B { &**self }
}

/// A generalization of Clone to borrowed data.
///
/// Some types make it possible to go from borrowed to owned, usually by
/// implementing the `Clone` trait. But `Clone` works only for going from `&T`
/// to `T`. The `ToOwned` trait generalizes `Clone` to construct owned data
/// from any borrow of a given type.
#[stable(feature = "rust1", since = "1.0.0")]
pub trait ToOwned {
    #[stable(feature = "rust1", since = "1.0.0")]
    type Owned: Borrow<Self>;

    /// Create owned data from borrowed data, usually by copying.
    #[stable(feature = "rust1", since = "1.0.0")]
    fn to_owned(&self) -> Self::Owned;
}

// For clonable types, `to_owned` is simply `clone`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ToOwned for T where T: Clone {
    type Owned = T;
    fn to_owned(&self) -> T { self.clone() }
}

/// A clone-on-write smart pointer.
/// /// The type `Cow` is a smart pointer providing clone-on-write functionality: it /// can enclose and provide immutable access to borrowed data, and clone the /// data lazily when mutation or ownership is required. The type is designed to /// work with general borrowed data via the `Borrow` trait. /// /// `Cow` implements both `Deref`, which means that you can call /// non-mutating methods directly on the data it encloses. If mutation /// is desired, `to_mut` will obtain a mutable references to an owned /// value, cloning if necessary. /// /// # Examples /// /// ``` /// use std::borrow::Cow; /// /// fn abs_all(input: &mut Cow<[i32]>) { /// for i in 0..input.len() { /// let v = input[i]; /// if v < 0 { /// // clones into a vector the first time (if not already owned) /// input.to_mut()[i] = -v; /// } /// } /// } /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub enum Cow<'a, B: ?Sized + 'a> where B: ToOwned { /// Borrowed data. #[stable(feature = "rust1", since = "1.0.0")] Borrowed(&'a B), /// Owned data. #[stable(feature = "rust1", since = "1.0.0")] Owned(<B as ToOwned>::Owned) } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, B: ?Sized> Clone for Cow<'a, B> where B: ToOwned { fn clone(&self) -> Cow<'a, B> { match *self { Borrowed(b) => Borrowed(b), Owned(ref o) => { let b: &B = o.borrow(); Owned(b.to_owned()) }, } } } impl<'a, B: ?Sized> Cow<'a, B> where B: ToOwned { /// Acquire a mutable reference to the owned form of the data. /// /// Copies the data if it is not already owned. #[stable(feature = "rust1", since = "1.0.0")] pub fn to_mut(&mut self) -> &mut <B as ToOwned>::Owned { match *self { Borrowed(borrowed) => { *self = Owned(borrowed.to_owned()); self.to_mut() } Owned(ref mut owned) => owned } } /// Extract the owned data. /// /// Copies the data if it is not already owned. 
#[stable(feature = "rust1", since = "1.0.0")] pub fn into_owned(self) -> <B as ToOwned>::Owned { match self { Borrowed(borrowed) => borrowed.to_owned(), Owned(owned) => owned } } /// Returns true if this `Cow` wraps a borrowed value #[deprecated(since = "1.0.0", reason = "match on the enum instead")] #[unstable(feature = "std_misc")] pub fn is_borrowed(&self) -> bool { match *self { Borrowed(_) => true, _ => false, } } /// Returns true if this `Cow` wraps an owned value #[deprecated(since = "1.0.0", reason = "match on the enum instead")] #[unstable(feature = "std_misc")] pub fn is_owned(&self) -> bool { match *self { Owned(_) => true, _ => false, } } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, B: ?Sized> Deref for Cow<'a, B> where B: ToOwned { type Target = B; fn deref(&self) -> &B { match *self { Borrowed(borrowed) => borrowed, Owned(ref owned) => owned.borrow() } } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, B: ?Sized> Eq for Cow<'a, B> where B: Eq + ToOwned {} #[stable(feature = "rust1", since = "1.0.0")] impl<'a, B: ?Sized> Ord for Cow<'a, B> where B: Ord + ToOwned { #[inline] fn cmp(&self, other: &Cow<'a, B>) -> Ordering { Ord::cmp(&**self, &**other) } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, 'b, B: ?Sized, C: ?Sized> PartialEq<Cow<'b, C>> for Cow<'a, B> where B: PartialEq<C> + ToOwned, C: ToOwned, { #[inline] fn eq(&self, other: &Cow<'b, C>) -> bool { PartialEq::eq(&**self, &**other) } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, B: ?Sized> PartialOrd for Cow<'a, B> where B: PartialOrd + ToOwned, { #[inline] fn partial_cmp(&self, other: &Cow<'a, B>) -> Option<Ordering> { PartialOrd::partial_cmp(&**self, &**other) } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, B: ?Sized> fmt::Debug for Cow<'a, B> where B: fmt::Debug + ToOwned, <B as ToOwned>::Owned: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Borrowed(ref b) => fmt::Debug::fmt(b, f), Owned(ref o) => 
fmt::Debug::fmt(o, f), } } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, B: ?Sized> fmt::Display for Cow<'a, B> where B: fmt::Display + ToOwned, <B as ToOwned>::Owned: fmt::Display, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Borrowed(ref b) => fmt::Display::fmt(b, f), Owned(ref o) => fmt::Display::fmt(o, f), } } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, B: ?Sized> Hash for Cow<'a, B> where B: Hash + ToOwned { #[inline] fn hash<H: Hasher>(&self, state: &mut H) { Hash::hash(&**self, state) } } /// Trait for moving into a `Cow` #[unstable(feature = "into_cow", reason = "may be replaced by `convert::Into`")] pub trait IntoCow<'a, B: ?Sized> where B: ToOwned { /// Moves `self` into `Cow` fn into_cow(self) -> Cow<'a, B>; } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, B: ?Sized> IntoCow<'a, B> for Cow<'a, B> where B: ToOwned { fn into_cow(self) -> Cow<'a, B> { self } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T: Clone> AsRef<T> for Cow<'a, T> { fn as_ref(&self) -> &T { self } } Document std::borrow with examples // Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! A module for working with borrowed data. #![stable(feature = "rust1", since = "1.0.0")] use core::clone::Clone; use core::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd}; use core::convert::AsRef; use core::hash::{Hash, Hasher}; use core::marker::Sized; use core::ops::Deref; use core::option::Option; use fmt; use alloc::{rc, arc}; use self::Cow::*; /// A trait for borrowing data. 
/// /// In general, there may be several ways to "borrow" a piece of data. The /// typical ways of borrowing a type `T` are `&T` (a shared borrow) and `&mut T` /// (a mutable borrow). But types like `Vec<T>` provide additional kinds of /// borrows: the borrowed slices `&[T]` and `&mut [T]`. /// /// When writing generic code, it is often desirable to abstract over all ways /// of borrowing data from a given type. That is the role of the `Borrow` /// trait: if `T: Borrow<U>`, then `&U` can be borrowed from `&T`. A given /// type can be borrowed as multiple different types. In particular, `Vec<T>: /// Borrow<Vec<T>>` and `Vec<T>: Borrow<[T]>`. #[stable(feature = "rust1", since = "1.0.0")] pub trait Borrow<Borrowed: ?Sized> { /// Immutably borrow from an owned value. /// /// # Examples /// /// ``` /// use std::borrow::Borrow; /// /// fn check<T: Borrow<str>>(s: T) { /// assert_eq!("Hello", s.borrow()); /// } /// /// let s = "Hello".to_string(); /// /// check(s); /// /// let s = "Hello"; /// /// check(s); /// ``` #[stable(feature = "rust1", since = "1.0.0")] fn borrow(&self) -> &Borrowed; } /// A trait for mutably borrowing data. /// /// Similar to `Borrow`, but for mutable borrows. #[stable(feature = "rust1", since = "1.0.0")] pub trait BorrowMut<Borrowed: ?Sized> : Borrow<Borrowed> { /// Mutably borrow from an owned value. 
/// /// # Examples /// /// ``` /// use std::borrow::BorrowMut; /// /// fn check<T: BorrowMut<[i32]>>(mut v: T) { /// assert_eq!(&mut [1, 2, 3], v.borrow_mut()); /// } /// /// let v = vec![1, 2, 3]; /// /// check(v); /// ``` #[stable(feature = "rust1", since = "1.0.0")] fn borrow_mut(&mut self) -> &mut Borrowed; } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> Borrow<T> for T { fn borrow(&self) -> &T { self } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> BorrowMut<T> for T { fn borrow_mut(&mut self) -> &mut T { self } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T: ?Sized> Borrow<T> for &'a T { fn borrow(&self) -> &T { &**self } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T: ?Sized> Borrow<T> for &'a mut T { fn borrow(&self) -> &T { &**self } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T: ?Sized> BorrowMut<T> for &'a mut T { fn borrow_mut(&mut self) -> &mut T { &mut **self } } impl<T> Borrow<T> for rc::Rc<T> { fn borrow(&self) -> &T { &**self } } impl<T> Borrow<T> for arc::Arc<T> { fn borrow(&self) -> &T { &**self } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, B: ?Sized> Borrow<B> for Cow<'a, B> where B: ToOwned, <B as ToOwned>::Owned: 'a { fn borrow(&self) -> &B { &**self } } /// A generalization of Clone to borrowed data. /// /// Some types make it possible to go from borrowed to owned, usually by /// implementing the `Clone` trait. But `Clone` works only for going from `&T` /// to `T`. The `ToOwned` trait generalizes `Clone` to construct owned data /// from any borrow of a given type. #[stable(feature = "rust1", since = "1.0.0")] pub trait ToOwned { #[stable(feature = "rust1", since = "1.0.0")] type Owned: Borrow<Self>; /// Create owned data from borrowed data, usually by copying. 
#[stable(feature = "rust1", since = "1.0.0")] fn to_owned(&self) -> Self::Owned; } #[stable(feature = "rust1", since = "1.0.0")] impl<T> ToOwned for T where T: Clone { type Owned = T; fn to_owned(&self) -> T { self.clone() } } /// A clone-on-write smart pointer. /// /// The type `Cow` is a smart pointer providing clone-on-write functionality: it /// can enclose and provide immutable access to borrowed data, and clone the /// data lazily when mutation or ownership is required. The type is designed to /// work with general borrowed data via the `Borrow` trait. /// /// `Cow` implements both `Deref`, which means that you can call /// non-mutating methods directly on the data it encloses. If mutation /// is desired, `to_mut` will obtain a mutable references to an owned /// value, cloning if necessary. /// /// # Examples /// /// ``` /// use std::borrow::Cow; /// /// fn abs_all(input: &mut Cow<[i32]>) { /// for i in 0..input.len() { /// let v = input[i]; /// if v < 0 { /// // clones into a vector the first time (if not already owned) /// input.to_mut()[i] = -v; /// } /// } /// } /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub enum Cow<'a, B: ?Sized + 'a> where B: ToOwned { /// Borrowed data. #[stable(feature = "rust1", since = "1.0.0")] Borrowed(&'a B), /// Owned data. #[stable(feature = "rust1", since = "1.0.0")] Owned(<B as ToOwned>::Owned) } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, B: ?Sized> Clone for Cow<'a, B> where B: ToOwned { fn clone(&self) -> Cow<'a, B> { match *self { Borrowed(b) => Borrowed(b), Owned(ref o) => { let b: &B = o.borrow(); Owned(b.to_owned()) }, } } } impl<'a, B: ?Sized> Cow<'a, B> where B: ToOwned { /// Acquire a mutable reference to the owned form of the data. /// /// Copies the data if it is not already owned. 
/// /// # Examples /// /// ``` /// use std::borrow::Cow; /// /// let mut cow: Cow<[_]> = Cow::Owned(vec![1, 2, 3]); /// /// let hello = cow.to_mut(); /// /// assert_eq!(&[1, 2, 3], hello); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn to_mut(&mut self) -> &mut <B as ToOwned>::Owned { match *self { Borrowed(borrowed) => { *self = Owned(borrowed.to_owned()); self.to_mut() } Owned(ref mut owned) => owned } } /// Extract the owned data. /// /// Copies the data if it is not already owned. /// /// # Examples /// /// ``` /// use std::borrow::Cow; /// /// let cow: Cow<[_]> = Cow::Owned(vec![1, 2, 3]); /// /// let hello = cow.into_owned(); /// /// assert_eq!(vec![1, 2, 3], hello); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn into_owned(self) -> <B as ToOwned>::Owned { match self { Borrowed(borrowed) => borrowed.to_owned(), Owned(owned) => owned } } /// Returns true if this `Cow` wraps a borrowed value #[deprecated(since = "1.0.0", reason = "match on the enum instead")] #[unstable(feature = "std_misc")] pub fn is_borrowed(&self) -> bool { match *self { Borrowed(_) => true, _ => false, } } /// Returns true if this `Cow` wraps an owned value #[deprecated(since = "1.0.0", reason = "match on the enum instead")] #[unstable(feature = "std_misc")] pub fn is_owned(&self) -> bool { match *self { Owned(_) => true, _ => false, } } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, B: ?Sized> Deref for Cow<'a, B> where B: ToOwned { type Target = B; fn deref(&self) -> &B { match *self { Borrowed(borrowed) => borrowed, Owned(ref owned) => owned.borrow() } } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, B: ?Sized> Eq for Cow<'a, B> where B: Eq + ToOwned {} #[stable(feature = "rust1", since = "1.0.0")] impl<'a, B: ?Sized> Ord for Cow<'a, B> where B: Ord + ToOwned { #[inline] fn cmp(&self, other: &Cow<'a, B>) -> Ordering { Ord::cmp(&**self, &**other) } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, 'b, B: ?Sized, C: ?Sized> 
PartialEq<Cow<'b, C>> for Cow<'a, B> where B: PartialEq<C> + ToOwned, C: ToOwned, { #[inline] fn eq(&self, other: &Cow<'b, C>) -> bool { PartialEq::eq(&**self, &**other) } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, B: ?Sized> PartialOrd for Cow<'a, B> where B: PartialOrd + ToOwned, { #[inline] fn partial_cmp(&self, other: &Cow<'a, B>) -> Option<Ordering> { PartialOrd::partial_cmp(&**self, &**other) } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, B: ?Sized> fmt::Debug for Cow<'a, B> where B: fmt::Debug + ToOwned, <B as ToOwned>::Owned: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Borrowed(ref b) => fmt::Debug::fmt(b, f), Owned(ref o) => fmt::Debug::fmt(o, f), } } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, B: ?Sized> fmt::Display for Cow<'a, B> where B: fmt::Display + ToOwned, <B as ToOwned>::Owned: fmt::Display, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Borrowed(ref b) => fmt::Display::fmt(b, f), Owned(ref o) => fmt::Display::fmt(o, f), } } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, B: ?Sized> Hash for Cow<'a, B> where B: Hash + ToOwned { #[inline] fn hash<H: Hasher>(&self, state: &mut H) { Hash::hash(&**self, state) } } /// Trait for moving into a `Cow` #[unstable(feature = "into_cow", reason = "may be replaced by `convert::Into`")] pub trait IntoCow<'a, B: ?Sized> where B: ToOwned { /// Moves `self` into `Cow` fn into_cow(self) -> Cow<'a, B>; } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, B: ?Sized> IntoCow<'a, B> for Cow<'a, B> where B: ToOwned { fn into_cow(self) -> Cow<'a, B> { self } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T: Clone> AsRef<T> for Cow<'a, T> { fn as_ref(&self) -> &T { self } }
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use ast; use ast::{P, Name, Mrk}; use ast_util; use parse::token; use util::interner::StrInterner; use util::interner; use std::cast; use std::char; use std::local_data; #[deriving(Clone, Encodable, Decodable, Eq, IterBytes)] pub enum binop { PLUS, MINUS, STAR, SLASH, PERCENT, CARET, AND, OR, SHL, SHR, } #[deriving(Clone, Encodable, Decodable, Eq, IterBytes)] pub enum Token { /* Expression-operator symbols. */ EQ, LT, LE, EQEQ, NE, GE, GT, ANDAND, OROR, NOT, TILDE, BINOP(binop), BINOPEQ(binop), /* Structural symbols */ AT, DOT, DOTDOT, DOTDOTDOT, COMMA, SEMI, COLON, MOD_SEP, RARROW, LARROW, DARROW, FAT_ARROW, LPAREN, RPAREN, LBRACKET, RBRACKET, LBRACE, RBRACE, POUND, DOLLAR, /* Literals */ LIT_CHAR(u32), LIT_INT(i64, ast::int_ty), LIT_UINT(u64, ast::uint_ty), LIT_INT_UNSUFFIXED(i64), LIT_FLOAT(ast::Ident, ast::float_ty), LIT_FLOAT_UNSUFFIXED(ast::Ident), LIT_STR(ast::Ident), LIT_STR_RAW(ast::Ident, uint), /* raw str delimited by n hash symbols */ /* Name components */ // an identifier contains an "is_mod_name" boolean, // indicating whether :: follows this token with no // whitespace in between. IDENT(ast::Ident, bool), UNDERSCORE, LIFETIME(ast::Ident), /* For interpolation */ INTERPOLATED(nonterminal), DOC_COMMENT(ast::Ident), EOF, } #[deriving(Clone, Encodable, Decodable, Eq, IterBytes)] /// For interpolation during macro expansion. 
pub enum nonterminal { nt_item(@ast::item), nt_block(P<ast::Block>), nt_stmt(@ast::Stmt), nt_pat( @ast::Pat), nt_expr(@ast::Expr), nt_ty( P<ast::Ty>), nt_ident(~ast::Ident, bool), nt_attr(@ast::Attribute), // #[foo] nt_path(~ast::Path), nt_tt( @ast::token_tree), //needs @ed to break a circularity nt_matchers(~[ast::matcher]) } pub fn binop_to_str(o: binop) -> ~str { match o { PLUS => ~"+", MINUS => ~"-", STAR => ~"*", SLASH => ~"/", PERCENT => ~"%", CARET => ~"^", AND => ~"&", OR => ~"|", SHL => ~"<<", SHR => ~">>" } } pub fn to_str(input: @ident_interner, t: &Token) -> ~str { match *t { EQ => ~"=", LT => ~"<", LE => ~"<=", EQEQ => ~"==", NE => ~"!=", GE => ~">=", GT => ~">", NOT => ~"!", TILDE => ~"~", OROR => ~"||", ANDAND => ~"&&", BINOP(op) => binop_to_str(op), BINOPEQ(op) => binop_to_str(op) + "=", /* Structural symbols */ AT => ~"@", DOT => ~".", DOTDOT => ~"..", DOTDOTDOT => ~"...", COMMA => ~",", SEMI => ~";", COLON => ~":", MOD_SEP => ~"::", RARROW => ~"->", LARROW => ~"<-", DARROW => ~"<->", FAT_ARROW => ~"=>", LPAREN => ~"(", RPAREN => ~")", LBRACKET => ~"[", RBRACKET => ~"]", LBRACE => ~"{", RBRACE => ~"}", POUND => ~"#", DOLLAR => ~"$", /* Literals */ LIT_CHAR(c) => { let mut res = ~"'"; char::from_u32(c).unwrap().escape_default(|c| { res.push_char(c); }); res.push_char('\''); res } LIT_INT(i, t) => { i.to_str() + ast_util::int_ty_to_str(t) } LIT_UINT(u, t) => { u.to_str() + ast_util::uint_ty_to_str(t) } LIT_INT_UNSUFFIXED(i) => { i.to_str() } LIT_FLOAT(ref s, t) => { let mut body = ident_to_str(s).to_owned(); if body.ends_with(".") { body.push_char('0'); // `10.f` is not a float literal } body + ast_util::float_ty_to_str(t) } LIT_FLOAT_UNSUFFIXED(ref s) => { let mut body = ident_to_str(s).to_owned(); if body.ends_with(".") { body.push_char('0'); // `10.f` is not a float literal } body } LIT_STR(ref s) => { format!("\"{}\"", ident_to_str(s).escape_default()) } LIT_STR_RAW(ref s, n) => { format!("r{delim}\"{string}\"{delim}", delim="#".repeat(n), 
string=ident_to_str(s)) } /* Name components */ IDENT(s, _) => input.get(s.name).to_owned(), LIFETIME(s) => format!("'{}", input.get(s.name)), UNDERSCORE => ~"_", /* Other */ DOC_COMMENT(ref s) => ident_to_str(s).to_owned(), EOF => ~"<eof>", INTERPOLATED(ref nt) => { match nt { &nt_expr(e) => ::print::pprust::expr_to_str(e, input), &nt_attr(e) => ::print::pprust::attribute_to_str(e, input), _ => { ~"an interpolated " + match (*nt) { nt_item(..) => ~"item", nt_block(..) => ~"block", nt_stmt(..) => ~"statement", nt_pat(..) => ~"pattern", nt_attr(..) => fail!("should have been handled"), nt_expr(..) => fail!("should have been handled above"), nt_ty(..) => ~"type", nt_ident(..) => ~"identifier", nt_path(..) => ~"path", nt_tt(..) => ~"tt", nt_matchers(..) => ~"matcher sequence" } } } } } } pub fn can_begin_expr(t: &Token) -> bool { match *t { LPAREN => true, LBRACE => true, LBRACKET => true, IDENT(_, _) => true, UNDERSCORE => true, TILDE => true, LIT_CHAR(_) => true, LIT_INT(_, _) => true, LIT_UINT(_, _) => true, LIT_INT_UNSUFFIXED(_) => true, LIT_FLOAT(_, _) => true, LIT_FLOAT_UNSUFFIXED(_) => true, LIT_STR(_) => true, LIT_STR_RAW(_, _) => true, POUND => true, AT => true, NOT => true, BINOP(MINUS) => true, BINOP(STAR) => true, BINOP(AND) => true, BINOP(OR) => true, // in lambda syntax OROR => true, // in lambda syntax MOD_SEP => true, INTERPOLATED(nt_expr(..)) | INTERPOLATED(nt_ident(..)) | INTERPOLATED(nt_block(..)) | INTERPOLATED(nt_path(..)) => true, _ => false } } /// what's the opposite delimiter? 
pub fn flip_delimiter(t: &token::Token) -> token::Token { match *t { LPAREN => RPAREN, LBRACE => RBRACE, LBRACKET => RBRACKET, RPAREN => LPAREN, RBRACE => LBRACE, RBRACKET => LBRACKET, _ => fail!() } } pub fn is_lit(t: &Token) -> bool { match *t { LIT_CHAR(_) => true, LIT_INT(_, _) => true, LIT_UINT(_, _) => true, LIT_INT_UNSUFFIXED(_) => true, LIT_FLOAT(_, _) => true, LIT_FLOAT_UNSUFFIXED(_) => true, LIT_STR(_) => true, LIT_STR_RAW(_, _) => true, _ => false } } pub fn is_ident(t: &Token) -> bool { match *t { IDENT(_, _) => true, _ => false } } pub fn is_ident_or_path(t: &Token) -> bool { match *t { IDENT(_, _) | INTERPOLATED(nt_path(..)) => true, _ => false } } pub fn is_plain_ident(t: &Token) -> bool { match *t { IDENT(_, false) => true, _ => false } } pub fn is_bar(t: &Token) -> bool { match *t { BINOP(OR) | OROR => true, _ => false } } pub mod special_idents { use ast::Ident; pub static underscore : Ident = Ident { name: 0, ctxt: 0}; // apparently unused? pub static anon : Ident = Ident { name: 1, ctxt: 0}; pub static invalid : Ident = Ident { name: 2, ctxt: 0}; // '' pub static unary : Ident = Ident { name: 3, ctxt: 0}; // apparently unused? pub static not_fn : Ident = Ident { name: 4, ctxt: 0}; // apparently unused? pub static idx_fn : Ident = Ident { name: 5, ctxt: 0}; // apparently unused? pub static unary_minus_fn : Ident = Ident { name: 6, ctxt: 0}; // apparently unused? pub static clownshoes_extensions : Ident = Ident { name: 7, ctxt: 0}; pub static self_ : Ident = Ident { name: super::SELF_KEYWORD_NAME, ctxt: 0}; // 'self' /* for matcher NTs */ // none of these appear to be used, but perhaps references to // these are artificially fabricated by the macro system.... 
pub static item : Ident = Ident { name: 9, ctxt: 0}; pub static block : Ident = Ident { name: 10, ctxt: 0}; pub static stmt : Ident = Ident { name: 11, ctxt: 0}; pub static pat : Ident = Ident { name: 12, ctxt: 0}; pub static expr : Ident = Ident { name: 13, ctxt: 0}; pub static ty : Ident = Ident { name: 14, ctxt: 0}; pub static ident : Ident = Ident { name: 15, ctxt: 0}; pub static path : Ident = Ident { name: 16, ctxt: 0}; pub static tt : Ident = Ident { name: 17, ctxt: 0}; pub static matchers : Ident = Ident { name: 18, ctxt: 0}; pub static str : Ident = Ident { name: 19, ctxt: 0}; // for the type // apparently unused? /* outside of libsyntax */ pub static arg : Ident = Ident { name: 20, ctxt: 0}; pub static descrim : Ident = Ident { name: 21, ctxt: 0}; pub static clownshoe_abi : Ident = Ident { name: 22, ctxt: 0}; pub static clownshoe_stack_shim : Ident = Ident { name: 23, ctxt: 0}; pub static main : Ident = Ident { name: 24, ctxt: 0}; pub static opaque : Ident = Ident { name: 25, ctxt: 0}; pub static blk : Ident = Ident { name: 26, ctxt: 0}; pub static statik : Ident = Ident { name: super::STATIC_KEYWORD_NAME, ctxt: 0}; pub static clownshoes_foreign_mod: Ident = Ident { name: 28, ctxt: 0}; pub static unnamed_field: Ident = Ident { name: 29, ctxt: 0}; pub static c_abi: Ident = Ident { name: 30, ctxt: 0}; // apparently unused? pub static type_self: Ident = Ident { name: 31, ctxt: 0}; // `Self` } // here are the ones that actually occur in the source. Maybe the rest // should be removed? 
/*
special_idents::anon
special_idents::arg
special_idents::blk
special_idents::clownshoe_abi
special_idents::clownshoe_stack_shim
special_idents::clownshoes_extensions
special_idents::clownshoes_foreign_mod
special_idents::descrim
special_idents::invalid
special_idents::main
special_idents::matchers
special_idents::opaque
special_idents::self_
special_idents::statik
special_idents::tt
special_idents::type_self
special_idents::unnamed_field
*/

/**
 * Maps a token to a record specifying the corresponding binary
 * operator
 */
// Returns None for any token that is not a binary operator (e.g. `=`, `!`).
pub fn token_to_binop(tok: &Token) -> Option<ast::BinOp> {
    match *tok {
        BINOP(STAR)    => Some(ast::BiMul),
        BINOP(SLASH)   => Some(ast::BiDiv),
        BINOP(PERCENT) => Some(ast::BiRem),
        BINOP(PLUS)    => Some(ast::BiAdd),
        BINOP(MINUS)   => Some(ast::BiSub),
        BINOP(SHL)     => Some(ast::BiShl),
        BINOP(SHR)     => Some(ast::BiShr),
        BINOP(AND)     => Some(ast::BiBitAnd),
        BINOP(CARET)   => Some(ast::BiBitXor),
        BINOP(OR)      => Some(ast::BiBitOr),
        LT             => Some(ast::BiLt),
        LE             => Some(ast::BiLe),
        GE             => Some(ast::BiGe),
        GT             => Some(ast::BiGt),
        EQEQ           => Some(ast::BiEq),
        NE             => Some(ast::BiNe),
        ANDAND         => Some(ast::BiAnd),
        OROR           => Some(ast::BiOr),
        _              => None
    }
}

// looks like we can get rid of this completely...
pub type ident_interner = StrInterner;

// return a fresh interner, preloaded with special identifiers.
fn mk_fresh_ident_interner() -> @ident_interner {
    // The indices here must correspond to the numbers in
    // special_idents, in Keyword to_ident(), and in static
    // constants below.
    let init_vec = ~[
        "_",                  // 0
        "anon",               // 1
        "",                   // 2
        "unary",              // 3
        "!",                  // 4
        "[]",                 // 5
        "unary-",             // 6
        "__extensions__",     // 7
        "self",               // 8
        "item",               // 9
        "block",              // 10
        "stmt",               // 11
        "pat",                // 12
        "expr",               // 13
        "ty",                 // 14
        "ident",              // 15
        "path",               // 16
        "tt",                 // 17
        "matchers",           // 18
        "str",                // 19
        "arg",                // 20
        "descrim",            // 21
        "__rust_abi",         // 22
        "__rust_stack_shim",  // 23
        "main",               // 24
        "<opaque>",           // 25
        "blk",                // 26
        "static",             // 27
        "__foreign_mod__",    // 28
        "<unnamed_field>",    // 29
        "C",                  // 30
        "Self",               // 31
        // strict keywords begin here (STRICT_KEYWORD_START = 32)
        "as",                 // 32
        "break",              // 33
        "const",              // 34
        "do",                 // 35
        "else",               // 36
        "enum",               // 37
        "extern",             // 38
        "false",              // 39
        "fn",                 // 40
        "for",                // 41
        "if",                 // 42
        "impl",               // 43
        "let",                // 44
        "__log_level",        // 45
        "loop",               // 46
        "match",              // 47
        "mod",                // 48
        "mut",                // 49
        "once",               // 50
        "priv",               // 51
        "pub",                // 52
        "ref",                // 53
        "return",             // 54
        "static",             // 27 -- also a special ident (prefill de-dupes)
        "self",               //  8 -- also a special ident (prefill de-dupes)
        "struct",             // 55
        "super",              // 56
        "true",               // 57
        "trait",              // 58
        "type",               // 59
        "unsafe",             // 60
        "use",                // 61
        "while",              // 62
        "in",                 // 63
        "continue",           // 64
        "proc",               // 65
        // reserved keywords begin here (RESERVED_KEYWORD_START = 66)
        "be",                 // 66
        "pure",               // 67
        "yield",              // 68
        "typeof",             // 69
        "alignof",            // 70
        "offsetof",           // 71
        "sizeof",             // 72
    ];

    @interner::StrInterner::prefill(init_vec)
}

// Interned Name values for the special identifiers and the boundaries of
// the keyword ranges; these must stay in sync with init_vec above.
static SELF_KEYWORD_NAME: Name = 8;
static STATIC_KEYWORD_NAME: Name = 27;
static STRICT_KEYWORD_START: Name = 32;
static STRICT_KEYWORD_FINAL: Name = 65;
static RESERVED_KEYWORD_START: Name = 66;
static RESERVED_KEYWORD_FINAL: Name = 72;

// if an interner exists in TLS, return it. Otherwise, prepare a
// fresh one.
pub fn get_ident_interner() -> @ident_interner { local_data_key!(key: @@::parse::token::ident_interner) match local_data::get(key, |k| k.map(|k| *k)) { Some(interner) => *interner, None => { let interner = mk_fresh_ident_interner(); local_data::set(key, @interner); interner } } } /* for when we don't care about the contents; doesn't interact with TLD or serialization */ pub fn mk_fake_ident_interner() -> @ident_interner { @interner::StrInterner::new() } // maps a string to its interned representation pub fn intern(str : &str) -> Name { let interner = get_ident_interner(); interner.intern(str) } // gensyms a new uint, using the current interner pub fn gensym(str : &str) -> Name { let interner = get_ident_interner(); interner.gensym(str) } // map an interned representation back to a string pub fn interner_get(name : Name) -> @str { get_ident_interner().get(name) } // maps an identifier to the string that it corresponds to pub fn ident_to_str(id : &ast::Ident) -> @str { interner_get(id.name) } // maps a string to an identifier with an empty syntax context pub fn str_to_ident(str : &str) -> ast::Ident { ast::Ident::new(intern(str)) } // maps a string to a gensym'ed identifier pub fn gensym_ident(str : &str) -> ast::Ident { ast::Ident::new(gensym(str)) } // create a fresh name that maps to the same string as the old one. // note that this guarantees that str_ptr_eq(ident_to_str(src),interner_get(fresh_name(src))); // that is, that the new name and the old one are connected to ptr_eq strings. pub fn fresh_name(src : &ast::Ident) -> Name { let interner = get_ident_interner(); interner.gensym_copy(src.name) // following: debug version. Could work in final except that it's incompatible with // good error messages and uses of struct names in ambiguous could-be-binding // locations. Also definitely destroys the guarantee given above about ptr_eq. 
/*let num = rand::rng().gen_uint_range(0,0xffff); gensym(format!("{}_{}",ident_to_str(src),num))*/ } // it looks like there oughta be a str_ptr_eq fn, but no one bothered to implement it? // determine whether two @str values are pointer-equal pub fn str_ptr_eq(a : @str, b : @str) -> bool { unsafe { let p : uint = cast::transmute(a); let q : uint = cast::transmute(b); let result = p == q; // got to transmute them back, to make sure the ref count is correct: let _junk1 : @str = cast::transmute(p); let _junk2 : @str = cast::transmute(q); result } } // return true when two identifiers refer (through the intern table) to the same ptr_eq // string. This is used to compare identifiers in places where hygienic comparison is // not wanted (i.e. not lexical vars). pub fn ident_spelling_eq(a : &ast::Ident, b : &ast::Ident) -> bool { str_ptr_eq(interner_get(a.name),interner_get(b.name)) } // create a fresh mark. pub fn fresh_mark() -> Mrk { gensym("mark") } /** * All the valid words that have meaning in the Rust language. * * Rust keywords are either 'strict' or 'reserved'. Strict keywords may not * appear as identifiers at all. Reserved keywords are not used anywhere in * the language and may not appear as identifiers. 
*/ pub mod keywords { use ast::Ident; pub enum Keyword { // Strict keywords As, Break, Const, Do, Else, Enum, Extern, False, Fn, For, If, Impl, In, Let, __LogLevel, Loop, Match, Mod, Mut, Once, Priv, Pub, Ref, Return, Static, Self, Struct, Super, True, Trait, Type, Unsafe, Use, While, Continue, Proc, // Reserved keywords Alignof, Be, Offsetof, Pure, Sizeof, Typeof, Yield, } impl Keyword { pub fn to_ident(&self) -> Ident { match *self { As => Ident { name: 32, ctxt: 0 }, Break => Ident { name: 33, ctxt: 0 }, Const => Ident { name: 34, ctxt: 0 }, Do => Ident { name: 35, ctxt: 0 }, Else => Ident { name: 36, ctxt: 0 }, Enum => Ident { name: 37, ctxt: 0 }, Extern => Ident { name: 38, ctxt: 0 }, False => Ident { name: 39, ctxt: 0 }, Fn => Ident { name: 40, ctxt: 0 }, For => Ident { name: 41, ctxt: 0 }, If => Ident { name: 42, ctxt: 0 }, Impl => Ident { name: 43, ctxt: 0 }, In => Ident { name: 63, ctxt: 0 }, Let => Ident { name: 44, ctxt: 0 }, __LogLevel => Ident { name: 45, ctxt: 0 }, Loop => Ident { name: 46, ctxt: 0 }, Match => Ident { name: 47, ctxt: 0 }, Mod => Ident { name: 48, ctxt: 0 }, Mut => Ident { name: 49, ctxt: 0 }, Once => Ident { name: 50, ctxt: 0 }, Priv => Ident { name: 51, ctxt: 0 }, Pub => Ident { name: 52, ctxt: 0 }, Ref => Ident { name: 53, ctxt: 0 }, Return => Ident { name: 54, ctxt: 0 }, Static => Ident { name: super::STATIC_KEYWORD_NAME, ctxt: 0 }, Self => Ident { name: super::SELF_KEYWORD_NAME, ctxt: 0 }, Struct => Ident { name: 55, ctxt: 0 }, Super => Ident { name: 56, ctxt: 0 }, True => Ident { name: 57, ctxt: 0 }, Trait => Ident { name: 58, ctxt: 0 }, Type => Ident { name: 59, ctxt: 0 }, Unsafe => Ident { name: 60, ctxt: 0 }, Use => Ident { name: 61, ctxt: 0 }, While => Ident { name: 62, ctxt: 0 }, Continue => Ident { name: 64, ctxt: 0 }, Proc => Ident { name: 65, ctxt: 0 }, Alignof => Ident { name: 70, ctxt: 0 }, Be => Ident { name: 66, ctxt: 0 }, Offsetof => Ident { name: 71, ctxt: 0 }, Pure => Ident { name: 67, ctxt: 0 }, Sizeof => Ident { 
name: 72, ctxt: 0 }, Typeof => Ident { name: 69, ctxt: 0 }, Yield => Ident { name: 68, ctxt: 0 }, } } } } pub fn is_keyword(kw: keywords::Keyword, tok: &Token) -> bool { match *tok { token::IDENT(sid, false) => { kw.to_ident().name == sid.name } _ => { false } } } pub fn is_any_keyword(tok: &Token) -> bool { match *tok { token::IDENT(sid, false) => match sid.name { SELF_KEYWORD_NAME | STATIC_KEYWORD_NAME | STRICT_KEYWORD_START .. RESERVED_KEYWORD_FINAL => true, _ => false, }, _ => false } } pub fn is_strict_keyword(tok: &Token) -> bool { match *tok { token::IDENT(sid, false) => match sid.name { SELF_KEYWORD_NAME | STATIC_KEYWORD_NAME | STRICT_KEYWORD_START .. STRICT_KEYWORD_FINAL => true, _ => false, }, _ => false, } } pub fn is_reserved_keyword(tok: &Token) -> bool { match *tok { token::IDENT(sid, false) => match sid.name { RESERVED_KEYWORD_START .. RESERVED_KEYWORD_FINAL => true, _ => false, }, _ => false, } } pub fn mtwt_token_eq(t1 : &Token, t2 : &Token) -> bool { match (t1,t2) { (&IDENT(id1,_),&IDENT(id2,_)) => ast_util::mtwt_resolve(id1) == ast_util::mtwt_resolve(id2), _ => *t1 == *t2 } } #[cfg(test)] mod test { use super::*; use ast; use ast_util; fn mark_ident(id : ast::Ident, m : ast::Mrk) -> ast::Ident { ast::Ident{name:id.name,ctxt:ast_util::new_mark(m,id.ctxt)} } #[test] fn mtwt_token_eq_test() { assert!(mtwt_token_eq(&GT,&GT)); let a = str_to_ident("bac"); let a1 = mark_ident(a,92); assert!(mtwt_token_eq(&IDENT(a,true),&IDENT(a1,false))); } #[test] fn str_ptr_eq_tests(){ let a = @"abc"; let b = @"abc"; let c = a; assert!(str_ptr_eq(a,c)); assert!(!str_ptr_eq(a,b)); } #[test] fn fresh_name_pointer_sharing() { let ghi = str_to_ident("ghi"); assert_eq!(ident_to_str(&ghi),@"ghi"); assert!(str_ptr_eq(ident_to_str(&ghi),ident_to_str(&ghi))) let fresh = ast::Ident::new(fresh_name(&ghi)); assert_eq!(ident_to_str(&fresh),@"ghi"); assert!(str_ptr_eq(ident_to_str(&ghi),ident_to_str(&fresh))); } } Deduplicate in syntax::parse::token with a macro. 
I also renumbered things at the same time; ``in`` was shifted into its alphabetical position and the reserved keywords were reordered (a couple of them were out of order).
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use ast;
use ast::{P, Name, Mrk};
use ast_util;
use parse::token;
use util::interner::StrInterner;
use util::interner;
use std::cast;
use std::char;
use std::local_data;

// Binary operator tokens; also carried by BINOPEQ for the `+=` family.
#[deriving(Clone, Encodable, Decodable, Eq, IterBytes)]
pub enum binop {
    PLUS,
    MINUS,
    STAR,
    SLASH,
    PERCENT,
    CARET,
    AND,
    OR,
    SHL,
    SHR,
}

// Every token the lexer can produce.
#[deriving(Clone, Encodable, Decodable, Eq, IterBytes)]
pub enum Token {
    /* Expression-operator symbols. */
    EQ,
    LT,
    LE,
    EQEQ,
    NE,
    GE,
    GT,
    ANDAND,
    OROR,
    NOT,
    TILDE,
    BINOP(binop),
    BINOPEQ(binop),

    /* Structural symbols */
    AT,
    DOT,
    DOTDOT,
    DOTDOTDOT,
    COMMA,
    SEMI,
    COLON,
    MOD_SEP,
    RARROW,
    LARROW,
    DARROW,
    FAT_ARROW,
    LPAREN,
    RPAREN,
    LBRACKET,
    RBRACKET,
    LBRACE,
    RBRACE,
    POUND,
    DOLLAR,

    /* Literals */
    LIT_CHAR(u32),
    LIT_INT(i64, ast::int_ty),
    LIT_UINT(u64, ast::uint_ty),
    LIT_INT_UNSUFFIXED(i64),
    LIT_FLOAT(ast::Ident, ast::float_ty),
    LIT_FLOAT_UNSUFFIXED(ast::Ident),
    LIT_STR(ast::Ident),
    LIT_STR_RAW(ast::Ident, uint), /* raw str delimited by n hash symbols */

    /* Name components */
    // an identifier contains an "is_mod_name" boolean,
    // indicating whether :: follows this token with no
    // whitespace in between.
    IDENT(ast::Ident, bool),
    UNDERSCORE,
    LIFETIME(ast::Ident),

    /* For interpolation */
    INTERPOLATED(nonterminal),

    DOC_COMMENT(ast::Ident),
    EOF,
}

#[deriving(Clone, Encodable, Decodable, Eq, IterBytes)]
/// For interpolation during macro expansion.
pub enum nonterminal {
    nt_item(@ast::item),
    nt_block(P<ast::Block>),
    nt_stmt(@ast::Stmt),
    nt_pat( @ast::Pat),
    nt_expr(@ast::Expr),
    nt_ty(  P<ast::Ty>),
    nt_ident(~ast::Ident, bool),
    nt_attr(@ast::Attribute),   // #[foo]
    nt_path(~ast::Path),
    nt_tt(  @ast::token_tree), //needs @ed to break a circularity
    nt_matchers(~[ast::matcher])
}

// Render a binary-operator token as source text.
pub fn binop_to_str(o: binop) -> ~str {
    match o {
        PLUS => ~"+",
        MINUS => ~"-",
        STAR => ~"*",
        SLASH => ~"/",
        PERCENT => ~"%",
        CARET => ~"^",
        AND => ~"&",
        OR => ~"|",
        SHL => ~"<<",
        SHR => ~">>"
    }
}

// Render any token as source text, using `input` to resolve interned names.
pub fn to_str(input: @ident_interner, t: &Token) -> ~str {
    match *t {
        EQ => ~"=",
        LT => ~"<",
        LE => ~"<=",
        EQEQ => ~"==",
        NE => ~"!=",
        GE => ~">=",
        GT => ~">",
        NOT => ~"!",
        TILDE => ~"~",
        OROR => ~"||",
        ANDAND => ~"&&",
        BINOP(op) => binop_to_str(op),
        BINOPEQ(op) => binop_to_str(op) + "=",

        /* Structural symbols */
        AT => ~"@",
        DOT => ~".",
        DOTDOT => ~"..",
        DOTDOTDOT => ~"...",
        COMMA => ~",",
        SEMI => ~";",
        COLON => ~":",
        MOD_SEP => ~"::",
        RARROW => ~"->",
        LARROW => ~"<-",
        DARROW => ~"<->",
        FAT_ARROW => ~"=>",
        LPAREN => ~"(",
        RPAREN => ~")",
        LBRACKET => ~"[",
        RBRACKET => ~"]",
        LBRACE => ~"{",
        RBRACE => ~"}",
        POUND => ~"#",
        DOLLAR => ~"$",

        /* Literals */
        LIT_CHAR(c) => {
            // Escape the char so e.g. a newline renders as '\n'.
            let mut res = ~"'";
            char::from_u32(c).unwrap().escape_default(|c| {
                res.push_char(c);
            });
            res.push_char('\'');
            res
        }
        LIT_INT(i, t) => { i.to_str() + ast_util::int_ty_to_str(t) }
        LIT_UINT(u, t) => { u.to_str() + ast_util::uint_ty_to_str(t) }
        LIT_INT_UNSUFFIXED(i) => { i.to_str() }
        LIT_FLOAT(ref s, t) => {
            let mut body = ident_to_str(s).to_owned();
            if body.ends_with(".") {
                body.push_char('0');  // `10.f` is not a float literal
            }
            body + ast_util::float_ty_to_str(t)
        }
        LIT_FLOAT_UNSUFFIXED(ref s) => {
            let mut body = ident_to_str(s).to_owned();
            if body.ends_with(".") {
                body.push_char('0');  // `10.f` is not a float literal
            }
            body
        }
        LIT_STR(ref s) => { format!("\"{}\"", ident_to_str(s).escape_default()) }
        LIT_STR_RAW(ref s, n) => {
            // Raw strings are emitted verbatim, wrapped in n `#`s per side.
            format!("r{delim}\"{string}\"{delim}",
                    delim="#".repeat(n), string=ident_to_str(s))
        }

        /* Name components */
        IDENT(s, _) => input.get(s.name).to_owned(),
        LIFETIME(s) => format!("'{}", input.get(s.name)),
        UNDERSCORE => ~"_",

        /* Other */
        DOC_COMMENT(ref s) => ident_to_str(s).to_owned(),
        EOF => ~"<eof>",
        INTERPOLATED(ref nt) => {
            // Expressions and attributes pretty-print fully; everything else
            // is described by kind only.
            match nt {
                &nt_expr(e) => ::print::pprust::expr_to_str(e, input),
                &nt_attr(e) => ::print::pprust::attribute_to_str(e, input),
                _ => {
                    ~"an interpolated " +
                    match (*nt) {
                        nt_item(..) => ~"item",
                        nt_block(..) => ~"block",
                        nt_stmt(..) => ~"statement",
                        nt_pat(..) => ~"pattern",
                        nt_attr(..) => fail!("should have been handled"),
                        nt_expr(..) => fail!("should have been handled above"),
                        nt_ty(..) => ~"type",
                        nt_ident(..) => ~"identifier",
                        nt_path(..) => ~"path",
                        nt_tt(..) => ~"tt",
                        nt_matchers(..) => ~"matcher sequence"
                    }
                }
            }
        }
    }
}

// Can this token legally start an expression?
pub fn can_begin_expr(t: &Token) -> bool {
    match *t {
        LPAREN => true,
        LBRACE => true,
        LBRACKET => true,
        IDENT(_, _) => true,
        UNDERSCORE => true,
        TILDE => true,
        LIT_CHAR(_) => true,
        LIT_INT(_, _) => true,
        LIT_UINT(_, _) => true,
        LIT_INT_UNSUFFIXED(_) => true,
        LIT_FLOAT(_, _) => true,
        LIT_FLOAT_UNSUFFIXED(_) => true,
        LIT_STR(_) => true,
        LIT_STR_RAW(_, _) => true,
        POUND => true,
        AT => true,
        NOT => true,
        BINOP(MINUS) => true,
        BINOP(STAR) => true,
        BINOP(AND) => true,
        BINOP(OR) => true, // in lambda syntax
        OROR => true, // in lambda syntax
        MOD_SEP => true,
        INTERPOLATED(nt_expr(..))
        | INTERPOLATED(nt_ident(..))
        | INTERPOLATED(nt_block(..))
        | INTERPOLATED(nt_path(..)) => true,
        _ => false
    }
}

/// what's the opposite delimiter?
// Given an open delimiter token, return the matching close one (and vice
// versa); fails on non-delimiter tokens.
pub fn flip_delimiter(t: &token::Token) -> token::Token {
    match *t {
        LPAREN => RPAREN,
        LBRACE => RBRACE,
        LBRACKET => RBRACKET,
        RPAREN => LPAREN,
        RBRACE => LBRACE,
        RBRACKET => LBRACKET,
        _ => fail!()
    }
}

// Is this token a literal?
pub fn is_lit(t: &Token) -> bool {
    match *t {
        LIT_CHAR(_) => true,
        LIT_INT(_, _) => true,
        LIT_UINT(_, _) => true,
        LIT_INT_UNSUFFIXED(_) => true,
        LIT_FLOAT(_, _) => true,
        LIT_FLOAT_UNSUFFIXED(_) => true,
        LIT_STR(_) => true,
        LIT_STR_RAW(_, _) => true,
        _ => false
    }
}

// Is this token any identifier (mod-path flag irrelevant)?
pub fn is_ident(t: &Token) -> bool {
    match *t { IDENT(_, _) => true, _ => false }
}

// Identifier or an interpolated path nonterminal.
pub fn is_ident_or_path(t: &Token) -> bool {
    match *t {
        IDENT(_, _) | INTERPOLATED(nt_path(..)) => true,
        _ => false
    }
}

// An identifier that is NOT followed by `::`.
pub fn is_plain_ident(t: &Token) -> bool {
    match *t { IDENT(_, false) => true, _ => false }
}

// `|` or `||` (closure/lambda delimiters).
pub fn is_bar(t: &Token) -> bool {
    match *t { BINOP(OR) | OROR => true, _ => false }
}

// In this macro, there is the requirement that the name (the number) must be monotonically
// increasing by one in the special identifiers, starting at 0; the same holds for the keywords,
// except starting from the next number instead of zero, and with the additional exception that
// special identifiers are *also* allowed (they are deduplicated in the important place, the
// interner), an exception which is demonstrated by "static" and "self".
macro_rules! declare_special_idents_and_keywords {(
    // So now, in these rules, why is each definition parenthesised?
    // Answer: otherwise we get a spurious local ambiguity bug on the "}"
    pub mod special_idents {
        $( ($si_name:expr, $si_static:ident, $si_str:expr); )*
    }

    pub mod keywords {
        $( ($k_name:expr, $k_variant:ident, $k_str:expr); )*
    }
) => {
    // One `pub static` Ident per special identifier, at its fixed name index.
    pub mod special_idents {
        use ast::Ident;
        $( pub static $si_static: Ident = Ident { name: $si_name, ctxt: 0 }; )*
    }

    /**
     * All the valid words that have meaning in the Rust language.
     *
     * Rust keywords are either 'strict' or 'reserved'. Strict keywords may not
     * appear as identifiers at all. Reserved keywords are not used anywhere in
     * the language and may not appear as identifiers.
     */
    pub mod keywords {
        use ast::Ident;

        pub enum Keyword {
            $( $k_variant, )*
        }

        impl Keyword {
            pub fn to_ident(&self) -> Ident {
                match *self {
                    $( $k_variant => Ident { name: $k_name, ctxt: 0 }, )*
                }
            }
        }
    }

    fn mk_fresh_ident_interner() -> @ident_interner {
        // The indices here must correspond to the numbers in
        // special_idents, in Keyword to_ident(), and in static
        // constants below.
        let init_vec = ~[
            $( $si_str, )*
            $( $k_str, )*
        ];

        @interner::StrInterner::prefill(init_vec)
    }
}}

// If modifying the numbers below, remember to modify these as appropriate
static SELF_KEYWORD_NAME: Name = 8;
static STATIC_KEYWORD_NAME: Name = 27;
static STRICT_KEYWORD_START: Name = 32;
static STRICT_KEYWORD_FINAL: Name = 65;
static RESERVED_KEYWORD_START: Name = 66;
static RESERVED_KEYWORD_FINAL: Name = 72;

declare_special_idents_and_keywords! {
    pub mod special_idents {
        // These ones are statics
        (0,                          underscore,             "_"); // apparently unused?
        (1,                          anon,                   "anon");
        (2,                          invalid,                ""); // ''
        (3,                          unary,                  "unary"); // apparently unused?
        (4,                          not_fn,                 "!"); // apparently unused?
        (5,                          idx_fn,                 "[]"); // apparently unused?
        (6,                          unary_minus_fn,         "unary-"); // apparently unused?
        (7,                          clownshoes_extensions,  "__extensions__");
        (super::SELF_KEYWORD_NAME,   self_,                  "self"); // 'self'

        /* for matcher NTs */
        // none of these appear to be used, but perhaps references to
        // these are artificially fabricated by the macro system....
        (9,                          item,                   "item");
        (10,                         block,                  "block");
        (11,                         stmt,                   "stmt");
        (12,                         pat,                    "pat");
        (13,                         expr,                   "expr");
        (14,                         ty,                     "ty");
        (15,                         ident,                  "ident");
        (16,                         path,                   "path");
        (17,                         tt,                     "tt");
        (18,                         matchers,               "matchers");

        // for the type
        // apparently unused?
        (19,                         str,                    "str");

        /* outside of libsyntax */
        (20,                         arg,                    "arg");
        (21,                         descrim,                "descrim");
        (22,                         clownshoe_abi,          "__rust_abi");
        (23,                         clownshoe_stack_shim,   "__rust_stack_shim");
        (24,                         main,                   "main");
        (25,                         opaque,                 "<opaque>");
        (26,                         blk,                    "blk");
        (super::STATIC_KEYWORD_NAME, statik,                 "static");
        (28,                         clownshoes_foreign_mod, "__foreign_mod__");
        (29,                         unnamed_field,          "<unnamed_field>");
        (30,                         c_abi,                  "C"); // apparently unused?
        (31,                         type_self,              "Self"); // `Self`

        // here are the ones that actually occur in the source. Maybe the rest
        // should be removed?
        /*
        special_idents::anon
        special_idents::arg
        special_idents::blk
        special_idents::clownshoe_abi
        special_idents::clownshoe_stack_shim
        special_idents::clownshoes_extensions
        special_idents::clownshoes_foreign_mod
        special_idents::descrim
        special_idents::invalid
        special_idents::main
        special_idents::matchers
        special_idents::opaque
        special_idents::self_
        special_idents::statik
        special_idents::tt
        special_idents::type_self
        special_idents::unnamed_field
        */
    }

    pub mod keywords {
        // These ones are variants of the Keyword enum
        (32,                         As,         "as");
        (33,                         Break,      "break");
        (34,                         Const,      "const");
        (35,                         Do,         "do");
        (36,                         Else,       "else");
        (37,                         Enum,       "enum");
        (38,                         Extern,     "extern");
        (39,                         False,      "false");
        (40,                         Fn,         "fn");
        (41,                         For,        "for");
        (42,                         If,         "if");
        (43,                         Impl,       "impl");
        (44,                         In,         "in");
        (45,                         Let,        "let");
        (46,                         __LogLevel, "__log_level");
        (47,                         Loop,       "loop");
        (48,                         Match,      "match");
        (49,                         Mod,        "mod");
        (50,                         Mut,        "mut");
        (51,                         Once,       "once");
        (52,                         Priv,       "priv");
        (53,                         Pub,        "pub");
        (54,                         Ref,        "ref");
        (55,                         Return,     "return");
        // Static and Self are also special idents (prefill de-dupes)
        (super::STATIC_KEYWORD_NAME, Static,     "static");
        (super::SELF_KEYWORD_NAME,   Self,       "self");
        (56,                         Struct,     "struct");
        (57,                         Super,      "super");
        (58,                         True,       "true");
        (59,                         Trait,      "trait");
        (60,                         Type,       "type");
        (61,                         Unsafe,     "unsafe");
        (62,                         Use,        "use");
        (63,                         While,      "while");
        (64,                         Continue,   "continue");
        (65,                         Proc,       "proc");
        (66,                         Alignof,    "alignof");
        (67,                         Be,         "be");
        (68,                         Offsetof,   "offsetof");
        (69,                         Pure,       "pure");
        (70,                         Sizeof,     "sizeof");
        (71,                         Typeof,     "typeof");
        (72,                         Yield,      "yield");
    }
}

/**
 * Maps a token to a record specifying the corresponding binary
 * operator
 */
pub fn token_to_binop(tok: &Token) -> Option<ast::BinOp> {
    match *tok {
        BINOP(STAR) => Some(ast::BiMul),
        BINOP(SLASH) => Some(ast::BiDiv),
        BINOP(PERCENT) => Some(ast::BiRem),
        BINOP(PLUS) => Some(ast::BiAdd),
        BINOP(MINUS) => Some(ast::BiSub),
        BINOP(SHL) => Some(ast::BiShl),
        BINOP(SHR) => Some(ast::BiShr),
        BINOP(AND) => Some(ast::BiBitAnd),
        BINOP(CARET) => Some(ast::BiBitXor),
        BINOP(OR) => Some(ast::BiBitOr),
        LT => Some(ast::BiLt),
        LE => Some(ast::BiLe),
        GE => Some(ast::BiGe),
        GT => Some(ast::BiGt),
        EQEQ => Some(ast::BiEq),
        NE => Some(ast::BiNe),
        ANDAND => Some(ast::BiAnd),
        OROR => Some(ast::BiOr),
        _ => None
    }
}

// looks like we can get rid of this completely...
pub type ident_interner = StrInterner;

// if an interner exists in TLS, return it. Otherwise, prepare a
// fresh one.
pub fn get_ident_interner() -> @ident_interner {
    local_data_key!(key: @@::parse::token::ident_interner)
    match local_data::get(key, |k| k.map(|k| *k)) {
        Some(interner) => *interner,
        None => {
            let interner = mk_fresh_ident_interner();
            local_data::set(key, @interner);
            interner
        }
    }
}

/* for when we don't care about the contents; doesn't interact with TLD or serialization */
pub fn mk_fake_ident_interner() -> @ident_interner {
    @interner::StrInterner::new()
}

// maps a string to its interned representation
pub fn intern(str : &str) -> Name {
    let interner = get_ident_interner();
    interner.intern(str)
}

// gensyms a new uint, using the current interner
pub fn gensym(str : &str) -> Name {
    let interner = get_ident_interner();
    interner.gensym(str)
}

// map an interned representation back to a string
pub fn interner_get(name : Name) -> @str {
    get_ident_interner().get(name)
}

// maps an identifier to the string that it corresponds to
pub fn ident_to_str(id : &ast::Ident) -> @str {
    interner_get(id.name)
}

// maps a string to an identifier with an empty syntax context
pub fn str_to_ident(str : &str) -> ast::Ident {
    ast::Ident::new(intern(str))
}

// maps a string to a gensym'ed identifier
pub fn gensym_ident(str : &str) -> ast::Ident {
    ast::Ident::new(gensym(str))
}

// create a fresh name that maps to the same string as the old one.
// note that this guarantees that str_ptr_eq(ident_to_str(src),interner_get(fresh_name(src)));
// that is, that the new name and the old one are connected to ptr_eq strings.
pub fn fresh_name(src : &ast::Ident) -> Name {
    let interner = get_ident_interner();
    interner.gensym_copy(src.name)
    // following: debug version. Could work in final except that it's incompatible with
    // good error messages and uses of struct names in ambiguous could-be-binding
    // locations. Also definitely destroys the guarantee given above about ptr_eq.
    /*let num = rand::rng().gen_uint_range(0,0xffff);
    gensym(format!("{}_{}",ident_to_str(src),num))*/
}

// it looks like there oughta be a str_ptr_eq fn, but no one bothered to implement it?

// determine whether two @str values are pointer-equal
pub fn str_ptr_eq(a : @str, b : @str) -> bool {
    unsafe {
        // SAFETY-style note (historical dialect): compare the raw box
        // addresses, then transmute back so the @-box refcounts balance.
        let p : uint = cast::transmute(a);
        let q : uint = cast::transmute(b);
        let result = p == q;
        // got to transmute them back, to make sure the ref count is correct:
        let _junk1 : @str = cast::transmute(p);
        let _junk2 : @str = cast::transmute(q);
        result
    }
}

// return true when two identifiers refer (through the intern table) to the same ptr_eq
// string. This is used to compare identifiers in places where hygienic comparison is
// not wanted (i.e. not lexical vars).
pub fn ident_spelling_eq(a : &ast::Ident, b : &ast::Ident) -> bool {
    str_ptr_eq(interner_get(a.name),interner_get(b.name))
}

// create a fresh mark.
// Allocate a new macro-expansion mark (a gensym'ed name).
pub fn fresh_mark() -> Mrk {
    gensym("mark")
}

// See the macro above about the types of keywords

// True iff `tok` is exactly the given keyword (a plain, non-mod-path IDENT).
pub fn is_keyword(kw: keywords::Keyword, tok: &Token) -> bool {
    match *tok {
        token::IDENT(sid, false) => { kw.to_ident().name == sid.name }
        _ => { false }
    }
}

// True iff `tok` is any strict or reserved keyword; relies on the keyword
// names forming one contiguous interner range plus the two shared
// special-ident slots ("self" and "static").
pub fn is_any_keyword(tok: &Token) -> bool {
    match *tok {
        token::IDENT(sid, false) => match sid.name {
            SELF_KEYWORD_NAME | STATIC_KEYWORD_NAME |
            STRICT_KEYWORD_START .. RESERVED_KEYWORD_FINAL => true,
            _ => false,
        },
        _ => false
    }
}

// True iff `tok` is a strict keyword (may never appear as an identifier).
pub fn is_strict_keyword(tok: &Token) -> bool {
    match *tok {
        token::IDENT(sid, false) => match sid.name {
            SELF_KEYWORD_NAME | STATIC_KEYWORD_NAME |
            STRICT_KEYWORD_START .. STRICT_KEYWORD_FINAL => true,
            _ => false,
        },
        _ => false,
    }
}

// True iff `tok` is a reserved (not-yet-used) keyword.
pub fn is_reserved_keyword(tok: &Token) -> bool {
    match *tok {
        token::IDENT(sid, false) => match sid.name {
            RESERVED_KEYWORD_START .. RESERVED_KEYWORD_FINAL => true,
            _ => false,
        },
        _ => false,
    }
}

// Hygienic token equality: identifiers compare by their mtwt-resolved names;
// every other token compares structurally.
pub fn mtwt_token_eq(t1 : &Token, t2 : &Token) -> bool {
    match (t1,t2) {
        (&IDENT(id1,_),&IDENT(id2,_)) =>
            ast_util::mtwt_resolve(id1) == ast_util::mtwt_resolve(id2),
        _ => *t1 == *t2
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use ast;
    use ast_util;

    // Attach mark `m` to `id`'s syntax context.
    fn mark_ident(id : ast::Ident, m : ast::Mrk) -> ast::Ident {
        ast::Ident{name:id.name,ctxt:ast_util::new_mark(m,id.ctxt)}
    }

    #[test] fn mtwt_token_eq_test() {
        assert!(mtwt_token_eq(&GT,&GT));
        let a = str_to_ident("bac");
        let a1 = mark_ident(a,92);
        // Marks are ignored by mtwt_token_eq, and so is the is_mod_name flag.
        assert!(mtwt_token_eq(&IDENT(a,true),&IDENT(a1,false)));
    }

    #[test] fn str_ptr_eq_tests(){
        let a = @"abc";
        let b = @"abc";
        let c = a;
        assert!(str_ptr_eq(a,c));
        assert!(!str_ptr_eq(a,b));
    }

    #[test] fn fresh_name_pointer_sharing() {
        let ghi = str_to_ident("ghi");
        assert_eq!(ident_to_str(&ghi),@"ghi");
        // NOTE(review): missing `;` after the next assert in the original --
        // kept verbatim since this is a historical snapshot.
        assert!(str_ptr_eq(ident_to_str(&ghi),ident_to_str(&ghi)))
        let fresh = ast::Ident::new(fresh_name(&ghi));
        assert_eq!(ident_to_str(&fresh),@"ghi");
        assert!(str_ptr_eq(ident_to_str(&ghi),ident_to_str(&fresh)));
    }
}
// Copyright 2016 Joe Wilm, The Alacritty Project Contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//! Logging for alacritty.
//!
//! The main executable is supposed to call `initialize()` exactly once during
//! startup. All logging messages are written to stdout, given that their
//! log-level is sufficient for the level configured in `cli::Options`.
use log;
use std::sync;
use std::io;

use cli;

// NOTE(review): this chunk contains two historical snapshots of the same
// module, separated by the commit message "Minor formatting fixes" below.
// In the first ("before") snapshot the `Send + io::Write` bounds sit on the
// struct declaration and on `where` clauses of the impls.

// A `log::Log` implementation that filters by level and writes matching
// records to a mutex-protected, line-buffered writer.
pub struct Logger<T:Send+io::Write> {
    level: log::LogLevelFilter,
    output: sync::Mutex<T>
}

impl<T> Logger<T> where T:Send+io::Write {
    // Wraps `output` in a `LineWriter` so each record is flushed per line.
    pub fn new(output: T, level: log::LogLevelFilter) -> Logger<io::LineWriter<T>> {
        Logger {
            level: level,
            output: sync::Mutex::new(io::LineWriter::new(output))
        }
    }
}

impl<T> log::Log for Logger<T> where T:Send+io::Write {
    fn enabled(&self, metadata: &log::LogMetadata) -> bool {
        metadata.level() <= self.level
    }

    fn log(&self, record: &log::LogRecord) {
        if self.enabled(record.metadata()) {
            // Only forward records originating from alacritty's own modules.
            if record.target().starts_with("alacritty") {
                if let Ok(ref mut writer) = self.output.lock() {
                    // Write errors are deliberately ignored (best-effort log).
                    let _ = writer.write(format!("{}\n", record.args()).as_ref());
                }
            }
        }
    }
}

// Installs the global logger; dies on failure (e.g. if called twice).
pub fn initialize(options: &cli::Options) {
    log::set_logger(|max_log_level| {
        max_log_level.set(options.log_level);
        Box::new(Logger::new(io::stdout(), options.log_level))
    }).unwrap_or_else(|e| die!("{}", e));
}
Minor formatting fixes
// Copyright 2016 Joe Wilm, The Alacritty Project Contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//! Logging for alacritty.
//!
//! The main executable is supposed to call `initialize()` exactly once during
//! startup. All logging messages are written to stdout, given that their
//! log-level is sufficient for the level configured in `cli::Options`.
use log;
use std::sync;
use std::io;

use cli;

// Second ("after") snapshot: behavior is unchanged; the only difference is
// that the trait bounds moved off the struct and out of the `where` clauses
// into the impl headers (`impl<T: Send + io::Write>`).

// A `log::Log` implementation that filters by level and writes matching
// records to a mutex-protected, line-buffered writer.
pub struct Logger<T> {
    level: log::LogLevelFilter,
    output: sync::Mutex<T>
}

impl<T: Send + io::Write> Logger<T> {
    // Wraps `output` in a `LineWriter` so each record is flushed per line.
    pub fn new(output: T, level: log::LogLevelFilter) -> Logger<io::LineWriter<T>> {
        Logger {
            level: level,
            output: sync::Mutex::new(io::LineWriter::new(output))
        }
    }
}

impl<T: Send + io::Write> log::Log for Logger<T> {
    fn enabled(&self, metadata: &log::LogMetadata) -> bool {
        metadata.level() <= self.level
    }

    fn log(&self, record: &log::LogRecord) {
        if self.enabled(record.metadata()) {
            // Only forward records originating from alacritty's own modules.
            if record.target().starts_with("alacritty") {
                if let Ok(ref mut writer) = self.output.lock() {
                    // Write errors are deliberately ignored (best-effort log).
                    let _ = writer.write(format!("{}\n", record.args()).as_ref());
                }
            }
        }
    }
}

// Installs the global logger; dies on failure (e.g. if called twice).
pub fn initialize(options: &cli::Options) {
    log::set_logger(|max_log_level| {
        max_log_level.set(options.log_level);
        Box::new(Logger::new(io::stdout(), options.log_level))
    }).unwrap_or_else(|e| die!("{}", e));
}
use std::borrow::Cow; use rustc::{mir, ty}; use rustc::ty::Instance; use rustc::ty::layout::{self, TyLayout, LayoutOf}; use syntax::source_map::Span; use rustc_target::spec::abi::Abi; use super::{ InterpResult, PointerArithmetic, InterpError, Scalar, InterpCx, Machine, Immediate, OpTy, ImmTy, PlaceTy, MPlaceTy, StackPopCleanup, FnVal, }; impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { #[inline] pub fn goto_block(&mut self, target: Option<mir::BasicBlock>) -> InterpResult<'tcx> { if let Some(target) = target { self.frame_mut().block = target; self.frame_mut().stmt = 0; Ok(()) } else { err!(Unreachable) } } pub(super) fn eval_terminator( &mut self, terminator: &mir::Terminator<'tcx>, ) -> InterpResult<'tcx> { use rustc::mir::TerminatorKind::*; match terminator.kind { Return => { self.frame().return_place.map(|r| self.dump_place(*r)); self.pop_stack_frame()? } Goto { target } => self.goto_block(Some(target))?, SwitchInt { ref discr, ref values, ref targets, .. } => { let discr = self.read_immediate(self.eval_operand(discr, None)?)?; trace!("SwitchInt({:?})", *discr); // Branch to the `otherwise` case by default, if no match is found. let mut target_block = targets[targets.len() - 1]; for (index, &const_int) in values.iter().enumerate() { // Compare using binary_op, to also support pointer values let const_int = Scalar::from_uint(const_int, discr.layout.size); let (res, _) = self.binary_op(mir::BinOp::Eq, discr, ImmTy::from_scalar(const_int, discr.layout), )?; if res.to_bool()? { target_block = targets[index]; break; } } self.goto_block(Some(target_block))?; } Call { ref func, ref args, ref destination, .. 
} => { let (dest, ret) = match *destination { Some((ref lv, target)) => (Some(self.eval_place(lv)?), Some(target)), None => (None, None), }; let func = self.eval_operand(func, None)?; let (fn_val, abi) = match func.layout.ty.sty { ty::FnPtr(sig) => { let caller_abi = sig.abi(); let fn_ptr = self.read_scalar(func)?.not_undef()?; let fn_val = self.memory.get_fn(fn_ptr)?; (fn_val, caller_abi) } ty::FnDef(def_id, substs) => { let sig = func.layout.ty.fn_sig(*self.tcx); (FnVal::Instance(self.resolve(def_id, substs)?), sig.abi()) }, _ => { let msg = format!("can't handle callee of type {:?}", func.layout.ty); return err!(Unimplemented(msg)); } }; let args = self.eval_operands(args)?; self.eval_fn_call( fn_val, terminator.source_info.span, abi, &args[..], dest, ret, )?; } Drop { ref location, target, .. } => { // FIXME(CTFE): forbid drop in const eval let place = self.eval_place(location)?; let ty = place.layout.ty; trace!("TerminatorKind::drop: {:?}, type {}", location, ty); let instance = Instance::resolve_drop_in_place(*self.tcx, ty); self.drop_in_place( place, instance, terminator.source_info.span, target, )?; } Assert { ref cond, expected, ref msg, target, .. } => { let cond_val = self.read_immediate(self.eval_operand(cond, None)?)? .to_scalar()?.to_bool()?; if expected == cond_val { self.goto_block(Some(target))?; } else { // Compute error message use rustc::mir::interpret::InterpError::*; return match *msg { BoundsCheck { ref len, ref index } => { let len = self.read_immediate(self.eval_operand(len, None)?) .expect("can't eval len").to_scalar()? .to_bits(self.memory().pointer_size())? as u64; let index = self.read_immediate(self.eval_operand(index, None)?) .expect("can't eval index").to_scalar()? .to_bits(self.memory().pointer_size())? 
as u64; err!(BoundsCheck { len, index }) } Overflow(op) => Err(Overflow(op).into()), OverflowNeg => Err(OverflowNeg.into()), DivisionByZero => Err(DivisionByZero.into()), RemainderByZero => Err(RemainderByZero.into()), GeneratorResumedAfterReturn | GeneratorResumedAfterPanic => unimplemented!(), _ => bug!(), }; } } Yield { .. } | GeneratorDrop | DropAndReplace { .. } | Resume | Abort => unimplemented!("{:#?}", terminator.kind), FalseEdges { .. } => bug!("should have been eliminated by\ `simplify_branches` mir pass"), FalseUnwind { .. } => bug!("should have been eliminated by\ `simplify_branches` mir pass"), Unreachable => return err!(Unreachable), } Ok(()) } fn check_argument_compat( rust_abi: bool, caller: TyLayout<'tcx>, callee: TyLayout<'tcx>, ) -> bool { if caller.ty == callee.ty { // No question return true; } if !rust_abi { // Don't risk anything return false; } // Compare layout match (&caller.abi, &callee.abi) { // Different valid ranges are okay (once we enforce validity, // that will take care to make it UB to leave the range, just // like for transmute). (layout::Abi::Scalar(ref caller), layout::Abi::Scalar(ref callee)) => caller.value == callee.value, (layout::Abi::ScalarPair(ref caller1, ref caller2), layout::Abi::ScalarPair(ref callee1, ref callee2)) => caller1.value == callee1.value && caller2.value == callee2.value, // Be conservative _ => false } } /// Pass a single argument, checking the types for compatibility. fn pass_argument( &mut self, rust_abi: bool, caller_arg: &mut impl Iterator<Item=OpTy<'tcx, M::PointerTag>>, callee_arg: PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { if rust_abi && callee_arg.layout.is_zst() { // Nothing to do. 
trace!("Skipping callee ZST"); return Ok(()); } let caller_arg = caller_arg.next() .ok_or_else(|| InterpError::FunctionArgCountMismatch)?; if rust_abi { debug_assert!(!caller_arg.layout.is_zst(), "ZSTs must have been already filtered out"); } // Now, check if !Self::check_argument_compat(rust_abi, caller_arg.layout, callee_arg.layout) { return err!(FunctionArgMismatch(caller_arg.layout.ty, callee_arg.layout.ty)); } // We allow some transmutes here self.copy_op_transmute(caller_arg, callee_arg) } /// Call this function -- pushing the stack frame and initializing the arguments. fn eval_fn_call( &mut self, fn_val: FnVal<'tcx, M::ExtraFnVal>, span: Span, caller_abi: Abi, args: &[OpTy<'tcx, M::PointerTag>], dest: Option<PlaceTy<'tcx, M::PointerTag>>, ret: Option<mir::BasicBlock>, ) -> InterpResult<'tcx> { trace!("eval_fn_call: {:#?}", fn_val); let instance = match fn_val { FnVal::Instance(instance) => instance, FnVal::Other(extra) => { return M::call_extra_fn(self, extra, args, dest, ret); } }; match instance.def { ty::InstanceDef::Intrinsic(..) => { if caller_abi != Abi::RustIntrinsic { return err!(FunctionAbiMismatch(caller_abi, Abi::RustIntrinsic)); } // The intrinsic itself cannot diverge, so if we got here without a return // place... (can happen e.g., for transmute returning `!`) let dest = match dest { Some(dest) => dest, None => return err!(Unreachable) }; M::call_intrinsic(self, instance, args, dest)?; // No stack frame gets pushed, the main loop will just act as if the // call completed. self.goto_block(ret)?; self.dump_place(*dest); Ok(()) } ty::InstanceDef::VtableShim(..) | ty::InstanceDef::ClosureOnceShim { .. } | ty::InstanceDef::FnPtrShim(..) | ty::InstanceDef::DropGlue(..) | ty::InstanceDef::CloneShim(..) | ty::InstanceDef::Item(_) => { // ABI check { let callee_abi = { let instance_ty = instance.ty(*self.tcx); match instance_ty.sty { ty::FnDef(..) => instance_ty.fn_sig(*self.tcx).abi(), ty::Closure(..) => Abi::RustCall, ty::Generator(..) 
=> Abi::Rust, _ => bug!("unexpected callee ty: {:?}", instance_ty), } }; // Rust and RustCall are compatible let normalize_abi = |abi| if abi == Abi::RustCall { Abi::Rust } else { abi }; if normalize_abi(caller_abi) != normalize_abi(callee_abi) { return err!(FunctionAbiMismatch(caller_abi, callee_abi)); } } // We need MIR for this fn let body = match M::find_fn(self, instance, args, dest, ret)? { Some(body) => body, None => return Ok(()), }; self.push_stack_frame( instance, span, body, dest, StackPopCleanup::Goto(ret), )?; // We want to pop this frame again in case there was an error, to put // the blame in the right location. Until the 2018 edition is used in // the compiler, we have to do this with an immediately invoked function. let res = (||{ trace!( "caller ABI: {:?}, args: {:#?}", caller_abi, args.iter() .map(|arg| (arg.layout.ty, format!("{:?}", **arg))) .collect::<Vec<_>>() ); trace!( "spread_arg: {:?}, locals: {:#?}", body.spread_arg, body.args_iter() .map(|local| (local, self.layout_of_local(self.frame(), local, None).unwrap().ty) ) .collect::<Vec<_>>() ); // Figure out how to pass which arguments. // The Rust ABI is special: ZST get skipped. let rust_abi = match caller_abi { Abi::Rust | Abi::RustCall => true, _ => false }; // We have two iterators: Where the arguments come from, // and where they go to. // For where they come from: If the ABI is RustCall, we untuple the // last incoming argument. These two iterators do not have the same type, // so to keep the code paths uniform we accept an allocation // (for RustCall ABI only). 
let caller_args : Cow<'_, [OpTy<'tcx, M::PointerTag>]> = if caller_abi == Abi::RustCall && !args.is_empty() { // Untuple let (&untuple_arg, args) = args.split_last().unwrap(); trace!("eval_fn_call: Will pass last argument by untupling"); Cow::from(args.iter().map(|&a| Ok(a)) .chain((0..untuple_arg.layout.fields.count()).into_iter() .map(|i| self.operand_field(untuple_arg, i as u64)) ) .collect::<InterpResult<'_, Vec<OpTy<'tcx, M::PointerTag>>>>()?) } else { // Plain arg passing Cow::from(args) }; // Skip ZSTs let mut caller_iter = caller_args.iter() .filter(|op| !rust_abi || !op.layout.is_zst()) .map(|op| *op); // Now we have to spread them out across the callee's locals, // taking into account the `spread_arg`. If we could write // this is a single iterator (that handles `spread_arg`), then // `pass_argument` would be the loop body. It takes care to // not advance `caller_iter` for ZSTs. let mut locals_iter = body.args_iter(); while let Some(local) = locals_iter.next() { let dest = self.eval_place( &mir::Place::from(local) )?; if Some(local) == body.spread_arg { // Must be a tuple for i in 0..dest.layout.fields.count() { let dest = self.place_field(dest, i as u64)?; self.pass_argument(rust_abi, &mut caller_iter, dest)?; } } else { // Normal argument self.pass_argument(rust_abi, &mut caller_iter, dest)?; } } // Now we should have no more caller args if caller_iter.next().is_some() { trace!("Caller has passed too many args"); return err!(FunctionArgCountMismatch); } // Don't forget to check the return type! 
if let Some(caller_ret) = dest { let callee_ret = self.eval_place( &mir::Place::RETURN_PLACE )?; if !Self::check_argument_compat( rust_abi, caller_ret.layout, callee_ret.layout, ) { return err!(FunctionRetMismatch( caller_ret.layout.ty, callee_ret.layout.ty )); } } else { let local = mir::RETURN_PLACE; let ty = self.frame().body.local_decls[local].ty; if !self.tcx.is_ty_uninhabited_from_any_module(ty) { return err!(FunctionRetMismatch(self.tcx.types.never, ty)); } } Ok(()) })(); match res { Err(err) => { self.stack.pop(); Err(err) } Ok(v) => Ok(v) } } // cannot use the shim here, because that will only result in infinite recursion ty::InstanceDef::Virtual(_, idx) => { let mut args = args.to_vec(); let ptr_size = self.pointer_size(); // We have to implement all "object safe receivers". Currently we // support built-in pointers (&, &mut, Box) as well as unsized-self. We do // not yet support custom self types. // Also see librustc_codegen_llvm/abi.rs and librustc_codegen_llvm/mir/block.rs. let receiver_place = match args[0].layout.ty.builtin_deref(true) { Some(_) => { // Built-in pointer. self.deref_operand(args[0])? } None => { // Unsized self. args[0].assert_mem_place() } }; // Find and consult vtable let vtable = receiver_place.vtable(); let vtable_slot = vtable.ptr_offset(ptr_size * (idx as u64 + 3), self)?; let vtable_slot = self.memory.check_ptr_access( vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi, )?.expect("cannot be a ZST"); let fn_ptr = self.memory.get(vtable_slot.alloc_id)? .read_ptr_sized(self, vtable_slot)?.not_undef()?; let drop_fn = self.memory.get_fn(fn_ptr)?; // `*mut receiver_place.layout.ty` is almost the layout that we // want for args[0]: We have to project to field 0 because we want // a thin pointer. assert!(receiver_place.layout.is_unsized()); let receiver_ptr_ty = self.tcx.mk_mut_ptr(receiver_place.layout.ty); let this_receiver_ptr = self.layout_of(receiver_ptr_ty)?.field(self, 0)?; // Adjust receiver argument. 
args[0] = OpTy::from(ImmTy { layout: this_receiver_ptr, imm: Immediate::Scalar(receiver_place.ptr.into()) }); trace!("Patched self operand to {:#?}", args[0]); // recurse with concrete function self.eval_fn_call(drop_fn, span, caller_abi, &args, dest, ret) } } } fn drop_in_place( &mut self, place: PlaceTy<'tcx, M::PointerTag>, instance: ty::Instance<'tcx>, span: Span, target: mir::BasicBlock, ) -> InterpResult<'tcx> { trace!("drop_in_place: {:?},\n {:?}, {:?}", *place, place.layout.ty, instance); // We take the address of the object. This may well be unaligned, which is fine // for us here. However, unaligned accesses will probably make the actual drop // implementation fail -- a problem shared by rustc. let place = self.force_allocation(place)?; let (instance, place) = match place.layout.ty.sty { ty::Dynamic(..) => { // Dropping a trait object. self.unpack_dyn_trait(place)? } _ => (instance, place), }; let arg = ImmTy { imm: place.to_ref(), layout: self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?, }; let ty = self.tcx.mk_unit(); // return type is () let dest = MPlaceTy::dangling(self.layout_of(ty)?, self); self.eval_fn_call( FnVal::Instance(instance), span, Abi::Rust, &[arg.into()], Some(dest.into()), Some(target), ) } } RustIntrinsic and PlatformIntrinsic are also the same ABI as Rust use std::borrow::Cow; use rustc::{mir, ty}; use rustc::ty::Instance; use rustc::ty::layout::{self, TyLayout, LayoutOf}; use syntax::source_map::Span; use rustc_target::spec::abi::Abi; use super::{ InterpResult, PointerArithmetic, InterpError, Scalar, InterpCx, Machine, Immediate, OpTy, ImmTy, PlaceTy, MPlaceTy, StackPopCleanup, FnVal, }; impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { #[inline] pub fn goto_block(&mut self, target: Option<mir::BasicBlock>) -> InterpResult<'tcx> { if let Some(target) = target { self.frame_mut().block = target; self.frame_mut().stmt = 0; Ok(()) } else { err!(Unreachable) } } pub(super) fn eval_terminator( &mut self, terminator: 
        &mir::Terminator<'tcx>,
    ) -> InterpResult<'tcx> {
        use rustc::mir::TerminatorKind::*;
        match terminator.kind {
            Return => {
                // Dump the return place (if any) for tracing, then pop the frame.
                self.frame().return_place.map(|r| self.dump_place(*r));
                self.pop_stack_frame()?
            }

            Goto { target } => self.goto_block(Some(target))?,

            SwitchInt { ref discr, ref values, ref targets, .. } => {
                let discr = self.read_immediate(self.eval_operand(discr, None)?)?;
                trace!("SwitchInt({:?})", *discr);

                // Branch to the `otherwise` case by default, if no match is found.
                let mut target_block = targets[targets.len() - 1];

                for (index, &const_int) in values.iter().enumerate() {
                    // Compare using binary_op, to also support pointer values
                    let const_int = Scalar::from_uint(const_int, discr.layout.size);
                    let (res, _) = self.binary_op(mir::BinOp::Eq,
                        discr,
                        ImmTy::from_scalar(const_int, discr.layout),
                    )?;
                    if res.to_bool()? {
                        target_block = targets[index];
                        break;
                    }
                }

                self.goto_block(Some(target_block))?;
            }

            Call { ref func, ref args, ref destination, .. } => {
                // `destination` is `None` for diverging calls: no return place, no
                // continuation block.
                let (dest, ret) = match *destination {
                    Some((ref lv, target)) => (Some(self.eval_place(lv)?), Some(target)),
                    None => (None, None),
                };

                // Determine the callee and the caller-side ABI from the callee's type:
                // either an indirect call through a fn pointer, or a direct FnDef.
                let func = self.eval_operand(func, None)?;
                let (fn_val, abi) = match func.layout.ty.sty {
                    ty::FnPtr(sig) => {
                        let caller_abi = sig.abi();
                        let fn_ptr = self.read_scalar(func)?.not_undef()?;
                        let fn_val = self.memory.get_fn(fn_ptr)?;
                        (fn_val, caller_abi)
                    }
                    ty::FnDef(def_id, substs) => {
                        let sig = func.layout.ty.fn_sig(*self.tcx);
                        (FnVal::Instance(self.resolve(def_id, substs)?), sig.abi())
                    },
                    _ => {
                        let msg = format!("can't handle callee of type {:?}", func.layout.ty);
                        return err!(Unimplemented(msg));
                    }
                };
                let args = self.eval_operands(args)?;
                self.eval_fn_call(
                    fn_val,
                    terminator.source_info.span,
                    abi,
                    &args[..],
                    dest,
                    ret,
                )?;
            }

            Drop { ref location, target, .. } => {
                // FIXME(CTFE): forbid drop in const eval
                let place = self.eval_place(location)?;
                let ty = place.layout.ty;
                trace!("TerminatorKind::drop: {:?}, type {}", location, ty);

                let instance = Instance::resolve_drop_in_place(*self.tcx, ty);
                self.drop_in_place(
                    place,
                    instance,
                    terminator.source_info.span,
                    target,
                )?;
            }

            Assert { ref cond, expected, ref msg, target, .. } => {
                let cond_val = self.read_immediate(self.eval_operand(cond, None)?)?
                    .to_scalar()?.to_bool()?;
                if expected == cond_val {
                    self.goto_block(Some(target))?;
                } else {
                    // Compute error message
                    use rustc::mir::interpret::InterpError::*;
                    return match *msg {
                        BoundsCheck { ref len, ref index } => {
                            // Evaluate the actual len/index so the error carries
                            // concrete values.
                            let len = self.read_immediate(self.eval_operand(len, None)?)
                                .expect("can't eval len").to_scalar()?
                                .to_bits(self.memory().pointer_size())? as u64;
                            let index = self.read_immediate(self.eval_operand(index, None)?)
                                .expect("can't eval index").to_scalar()?
                                .to_bits(self.memory().pointer_size())? as u64;
                            err!(BoundsCheck { len, index })
                        }
                        Overflow(op) => Err(Overflow(op).into()),
                        OverflowNeg => Err(OverflowNeg.into()),
                        DivisionByZero => Err(DivisionByZero.into()),
                        RemainderByZero => Err(RemainderByZero.into()),
                        GeneratorResumedAfterReturn |
                        GeneratorResumedAfterPanic => unimplemented!(),
                        _ => bug!(),
                    };
                }
            }

            Yield { .. } |
            GeneratorDrop |
            DropAndReplace { .. } |
            Resume |
            Abort => unimplemented!("{:#?}", terminator.kind),
            FalseEdges { .. } => bug!("should have been eliminated by\
                `simplify_branches` mir pass"),
            FalseUnwind { .. } => bug!("should have been eliminated by\
                `simplify_branches` mir pass"),
            Unreachable => return err!(Unreachable),
        }

        Ok(())
    }

    /// Returns whether a caller value of layout `caller` may be passed where the
    /// callee expects layout `callee`: identical types always match; under the
    /// Rust ABI, layouts whose `Scalar`/`ScalarPair` value kinds agree are also
    /// accepted (the transmute is performed by `copy_op_transmute`).
    fn check_argument_compat(
        rust_abi: bool,
        caller: TyLayout<'tcx>,
        callee: TyLayout<'tcx>,
    ) -> bool {
        if caller.ty == callee.ty {
            // No question
            return true;
        }
        if !rust_abi {
            // Don't risk anything
            return false;
        }
        // Compare layout
        match (&caller.abi, &callee.abi) {
            // Different valid ranges are okay (once we enforce validity,
            // that will take care to make it UB to leave the range, just
            // like for transmute).
            (layout::Abi::Scalar(ref caller), layout::Abi::Scalar(ref callee)) =>
                caller.value == callee.value,
            (layout::Abi::ScalarPair(ref caller1, ref caller2),
             layout::Abi::ScalarPair(ref callee1, ref callee2)) =>
                caller1.value == callee1.value && caller2.value == callee2.value,
            // Be conservative
            _ => false
        }
    }

    /// Pass a single argument, checking the types for compatibility.
    fn pass_argument(
        &mut self,
        rust_abi: bool,
        caller_arg: &mut impl Iterator<Item=OpTy<'tcx, M::PointerTag>>,
        callee_arg: PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        if rust_abi && callee_arg.layout.is_zst() {
            // Nothing to do.
            trace!("Skipping callee ZST");
            return Ok(());
        }
        let caller_arg = caller_arg.next()
            .ok_or_else(|| InterpError::FunctionArgCountMismatch)?;
        if rust_abi {
            debug_assert!(!caller_arg.layout.is_zst(), "ZSTs must have been already filtered out");
        }
        // Now, check
        if !Self::check_argument_compat(rust_abi, caller_arg.layout, callee_arg.layout) {
            return err!(FunctionArgMismatch(caller_arg.layout.ty, callee_arg.layout.ty));
        }
        // We allow some transmutes here
        self.copy_op_transmute(caller_arg, callee_arg)
    }

    /// Call this function -- pushing the stack frame and initializing the arguments.
    fn eval_fn_call(
        &mut self,
        fn_val: FnVal<'tcx, M::ExtraFnVal>,
        span: Span,
        caller_abi: Abi,
        args: &[OpTy<'tcx, M::PointerTag>],
        dest: Option<PlaceTy<'tcx, M::PointerTag>>,
        ret: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx> {
        trace!("eval_fn_call: {:#?}", fn_val);
        // Machine-provided "extra" functions are dispatched entirely by the
        // machine hook; everything else is a resolved `Instance`.
        let instance = match fn_val {
            FnVal::Instance(instance) => instance,
            FnVal::Other(extra) => {
                return M::call_extra_fn(self, extra, args, dest, ret);
            }
        };

        match instance.def {
            ty::InstanceDef::Intrinsic(..) => {
                if caller_abi != Abi::RustIntrinsic {
                    return err!(FunctionAbiMismatch(caller_abi, Abi::RustIntrinsic));
                }
                // The intrinsic itself cannot diverge, so if we got here without a return
                // place... (can happen e.g., for transmute returning `!`)
                let dest = match dest {
                    Some(dest) => dest,
                    None => return err!(Unreachable)
                };
                M::call_intrinsic(self, instance, args, dest)?;
                // No stack frame gets pushed, the main loop will just act as if the
                // call completed.
                self.goto_block(ret)?;
                self.dump_place(*dest);
                Ok(())
            }
            ty::InstanceDef::VtableShim(..) |
            ty::InstanceDef::ClosureOnceShim { .. } |
            ty::InstanceDef::FnPtrShim(..) |
            ty::InstanceDef::DropGlue(..) |
            ty::InstanceDef::CloneShim(..) |
            ty::InstanceDef::Item(_) => {
                // ABI check
                {
                    let callee_abi = {
                        let instance_ty = instance.ty(*self.tcx);
                        match instance_ty.sty {
                            ty::FnDef(..) => instance_ty.fn_sig(*self.tcx).abi(),
                            ty::Closure(..) => Abi::RustCall,
                            ty::Generator(..) => Abi::Rust,
                            _ => bug!("unexpected callee ty: {:?}", instance_ty),
                        }
                    };
                    let normalize_abi = |abi| match abi {
                        Abi::Rust | Abi::RustCall | Abi::RustIntrinsic | Abi::PlatformIntrinsic =>
                            // These are all the same ABI, really.
                            Abi::Rust,
                        abi => abi,
                    };
                    if normalize_abi(caller_abi) != normalize_abi(callee_abi) {
                        return err!(FunctionAbiMismatch(caller_abi, callee_abi));
                    }
                }

                // We need MIR for this fn
                let body = match M::find_fn(self, instance, args, dest, ret)? {
                    Some(body) => body,
                    None => return Ok(()),
                };

                self.push_stack_frame(
                    instance,
                    span,
                    body,
                    dest,
                    StackPopCleanup::Goto(ret),
                )?;

                // We want to pop this frame again in case there was an error, to put
                // the blame in the right location.  Until the 2018 edition is used in
                // the compiler, we have to do this with an immediately invoked function.
                let res = (||{
                    trace!(
                        "caller ABI: {:?}, args: {:#?}",
                        caller_abi,
                        args.iter()
                            .map(|arg| (arg.layout.ty, format!("{:?}", **arg)))
                            .collect::<Vec<_>>()
                    );
                    trace!(
                        "spread_arg: {:?}, locals: {:#?}",
                        body.spread_arg,
                        body.args_iter()
                            .map(|local|
                                (local, self.layout_of_local(self.frame(), local, None).unwrap().ty)
                            )
                            .collect::<Vec<_>>()
                    );

                    // Figure out how to pass which arguments.
                    // The Rust ABI is special: ZST get skipped.
                    let rust_abi = match caller_abi {
                        Abi::Rust | Abi::RustCall => true,
                        _ => false
                    };
                    // We have two iterators: Where the arguments come from,
                    // and where they go to.

                    // For where they come from: If the ABI is RustCall, we untuple the
                    // last incoming argument.  These two iterators do not have the same type,
                    // so to keep the code paths uniform we accept an allocation
                    // (for RustCall ABI only).
                    let caller_args : Cow<'_, [OpTy<'tcx, M::PointerTag>]> =
                        if caller_abi == Abi::RustCall && !args.is_empty() {
                            // Untuple
                            let (&untuple_arg, args) = args.split_last().unwrap();
                            trace!("eval_fn_call: Will pass last argument by untupling");
                            Cow::from(args.iter().map(|&a| Ok(a))
                                .chain((0..untuple_arg.layout.fields.count()).into_iter()
                                    .map(|i| self.operand_field(untuple_arg, i as u64))
                                )
                                .collect::<InterpResult<'_, Vec<OpTy<'tcx, M::PointerTag>>>>()?)
                        } else {
                            // Plain arg passing
                            Cow::from(args)
                        };
                    // Skip ZSTs
                    let mut caller_iter = caller_args.iter()
                        .filter(|op| !rust_abi || !op.layout.is_zst())
                        .map(|op| *op);

                    // Now we have to spread them out across the callee's locals,
                    // taking into account the `spread_arg`.  If we could write
                    // this is a single iterator (that handles `spread_arg`), then
                    // `pass_argument` would be the loop body.  It takes care to
                    // not advance `caller_iter` for ZSTs.
                    let mut locals_iter = body.args_iter();
                    while let Some(local) = locals_iter.next() {
                        let dest = self.eval_place(
                            &mir::Place::from(local)
                        )?;
                        if Some(local) == body.spread_arg {
                            // Must be a tuple
                            for i in 0..dest.layout.fields.count() {
                                let dest = self.place_field(dest, i as u64)?;
                                self.pass_argument(rust_abi, &mut caller_iter, dest)?;
                            }
                        } else {
                            // Normal argument
                            self.pass_argument(rust_abi, &mut caller_iter, dest)?;
                        }
                    }
                    // Now we should have no more caller args
                    if caller_iter.next().is_some() {
                        trace!("Caller has passed too many args");
                        return err!(FunctionArgCountMismatch);
                    }
                    // Don't forget to check the return type!
                    if let Some(caller_ret) = dest {
                        let callee_ret = self.eval_place(
                            &mir::Place::RETURN_PLACE
                        )?;
                        if !Self::check_argument_compat(
                            rust_abi,
                            caller_ret.layout,
                            callee_ret.layout,
                        ) {
                            return err!(FunctionRetMismatch(
                                caller_ret.layout.ty, callee_ret.layout.ty
                            ));
                        }
                    } else {
                        // Caller provided no return place: only acceptable when the
                        // callee's declared return type is uninhabited.
                        let local = mir::RETURN_PLACE;
                        let ty = self.frame().body.local_decls[local].ty;
                        if !self.tcx.is_ty_uninhabited_from_any_module(ty) {
                            return err!(FunctionRetMismatch(self.tcx.types.never, ty));
                        }
                    }
                    Ok(())
                })();
                match res {
                    Err(err) => {
                        // Pop the frame we just pushed so the error is blamed on the caller.
                        self.stack.pop();
                        Err(err)
                    }
                    Ok(v) => Ok(v)
                }
            }
            // cannot use the shim here, because that will only result in infinite recursion
            ty::InstanceDef::Virtual(_, idx) => {
                let mut args = args.to_vec();
                let ptr_size = self.pointer_size();
                // We have to implement all "object safe receivers".  Currently we
                // support built-in pointers (&, &mut, Box) as well as unsized-self.  We do
                // not yet support custom self types.
                // Also see librustc_codegen_llvm/abi.rs and librustc_codegen_llvm/mir/block.rs.
                let receiver_place = match args[0].layout.ty.builtin_deref(true) {
                    Some(_) => {
                        // Built-in pointer.
                        self.deref_operand(args[0])?
                    }
                    None => {
                        // Unsized self.
                        args[0].assert_mem_place()
                    }
                };
                // Find and consult vtable
                // (the `idx + 3` offset presumably skips the drop/size/align header
                // slots of the vtable -- verify against the vtable layout code)
                let vtable = receiver_place.vtable();
                let vtable_slot = vtable.ptr_offset(ptr_size * (idx as u64 + 3), self)?;
                let vtable_slot = self.memory.check_ptr_access(
                    vtable_slot,
                    ptr_size,
                    self.tcx.data_layout.pointer_align.abi,
                )?.expect("cannot be a ZST");
                let fn_ptr = self.memory.get(vtable_slot.alloc_id)?
                    .read_ptr_sized(self, vtable_slot)?.not_undef()?;
                // NOTE(review): `drop_fn` is a misleading name -- this is the
                // `idx`-th virtual method loaded from the vtable, not necessarily
                // drop glue.
                let drop_fn = self.memory.get_fn(fn_ptr)?;

                // `*mut receiver_place.layout.ty` is almost the layout that we
                // want for args[0]: We have to project to field 0 because we want
                // a thin pointer.
                assert!(receiver_place.layout.is_unsized());
                let receiver_ptr_ty = self.tcx.mk_mut_ptr(receiver_place.layout.ty);
                let this_receiver_ptr = self.layout_of(receiver_ptr_ty)?.field(self, 0)?;
                // Adjust receiver argument.
                args[0] = OpTy::from(ImmTy {
                    layout: this_receiver_ptr,
                    imm: Immediate::Scalar(receiver_place.ptr.into())
                });
                trace!("Patched self operand to {:#?}", args[0]);
                // recurse with concrete function
                self.eval_fn_call(drop_fn, span, caller_abi, &args, dest, ret)
            }
        }
    }

    /// Drops the contents of `place` in-place: resolves the place to memory,
    /// unpacks trait objects to their concrete type, and invokes the drop
    /// instance like a regular `Rust`-ABI call with a dangling unit return place.
    fn drop_in_place(
        &mut self,
        place: PlaceTy<'tcx, M::PointerTag>,
        instance: ty::Instance<'tcx>,
        span: Span,
        target: mir::BasicBlock,
    ) -> InterpResult<'tcx> {
        trace!("drop_in_place: {:?},\n {:?}, {:?}", *place, place.layout.ty, instance);
        // We take the address of the object.  This may well be unaligned, which is fine
        // for us here.  However, unaligned accesses will probably make the actual drop
        // implementation fail -- a problem shared by rustc.
        let place = self.force_allocation(place)?;

        let (instance, place) = match place.layout.ty.sty {
            ty::Dynamic(..) => {
                // Dropping a trait object.
                self.unpack_dyn_trait(place)?
            }
            _ => (instance, place),
        };

        let arg = ImmTy {
            imm: place.to_ref(),
            layout: self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?,
        };

        let ty = self.tcx.mk_unit(); // return type is ()
        let dest = MPlaceTy::dangling(self.layout_of(ty)?, self);

        self.eval_fn_call(
            FnVal::Instance(instance),
            span,
            Abi::Rust,
            &[arg.into()],
            Some(dest.into()),
            Some(target),
        )
    }
}
// Copyright (C) 2014 The 6502-rs Developers // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // 3. Neither the names of the copyright holders nor the names of any // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. 
use std;
use address::{Address, AddressDiff};
use instruction;
use instruction::{DecodedInstr};
use memory::Memory;
use registers::{ Registers, StackPointer, Status, StatusArgs };
use registers::{ PS_NEGATIVE, PS_DECIMAL_MODE, PS_OVERFLOW, PS_ZERO, PS_CARRY,
                 PS_DISABLE_INTERRUPTS };

// NOTE(review): this section is written in a pre-1.0 Rust dialect (`uint`,
// `std::fmt::Show`); it will not compile on a modern toolchain.  Signed
// arithmetic below relies on the old wrapping overflow semantics.

/// A 6502 machine: CPU register file plus addressable memory.
pub struct Machine {
    pub registers: Registers,
    pub memory: Memory
}

impl Machine {
    /// Creates a machine with freshly initialized registers and memory.
    pub fn new() -> Machine {
        Machine{ registers: Registers::new(), memory: Memory::new() }
    }

    /// Resets the machine to its power-on state by replacing it wholesale.
    pub fn reset(&mut self) {
        *self = Machine::new();
    }

    /// Fetches the opcode at the program counter, decodes it via the OPCODES
    /// table, consumes the operand bytes, and advances the program counter.
    /// Returns `None` for an unknown opcode (PC is left unchanged in that case).
    pub fn fetch_next_and_decode(&mut self) -> Option<DecodedInstr> {
        let x: u8 = self.memory.get_byte(self.registers.program_counter);

        match instruction::OPCODES[x as uint] {
            Some((instr, am)) => {
                // Total size = 1 opcode byte + addressing-mode operand bytes.
                let extra_bytes = am.extra_bytes();
                let num_bytes = AddressDiff(1) + extra_bytes;
                let data_start = self.registers.program_counter + AddressDiff(1);

                let slice = self.memory.get_slice(data_start, extra_bytes);
                let am_out = am.process(self, slice);

                // Increment program counter
                self.registers.program_counter =
                    self.registers.program_counter + num_bytes;

                Some((instr, am_out))
            }
            _ => None
        }
    }

    /// Executes one decoded instruction, updating registers, flags, and memory.
    /// Unimplemented or invalid combinations are logged and ignored.
    pub fn execute_instruction(&mut self, decoded_instr: DecodedInstr) {
        match decoded_instr {
            (instruction::ADC, instruction::UseImmediate(val)) => {
                debug!("add with carry immediate: {}", val);
                self.add_with_carry(val as i8);
            }
            (instruction::ADC, instruction::UseAddress(addr)) => {
                let val = self.memory.get_byte(addr) as i8;
                debug!("add with carry. address: {}. value: {}", addr, val);
                self.add_with_carry(val);
            }

            (instruction::AND, instruction::UseImmediate(val)) => {
                self.and(val as i8);
            }
            (instruction::AND, instruction::UseAddress(addr)) => {
                let val = self.memory.get_byte(addr) as i8;
                self.and(val as i8);
            }

            (instruction::ASL, instruction::UseImplied) => {
                // Accumulator mode
                let mut val = self.registers.accumulator as u8;
                Machine::shift_left_with_flags(&mut val,
                                               &mut self.registers.status);
                self.registers.accumulator = val as i8;
            }
            (instruction::ASL, instruction::UseAddress(addr)) => {
                Machine::shift_left_with_flags(
                    self.memory.get_byte_mut_ref(addr),
                    &mut self.registers.status);
            }

            (instruction::BCC, instruction::UseRelative(rel)) => {
                let addr = self.registers.program_counter + AddressDiff(rel as i32);
                self.branch_if_carry_clear(addr);
            }
            (instruction::BCS, instruction::UseRelative(rel)) => {
                let addr = self.registers.program_counter + AddressDiff(rel as i32);
                self.branch_if_carry_set(addr);
            }

            (instruction::BIT, instruction::UseAddress(addr)) => {
                let a: u8 = self.registers.accumulator as u8;
                let m: u8 = self.memory.get_byte(addr);
                let res = a & m;

                // The zero flag is set based on the result of the 'and'.
                let is_zero = 0 == res;

                // The N flag is set to bit 7 of the byte from memory.
                let bit7 = 0 != (0x80 & res);
                // The V flag is set to bit 6 of the byte from memory.
                let bit6 = 0 != (0x40 & res);
                // NOTE(review): the two comments above say "byte from memory",
                // but the code tests `res` (A & M).  On a real 6502, N and V
                // come from the memory operand itself -- verify.

                self.registers.status.set_with_mask(
                    PS_ZERO | PS_NEGATIVE | PS_OVERFLOW,
                    Status::new(StatusArgs { zero: is_zero,
                                             negative: bit7,
                                             overflow: bit6,
                                             ..StatusArgs::none() } ));
            }

            (instruction::BMI, instruction::UseRelative(rel)) => {
                let addr = self.registers.program_counter + AddressDiff(rel as i32);
                debug!("branch if minus relative. address: {}", addr);
                self.branch_if_minus(addr);
            }

            // Flag-clearing instructions: mask off the corresponding status bit.
            (instruction::CLC, instruction::UseImplied) => {
                self.registers.status.and(!PS_CARRY);
            }
            (instruction::CLD, instruction::UseImplied) => {
                self.registers.status.and(!PS_DECIMAL_MODE);
            }
            (instruction::CLI, instruction::UseImplied) => {
                self.registers.status.and(!PS_DISABLE_INTERRUPTS);
            }
            (instruction::CLV, instruction::UseImplied) => {
                self.registers.status.and(!PS_OVERFLOW);
            }

            (instruction::DEC, instruction::UseAddress(addr)) => {
                self.decrement_memory(addr)
            }

            (instruction::DEX, instruction::UseImplied) => {
                self.dec_x();
            }

            (instruction::INC, instruction::UseAddress(addr)) => {
                let m = self.memory.get_byte(addr);
                let m = m + 1;
                self.memory.set_byte(addr, m);
                let i = m as i8;
                Machine::set_flags_from_i8(&mut self.registers.status, i);
            }
            (instruction::INX, instruction::UseImplied) => {
                let x = self.registers.index_x + 1;
                self.load_x_register(x);
            }
            (instruction::INY, instruction::UseImplied) => {
                let y = self.registers.index_y + 1;
                self.load_y_register(y);
            }

            (instruction::JMP, instruction::UseAddress(addr)) => {
                self.jump(addr)
            }

            (instruction::LDA, instruction::UseImmediate(val)) => {
                debug!("load A immediate: {}", val);
                self.load_accumulator(val as i8);
            }
            (instruction::LDA, instruction::UseAddress(addr)) => {
                let val = self.memory.get_byte(addr);
                debug!("load A. address: {}. value: {}", addr, val);
                self.load_accumulator(val as i8);
            }

            (instruction::LDX, instruction::UseImmediate(val)) => {
                debug!("load X immediate: {}", val);
                self.load_x_register(val as i8);
            }
            (instruction::LDX, instruction::UseAddress(addr)) => {
                let val = self.memory.get_byte(addr);
                debug!("load X. address: {}. value: {}", addr, val);
                self.load_x_register(val as i8);
            }

            (instruction::LDY, instruction::UseImmediate(val)) => {
                debug!("load Y immediate: {}", val);
                self.load_y_register(val as i8);
            }
            (instruction::LDY, instruction::UseAddress(addr)) => {
                let val = self.memory.get_byte(addr);
                debug!("load Y. address: {}. value: {}", addr, val);
                self.load_y_register(val as i8);
            }

            (instruction::LSR, instruction::UseImplied) => {
                // Accumulator mode
                let mut val = self.registers.accumulator as u8;
                Machine::shift_right_with_flags(&mut val,
                                                &mut self.registers.status);
                self.registers.accumulator = val as i8;
            }
            (instruction::LSR, instruction::UseAddress(addr)) => {
                Machine::shift_right_with_flags(
                    self.memory.get_byte_mut_ref(addr),
                    &mut self.registers.status);
            }

            (instruction::PHA, instruction::UseImplied) => {
                // Push accumulator
                let val = self.registers.accumulator as u8;
                self.push_on_stack(val);
            }
            (instruction::PHP, instruction::UseImplied) => {
                // Push status
                let val = self.registers.status.bits();
                self.push_on_stack(val);
            }
            (instruction::PLA, instruction::UseImplied) => {
                // Pull accumulator
                let val: u8 = self.pull_from_stack();
                self.registers.accumulator = val as i8;
            }
            (instruction::PLP, instruction::UseImplied) => {
                // Pull status
                let val: u8 = self.pull_from_stack();
                // The `truncate` here won't do anything because we have a
                // constant for the single unused flags bit. This probably
                // corresponds to the behavior of the 6502...? FIXME: verify
                self.registers.status = Status::from_bits_truncate(val);
            }

            (instruction::ROL, instruction::UseImplied) => {
                // Accumulator mode
                let mut val = self.registers.accumulator as u8;
                Machine::rotate_left_with_flags(&mut val,
                                                &mut self.registers.status);
                self.registers.accumulator = val as i8;
            }
            (instruction::ROL, instruction::UseAddress(addr)) => {
                Machine::rotate_left_with_flags(
                    self.memory.get_byte_mut_ref(addr),
                    &mut self.registers.status);
            }
            (instruction::ROR, instruction::UseImplied) => {
                // Accumulator mode
                let mut val = self.registers.accumulator as u8;
                Machine::rotate_right_with_flags(&mut val,
                                                 &mut self.registers.status);
                self.registers.accumulator = val as i8;
            }
            (instruction::ROR, instruction::UseAddress(addr)) => {
                Machine::rotate_right_with_flags(
                    self.memory.get_byte_mut_ref(addr),
                    &mut self.registers.status);
            }

            (instruction::SBC, instruction::UseImmediate(val)) => {
                debug!("subtract with carry immediate: {}", val);
                self.subtract_with_carry(val as i8);
            }
            (instruction::SBC, instruction::UseAddress(addr)) => {
                let val = self.memory.get_byte(addr) as i8;
                debug!("subtract with carry. address: {}. value: {}", addr, val);
                self.subtract_with_carry(val);
            }

            // Flag-setting instructions: OR in the corresponding status bit.
            (instruction::SEC, instruction::UseImplied) => {
                self.registers.status.or(PS_CARRY);
            }
            (instruction::SED, instruction::UseImplied) => {
                self.registers.status.or(PS_DECIMAL_MODE);
            }
            (instruction::SEI, instruction::UseImplied) => {
                self.registers.status.or(PS_DISABLE_INTERRUPTS);
            }

            // Store instructions: registers to memory, flags unaffected.
            (instruction::STA, instruction::UseAddress(addr)) => {
                self.memory.set_byte(addr, self.registers.accumulator as u8);
            }
            (instruction::STX, instruction::UseAddress(addr)) => {
                self.memory.set_byte(addr, self.registers.index_x as u8);
            }
            (instruction::STY, instruction::UseAddress(addr)) => {
                self.memory.set_byte(addr, self.registers.index_y as u8);
            }

            (instruction::TAX, instruction::UseImplied) => {
                let val = self.registers.accumulator;
                self.load_x_register(val);
            }
            (instruction::TAY, instruction::UseImplied) => {
                let val = self.registers.accumulator;
                self.load_y_register(val);
            }
            (instruction::TSX, instruction::UseImplied) => {
                let StackPointer(val) = self.registers.stack_pointer;
                let val = val as i8;
                self.load_x_register(val);
            }
            (instruction::TXA, instruction::UseImplied) => {
                let val = self.registers.index_x;
                self.load_accumulator(val);
            }
            (instruction::TXS, instruction::UseImplied) => {
                // Note that this is the only 'transfer' instruction that does
                // NOT set the zero and negative flags. (Because the target
                // is the stack pointer)
                let val = self.registers.index_x;
                self.registers.stack_pointer = StackPointer(val as u8);
            }
            (instruction::TYA, instruction::UseImplied) => {
                let val = self.registers.index_y;
                self.load_accumulator(val);
            }

            (instruction::NOP, instruction::UseImplied) => {
                debug!("NOP instruction");
            }
            (_, _) => {
                debug!("attempting to execute unimplemented or invalid \
                        instruction");
            }
        };
    }

    /// Fetch-decode-execute loop; stops at the first undecodable opcode.
    pub fn run(&mut self) {
        loop {
            if let Some(decoded_instr) = self.fetch_next_and_decode() {
                self.execute_instruction(decoded_instr);
            } else {
                break
            }
        }
    }

    /// Updates the Z and N flags from a signed result value.
    fn set_flags_from_i8(status: &mut Status, value: i8) {
        let is_zero = value == 0;
        let is_negative = value < 0;

        status.set_with_mask(
            PS_ZERO | PS_NEGATIVE,
            Status::new(StatusArgs { zero: is_zero,
                                     negative: is_negative,
                                     ..StatusArgs::none() } ));
    }

    /// ASL: shifts left one bit; bit 7 goes into carry, Z/N updated.
    fn shift_left_with_flags(p_val: &mut u8, status: &mut Status) {
        let mask = 1 << 7;
        let is_bit_7_set = (*p_val & mask) == mask;
        // Bit 7 is masked off before the shift (it would be discarded anyway).
        let shifted = (*p_val & !(1 << 7)) << 1;
        *p_val = shifted;
        status.set_with_mask(
            PS_CARRY,
            Status::new(StatusArgs { carry: is_bit_7_set,
                                     ..StatusArgs::none() } ));
        Machine::set_flags_from_i8(status, *p_val as i8);
    }

    /// LSR: shifts right one bit; bit 0 goes into carry, Z/N updated.
    fn shift_right_with_flags(p_val: &mut u8, status: &mut Status) {
        let mask = 1;
        let is_bit_0_set = (*p_val & mask) == mask;
        *p_val = *p_val >> 1;
        status.set_with_mask(
            PS_CARRY,
            Status::new(StatusArgs { carry: is_bit_0_set,
                                     ..StatusArgs::none() } ));
        Machine::set_flags_from_i8(status, *p_val as i8);
    }

    /// ROL: rotates left through the carry flag; old bit 7 becomes new carry.
    fn rotate_left_with_flags(p_val: &mut u8, status: &mut Status) {
        let is_carry_set = status.contains(PS_CARRY);
        let mask = 1 << 7;
        let is_bit_7_set = (*p_val & mask) == mask;
        let shifted = (*p_val & !(1 << 7)) << 1;
        // Old carry enters at bit 0.
        *p_val = shifted + if is_carry_set { 1 } else { 0 };
        status.set_with_mask(
            PS_CARRY,
            Status::new(StatusArgs { carry: is_bit_7_set,
                                     ..StatusArgs::none() } ));
        Machine::set_flags_from_i8(status, *p_val as i8);
    }

    /// ROR: rotates right through the carry flag; old bit 0 becomes new carry.
    fn rotate_right_with_flags(p_val: &mut u8, status: &mut Status) {
        let is_carry_set = status.contains(PS_CARRY);
        let mask = 1;
        let is_bit_0_set = (*p_val & mask) == mask;
        let shifted = *p_val >> 1;
        // Old carry enters at bit 7.
        *p_val = shifted + if is_carry_set { 1 << 7 } else { 0 };
        status.set_with_mask(
            PS_CARRY,
            Status::new(StatusArgs { carry: is_bit_0_set,
                                     ..StatusArgs::none() } ));
        Machine::set_flags_from_i8(status, *p_val as i8);
    }

    /// Stores `value` into `mem` and updates Z/N accordingly.
    fn set_i8_with_flags(mem: &mut i8, status: &mut Status, value: i8) {
        *mem = value;
        Machine::set_flags_from_i8(status, value);
    }

    fn load_x_register(&mut self, value: i8) {
        Machine::set_i8_with_flags(&mut self.registers.index_x,
                                   &mut self.registers.status,
                                   value);
    }

    fn load_y_register(&mut self, value: i8) {
        Machine::set_i8_with_flags(&mut self.registers.index_y,
                                   &mut self.registers.status,
                                   value);
    }

    fn load_accumulator(&mut self, value: i8) {
        Machine::set_i8_with_flags(&mut self.registers.accumulator,
                                   &mut self.registers.status,
                                   value);
    }

    /// ADC core: A = A + value + C, setting C on unsigned wrap and V on
    /// signed overflow; Z/N come from `load_accumulator`.
    /// NOTE(review): `a_before + c_before + value` relies on the old wrapping
    /// overflow semantics of pre-1.0 Rust; on a modern compiler this would
    /// panic in debug builds.
    fn add_with_carry(&mut self, value: i8) {
        if self.registers.status.contains(PS_DECIMAL_MODE) {
            // TODO akeeton: Implement binary-coded decimal.
            debug!("binary-coded decimal not implemented for add_with_carry");
        } else {
            let a_before: i8 = self.registers.accumulator;
            let c_before: i8 = if self.registers.status.contains(PS_CARRY) { 1 } else { 0 };
            let a_after: i8 = a_before + c_before + value;

            debug_assert_eq!(a_after as u8,
                             a_before as u8 + c_before as u8 + value as u8);

            // Unsigned wrap-around means a carry out of bit 7.
            let did_carry = (a_after as u8) < (a_before as u8);

            // Same-sign operands producing an opposite-sign result.
            let did_overflow =
                (a_before < 0 && value < 0 && a_after >= 0) ||
                (a_before > 0 && value > 0 && a_after <= 0);

            let mask = PS_CARRY | PS_OVERFLOW;

            self.registers.status.set_with_mask(mask,
                Status::new(StatusArgs { carry: did_carry,
                                         overflow: did_overflow,
                                         ..StatusArgs::none() } ));

            self.load_accumulator(a_after);

            debug!("accumulator: {}", self.registers.accumulator);
        }
    }

    /// AND core: A = A & value, Z/N updated via `load_accumulator`.
    fn and(&mut self, value: i8) {
        let a_after = self.registers.accumulator & value;
        self.load_accumulator(a_after);
    }

    // TODO: Implement binary-coded decimal
    /// SBC core: A = A - value - (1 - C), setting C on unsigned borrow-free
    /// result and V on signed overflow.
    fn subtract_with_carry(&mut self, value: i8) {
        if self.registers.status.contains(PS_DECIMAL_MODE) {
            debug!("binary-coded decimal not implemented for \
                    subtract_with_carry");
        } else {
            // A - M - (1 - C)

            // nc -- 'not carry'
            let nc: i8 = if self.registers.status.contains(PS_CARRY) { 0 } else { 1 };

            let a_before: i8 = self.registers.accumulator;

            let a_after = a_before - value - nc;

            // The carry flag is set on unsigned overflow.
            let did_carry = (a_after as u8) > (a_before as u8);

            // The overflow flag is set on two's-complement overflow.
            //
            // range of A is -128 to 127
            // range of - M - (1 - C) is -128 to 128
            // -(127 + 1) to -(-128 + 0)
            //
            let over = ((nc == 0 && value < 0) || (nc == 1 && value < -1))
                && a_before >= 0
                && a_after < 0;

            let under = (a_before < 0) && (-value - nc < 0)
                && a_after >= 0;

            let did_overflow = over || under;

            let mask = PS_CARRY | PS_OVERFLOW;

            self.registers.status.set_with_mask(mask,
                Status::new(StatusArgs { carry: did_carry,
                                         overflow: did_overflow,
                                         ..StatusArgs::none() } ));

            self.load_accumulator(a_after);
        }
    }

    /// DEC: decrements the byte at `addr` and updates Z/N from the result.
    /// NOTE(review): `get_byte(addr) - 1` underflows at 0 -- wrapping in the
    /// old dialect, but a debug-build panic on modern Rust.
    fn decrement_memory(&mut self, addr: Address) {
        let value_new = self.memory.get_byte(addr) - 1;

        self.memory.set_byte(addr, value_new);

        let is_negative = (value_new as i8) < 0;
        let is_zero = value_new == 0;

        self.registers.status.set_with_mask(
            PS_NEGATIVE | PS_ZERO,
            Status::new(StatusArgs { negative: is_negative,
                                     zero: is_zero,
                                     ..StatusArgs::none() } ));
    }

    fn dec_x(&mut self) {
        let val = self.registers.index_x;
        self.load_x_register(val - 1);
    }

    fn jump(&mut self, addr: Address) {
        self.registers.program_counter = addr;
    }

    fn branch_if_carry_clear(&mut self, addr: Address) {
        if !self.registers.status.contains(PS_CARRY) {
            self.registers.program_counter = addr;
        }
    }

    fn branch_if_carry_set(&mut self, addr: Address) {
        if self.registers.status.contains(PS_CARRY) {
            self.registers.program_counter = addr;
        }
    }

    fn branch_if_minus(&mut self, addr: Address) {
        if self.registers.status.contains(PS_NEGATIVE) {
            self.registers.program_counter = addr;
        }
    }

    /// Writes `val` at the current stack address, then moves the stack
    /// pointer down (6502 stack grows downward).
    fn push_on_stack(&mut self, val: u8) {
        let addr = self.registers.stack_pointer.to_address();
        self.memory.set_byte(addr, val);
        self.registers.stack_pointer.decrement();
    }

    /// Reads the byte at the current stack address, then moves the stack
    /// pointer up.
    /// NOTE(review): pull reads *before* incrementing while push writes
    /// before decrementing -- the pair is asymmetric relative to a real
    /// 6502 (which increments SP before reading); verify against callers.
    fn pull_from_stack(&mut self) -> u8 {
        let addr = self.registers.stack_pointer.to_address();
        let out = self.memory.get_byte(addr);
        self.registers.stack_pointer.increment();
        out
    }
}

impl std::fmt::Show for Machine {
    /// Debug dump: currently only prints the accumulator.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(f, "Machine Dump:\n\nAccumulator: {}", self.registers.accumulator)
    }
}

#[test]
fn
add_with_carry_test() { let mut machine = Machine::new(); machine.add_with_carry(1); assert_eq!(machine.registers.accumulator, 1); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.add_with_carry(-1); assert_eq!(machine.registers.accumulator, 0); assert_eq!(machine.registers.status.contains(PS_CARRY), true); assert_eq!(machine.registers.status.contains(PS_ZERO), true); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.add_with_carry(1); assert_eq!(machine.registers.accumulator, 2); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); let mut machine = Machine::new(); machine.add_with_carry(127); assert_eq!(machine.registers.accumulator, 127); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.add_with_carry(-127); assert_eq!(machine.registers.accumulator, 0); assert_eq!(machine.registers.status.contains(PS_CARRY), true); assert_eq!(machine.registers.status.contains(PS_ZERO), true); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.registers.status.remove(PS_CARRY); machine.add_with_carry(-128); assert_eq!(machine.registers.accumulator, -128); assert_eq!(machine.registers.status.contains(PS_CARRY), false); 
assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.add_with_carry(127); assert_eq!(machine.registers.accumulator, -1); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); let mut machine = Machine::new(); machine.add_with_carry(127); assert_eq!(machine.registers.accumulator, 127); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.add_with_carry(1); assert_eq!(machine.registers.accumulator, -128); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), true); } #[test] fn and_test() { let mut machine = Machine::new(); machine.registers.accumulator = 0; machine.and(-1); assert_eq!(machine.registers.accumulator, 0); assert_eq!(machine.registers.status.contains(PS_ZERO), true); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); machine.registers.accumulator = -1; machine.and(0); assert_eq!(machine.registers.accumulator, 0); assert_eq!(machine.registers.status.contains(PS_ZERO), true); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); machine.registers.accumulator = -1; machine.and(0x0f); assert_eq!(machine.registers.accumulator, 0x0f); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); 
machine.registers.accumulator = -1; machine.and(-128); assert_eq!(machine.registers.accumulator, -128); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); } #[test] fn subtract_with_carry_test() { let mut machine = Machine::new(); machine.execute_instruction((instruction::SEC, instruction::UseImplied)); machine.registers.accumulator = 0; machine.subtract_with_carry(1); assert_eq!(machine.registers.accumulator, -1); assert_eq!(machine.registers.status.contains(PS_CARRY), true); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.execute_instruction((instruction::SEC, instruction::UseImplied)); machine.registers.accumulator = -128; machine.subtract_with_carry(1); assert_eq!(machine.registers.accumulator, 127); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), true); machine.execute_instruction((instruction::SEC, instruction::UseImplied)); machine.registers.accumulator = 127; machine.subtract_with_carry(-1); assert_eq!(machine.registers.accumulator, -128); assert_eq!(machine.registers.status.contains(PS_CARRY), true); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), true); machine.execute_instruction((instruction::CLC, instruction::UseImplied)); machine.registers.accumulator = -64; machine.subtract_with_carry(64); assert_eq!(machine.registers.accumulator, 127); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); 
assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), true); machine.execute_instruction((instruction::SEC, instruction::UseImplied)); machine.registers.accumulator = 0; machine.subtract_with_carry(-128); assert_eq!(machine.registers.accumulator, -128); assert_eq!(machine.registers.status.contains(PS_CARRY), true); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), true); machine.execute_instruction((instruction::CLC, instruction::UseImplied)); machine.registers.accumulator = 0; machine.subtract_with_carry(127); assert_eq!(machine.registers.accumulator, -128); assert_eq!(machine.registers.status.contains(PS_CARRY), true); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); } #[test] fn decrement_memory_test() { let mut machine = Machine::new(); let addr = Address(0xA1B2); machine.memory.set_byte(addr, 5); machine.decrement_memory(addr); assert_eq!(machine.memory.get_byte(addr), 4); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); machine.decrement_memory(addr); assert_eq!(machine.memory.get_byte(addr), 3); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); machine.decrement_memory(addr); machine.decrement_memory(addr); machine.decrement_memory(addr); assert_eq!(machine.memory.get_byte(addr), 0); assert_eq!(machine.registers.status.contains(PS_ZERO), true); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); machine.decrement_memory(addr); assert_eq!(machine.memory.get_byte(addr) as i8, -1); assert_eq!(machine.registers.status.contains(PS_ZERO), 
false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); } #[test] fn logical_shift_right_test() { // Testing UseImplied version (which targets the accumulator) only, for now let mut machine = Machine::new(); machine.execute_instruction((instruction::LDA, instruction::UseImmediate(0))); machine.execute_instruction((instruction::LSR, instruction::UseImplied)); assert_eq!(machine.registers.accumulator, 0); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), true); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.execute_instruction((instruction::LDA, instruction::UseImmediate(1))); machine.execute_instruction((instruction::LSR, instruction::UseImplied)); assert_eq!(machine.registers.accumulator, 0); assert_eq!(machine.registers.status.contains(PS_CARRY), true); assert_eq!(machine.registers.status.contains(PS_ZERO), true); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.execute_instruction((instruction::LDA, instruction::UseImmediate(255))); machine.execute_instruction((instruction::LSR, instruction::UseImplied)); assert_eq!(machine.registers.accumulator, 0x7F); assert_eq!(machine.registers.status.contains(PS_CARRY), true); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.execute_instruction((instruction::LDA, instruction::UseImmediate(254))); machine.execute_instruction((instruction::LSR, instruction::UseImplied)); assert_eq!(machine.registers.accumulator, 0x7F); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), 
false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); } #[test] fn dec_x_test() { let mut machine = Machine::new(); machine.dec_x(); assert_eq!(machine.registers.index_x, -1); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.dec_x(); assert_eq!(machine.registers.index_x, -2); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.load_x_register(5); machine.dec_x(); assert_eq!(machine.registers.index_x, 4); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.dec_x(); machine.dec_x(); machine.dec_x(); machine.dec_x(); assert_eq!(machine.registers.index_x, 0); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), true); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.dec_x(); assert_eq!(machine.registers.index_x, -1); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); } #[test] fn jump_test() { let mut machine = Machine::new(); let addr = Address(0xA1B1); machine.jump(addr); assert_eq!(machine.registers.program_counter, addr); } #[test] fn 
branch_if_carry_clear_test() { let mut machine = Machine::new(); machine.execute_instruction((instruction::SEC, instruction::UseImplied)); machine.branch_if_carry_clear(Address(0xABCD)); assert_eq!(machine.registers.program_counter, Address(0)); machine.execute_instruction((instruction::CLC, instruction::UseImplied)); machine.branch_if_carry_clear(Address(0xABCD)); assert_eq!(machine.registers.program_counter, Address(0xABCD)); } #[test] fn branch_if_carry_set_test() { let mut machine = Machine::new(); machine.execute_instruction((instruction::CLC, instruction::UseImplied)); machine.branch_if_carry_set(Address(0xABCD)); assert_eq!(machine.registers.program_counter, Address(0)); machine.execute_instruction((instruction::SEC, instruction::UseImplied)); machine.branch_if_carry_set(Address(0xABCD)); assert_eq!(machine.registers.program_counter, Address(0xABCD)); } #[test] fn branch_if_equal_test() { let mut machine = Machine::new(); machine.branch_if_equal(Address(0xABCD)); assert_eq!(machine.registers.program_counter, Address(0)); machine.registers.status.or(PS_ZERO); machine.branch_if_equal(Address(0xABCD)); assert_eq!(machine.registers.program_counter, Address(0xABCD)); } #[test] fn branch_if_minus_test() { { let mut machine = Machine::new(); let registers_before = machine.registers; machine.branch_if_minus(Address(0xABCD)); assert_eq!(machine.registers, registers_before); assert_eq!(machine.registers.program_counter, Address(0)); } { let mut machine = Machine::new(); machine.registers.status.or(PS_NEGATIVE); let registers_before = machine.registers; machine.branch_if_minus(Address(0xABCD)); assert_eq!(machine.registers.status, registers_before.status); assert_eq!(machine.registers.program_counter, Address(0xABCD)); } } Add branch_if_equal(). // Copyright (C) 2014 The 6502-rs Developers // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // 3. Neither the names of the copyright holders nor the names of any // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. 
use std; use address::{Address, AddressDiff}; use instruction; use instruction::{DecodedInstr}; use memory::Memory; use registers::{ Registers, StackPointer, Status, StatusArgs }; use registers::{ PS_NEGATIVE, PS_DECIMAL_MODE, PS_OVERFLOW, PS_ZERO, PS_CARRY, PS_DISABLE_INTERRUPTS }; pub struct Machine { pub registers: Registers, pub memory: Memory } impl Machine { pub fn new() -> Machine { Machine{ registers: Registers::new(), memory: Memory::new() } } pub fn reset(&mut self) { *self = Machine::new(); } pub fn fetch_next_and_decode(&mut self) -> Option<DecodedInstr> { let x: u8 = self.memory.get_byte(self.registers.program_counter); match instruction::OPCODES[x as uint] { Some((instr, am)) => { let extra_bytes = am.extra_bytes(); let num_bytes = AddressDiff(1) + extra_bytes; let data_start = self.registers.program_counter + AddressDiff(1); let slice = self.memory.get_slice(data_start, extra_bytes); let am_out = am.process(self, slice); // Increment program counter self.registers.program_counter = self.registers.program_counter + num_bytes; Some((instr, am_out)) } _ => None } } pub fn execute_instruction(&mut self, decoded_instr: DecodedInstr) { match decoded_instr { (instruction::ADC, instruction::UseImmediate(val)) => { debug!("add with carry immediate: {}", val); self.add_with_carry(val as i8); } (instruction::ADC, instruction::UseAddress(addr)) => { let val = self.memory.get_byte(addr) as i8; debug!("add with carry. address: {}. 
value: {}", addr, val);
                self.add_with_carry(val);
            }
            (instruction::AND, instruction::UseImmediate(val)) => {
                self.and(val as i8);
            }
            (instruction::AND, instruction::UseAddress(addr)) => {
                let val = self.memory.get_byte(addr) as i8;
                self.and(val as i8);
            }
            (instruction::ASL, instruction::UseImplied) => {
                // Accumulator mode
                let mut val = self.registers.accumulator as u8;
                Machine::shift_left_with_flags(&mut val,
                                               &mut self.registers.status);
                self.registers.accumulator = val as i8;
            }
            (instruction::ASL, instruction::UseAddress(addr)) => {
                Machine::shift_left_with_flags(
                    self.memory.get_byte_mut_ref(addr),
                    &mut self.registers.status);
            }
            (instruction::BCC, instruction::UseRelative(rel)) => {
                let addr = self.registers.program_counter
                         + AddressDiff(rel as i32);
                self.branch_if_carry_clear(addr);
            }
            (instruction::BCS, instruction::UseRelative(rel)) => {
                let addr = self.registers.program_counter
                         + AddressDiff(rel as i32);
                self.branch_if_carry_set(addr);
            }
            (instruction::BIT, instruction::UseAddress(addr)) => {
                let a: u8 = self.registers.accumulator as u8;
                let m: u8 = self.memory.get_byte(addr);
                let res = a & m;

                // The zero flag is set based on the result of the 'and'.
                let is_zero = 0 == res;

                // BUGFIX: the N and V flags come from the memory operand
                // itself, not from the AND result. The original code tested
                // `res` here, which contradicted both the comments below and
                // 6502 BIT semantics.
                // The N flag is set to bit 7 of the byte from memory.
                let bit7 = 0 != (0x80 & m);
                // The V flag is set to bit 6 of the byte from memory.
                let bit6 = 0 != (0x40 & m);

                self.registers.status.set_with_mask(
                    PS_ZERO | PS_NEGATIVE | PS_OVERFLOW,
                    Status::new(StatusArgs {
                        zero: is_zero,
                        negative: bit7,
                        overflow: bit6,
                        ..StatusArgs::none() } ));
            }
            (instruction::BMI, instruction::UseRelative(rel)) => {
                let addr = self.registers.program_counter
                         + AddressDiff(rel as i32);
                debug!("branch if minus relative.
address: {}", addr); self.branch_if_minus(addr); } (instruction::CLC, instruction::UseImplied) => { self.registers.status.and(!PS_CARRY); } (instruction::CLD, instruction::UseImplied) => { self.registers.status.and(!PS_DECIMAL_MODE); } (instruction::CLI, instruction::UseImplied) => { self.registers.status.and(!PS_DISABLE_INTERRUPTS); } (instruction::CLV, instruction::UseImplied) => { self.registers.status.and(!PS_OVERFLOW); } (instruction::DEC, instruction::UseAddress(addr)) => { self.decrement_memory(addr) } (instruction::DEX, instruction::UseImplied) => { self.dec_x(); } (instruction::INC, instruction::UseAddress(addr)) => { let m = self.memory.get_byte(addr); let m = m + 1; self.memory.set_byte(addr, m); let i = m as i8; Machine::set_flags_from_i8(&mut self.registers.status, i); } (instruction::INX, instruction::UseImplied) => { let x = self.registers.index_x + 1; self.load_x_register(x); } (instruction::INY, instruction::UseImplied) => { let y = self.registers.index_y + 1; self.load_y_register(y); } (instruction::JMP, instruction::UseAddress(addr)) => { self.jump(addr) } (instruction::LDA, instruction::UseImmediate(val)) => { debug!("load A immediate: {}", val); self.load_accumulator(val as i8); } (instruction::LDA, instruction::UseAddress(addr)) => { let val = self.memory.get_byte(addr); debug!("load A. address: {}. value: {}", addr, val); self.load_accumulator(val as i8); } (instruction::LDX, instruction::UseImmediate(val)) => { debug!("load X immediate: {}", val); self.load_x_register(val as i8); } (instruction::LDX, instruction::UseAddress(addr)) => { let val = self.memory.get_byte(addr); debug!("load X. address: {}. value: {}", addr, val); self.load_x_register(val as i8); } (instruction::LDY, instruction::UseImmediate(val)) => { debug!("load Y immediate: {}", val); self.load_y_register(val as i8); } (instruction::LDY, instruction::UseAddress(addr)) => { let val = self.memory.get_byte(addr); debug!("load Y. address: {}. 
value: {}", addr, val); self.load_y_register(val as i8); } (instruction::LSR, instruction::UseImplied) => { // Accumulator mode let mut val = self.registers.accumulator as u8; Machine::shift_right_with_flags(&mut val, &mut self.registers.status); self.registers.accumulator = val as i8; } (instruction::LSR, instruction::UseAddress(addr)) => { Machine::shift_right_with_flags( self.memory.get_byte_mut_ref(addr), &mut self.registers.status); } (instruction::PHA, instruction::UseImplied) => { // Push accumulator let val = self.registers.accumulator as u8; self.push_on_stack(val); } (instruction::PHP, instruction::UseImplied) => { // Push status let val = self.registers.status.bits(); self.push_on_stack(val); } (instruction::PLA, instruction::UseImplied) => { // Pull accumulator let val: u8 = self.pull_from_stack(); self.registers.accumulator = val as i8; } (instruction::PLP, instruction::UseImplied) => { // Pull status let val: u8 = self.pull_from_stack(); // The `truncate` here won't do anything because we have a // constant for the single unused flags bit. This probably // corresponds to the behavior of the 6502...? 
FIXME: verify self.registers.status = Status::from_bits_truncate(val); } (instruction::ROL, instruction::UseImplied) => { // Accumulator mode let mut val = self.registers.accumulator as u8; Machine::rotate_left_with_flags(&mut val, &mut self.registers.status); self.registers.accumulator = val as i8; } (instruction::ROL, instruction::UseAddress(addr)) => { Machine::rotate_left_with_flags( self.memory.get_byte_mut_ref(addr), &mut self.registers.status); } (instruction::ROR, instruction::UseImplied) => { // Accumulator mode let mut val = self.registers.accumulator as u8; Machine::rotate_right_with_flags(&mut val, &mut self.registers.status); self.registers.accumulator = val as i8; } (instruction::ROR, instruction::UseAddress(addr)) => { Machine::rotate_right_with_flags( self.memory.get_byte_mut_ref(addr), &mut self.registers.status); } (instruction::SBC, instruction::UseImmediate(val)) => { debug!("subtract with carry immediate: {}", val); self.subtract_with_carry(val as i8); } (instruction::SBC, instruction::UseAddress(addr)) => { let val = self.memory.get_byte(addr) as i8; debug!("subtract with carry. address: {}. 
value: {}", addr, val); self.subtract_with_carry(val); } (instruction::SEC, instruction::UseImplied) => { self.registers.status.or(PS_CARRY); } (instruction::SED, instruction::UseImplied) => { self.registers.status.or(PS_DECIMAL_MODE); } (instruction::SEI, instruction::UseImplied) => { self.registers.status.or(PS_DISABLE_INTERRUPTS); } (instruction::STA, instruction::UseAddress(addr)) => { self.memory.set_byte(addr, self.registers.accumulator as u8); } (instruction::STX, instruction::UseAddress(addr)) => { self.memory.set_byte(addr, self.registers.index_x as u8); } (instruction::STY, instruction::UseAddress(addr)) => { self.memory.set_byte(addr, self.registers.index_y as u8); } (instruction::TAX, instruction::UseImplied) => { let val = self.registers.accumulator; self.load_x_register(val); } (instruction::TAY, instruction::UseImplied) => { let val = self.registers.accumulator; self.load_y_register(val); } (instruction::TSX, instruction::UseImplied) => { let StackPointer(val) = self.registers.stack_pointer; let val = val as i8; self.load_x_register(val); } (instruction::TXA, instruction::UseImplied) => { let val = self.registers.index_x; self.load_accumulator(val); } (instruction::TXS, instruction::UseImplied) => { // Note that this is the only 'transfer' instruction that does // NOT set the zero and negative flags. 
(Because the target // is the stack pointer) let val = self.registers.index_x; self.registers.stack_pointer = StackPointer(val as u8); } (instruction::TYA, instruction::UseImplied) => { let val = self.registers.index_y; self.load_accumulator(val); } (instruction::NOP, instruction::UseImplied) => { debug!("NOP instruction"); } (_, _) => { debug!("attempting to execute unimplemented or invalid \ instruction"); } }; } pub fn run(&mut self) { loop { if let Some(decoded_instr) = self.fetch_next_and_decode() { self.execute_instruction(decoded_instr); } else { break } } } fn set_flags_from_i8(status: &mut Status, value: i8) { let is_zero = value == 0; let is_negative = value < 0; status.set_with_mask( PS_ZERO | PS_NEGATIVE, Status::new(StatusArgs { zero: is_zero, negative: is_negative, ..StatusArgs::none() } )); } fn shift_left_with_flags(p_val: &mut u8, status: &mut Status) { let mask = 1 << 7; let is_bit_7_set = (*p_val & mask) == mask; let shifted = (*p_val & !(1 << 7)) << 1; *p_val = shifted; status.set_with_mask( PS_CARRY, Status::new(StatusArgs { carry: is_bit_7_set, ..StatusArgs::none() } )); Machine::set_flags_from_i8(status, *p_val as i8); } fn shift_right_with_flags(p_val: &mut u8, status: &mut Status) { let mask = 1; let is_bit_0_set = (*p_val & mask) == mask; *p_val = *p_val >> 1; status.set_with_mask( PS_CARRY, Status::new(StatusArgs { carry: is_bit_0_set, ..StatusArgs::none() } )); Machine::set_flags_from_i8(status, *p_val as i8); } fn rotate_left_with_flags(p_val: &mut u8, status: &mut Status) { let is_carry_set = status.contains(PS_CARRY); let mask = 1 << 7; let is_bit_7_set = (*p_val & mask) == mask; let shifted = (*p_val & !(1 << 7)) << 1; *p_val = shifted + if is_carry_set { 1 } else { 0 }; status.set_with_mask( PS_CARRY, Status::new(StatusArgs { carry: is_bit_7_set, ..StatusArgs::none() } )); Machine::set_flags_from_i8(status, *p_val as i8); } fn rotate_right_with_flags(p_val: &mut u8, status: &mut Status) { let is_carry_set = 
status.contains(PS_CARRY);
        let mask = 1;
        let is_bit_0_set = (*p_val & mask) == mask;
        let shifted = *p_val >> 1;
        // ROR: bit 0 falls out into the carry; the old carry rotates in as
        // the new bit 7.
        *p_val = shifted + if is_carry_set { 1 << 7 } else { 0 };
        status.set_with_mask(
            PS_CARRY,
            Status::new(StatusArgs { carry: is_bit_0_set,
                                     ..StatusArgs::none() } ));
        Machine::set_flags_from_i8(status, *p_val as i8);
    }

    // Store `value` into `mem` and update the N/Z flags in `status` to match.
    fn set_i8_with_flags(mem: &mut i8, status: &mut Status, value: i8) {
        *mem = value;
        Machine::set_flags_from_i8(status, value);
    }

    // LDX-style load: write `value` into X, setting N/Z from the value.
    fn load_x_register(&mut self, value: i8) {
        Machine::set_i8_with_flags(&mut self.registers.index_x,
                                   &mut self.registers.status,
                                   value);
    }

    // LDY-style load: write `value` into Y, setting N/Z from the value.
    fn load_y_register(&mut self, value: i8) {
        Machine::set_i8_with_flags(&mut self.registers.index_y,
                                   &mut self.registers.status,
                                   value);
    }

    // LDA-style load: write `value` into the accumulator, setting N/Z.
    fn load_accumulator(&mut self, value: i8) {
        Machine::set_i8_with_flags(&mut self.registers.accumulator,
                                   &mut self.registers.status,
                                   value);
    }

    // ADC (binary mode): A <- A + value + C, updating the C and V flags.
    // Decimal mode is not implemented and only logs a debug message.
    fn add_with_carry(&mut self, value: i8) {
        if self.registers.status.contains(PS_DECIMAL_MODE) {
            // TODO akeeton: Implement binary-coded decimal.
debug!("binary-coded decimal not implemented for add_with_carry"); } else { let a_before: i8 = self.registers.accumulator; let c_before: i8 = if self.registers.status.contains(PS_CARRY) { 1 } else { 0 }; let a_after: i8 = a_before + c_before + value; debug_assert_eq!(a_after as u8, a_before as u8 + c_before as u8 + value as u8); let did_carry = (a_after as u8) < (a_before as u8); let did_overflow = (a_before < 0 && value < 0 && a_after >= 0) || (a_before > 0 && value > 0 && a_after <= 0); let mask = PS_CARRY | PS_OVERFLOW; self.registers.status.set_with_mask(mask, Status::new(StatusArgs { carry: did_carry, overflow: did_overflow, ..StatusArgs::none() } )); self.load_accumulator(a_after); debug!("accumulator: {}", self.registers.accumulator); } } fn and(&mut self, value: i8) { let a_after = self.registers.accumulator & value; self.load_accumulator(a_after); } // TODO: Implement binary-coded decimal fn subtract_with_carry(&mut self, value: i8) { if self.registers.status.contains(PS_DECIMAL_MODE) { debug!("binary-coded decimal not implemented for \ subtract_with_carry"); } else { // A - M - (1 - C) // nc -- 'not carry' let nc: i8 = if self.registers.status.contains(PS_CARRY) { 0 } else { 1 }; let a_before: i8 = self.registers.accumulator; let a_after = a_before - value - nc; // The carry flag is set on unsigned overflow. let did_carry = (a_after as u8) > (a_before as u8); // The overflow flag is set on two's-complement overflow. 
// // range of A is -128 to 127 // range of - M - (1 - C) is -128 to 128 // -(127 + 1) to -(-128 + 0) // let over = ((nc == 0 && value < 0) || (nc == 1 && value < -1)) && a_before >= 0 && a_after < 0; let under = (a_before < 0) && (-value - nc < 0) && a_after >= 0; let did_overflow = over || under; let mask = PS_CARRY | PS_OVERFLOW; self.registers.status.set_with_mask(mask, Status::new(StatusArgs { carry: did_carry, overflow: did_overflow, ..StatusArgs::none() } )); self.load_accumulator(a_after); } } fn decrement_memory(&mut self, addr: Address) { let value_new = self.memory.get_byte(addr) - 1; self.memory.set_byte(addr, value_new); let is_negative = (value_new as i8) < 0; let is_zero = value_new == 0; self.registers.status.set_with_mask( PS_NEGATIVE | PS_ZERO, Status::new(StatusArgs { negative: is_negative, zero: is_zero, ..StatusArgs::none() } )); } fn dec_x(&mut self) { let val = self.registers.index_x; self.load_x_register(val - 1); } fn jump(&mut self, addr: Address) { self.registers.program_counter = addr; } fn branch_if_carry_clear(&mut self, addr: Address) { if !self.registers.status.contains(PS_CARRY) { self.registers.program_counter = addr; } } fn branch_if_carry_set(&mut self, addr: Address) { if self.registers.status.contains(PS_CARRY) { self.registers.program_counter = addr; } } fn branch_if_equal(&mut self, addr: Address) { if self.registers.status.contains(PS_ZERO) { self.registers.program_counter = addr; } } fn branch_if_minus(&mut self, addr: Address) { if self.registers.status.contains(PS_NEGATIVE) { self.registers.program_counter = addr; } } fn push_on_stack(&mut self, val: u8) { let addr = self.registers.stack_pointer.to_address(); self.memory.set_byte(addr, val); self.registers.stack_pointer.decrement(); } fn pull_from_stack(&mut self) -> u8 { let addr = self.registers.stack_pointer.to_address(); let out = self.memory.get_byte(addr); self.registers.stack_pointer.increment(); out } } impl std::fmt::Show for Machine { fn fmt(&self, f: &mut 
std::fmt::Formatter) -> std::fmt::Result { write!(f, "Machine Dump:\n\nAccumulator: {}", self.registers.accumulator) } } #[test] fn add_with_carry_test() { let mut machine = Machine::new(); machine.add_with_carry(1); assert_eq!(machine.registers.accumulator, 1); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.add_with_carry(-1); assert_eq!(machine.registers.accumulator, 0); assert_eq!(machine.registers.status.contains(PS_CARRY), true); assert_eq!(machine.registers.status.contains(PS_ZERO), true); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.add_with_carry(1); assert_eq!(machine.registers.accumulator, 2); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); let mut machine = Machine::new(); machine.add_with_carry(127); assert_eq!(machine.registers.accumulator, 127); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.add_with_carry(-127); assert_eq!(machine.registers.accumulator, 0); assert_eq!(machine.registers.status.contains(PS_CARRY), true); assert_eq!(machine.registers.status.contains(PS_ZERO), true); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.registers.status.remove(PS_CARRY); machine.add_with_carry(-128); 
assert_eq!(machine.registers.accumulator, -128); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.add_with_carry(127); assert_eq!(machine.registers.accumulator, -1); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); let mut machine = Machine::new(); machine.add_with_carry(127); assert_eq!(machine.registers.accumulator, 127); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.add_with_carry(1); assert_eq!(machine.registers.accumulator, -128); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), true); } #[test] fn and_test() { let mut machine = Machine::new(); machine.registers.accumulator = 0; machine.and(-1); assert_eq!(machine.registers.accumulator, 0); assert_eq!(machine.registers.status.contains(PS_ZERO), true); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); machine.registers.accumulator = -1; machine.and(0); assert_eq!(machine.registers.accumulator, 0); assert_eq!(machine.registers.status.contains(PS_ZERO), true); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); machine.registers.accumulator = -1; machine.and(0x0f); assert_eq!(machine.registers.accumulator, 0x0f); 
assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); machine.registers.accumulator = -1; machine.and(-128); assert_eq!(machine.registers.accumulator, -128); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); } #[test] fn subtract_with_carry_test() { let mut machine = Machine::new(); machine.execute_instruction((instruction::SEC, instruction::UseImplied)); machine.registers.accumulator = 0; machine.subtract_with_carry(1); assert_eq!(machine.registers.accumulator, -1); assert_eq!(machine.registers.status.contains(PS_CARRY), true); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.execute_instruction((instruction::SEC, instruction::UseImplied)); machine.registers.accumulator = -128; machine.subtract_with_carry(1); assert_eq!(machine.registers.accumulator, 127); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), true); machine.execute_instruction((instruction::SEC, instruction::UseImplied)); machine.registers.accumulator = 127; machine.subtract_with_carry(-1); assert_eq!(machine.registers.accumulator, -128); assert_eq!(machine.registers.status.contains(PS_CARRY), true); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), true); machine.execute_instruction((instruction::CLC, instruction::UseImplied)); machine.registers.accumulator = -64; machine.subtract_with_carry(64); assert_eq!(machine.registers.accumulator, 127); 
assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), true); machine.execute_instruction((instruction::SEC, instruction::UseImplied)); machine.registers.accumulator = 0; machine.subtract_with_carry(-128); assert_eq!(machine.registers.accumulator, -128); assert_eq!(machine.registers.status.contains(PS_CARRY), true); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), true); machine.execute_instruction((instruction::CLC, instruction::UseImplied)); machine.registers.accumulator = 0; machine.subtract_with_carry(127); assert_eq!(machine.registers.accumulator, -128); assert_eq!(machine.registers.status.contains(PS_CARRY), true); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); } #[test] fn decrement_memory_test() { let mut machine = Machine::new(); let addr = Address(0xA1B2); machine.memory.set_byte(addr, 5); machine.decrement_memory(addr); assert_eq!(machine.memory.get_byte(addr), 4); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); machine.decrement_memory(addr); assert_eq!(machine.memory.get_byte(addr), 3); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); machine.decrement_memory(addr); machine.decrement_memory(addr); machine.decrement_memory(addr); assert_eq!(machine.memory.get_byte(addr), 0); assert_eq!(machine.registers.status.contains(PS_ZERO), true); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); 
machine.decrement_memory(addr); assert_eq!(machine.memory.get_byte(addr) as i8, -1); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); } #[test] fn logical_shift_right_test() { // Testing UseImplied version (which targets the accumulator) only, for now let mut machine = Machine::new(); machine.execute_instruction((instruction::LDA, instruction::UseImmediate(0))); machine.execute_instruction((instruction::LSR, instruction::UseImplied)); assert_eq!(machine.registers.accumulator, 0); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), true); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.execute_instruction((instruction::LDA, instruction::UseImmediate(1))); machine.execute_instruction((instruction::LSR, instruction::UseImplied)); assert_eq!(machine.registers.accumulator, 0); assert_eq!(machine.registers.status.contains(PS_CARRY), true); assert_eq!(machine.registers.status.contains(PS_ZERO), true); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.execute_instruction((instruction::LDA, instruction::UseImmediate(255))); machine.execute_instruction((instruction::LSR, instruction::UseImplied)); assert_eq!(machine.registers.accumulator, 0x7F); assert_eq!(machine.registers.status.contains(PS_CARRY), true); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.execute_instruction((instruction::LDA, instruction::UseImmediate(254))); machine.execute_instruction((instruction::LSR, instruction::UseImplied)); assert_eq!(machine.registers.accumulator, 0x7F); 
assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); } #[test] fn dec_x_test() { let mut machine = Machine::new(); machine.dec_x(); assert_eq!(machine.registers.index_x, -1); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.dec_x(); assert_eq!(machine.registers.index_x, -2); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.load_x_register(5); machine.dec_x(); assert_eq!(machine.registers.index_x, 4); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.dec_x(); machine.dec_x(); machine.dec_x(); machine.dec_x(); assert_eq!(machine.registers.index_x, 0); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), true); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false); machine.dec_x(); assert_eq!(machine.registers.index_x, -1); assert_eq!(machine.registers.status.contains(PS_CARRY), false); assert_eq!(machine.registers.status.contains(PS_ZERO), false); assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true); assert_eq!(machine.registers.status.contains(PS_OVERFLOW), 
               false);
}

// JMP simply replaces the program counter.
#[test]
fn jump_test() {
    let mut machine = Machine::new();
    let addr = Address(0xA1B1);

    machine.jump(addr);
    assert_eq!(machine.registers.program_counter, addr);
}

// BCC must branch only while the carry flag is clear.
#[test]
fn branch_if_carry_clear_test() {
    let mut machine = Machine::new();

    machine.execute_instruction((instruction::SEC, instruction::UseImplied));
    machine.branch_if_carry_clear(Address(0xABCD));
    // Carry set -> branch not taken.
    assert_eq!(machine.registers.program_counter, Address(0));

    machine.execute_instruction((instruction::CLC, instruction::UseImplied));
    machine.branch_if_carry_clear(Address(0xABCD));
    // Carry clear -> branch taken.
    assert_eq!(machine.registers.program_counter, Address(0xABCD));
}

// BCS must branch only while the carry flag is set.
#[test]
fn branch_if_carry_set_test() {
    let mut machine = Machine::new();

    machine.execute_instruction((instruction::CLC, instruction::UseImplied));
    machine.branch_if_carry_set(Address(0xABCD));
    // Carry clear -> branch not taken.
    assert_eq!(machine.registers.program_counter, Address(0));

    machine.execute_instruction((instruction::SEC, instruction::UseImplied));
    machine.branch_if_carry_set(Address(0xABCD));
    // Carry set -> branch taken.
    assert_eq!(machine.registers.program_counter, Address(0xABCD));
}

// BEQ must branch only while the zero flag is set.
#[test]
fn branch_if_equal_test() {
    let mut machine = Machine::new();

    machine.branch_if_equal(Address(0xABCD));
    assert_eq!(machine.registers.program_counter, Address(0));

    machine.registers.status.or(PS_ZERO);
    machine.branch_if_equal(Address(0xABCD));
    assert_eq!(machine.registers.program_counter, Address(0xABCD));
}

// BMI must branch only while the negative flag is set, and must leave every
// register other than the program counter untouched.
#[test]
fn branch_if_minus_test() {
    {
        let mut machine = Machine::new();
        let registers_before = machine.registers;

        machine.branch_if_minus(Address(0xABCD));
        // No flags set -> nothing changes at all.
        assert_eq!(machine.registers, registers_before);
        assert_eq!(machine.registers.program_counter, Address(0));
    }

    {
        let mut machine = Machine::new();

        machine.registers.status.or(PS_NEGATIVE);
        let registers_before = machine.registers;

        machine.branch_if_minus(Address(0xABCD));
        // The status must be preserved; only the program counter moves.
        assert_eq!(machine.registers.status, registers_before.status);
        assert_eq!(machine.registers.program_counter, Address(0xABCD));
    }
}
// Copyright 2017 tokio-jsonrpc Developers // // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. //! JSON-RPC 2.0 messages. //! //! The main entrypoint here is the [Message](enum.Message.html). The others are just building //! blocks and you should generally work with `Message` instead. use serde::ser::{Serialize, SerializeStruct, Serializer}; use serde::de::{Deserialize, Deserializer, Error, Unexpected}; use serde_json::{Value, to_value}; use uuid::Uuid; #[derive(Debug, Clone, PartialEq, Eq, Hash)] struct Version; impl Serialize for Version { fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> { serializer.serialize_str("2.0") } } impl Deserialize for Version { fn deserialize<D: Deserializer>(deserializer: D) -> Result<Self, D::Error> { // The version is actually a string let parsed: String = Deserialize::deserialize(deserializer)?; if parsed == "2.0" { Ok(Version) } else { Err(D::Error::invalid_value(Unexpected::Str(&parsed), &"value 2.0")) } } } /// An RPC request. #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] #[serde(deny_unknown_fields)] pub struct Request { jsonrpc: Version, pub method: String, #[serde(skip_serializing_if = "Option::is_none")] pub params: Option<Value>, pub id: Value, } impl Request { /// Answer the request with a (positive) reply. /// /// The ID is taken from the request. pub fn reply(&self, reply: Value) -> Message { Message::Response(Response { jsonrpc: Version, result: Ok(reply), id: self.id.clone(), }) } /// Answer the request with an error. pub fn error(&self, error: RpcError) -> Message { Message::Response(Response { jsonrpc: Version, result: Err(error), id: self.id.clone(), }) } } /// An error code. 
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(deny_unknown_fields)]
pub struct RpcError {
    pub code: i64,
    pub message: String,
    // Optional extra payload; the key is omitted when there is none.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data: Option<Value>,
}

impl RpcError {
    /// A generic constructor.
    ///
    /// Mostly for completeness, doesn't do anything but filling in the corresponding fields.
    pub fn new(code: i64, message: String, data: Option<Value>) -> Self {
        RpcError {
            code: code,
            message: message,
            data: data,
        }
    }
    /// Create an Invalid Param error (code -32602).
    pub fn invalid_params(msg: Option<String>) -> Self {
        RpcError::new(-32602, "Invalid params".to_owned(), msg.map(Value::String))
    }
    /// Create a server error (code -32000).
    ///
    /// The optional payload is serialized into the `data` field.
    pub fn server_error<E: Serialize>(e: Option<E>) -> Self {
        RpcError::new(-32000,
                      "Server error".to_owned(),
                      e.map(|v| to_value(v).expect("Must be representable in JSON")))
    }
    /// Create an invalid request error (code -32600).
    pub fn invalid_request() -> Self {
        RpcError::new(-32600, "Invalid request".to_owned(), None)
    }
    /// Create a parse error (code -32700); the syntax-error text goes into `data`.
    pub fn parse_error(e: String) -> Self {
        RpcError::new(-32700, "Parse error".to_owned(), Some(Value::String(e)))
    }
    /// Create a method not found error (code -32601); the method name goes into `data`.
    pub fn method_not_found(method: String) -> Self {
        RpcError::new(-32601, "Method not found".to_owned(), Some(Value::String(method)))
    }
}

/// A response to an RPC.
///
/// It is created by the methods on [Request](struct.Request.html).
#[derive(Debug, Clone, PartialEq)]
pub struct Response {
    jsonrpc: Version,
    // Ok maps to the wire `result` field, Err to the wire `error` field.
    pub result: Result<Value, RpcError>,
    pub id: Value,
}

impl Serialize for Response {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        // Serialized by hand because exactly one of `result`/`error` may
        // appear, which a plain derive cannot express.
        let mut sub = serializer.serialize_struct("Response", 3)?;
        sub.serialize_field("jsonrpc", &self.jsonrpc)?;
        match self.result {
            Ok(ref value) => sub.serialize_field("result", value),
            Err(ref err) => sub.serialize_field("error", err),
        }?;
        sub.serialize_field("id", &self.id)?;
        sub.end()
    }
}

/// Deserializer for `Option<Value>` that produces `Some(Value::Null)`.
///
/// The usual one produces None in that case. But we need to know the difference between
/// `{x: null}` and `{}`.
fn some_value<D: Deserializer>(deserializer: D) -> Result<Option<Value>, D::Error> {
    Deserialize::deserialize(deserializer).map(Some)
}

/// A helper trick for deserialization.
///
/// Mirrors the wire format exactly: `result` and `error` are two independent optional fields
/// here and get folded into a single `Result` afterwards.
#[derive(Deserialize)]
#[serde(deny_unknown_fields)]
struct WireResponse {
    // It is actually used to eat and sanity check the deserialized text
    #[allow(dead_code)]
    jsonrpc: Version,
    // Make sure we accept null as Some(Value::Null), instead of going to None
    #[serde(default, deserialize_with = "some_value")]
    result: Option<Value>,
    error: Option<RpcError>,
    id: Value,
}

// Implementing deserialize is hard. We sidestep the difficulty by deserializing a similar
// structure that directly corresponds to whatever is on the wire and then convert it to our more
// convenient representation.
impl Deserialize for Response {
    fn deserialize<D: Deserializer>(deserializer: D) -> Result<Self, D::Error> {
        let wr: WireResponse = Deserialize::deserialize(deserializer)?;
        // Exactly one of `result`/`error` must be present on the wire.
        let result = match (wr.result, wr.error) {
            (Some(res), None) => Ok(res),
            (None, Some(err)) => Err(err),
            _ => {
                let err = D::Error::custom("Either 'error' or 'result' is expected, but not both");
                return Err(err);
            },
        };
        Ok(Response {
            jsonrpc: Version,
            result: result,
            id: wr.id,
        })
    }
}

/// A notification (doesn't expect an answer).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(deny_unknown_fields)]
pub struct Notification {
    jsonrpc: Version,
    pub method: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub params: Option<Value>,
}

/// One message of the JSON RPC protocol.
///
/// One message, directly mapped from the structures of the protocol. See the
/// [specification](http://www.jsonrpc.org/specification) for more details.
///
/// Since the protocol allows one endpoint to be both client and server at the same time, the
/// message can decode and encode both directions of the protocol.
///
/// The `Batch` variant is supposed to be created directly, without a constructor.
///
/// The `UnmatchedSub` variant is used when a request is an array and some of the subrequests
/// aren't recognized as valid json rpc 2.0 messages. This is never returned as a top-level
/// element, it is returned as `Err(Broken::Unmatched)`.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(untagged)]
pub enum Message {
    /// An RPC request.
    Request(Request),
    /// A response to a Request.
    Response(Response),
    /// A notification.
    Notification(Notification),
    /// A batch of more requests or responses.
    ///
    /// The protocol allows bundling multiple requests, notifications or responses to a single
    /// message.
    ///
    /// This variant has no direct constructor and is expected to be constructed manually.
    Batch(Vec<Message>),
    /// An unmatched sub entry in a `Batch`.
    ///
    /// When there's a `Batch` and an element doesn't conform to the JSONRPC 2.0 format, that one
    /// is represented by this. This is never produced as a top-level value when parsing, the
    /// `Err(Broken::Unmatched)` is used instead. It is not possible to serialize.
    #[serde(skip_serializing)]
    UnmatchedSub(Value),
}

impl Message {
    /// A constructor for a request.
    ///
    /// The ID is auto-generated.
    pub fn request(method: String, params: Option<Value>) -> Self {
        Message::Request(Request {
            jsonrpc: Version,
            method: method,
            params: params,
            // A fresh UUID keeps request IDs unique across the connection.
            id: Value::String(Uuid::new_v4().hyphenated().to_string()),
        })
    }
    /// Create a top-level error (without an ID).
    pub fn error(error: RpcError) -> Self {
        Message::Response(Response {
            jsonrpc: Version,
            result: Err(error),
            // There is no request to borrow an ID from, so it is null.
            id: Value::Null,
        })
    }
    /// A constructor for a notification.
    pub fn notification(method: String, params: Option<Value>) -> Self {
        Message::Notification(Notification {
            jsonrpc: Version,
            method: method,
            params: params,
        })
    }
}

/// A broken message.
///
/// Protocol-level errors.
#[derive(Debug, Clone, PartialEq, Deserialize)]
#[serde(untagged)]
pub enum Broken {
    /// It was valid JSON, but doesn't match the form of a JSONRPC 2.0 message.
    Unmatched(Value),
    /// Invalid JSON.
    // Never produced by deserialization itself; it is filled in manually when
    // serde reports a syntax error (see `from_slice`).
    #[serde(skip_deserializing)]
    SyntaxError(String),
}

impl Broken {
    /// Generate an appropriate error message.
    ///
    /// The error message for these things are specified in the RFC, so this just creates an error
    /// with the right values.
    pub fn reply(&self) -> Message {
        match *self {
            Broken::Unmatched(_) => Message::error(RpcError::invalid_request()),
            Broken::SyntaxError(ref e) => Message::error(RpcError::parse_error(e.clone())),
        }
    }
}

/// A trick to easily deserialize and detect valid JSON, but invalid Message.
#[derive(Deserialize)]
#[serde(untagged)]
enum WireMessage {
    Message(Message),
    Broken(Broken),
}

/// The result of parsing: either a message or a description of how it is broken.
pub type Parsed = Result<Message, Broken>;

/// Read a [Message](enum.Message.html) from a slice.
///
/// Invalid JSON or JSONRPC messages are reported as [Broken](enum.Broken.html).
pub fn from_slice(s: &[u8]) -> Parsed {
    match ::serde_json::de::from_slice(s) {
        // A top-level UnmatchedSub means the input was JSON, but not JSONRPC.
        Ok(WireMessage::Message(Message::UnmatchedSub(value))) => Err(Broken::Unmatched(value)),
        Ok(WireMessage::Message(m)) => Ok(m),
        Ok(WireMessage::Broken(b)) => Err(b),
        // Other errors can't happen right now, when we have the slice
        Err(e) => Err(Broken::SyntaxError(format!("{}", e))),
    }
}

/// Read a [Message](enum.Message.html) from a string.
///
/// Invalid JSON or JSONRPC messages are reported as [Broken](enum.Broken.html).
pub fn from_str(s: &str) -> Parsed { from_slice(s.as_bytes()) } impl Into<String> for Message { fn into(self) -> String { ::serde_json::ser::to_string(&self).unwrap() } } impl Into<Vec<u8>> for Message { fn into(self) -> Vec<u8> { ::serde_json::ser::to_vec(&self).unwrap() } } #[cfg(test)] mod tests { use super::*; use serde_json::Value; use serde_json::ser::to_vec; use serde_json::de::from_slice; /// Test serialization and deserialization of the Message /// /// We first deserialize it from a string. That way we check deserialization works. /// But since serialization doesn't have to produce the exact same result (order, spaces, …), /// we then serialize and deserialize the thing again and check it matches. #[test] fn message_serde() { // A helper for running one message test fn one(input: &str, expected: &Message) { let parsed: Message = from_str(input).unwrap(); assert_eq!(*expected, parsed); let serialized = to_vec(&parsed).unwrap(); let deserialized: Message = from_slice(&serialized).unwrap(); assert_eq!(parsed, deserialized); } // A request without parameters one(r#"{"jsonrpc": "2.0", "method": "call", "id": 1}"#, &Message::Request(Request { jsonrpc: Version, method: "call".to_owned(), params: None, id: json!(1), })); // A request with parameters one(r#"{"jsonrpc": "2.0", "method": "call", "params": [1, 2, 3], "id": 2}"#, &Message::Request(Request { jsonrpc: Version, method: "call".to_owned(), params: Some(json!([1, 2, 3])), id: json!(2), })); // A notification (with parameters) one(r#"{"jsonrpc": "2.0", "method": "notif", "params": {"x": "y"}}"#, &Message::Notification(Notification { jsonrpc: Version, method: "notif".to_owned(), params: Some(json!({"x": "y"})), })); // A successful response one(r#"{"jsonrpc": "2.0", "result": 42, "id": 3}"#, &Message::Response(Response { jsonrpc: Version, result: Ok(json!(42)), id: json!(3), })); // A successful response one(r#"{"jsonrpc": "2.0", "result": null, "id": 3}"#, &Message::Response(Response { jsonrpc: Version, 
result: Ok(Value::Null), id: json!(3), })); // An error one(r#"{"jsonrpc": "2.0", "error": {"code": 42, "message": "Wrong!"}, "id": null}"#, &Message::Response(Response { jsonrpc: Version, result: Err(RpcError::new(42, "Wrong!".to_owned(), None)), id: Value::Null, })); // A batch one(r#"[ {"jsonrpc": "2.0", "method": "notif"}, {"jsonrpc": "2.0", "method": "call", "id": 42} ]"#, &Message::Batch(vec![ Message::Notification(Notification { jsonrpc: Version, method: "notif".to_owned(), params: None, }), Message::Request(Request { jsonrpc: Version, method: "call".to_owned(), params: None, id: json!(42), }), ])); // Some handling of broken messages inside a batch let parsed = from_str(r#"[ {"jsonrpc": "2.0", "method": "notif"}, {"jsonrpc": "2.0", "method": "call", "id": 42}, true ]"#) .unwrap(); assert_eq!(Message::Batch(vec![ Message::Notification(Notification { jsonrpc: Version, method: "notif".to_owned(), params: None, }), Message::Request(Request { jsonrpc: Version, method: "call".to_owned(), params: None, id: json!(42), }), Message::UnmatchedSub(Value::Bool(true)), ]), parsed); to_vec(&Message::UnmatchedSub(Value::Null)).unwrap_err(); } /// A helper for the `broken` test. /// /// Check that the given JSON string parses, but is not recognized as a valid RPC message. /// Test things that are almost but not entirely JSONRPC are rejected /// /// The reject is done by returning it as Unmatched. 
#[test] fn broken() { // A helper with one test fn one(input: &str) { let msg = from_str(input); match msg { Err(Broken::Unmatched(_)) => (), _ => panic!("{} recognized as an RPC message: {:?}!", input, msg), } } // Missing the version one(r#"{"method": "notif"}"#); // Wrong version one(r#"{"jsonrpc": 2.0, "method": "notif"}"#); // A response with both result and error one(r#"{"jsonrpc": "2.0", "result": 42, "error": {"code": 42, "message": "!"}, "id": 1}"#); // A response without an id one(r#"{"jsonrpc": "2.0", "result": 42}"#); // An extra field one(r#"{"jsonrpc": "2.0", "method": "weird", "params": 42, "others": 43, "id": 2}"#); // Something completely different one(r#"{"x": [1, 2, 3]}"#); match from_str(r#"{]"#) { Err(Broken::SyntaxError(_)) => (), other => panic!("Something unexpected: {:?}", other), }; } /// Test some non-trivial aspects of the constructors /// /// This doesn't have a full coverage, because there's not much to actually test there. /// Most of it is related to the ids. 
#[test] fn constructors() { let msg1 = Message::request("call".to_owned(), Some(json!([1, 2, 3]))); let msg2 = Message::request("call".to_owned(), Some(json!([1, 2, 3]))); // They differ, even when created with the same parameters assert_ne!(msg1, msg2); // And, specifically, they differ in the ID's let (req1, req2) = if let (Message::Request(req1), Message::Request(req2)) = (msg1, msg2) { assert_ne!(req1.id, req2.id); assert!(req1.id.is_string()); assert!(req2.id.is_string()); (req1, req2) } else { panic!("Non-request received"); }; let id1 = req1.id.clone(); // When we answer a message, we get the same ID if let Message::Response(ref resp) = req1.reply(json!([1, 2, 3])) { assert_eq!(*resp, Response { jsonrpc: Version, result: Ok(json!([1, 2, 3])), id: id1, }); } else { panic!("Not a response"); } let id2 = req2.id.clone(); // The same with an error if let Message::Response(ref resp) = req2.error(RpcError::new(42, "Wrong!".to_owned(), None)) { assert_eq!(*resp, Response { jsonrpc: Version, result: Err(RpcError::new(42, "Wrong!".to_owned(), None)), id: id2, }); } else { panic!("Not a response"); } // When we have unmatched, we generate a top-level error with Null id. if let Message::Response(ref resp) = Message::error(RpcError::new(43, "Also wrong!".to_owned(), None)) { assert_eq!(*resp, Response { jsonrpc: Version, result: Err(RpcError::new(43, "Also wrong!".to_owned(), None)), id: Value::Null, }); } else { panic!("Not a response"); } } } misc: unnecesary unreachable code // Copyright 2017 tokio-jsonrpc Developers // // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. //! JSON-RPC 2.0 messages. //! //! The main entrypoint here is the [Message](enum.Message.html). The others are just building //! 
blocks and you should generally work with `Message` instead. use serde::ser::{Serialize, SerializeStruct, Serializer}; use serde::de::{Deserialize, Deserializer, Error, Unexpected}; use serde_json::{Value, to_value}; use uuid::Uuid; #[derive(Debug, Clone, PartialEq, Eq, Hash)] struct Version; impl Serialize for Version { fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> { serializer.serialize_str("2.0") } } impl Deserialize for Version { fn deserialize<D: Deserializer>(deserializer: D) -> Result<Self, D::Error> { // The version is actually a string let parsed: String = Deserialize::deserialize(deserializer)?; if parsed == "2.0" { Ok(Version) } else { Err(D::Error::invalid_value(Unexpected::Str(&parsed), &"value 2.0")) } } } /// An RPC request. #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] #[serde(deny_unknown_fields)] pub struct Request { jsonrpc: Version, pub method: String, #[serde(skip_serializing_if = "Option::is_none")] pub params: Option<Value>, pub id: Value, } impl Request { /// Answer the request with a (positive) reply. /// /// The ID is taken from the request. pub fn reply(&self, reply: Value) -> Message { Message::Response(Response { jsonrpc: Version, result: Ok(reply), id: self.id.clone(), }) } /// Answer the request with an error. pub fn error(&self, error: RpcError) -> Message { Message::Response(Response { jsonrpc: Version, result: Err(error), id: self.id.clone(), }) } } /// An error code. #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] #[serde(deny_unknown_fields)] pub struct RpcError { pub code: i64, pub message: String, #[serde(skip_serializing_if = "Option::is_none")] pub data: Option<Value>, } impl RpcError { /// A generic constructor. /// /// Mostly for completeness, doesn't do anything but filling in the corresponding fields. pub fn new(code: i64, message: String, data: Option<Value>) -> Self { RpcError { code: code, message: message, data: data, } } /// Create an Invalid Param error. 
    pub fn invalid_params(msg: Option<String>) -> Self {
        // -32602 is the code the spec reserves for invalid parameters.
        RpcError::new(-32602, "Invalid params".to_owned(), msg.map(Value::String))
    }
    /// Create a server error.
    pub fn server_error<E: Serialize>(e: Option<E>) -> Self {
        RpcError::new(-32000,
                      "Server error".to_owned(),
                      e.map(|v| to_value(v).expect("Must be representable in JSON")))
    }
    /// Create an invalid request error.
    pub fn invalid_request() -> Self {
        RpcError::new(-32600, "Invalid request".to_owned(), None)
    }
    /// Create a parse error.
    pub fn parse_error(e: String) -> Self {
        RpcError::new(-32700, "Parse error".to_owned(), Some(Value::String(e)))
    }
    /// Create a method not found error.
    pub fn method_not_found(method: String) -> Self {
        RpcError::new(-32601, "Method not found".to_owned(), Some(Value::String(method)))
    }
}

/// A response to an RPC.
///
/// It is created by the methods on [Request](struct.Request.html).
#[derive(Debug, Clone, PartialEq)]
pub struct Response {
    jsonrpc: Version,
    pub result: Result<Value, RpcError>,
    pub id: Value,
}

impl Serialize for Response {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        let mut sub = serializer.serialize_struct("Response", 3)?;
        sub.serialize_field("jsonrpc", &self.jsonrpc)?;
        // A success goes out under the "result" key, a failure under "error";
        // exactly one of the two ever appears on the wire.
        match self.result {
            Ok(ref value) => sub.serialize_field("result", value),
            Err(ref err) => sub.serialize_field("error", err),
        }?;
        sub.serialize_field("id", &self.id)?;
        sub.end()
    }
}

/// Deserializer for `Option<Value>` that produces `Some(Value::Null)`.
///
/// The usual one produces None in that case. But we need to know the difference between
/// `{x: null}` and `{}`.
fn some_value<D: Deserializer>(deserializer: D) -> Result<Option<Value>, D::Error> {
    Deserialize::deserialize(deserializer).map(Some)
}

/// A helper trick for deserialization.
#[derive(Deserialize)] #[serde(deny_unknown_fields)] struct WireResponse { // It is actually used to eat and sanity check the deserialized text #[allow(dead_code)] jsonrpc: Version, // Make sure we accept null as Some(Value::Null), instead of going to None #[serde(default, deserialize_with = "some_value")] result: Option<Value>, error: Option<RpcError>, id: Value, } // Implementing deserialize is hard. We sidestep the difficulty by deserializing a similar // structure that directly corresponds to whatever is on the wire and then convert it to our more // convenient representation. impl Deserialize for Response { #[allow(unreachable_code)] // For that unreachable below fn deserialize<D: Deserializer>(deserializer: D) -> Result<Self, D::Error> { let wr: WireResponse = Deserialize::deserialize(deserializer)?; let result = match (wr.result, wr.error) { (Some(res), None) => Ok(res), (None, Some(err)) => Err(err), _ => { let err = D::Error::custom("Either 'error' or 'result' is expected, but not both"); return Err(err); }, }; Ok(Response { jsonrpc: Version, result: result, id: wr.id, }) } } /// A notification (doesn't expect an answer). #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] #[serde(deny_unknown_fields)] pub struct Notification { jsonrpc: Version, pub method: String, #[serde(skip_serializing_if = "Option::is_none")] pub params: Option<Value>, } /// One message of the JSON RPC protocol. /// /// One message, directly mapped from the structures of the protocol. See the /// [specification](http://www.jsonrpc.org/specification) for more details. /// /// Since the protocol allows one endpoint to be both client and server at the same time, the /// message can decode and encode both directions of the protocol. /// /// The `Batch` variant is supposed to be created directly, without a constructor. /// /// The `UnmatchedSub` variant is used when a request is an array and some of the subrequests /// aren't recognized as valid json rpc 2.0 messages. 
/// This is never returned as a top-level
/// element, it is returned as `Err(Broken::Unmatched)`.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(untagged)]
pub enum Message {
    /// An RPC request.
    Request(Request),
    /// A response to a Request.
    Response(Response),
    /// A notification.
    Notification(Notification),
    /// A batch of more requests or responses.
    ///
    /// The protocol allows bundling multiple requests, notifications or responses to a single
    /// message.
    ///
    /// This variant has no direct constructor and is expected to be constructed manually.
    Batch(Vec<Message>),
    /// An unmatched sub entry in a `Batch`.
    ///
    /// When there's a `Batch` and an element doesn't conform to the JSONRPC 2.0 format, that one
    /// is represented by this. This is never produced as a top-level value when parsing, the
    /// `Err(Broken::Unmatched)` is used instead. It is not possible to serialize.
    #[serde(skip_serializing)]
    UnmatchedSub(Value),
}

impl Message {
    /// A constructor for a request.
    ///
    /// The ID is auto-generated.
    pub fn request(method: String, params: Option<Value>) -> Self {
        Message::Request(Request {
            jsonrpc: Version,
            method: method,
            params: params,
            // A fresh UUID keeps request IDs unique on the connection.
            id: Value::String(Uuid::new_v4().hyphenated().to_string()),
        })
    }
    /// Create a top-level error (without an ID).
    pub fn error(error: RpcError) -> Self {
        Message::Response(Response {
            jsonrpc: Version,
            result: Err(error),
            id: Value::Null,
        })
    }
    /// A constructor for a notification.
    pub fn notification(method: String, params: Option<Value>) -> Self {
        Message::Notification(Notification {
            jsonrpc: Version,
            method: method,
            params: params,
        })
    }
}

/// A broken message.
///
/// Protocol-level errors.
#[derive(Debug, Clone, PartialEq, Deserialize)]
#[serde(untagged)]
pub enum Broken {
    /// It was valid JSON, but doesn't match the form of a JSONRPC 2.0 message.
    Unmatched(Value),
    /// Invalid JSON.
    #[serde(skip_deserializing)]
    SyntaxError(String),
}

impl Broken {
    /// Generate an appropriate error message.
    ///
    /// The error message for these things are specified in the RFC, so this just creates an error
    /// with the right values.
    pub fn reply(&self) -> Message {
        match *self {
            Broken::Unmatched(_) => Message::error(RpcError::invalid_request()),
            Broken::SyntaxError(ref e) => Message::error(RpcError::parse_error(e.clone())),
        }
    }
}

/// A trick to easily deserialize and detect valid JSON, but invalid Message.
#[derive(Deserialize)]
#[serde(untagged)]
enum WireMessage {
    Message(Message),
    Broken(Broken),
}

/// Parsing result: a good message, or a protocol-level failure.
pub type Parsed = Result<Message, Broken>;

/// Read a [Message](enum.Message.html) from a slice.
///
/// Invalid JSON or JSONRPC messages are reported as [Broken](enum.Broken.html).
pub fn from_slice(s: &[u8]) -> Parsed {
    match ::serde_json::de::from_slice(s) {
        // A top-level unmatched value surfaces as Broken, never as a Message.
        Ok(WireMessage::Message(Message::UnmatchedSub(value))) => Err(Broken::Unmatched(value)),
        Ok(WireMessage::Message(m)) => Ok(m),
        Ok(WireMessage::Broken(b)) => Err(b),
        // Other errors can't happen right now, when we have the slice
        Err(e) => Err(Broken::SyntaxError(format!("{}", e))),
    }
}

/// Read a [Message](enum.Message.html) from a string.
///
/// Invalid JSON or JSONRPC messages are reported as [Broken](enum.Broken.html).
pub fn from_str(s: &str) -> Parsed {
    from_slice(s.as_bytes())
}

impl Into<String> for Message {
    // Serializing a Message can't fail, hence the unwrap.
    fn into(self) -> String {
        ::serde_json::ser::to_string(&self).unwrap()
    }
}

impl Into<Vec<u8>> for Message {
    fn into(self) -> Vec<u8> {
        ::serde_json::ser::to_vec(&self).unwrap()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::Value;
    use serde_json::ser::to_vec;
    use serde_json::de::from_slice;

    /// Test serialization and deserialization of the Message
    ///
    /// We first deserialize it from a string. That way we check deserialization works.
    /// But since serialization doesn't have to produce the exact same result (order, spaces, …),
    /// we then serialize and deserialize the thing again and check it matches.
    #[test]
    fn message_serde() {
        // A helper for running one message test
        fn one(input: &str, expected: &Message) {
            let parsed: Message = from_str(input).unwrap();
            assert_eq!(*expected, parsed);
            // Round-trip: serialize what we parsed and parse it back.
            let serialized = to_vec(&parsed).unwrap();
            let deserialized: Message = from_slice(&serialized).unwrap();
            assert_eq!(parsed, deserialized);
        }
        // A request without parameters
        one(r#"{"jsonrpc": "2.0", "method": "call", "id": 1}"#,
            &Message::Request(Request {
                jsonrpc: Version,
                method: "call".to_owned(),
                params: None,
                id: json!(1),
            }));
        // A request with parameters
        one(r#"{"jsonrpc": "2.0", "method": "call", "params": [1, 2, 3], "id": 2}"#,
            &Message::Request(Request {
                jsonrpc: Version,
                method: "call".to_owned(),
                params: Some(json!([1, 2, 3])),
                id: json!(2),
            }));
        // A notification (with parameters)
        one(r#"{"jsonrpc": "2.0", "method": "notif", "params": {"x": "y"}}"#,
            &Message::Notification(Notification {
                jsonrpc: Version,
                method: "notif".to_owned(),
                params: Some(json!({"x": "y"})),
            }));
        // A successful response
        one(r#"{"jsonrpc": "2.0", "result": 42, "id": 3}"#,
            &Message::Response(Response {
                jsonrpc: Version,
                result: Ok(json!(42)),
                id: json!(3),
            }));
        // A successful response carrying an explicit null result
        one(r#"{"jsonrpc": "2.0", "result": null, "id": 3}"#,
            &Message::Response(Response {
                jsonrpc: Version,
                result: Ok(Value::Null),
                id: json!(3),
            }));
        // An error
        one(r#"{"jsonrpc": "2.0", "error": {"code": 42, "message": "Wrong!"}, "id": null}"#,
            &Message::Response(Response {
                jsonrpc: Version,
                result: Err(RpcError::new(42, "Wrong!".to_owned(), None)),
                id: Value::Null,
            }));
        // A batch
        one(r#"[ {"jsonrpc": "2.0", "method": "notif"}, {"jsonrpc": "2.0", "method": "call", "id": 42} ]"#,
            &Message::Batch(vec![
                Message::Notification(Notification {
                    jsonrpc: Version,
                    method: "notif".to_owned(),
                    params: None,
                }),
                Message::Request(Request {
                    jsonrpc: Version,
                    method: "call".to_owned(),
                    params: None,
                    id: json!(42),
                }),
            ]));
        // Some handling of broken messages inside a batch
        let parsed = from_str(r#"[ {"jsonrpc": "2.0", "method":
"notif"}, {"jsonrpc": "2.0", "method": "call", "id": 42}, true ]"#)
            .unwrap();
        // The two well-formed elements parse; the stray `true` becomes UnmatchedSub.
        assert_eq!(Message::Batch(vec![
                       Message::Notification(Notification {
                           jsonrpc: Version,
                           method: "notif".to_owned(),
                           params: None,
                       }),
                       Message::Request(Request {
                           jsonrpc: Version,
                           method: "call".to_owned(),
                           params: None,
                           id: json!(42),
                       }),
                       Message::UnmatchedSub(Value::Bool(true)),
                   ]),
                   parsed);
        // UnmatchedSub refuses to serialize.
        to_vec(&Message::UnmatchedSub(Value::Null)).unwrap_err();
    }

    /// A helper for the `broken` test.
    ///
    /// Check that the given JSON string parses, but is not recognized as a valid RPC message.
    /// Test things that are almost but not entirely JSONRPC are rejected
    ///
    /// The reject is done by returning it as Unmatched.
    #[test]
    fn broken() {
        // A helper with one test
        fn one(input: &str) {
            let msg = from_str(input);
            match msg {
                Err(Broken::Unmatched(_)) => (),
                _ => panic!("{} recognized as an RPC message: {:?}!", input, msg),
            }
        }
        // Missing the version
        one(r#"{"method": "notif"}"#);
        // Wrong version
        one(r#"{"jsonrpc": 2.0, "method": "notif"}"#);
        // A response with both result and error
        one(r#"{"jsonrpc": "2.0", "result": 42, "error": {"code": 42, "message": "!"}, "id": 1}"#);
        // A response without an id
        one(r#"{"jsonrpc": "2.0", "result": 42}"#);
        // An extra field
        one(r#"{"jsonrpc": "2.0", "method": "weird", "params": 42, "others": 43, "id": 2}"#);
        // Something completely different
        one(r#"{"x": [1, 2, 3]}"#);
        // Invalid JSON must come back as SyntaxError, not Unmatched.
        match from_str(r#"{]"#) {
            Err(Broken::SyntaxError(_)) => (),
            other => panic!("Something unexpected: {:?}", other),
        };
    }

    /// Test some non-trivial aspects of the constructors
    ///
    /// This doesn't have a full coverage, because there's not much to actually test there.
    /// Most of it is related to the ids.
#[test] fn constructors() { let msg1 = Message::request("call".to_owned(), Some(json!([1, 2, 3]))); let msg2 = Message::request("call".to_owned(), Some(json!([1, 2, 3]))); // They differ, even when created with the same parameters assert_ne!(msg1, msg2); // And, specifically, they differ in the ID's let (req1, req2) = if let (Message::Request(req1), Message::Request(req2)) = (msg1, msg2) { assert_ne!(req1.id, req2.id); assert!(req1.id.is_string()); assert!(req2.id.is_string()); (req1, req2) } else { panic!("Non-request received"); }; let id1 = req1.id.clone(); // When we answer a message, we get the same ID if let Message::Response(ref resp) = req1.reply(json!([1, 2, 3])) { assert_eq!(*resp, Response { jsonrpc: Version, result: Ok(json!([1, 2, 3])), id: id1, }); } else { panic!("Not a response"); } let id2 = req2.id.clone(); // The same with an error if let Message::Response(ref resp) = req2.error(RpcError::new(42, "Wrong!".to_owned(), None)) { assert_eq!(*resp, Response { jsonrpc: Version, result: Err(RpcError::new(42, "Wrong!".to_owned(), None)), id: id2, }); } else { panic!("Not a response"); } // When we have unmatched, we generate a top-level error with Null id. if let Message::Response(ref resp) = Message::error(RpcError::new(43, "Also wrong!".to_owned(), None)) { assert_eq!(*resp, Response { jsonrpc: Version, result: Err(RpcError::new(43, "Also wrong!".to_owned(), None)), id: Value::Null, }); } else { panic!("Not a response"); } } }
use std::collections::BTreeMap;

/// An immutable message: a UUID, an optional name and a set of key/value data.
///
/// Instances are normally produced through [`Builder`].
#[derive(Clone, Debug)]
pub struct Message {
    uuid: String,
    name: Option<String>,
    data: BTreeMap<String, String>
}

impl Message {
    /// Creates an empty message identified by `uuid`.
    pub fn new(uuid: String) -> Message {
        Message {
            uuid: uuid,
            name: None,
            data: BTreeMap::new()
        }
    }

    /// Returns the message's UUID.
    pub fn uuid(&self) -> &String {
        &self.uuid
    }

    /// Returns the message's name, if one has been set.
    pub fn name(&self) -> Option<&String> {
        self.name.as_ref()
    }

    /// Looks up the value stored under `key`.
    pub fn get(&self, key: &str) -> Option<&String> {
        self.data.get(key)
    }
}

/// A builder for [`Message`] values.
///
/// `build()` takes `&self`, so one builder can produce several messages.
pub struct Builder {
    uuid: String,
    name: Option<String>,
    data: BTreeMap<String, String>
}

impl Builder {
    /// Starts building a message identified by `uuid`.
    pub fn new(uuid: String) -> Builder {
        Builder {
            uuid: uuid,
            name: None,
            data: BTreeMap::new()
        }
    }

    /// Sets the message's name.
    pub fn name(&mut self, name: String) -> &mut Builder {
        self.name = Some(name);
        self
    }

    /// Adds one key/value pair to the message's data.
    pub fn pair(&mut self, key: String, value: String) -> &mut Builder {
        self.data.insert(key, value);
        self
    }

    /// Produces a message from the state gathered so far.
    pub fn build(&self) -> Message {
        Message {
            uuid: self.uuid.clone(),
            name: self.name.clone(),
            data: self.data.clone()
        }
    }
}
extern crate git2; use git2::*; use std::process::Command; use std::path::Path; use std::path::PathBuf; const TMP_NAME: &'static str = "tmp_fd2db5f8_bac2_4a1e_9487_4ac3414788aa"; pub trait RepoHost { fn central(&self) -> &str; fn projects(&self) -> Vec<String>; fn remote_url(&self, &str) -> String; fn fetch_url(&self, module: &str) -> String { self.remote_url(module) } } pub struct Scratch<'a> { repo: Repository, host: &'a RepoHost, } impl<'a> Scratch<'a> { pub fn new(path: &Path, host: &'a RepoHost) -> Scratch<'a> { Scratch { repo: Repository::init_bare(&path).expect("could not init scratch"), host: host, } } fn tracking(&self, module: &str, branch: &str) -> Option<Object> { let remote_name = format!("{}", module); let fetch_url = self.host.fetch_url(&module); let mut remote = if let Ok(remote) = self.repo.find_remote(&remote_name) { remote } else { debug!("==== create remote (remote_name:{}, remote_url:{})", &remote_name, &fetch_url); self.repo.remote(&remote_name, &fetch_url).expect("can't create remote") }; let rs = remote.get_refspec(0).unwrap().str().unwrap().to_string(); remote.fetch(&[&rs], None, None).expect("fetch failed"); return self.repo .revparse_single(&format!("remotes/{}/{}", module, branch)) .ok(); } fn call_git(self: &Scratch<'a>, cmd: &str) -> Result<String, Error> { let args: Vec<&str> = cmd.split(" ").collect(); let repo_path = &self.repo.path(); let mut c = Command::new("git"); c.current_dir(&repo_path); c.env("GIT_DIR", repo_path.as_os_str()); c.args(&args); let output = c.output() .unwrap_or_else(|e| panic!("failed to execute process: {}", e)); Ok(String::from_utf8(output.stderr).expect("cannot decode utf8")) } // force push of the new revision-object to temp repo pub fn transfer(&self, rev: &str, source: &Path) -> Object { // TODO: implement using libgit let target = &self.repo.path(); let shell = Shell { cwd: source.to_path_buf() }; // create tmp branch shell.command(&format!("git branch -f {} {}", TMP_NAME, rev)); // force push 
shell.command(&format!("git push --force {} {}", &target.to_string_lossy(), TMP_NAME)); // delete tmp branch shell.command(&format!("git branch -D {}", TMP_NAME)); let obj = self.repo.revparse_single(rev).expect("can't find transfered ref"); return obj; } // takes everything from base except it's tree and replaces it with the tree // given fn rewrite(&self, base: &Commit, parents: &[&Commit], tree: &Tree) -> Oid { if parents.len() != 0 { self.repo.set_head_detached(parents[0].id()).expect("rewrite: can't detach head"); } self.repo .commit(Some("HEAD"), &base.author(), &base.committer(), &base.message().unwrap_or("no message"), tree, parents) .expect("rewrite: can't commit") } pub fn push(&self, oid: Oid, module: &str, target: &str) -> String { let commit = &self.repo.find_commit(oid).expect("can't find commit"); self.repo.set_head_detached(commit.id()).expect("can't detach HEAD"); let output = self.call_git(&format!("push {} HEAD:{}", self.host.remote_url(module), target)) .expect("can't push"); format!("{}", output) } fn subtree(&self, tree: &Tree, path: &Path) -> Option<Tree> { if let Some(oid) = tree.get_path(path).map(|x| x.id()).ok() { return self.repo.find_tree(oid).ok(); } else { return None; } } fn replace_child(&self, child: &Path, subtree: Oid, full_tree: Tree) -> Tree { let full_tree_id = { let mut builder = self.repo .treebuilder(Some(&full_tree)) .expect("replace_child: can't create treebuilder"); builder.insert(child, subtree, 0o0040000) // GIT_FILEMODE_TREE .expect("replace_child: can't insert tree"); builder.write().expect("replace_child: can't write tree") }; return self.repo.find_tree(full_tree_id).expect("replace_child: can't find new tree"); } fn replace_subtree(&self, path: &Path, subtree: Oid, full_tree: Tree) -> Tree { if path.components().count() == 1 { return self.replace_child(path, subtree, full_tree); } else { let name = Path::new(path.file_name().expect("no module name")); let path = path.parent().expect("module not in subdir"); let st 
= self.subtree(&full_tree, path).unwrap(); let tree = self.replace_child(name, subtree, st); return self.replace_subtree(path, tree.id(), full_tree); } } fn split_subdir(&self, module: &str, newrev: Oid) -> Object { // TODO: implement using libgit let shell = Shell { cwd: self.repo.path().to_path_buf() }; shell.command("rm -Rf refs/original"); shell.command("rm -Rf .git-rewrite"); self.repo.set_head_detached(newrev).expect("can't detatch head");; self.call_git(&format!("filter-branch --subdirectory-filter {}/ -- HEAD", module)) .expect("error in filter-branch"); return self.repo .revparse_single("HEAD") .expect("can't find rewritten branch"); } } pub enum ReviewUploadResult { Uploaded(Oid), RejectNoFF, RejectMerge, NoChanges, Central, } pub fn review_upload(scratch: &Scratch, newrev: Object, module: &str) -> ReviewUploadResult { debug!(".\n\n==== Doing review upload for module {}", &module); let new = newrev.id(); let old = scratch.tracking(&module, "master").expect("no tracking branch 1").id(); if old == new { return ReviewUploadResult::NoChanges; } match scratch.repo.graph_descendant_of(new, old) { Err(_) => return ReviewUploadResult::RejectNoFF, Ok(false) => return ReviewUploadResult::RejectNoFF, Ok(true) => (), } debug!("==== walking commits from {} to {}", old, new); let walk = { let mut walk = scratch.repo.revwalk().expect("walk: can't create revwalk"); walk.set_sorting(SORT_REVERSE | SORT_TIME); let range = format!("{}..{}", old, new); walk.push_range(&range).expect(&format!("walk: invalid range: {}", range));; walk }; let mut current = scratch.tracking(scratch.host.central(), "master").expect("no central tracking").id(); for rev in walk { let rev = rev.expect("walk: invalid rev"); if old == rev { continue; } debug!("==== walking commit {}", rev); let module_commit = scratch.repo .find_commit(rev) .expect("walk: object is not actually a commit"); if module_commit.parents().count() > 1 { // TODO: also do this check on pushes to cenral refs/for/master // TODO: 
invectigate the possibility of allowing merge commits return ReviewUploadResult::RejectMerge; } if module != scratch.host.central() { debug!("==== Rewriting commit {}", rev); let tree = module_commit.tree().expect("walk: commit has no tree"); let parent = scratch.repo.find_commit(current).expect("walk: current object is no commit"); let new_tree = scratch.replace_subtree(Path::new(module), tree.id(), parent.tree() .expect("walk: parent has no tree")); current = scratch.rewrite(&module_commit, &vec![&parent], &new_tree); } } if module != scratch.host.central() { return ReviewUploadResult::Uploaded(current); } else { return ReviewUploadResult::Central; } } pub fn project_created(scratch: &Scratch) { if let Some(rev) = scratch.tracking(scratch.host.central(), "master") { central_submit(scratch, rev); } } pub fn central_submit(scratch: &Scratch, newrev: Object) { debug!(" ---> central_submit (sha1 of commit: {})", &newrev.id()); let central_commit = newrev.as_commit().expect("could not get commit from obj"); let central_tree = central_commit.tree().expect("commit has no tree"); for module in scratch.host.projects() { if module == scratch.host.central() { continue; } debug!(""); debug!("==== fetching tracking branch for module: {}", &module); let module_master_commit_obj = match scratch.tracking(&module, "master") { Some(obj) => obj, None => { debug!("==== no tracking branch for module {} => project does not exist or is \ empty", &module); debug!("==== initializing with subdir history"); let commit = scratch.split_subdir(&module, newrev.id()); scratch.push(commit.id(), &module, "refs/heads/master"); scratch.tracking(&module, "master").expect("no tracking branch 3") } }; let parents = vec![module_master_commit_obj.as_commit() .expect("could not get commit from obj")]; debug!("==== checking for changes in module: {:?}", module); // new tree is sub-tree of complete central tree let old_tree_id = if let Ok(tree) = parents[0].tree() { tree.id() } else { 
Oid::from_str("0000000000000000000000000000000000000000").unwrap() }; let new_tree_id = if let Ok(tree_entry) = central_tree.get_path(&Path::new(&module)) { tree_entry.id() } else { Oid::from_str("0000000000000000000000000000000000000000").unwrap() }; // if sha1's are equal the content is equal if new_tree_id != old_tree_id && !new_tree_id.is_zero() { let new_tree = scratch.repo.find_tree(new_tree_id).expect("central_submit: can't find tree"); debug!("==== commit changes module => make commit on module"); let module_commit = scratch.rewrite(central_commit, &parents, &new_tree); let output = scratch.push(module_commit, &module, "master"); debug!("{}",output); } else { debug!("==== commit does not change module => skipping"); } } } pub fn find_repos(root: &Path, path: &Path, mut repos: Vec<String>) -> Vec<String> { if let Ok(children) = path.read_dir() { for child in children { let path = child.unwrap().path(); let name = format!("{}", &path.to_str().unwrap()); if let Some(last) = path.extension() { if last == "git" { repos.push(name.trim_right_matches(".git") .trim_left_matches(root.to_str().unwrap()) .trim_left_matches("/") .to_string()); continue; } } repos = find_repos(root, &path, repos); } } return repos; } pub struct Shell { pub cwd: PathBuf, } impl Shell { pub fn command(&self, cmd: &str) -> String { debug!("Shell::command: {}", cmd); let output = Command::new("sh") .current_dir(&self.cwd) .arg("-c") .arg(&cmd) .output() .unwrap_or_else(|e| panic!("failed to execute process: {}", e)); return String::from_utf8(output.stdout).expect("failed to decode utf8").trim().to_string(); } } better debug output extern crate git2; use git2::*; use std::process::Command; use std::path::Path; use std::path::PathBuf; const TMP_NAME: &'static str = "tmp_fd2db5f8_bac2_4a1e_9487_4ac3414788aa"; pub trait RepoHost { fn central(&self) -> &str; fn projects(&self) -> Vec<String>; fn remote_url(&self, &str) -> String; fn fetch_url(&self, module: &str) -> String { 
self.remote_url(module) } } pub struct Scratch<'a> { repo: Repository, host: &'a RepoHost, } impl<'a> Scratch<'a> { pub fn new(path: &Path, host: &'a RepoHost) -> Scratch<'a> { Scratch { repo: Repository::init_bare(&path).expect("could not init scratch"), host: host, } } fn tracking(&self, module: &str, branch: &str) -> Option<Object> { let remote_name = format!("{}", module); let fetch_url = self.host.fetch_url(&module); let mut remote = if let Ok(remote) = self.repo.find_remote(&remote_name) { remote } else { debug!("==== create remote (remote_name:{}, remote_url:{})", &remote_name, &fetch_url); self.repo.remote(&remote_name, &fetch_url).expect("can't create remote") }; let rs = remote.get_refspec(0).unwrap().str().unwrap().to_string(); remote.fetch(&[&rs], None, None).expect("fetch failed"); return self.repo .revparse_single(&format!("remotes/{}/{}", module, branch)) .ok(); } fn call_git(self: &Scratch<'a>, cmd: &str) -> Result<String, Error> { let args: Vec<&str> = cmd.split(" ").collect(); let repo_path = &self.repo.path(); let mut c = Command::new("git"); c.current_dir(&repo_path); c.env("GIT_DIR", repo_path.as_os_str()); c.args(&args); let output = c.output() .unwrap_or_else(|e| panic!("failed to execute process: {}", e)); Ok(String::from_utf8(output.stderr).expect("cannot decode utf8")) } // force push of the new revision-object to temp repo pub fn transfer(&self, rev: &str, source: &Path) -> Object { // TODO: implement using libgit let target = &self.repo.path(); let shell = Shell { cwd: source.to_path_buf() }; // create tmp branch shell.command(&format!("git branch -f {} {}", TMP_NAME, rev)); // force push shell.command(&format!("git push --force {} {}", &target.to_string_lossy(), TMP_NAME)); // delete tmp branch shell.command(&format!("git branch -D {}", TMP_NAME)); let obj = self.repo.revparse_single(rev).expect("can't find transfered ref"); return obj; } // takes everything from base except it's tree and replaces it with the tree // given fn 
rewrite(&self, base: &Commit, parents: &[&Commit], tree: &Tree) -> Oid { if parents.len() != 0 { self.repo.set_head_detached(parents[0].id()).expect("rewrite: can't detach head"); } self.repo .commit(Some("HEAD"), &base.author(), &base.committer(), &base.message().unwrap_or("no message"), tree, parents) .expect("rewrite: can't commit") } pub fn push(&self, oid: Oid, module: &str, target: &str) -> String { let commit = &self.repo.find_commit(oid).expect("can't find commit"); self.repo.set_head_detached(commit.id()).expect("can't detach HEAD"); let cmd = format!("push {} HEAD:{}", self.host.remote_url(module), target); let output = self.call_git(&cmd) .expect("can't push"); debug!("push: {}\n{}", cmd, output); format!("{}", output) } fn subtree(&self, tree: &Tree, path: &Path) -> Option<Tree> { if let Some(oid) = tree.get_path(path).map(|x| x.id()).ok() { return self.repo.find_tree(oid).ok(); } else { return None; } } fn replace_child(&self, child: &Path, subtree: Oid, full_tree: Tree) -> Tree { let full_tree_id = { let mut builder = self.repo .treebuilder(Some(&full_tree)) .expect("replace_child: can't create treebuilder"); builder.insert(child, subtree, 0o0040000) // GIT_FILEMODE_TREE .expect("replace_child: can't insert tree"); builder.write().expect("replace_child: can't write tree") }; return self.repo.find_tree(full_tree_id).expect("replace_child: can't find new tree"); } fn replace_subtree(&self, path: &Path, subtree: Oid, full_tree: Tree) -> Tree { if path.components().count() == 1 { return self.replace_child(path, subtree, full_tree); } else { let name = Path::new(path.file_name().expect("no module name")); let path = path.parent().expect("module not in subdir"); let st = self.subtree(&full_tree, path).unwrap(); let tree = self.replace_child(name, subtree, st); return self.replace_subtree(path, tree.id(), full_tree); } } fn split_subdir(&self, module: &str, newrev: Oid) -> Object { // TODO: implement using libgit let shell = Shell { cwd: 
self.repo.path().to_path_buf() }; shell.command("rm -Rf refs/original"); shell.command("rm -Rf .git-rewrite"); self.repo.set_head_detached(newrev).expect("can't detatch head");; self.call_git(&format!("filter-branch --subdirectory-filter {}/ -- HEAD", module)) .expect("error in filter-branch"); return self.repo .revparse_single("HEAD") .expect("can't find rewritten branch"); } } pub enum ReviewUploadResult { Uploaded(Oid), RejectNoFF, RejectMerge, NoChanges, Central, } pub fn review_upload(scratch: &Scratch, newrev: Object, module: &str) -> ReviewUploadResult { debug!(".\n\n==== Doing review upload for module {}", &module); let new = newrev.id(); let old = scratch.tracking(&module, "master").expect("no tracking branch 1").id(); if old == new { return ReviewUploadResult::NoChanges; } match scratch.repo.graph_descendant_of(new, old) { Err(_) => return ReviewUploadResult::RejectNoFF, Ok(false) => return ReviewUploadResult::RejectNoFF, Ok(true) => (), } debug!("==== walking commits from {} to {}", old, new); let walk = { let mut walk = scratch.repo.revwalk().expect("walk: can't create revwalk"); walk.set_sorting(SORT_REVERSE | SORT_TIME); let range = format!("{}..{}", old, new); walk.push_range(&range).expect(&format!("walk: invalid range: {}", range));; walk }; let mut current = scratch.tracking(scratch.host.central(), "master").expect("no central tracking").id(); for rev in walk { let rev = rev.expect("walk: invalid rev"); if old == rev { continue; } debug!("==== walking commit {}", rev); let module_commit = scratch.repo .find_commit(rev) .expect("walk: object is not actually a commit"); if module_commit.parents().count() > 1 { // TODO: also do this check on pushes to cenral refs/for/master // TODO: invectigate the possibility of allowing merge commits return ReviewUploadResult::RejectMerge; } if module != scratch.host.central() { debug!("==== Rewriting commit {}", rev); let tree = module_commit.tree().expect("walk: commit has no tree"); let parent = 
scratch.repo.find_commit(current).expect("walk: current object is no commit"); let new_tree = scratch.replace_subtree(Path::new(module), tree.id(), parent.tree() .expect("walk: parent has no tree")); current = scratch.rewrite(&module_commit, &vec![&parent], &new_tree); } } if module != scratch.host.central() { return ReviewUploadResult::Uploaded(current); } else { return ReviewUploadResult::Central; } } pub fn project_created(scratch: &Scratch) { if let Some(rev) = scratch.tracking(scratch.host.central(), "master") { central_submit(scratch, rev); } } pub fn central_submit(scratch: &Scratch, newrev: Object) { debug!(" ---> central_submit (sha1 of commit: {})", &newrev.id()); let central_commit = newrev.as_commit().expect("could not get commit from obj"); let central_tree = central_commit.tree().expect("commit has no tree"); for module in scratch.host.projects() { if module == scratch.host.central() { continue; } debug!(""); debug!("==== fetching tracking branch for module: {}", &module); let module_master_commit_obj = match scratch.tracking(&module, "master") { Some(obj) => obj, None => { debug!("==== no tracking branch for module {} => project does not exist or is \ empty", &module); debug!("==== initializing with subdir history"); let commit = scratch.split_subdir(&module, newrev.id()); scratch.push(commit.id(), &module, "refs/heads/master"); scratch.tracking(&module, "master") .expect(&format!("no tracking branch for module: {}",module)) } }; let parents = vec![module_master_commit_obj.as_commit() .expect("could not get commit from obj")]; debug!("==== checking for changes in module: {:?}", module); // new tree is sub-tree of complete central tree let old_tree_id = if let Ok(tree) = parents[0].tree() { tree.id() } else { Oid::from_str("0000000000000000000000000000000000000000").unwrap() }; let new_tree_id = if let Ok(tree_entry) = central_tree.get_path(&Path::new(&module)) { tree_entry.id() } else { 
Oid::from_str("0000000000000000000000000000000000000000").unwrap() }; // if sha1's are equal the content is equal if new_tree_id != old_tree_id && !new_tree_id.is_zero() { let new_tree = scratch.repo.find_tree(new_tree_id).expect("central_submit: can't find tree"); debug!("==== commit changes module => make commit on module"); let module_commit = scratch.rewrite(central_commit, &parents, &new_tree); let output = scratch.push(module_commit, &module, "master"); debug!("{}",output); } else { debug!("==== commit does not change module => skipping"); } } } pub fn find_repos(root: &Path, path: &Path, mut repos: Vec<String>) -> Vec<String> { if let Ok(children) = path.read_dir() { for child in children { let path = child.unwrap().path(); let name = format!("{}", &path.to_str().unwrap()); if let Some(last) = path.extension() { if last == "git" { repos.push(name.trim_right_matches(".git") .trim_left_matches(root.to_str().unwrap()) .trim_left_matches("/") .to_string()); continue; } } repos = find_repos(root, &path, repos); } } return repos; } pub struct Shell { pub cwd: PathBuf, } impl Shell { pub fn command(&self, cmd: &str) -> String { debug!("Shell::command: {}", cmd); let output = Command::new("sh") .current_dir(&self.cwd) .arg("-c") .arg(&cmd) .output() .unwrap_or_else(|e| panic!("failed to execute process: {}", e)); return String::from_utf8(output.stdout).expect("failed to decode utf8").trim().to_string(); } }
// Integration with Musashi extern crate libc; // Register enum copied from Musashi's m68k_register_t enum #[repr(C)] #[derive(Copy, Clone, Debug, PartialEq)] #[allow(dead_code)] pub enum Register { /* Real registers */ D0, /* Data registers */ D1, D2, D3, D4, D5, D6, D7, A0, /* Address registers */ A1, A2, A3, A4, A5, A6, A7, PC, /* Program Counter */ SR, /* Status Register */ SP, /* The current Stack Pointer (located in A7) */ USP, /* User Stack Pointer */ ISP, /* Interrupt Stack Pointer */ MSP, /* Master Stack Pointer */ SFC, /* Source Function Code */ DFC, /* Destination Function Code */ VBR, /* Vector Base Register */ CACR, /* Cache Control Register */ CAAR, /* Cache Address Register */ /* Assumed registers */ /* These are cheat registers which emulate the 1-longword prefetch * present in the 68000 and 68010. */ PrefAddr, /* Last prefetch address */ PrefData, /* Last prefetch data */ /* Convenience registers */ PPC, /* Previous value in the program counter */ IR, /* Instruction register */ CpuType /* Type of CPU being run */ } #[repr(C)] #[derive(Copy, Clone)] #[allow(dead_code)] enum CpuType { Invalid, M68000, M68010, M68EC020, M68020, M68030, /* Supported by disassembler ONLY */ M68040 /* Supported by disassembler ONLY */ } #[link(name = "musashi", kind = "static")] extern { fn m68k_init(); fn m68k_set_cpu_type(cputype: CpuType); fn m68k_pulse_reset(); fn m68k_execute(num_cycles: i32) -> i32; fn m68k_get_reg(context: *mut libc::c_void, regnum: Register) -> u32; fn m68k_set_reg(regnum: Register, value: u32); } use ram::{Operation, AddressBus, AddressSpace, SUPERVISOR_PROGRAM, SUPERVISOR_DATA, USER_PROGRAM, USER_DATA}; static mut musashi_memory: [u8; 1024] = [0xff; 1024]; // as statics are not allowed to have destructors, allocate a // big enough array to hold the small number of operations // expected from executing a very limited number of opcodes static mut musashi_ops: [Operation; 128] = [Operation::None; 128]; static mut musashi_opcount: usize = 0; static 
mut musashi_address_space: AddressSpace = SUPERVISOR_PROGRAM; unsafe fn register_op(op: Operation) { if musashi_opcount < musashi_ops.len() { musashi_ops[musashi_opcount] = op; musashi_opcount += 1; } } // callbacks from Musashi #[no_mangle] pub extern fn cpu_read_byte(address: u32) -> u32 { unsafe { let addr = address as usize; let value = musashi_memory[addr]; let op = Operation::ReadByte(musashi_address_space, address, value); register_op(op); value as u32 } } #[no_mangle] pub extern fn cpu_read_word(address: u32) -> u32 { unsafe { let addr = address as usize; let value = (musashi_memory[addr+0] as u16) << 8 |(musashi_memory[addr+1] as u16) << 0; let op = Operation::ReadWord(musashi_address_space, address, value); register_op(op); value as u32 } } #[no_mangle] pub extern fn cpu_read_long(address: u32) -> u32 { unsafe { let addr = address as usize; let value = ((musashi_memory[addr+0] as u32) << 24 |(musashi_memory[addr+1] as u32) << 16 |(musashi_memory[addr+2] as u32) << 8 |(musashi_memory[addr+3] as u32) << 0) as u32; let op = Operation::ReadLong(musashi_address_space, address, value); register_op(op); value } } #[no_mangle] pub extern fn cpu_write_byte(address: u32, value: u32) { unsafe { let op = Operation::WriteByte(musashi_address_space, address, value); let address = address as usize; register_op(op); musashi_memory[address+0] = (value & 0xff) as u8; } } #[no_mangle] pub extern fn cpu_write_word(address: u32, value: u32) { unsafe { let op = Operation::WriteWord(musashi_address_space, address, value); let address = address as usize; register_op(op); musashi_memory[address+0] = (value & 0xff00 >> 8) as u8; musashi_memory[address+1] = (value & 0x00ff >> 0) as u8; } } #[no_mangle] pub extern fn cpu_write_long(address: u32, value: u32) { unsafe { let op = Operation::WriteLong(musashi_address_space, address, value); let address = address as usize; register_op(op); musashi_memory[address+0] = (value & 0xff000000 >> 24) as u8; musashi_memory[address+1] = (value & 
0x00ff0000 >> 16) as u8; musashi_memory[address+2] = (value & 0x0000ff00 >> 8) as u8; musashi_memory[address+3] = (value & 0x000000ff >> 0) as u8; } } #[no_mangle] pub extern fn cpu_pulse_reset() {panic!("pr")} #[no_mangle] pub extern fn cpu_long_branch() {} #[no_mangle] pub extern fn cpu_set_fc(fc: u32) { unsafe { musashi_address_space = match fc { 1 => USER_DATA, 2 => USER_PROGRAM, 5 => SUPERVISOR_DATA, 6 => SUPERVISOR_PROGRAM, _ => panic!("unknown fc: {}", fc), }; // println!("set_fc {:?}", musashi_address_space); } } #[allow(unused_variables)] #[no_mangle] pub extern fn cpu_irq_ack(level: i32) -> i32 {panic!("ia")} #[no_mangle] pub extern fn cpu_instr_callback() {} use std::ptr; pub fn experimental_communication() { unsafe { m68k_init(); m68k_set_cpu_type(CpuType::M68000); m68k_set_reg(Register::D0, 123); println!("D0: {}", m68k_get_reg(ptr::null_mut(), Register::D0)); } } pub fn roundtrip_register(reg: Register, value: u32) -> u32 { unsafe { m68k_init(); m68k_set_cpu_type(CpuType::M68000); m68k_set_reg(reg, value); m68k_get_reg(ptr::null_mut(), reg) } } use cpu::Core; static REGS:[Register; 16] = [Register::D0, Register::D1, Register::D2, Register::D3, Register::D4, Register::D5, Register::D6, Register::D7, Register::A0, Register::A1, Register::A2, Register::A3, Register::A4, Register::A5, Register::A6, Register::A7]; pub fn initialize_musashi(core: &mut Core) { unsafe { m68k_init(); m68k_set_cpu_type(CpuType::M68000); m68k_pulse_reset(); // Resetting opcount, because m68k_pulse_reset causes irrelevant // reads from 0x00000000 to set PC/SP, a jump to PC and // resetting of state. But we don't want to test those ops. 
musashi_opcount = 0; m68k_set_reg(Register::PC, core.pc); m68k_set_reg(Register::USP, core.inactive_usp); m68k_set_reg(Register::SR, core.status_register()); for (i, &reg) in REGS.iter().enumerate() { m68k_set_reg(reg, core.dar[i]); } for i in 0..1024usize { musashi_memory[i] = core.mem.read_byte(SUPERVISOR_PROGRAM, i as u32) as u8; } } } pub fn execute1(core: &mut Core) { unsafe { m68k_execute(1); for (i, &reg) in REGS.iter().enumerate() { core.dar[i] = m68k_get_reg(ptr::null_mut(), reg); } core.pc = m68k_get_reg(ptr::null_mut(), Register::PC); core.inactive_usp = m68k_get_reg(ptr::null_mut(), Register::USP); core.sr_to_flags(m68k_get_reg(ptr::null_mut(), Register::SR)); } } pub fn reset_and_execute1(core: &mut Core) { initialize_musashi(core); execute1(core); } extern crate quickcheck; #[cfg(test)] mod tests { use super::*; use ram::SUPERVISOR_PROGRAM; use super::musashi_ops; use super::musashi_opcount; use ram::Operation; use cpu::Core; use musashi::quickcheck::*; #[derive(Copy, Clone, Debug, PartialEq)] struct Bitpattern(u32); impl Arbitrary for Bitpattern { fn arbitrary<G: Gen>(g: &mut G) -> Bitpattern { // let m : u32 = Arbitrary::arbitrary(g); // let mut mask: u32 = 0xF; //((m & 0xF) | (m >> 4) & 0xF) as u32; // let mut i : u32 = Arbitrary::arbitrary(g); // let mut sum: u32 = 0; // println!("{}/{} when {}", i, mask, g.size()); // // 0b11001100 => 0xFF00FF00 // while i > 0 { // sum += if i & 1 == 1 { mask } else { 0 }; // i >>= 1; // mask <<= 4; // } // when size 256, could generate any 32 bit pattern let i1: u32 = Arbitrary::arbitrary(g); let i2: u32 = Arbitrary::arbitrary(g); let i3: u32 = Arbitrary::arbitrary(g); let i4: u32 = Arbitrary::arbitrary(g); let sum: u32 = (i1 << 24) | (i2 << 16) | (i3 << 8) | i4; // println!("{:b} when {}", i4, g.size()); Bitpattern(sum) } fn shrink(&self) -> Box<Iterator<Item=Self>> { match *self { Bitpattern(x) => { let xs = x.shrink(); // should shrink Bitpattern by clearing bits, not setting new ones let tagged = xs 
//.inspect(|x|println!("{}", x)) .map(Bitpattern); Box::new(tagged) } } } } impl Arbitrary for Register { fn arbitrary<G: Gen>(g: &mut G) -> Register { let regs = [Register::D0, Register::D1, Register::D2, Register::D3, Register::D4, Register::D5, Register::D6, Register::D7, Register::A0, Register::A1, Register::A2, Register::A3, Register::A4, Register::A5, Register::A6, // Register::A7, Register::SP, Register::SR, Register::PC ]; //println!("{}",i); if let Some(&reg) = g.choose(&regs) { reg } else { unreachable!(); } } } extern crate rand; use itertools::{Itertools, assert_equal}; // struct OpSeq { // mask: u32, // matching: u32, // current_op: u32, // } // impl OpSeq { // fn new(mask: u32, matching: u32) -> OpSeq { // OpSeq { mask: mask, matching: matching, current_op: 0 } // } // } // impl Iterator for OpSeq { // type Item = u32; // fn next(&mut self) -> Option<u32> { // if self.current_op == 0x10000 { // None // } else { // while (self.current_op & self.mask) != self.matching && self.current_op < 0x10000 { // self.current_op += 1; // } // if self.current_op == 0x10000 { // return None; // } // let res = Some(self.current_op); // self.current_op += 1; // res // } // } // } fn opcodes(mask: u32, matching: u32) -> Vec<u16> { (0..0x10000u32) .filter(|opcode| (opcode & mask) == matching) .map(|v|v as u16).collect::<Vec<u16>>() } macro_rules! 
opcodes { ($mask:expr , $matching:expr) => {(0..0x10000).filter(|opcode| (opcode & $mask) == $matching)} } #[test] fn opcodes_from_mask_and_matching(){ let mut opseq = Vec::new(); opseq.extend(opcodes!(0xf1f8, 0xc100)); assert_eq!(64, opseq.len()); let ops = opseq.iter().unique(); assert_eq!(64, ops.count()); if let Some(&min) = opseq.iter().min() { assert_eq!(0b1100000100000000, min); } if let Some(&max) = opseq.iter().max() { assert_eq!(0b1100111100000111, max); } } static mut opcode_under_test: u16 = 0; fn hammer_cores(rs: Vec<(Register, Bitpattern)>) -> bool { let pc = 0x40; let mem = unsafe { [((opcode_under_test >> 8) & 0xff) as u8, (opcode_under_test & 0xff) as u8] }; let mut musashi = Core::new_mem(pc, &mem); for r in rs { match r { (Register::D0, Bitpattern(bp)) => musashi.dar[0] = bp, (Register::D1, Bitpattern(bp)) => musashi.dar[1] = bp, (Register::D2, Bitpattern(bp)) => musashi.dar[2] = bp, (Register::D3, Bitpattern(bp)) => musashi.dar[3] = bp, (Register::D4, Bitpattern(bp)) => musashi.dar[4] = bp, (Register::D5, Bitpattern(bp)) => musashi.dar[5] = bp, (Register::D6, Bitpattern(bp)) => musashi.dar[6] = bp, (Register::D7, Bitpattern(bp)) => musashi.dar[7] = bp, (Register::A0, Bitpattern(bp)) => musashi.dar[0+8] = bp, (Register::A1, Bitpattern(bp)) => musashi.dar[1+8] = bp, (Register::A2, Bitpattern(bp)) => musashi.dar[2+8] = bp, (Register::A3, Bitpattern(bp)) => musashi.dar[3+8] = bp, (Register::A4, Bitpattern(bp)) => musashi.dar[4+8] = bp, (Register::A5, Bitpattern(bp)) => musashi.dar[5+8] = bp, (Register::A6, Bitpattern(bp)) => musashi.dar[6+8] = bp, (Register::A7, Bitpattern(bp)) => musashi.dar[7+8] = bp, (Register::USP, Bitpattern(bp)) => musashi.inactive_usp = bp, (Register::SR, Bitpattern(bp)) => musashi.sr_to_flags(bp), _ => { panic!("No idea how to set {:?}", r.0) }, } } let mut r68k = musashi.clone(); // so very self-aware! 
reset_and_execute1(&mut musashi); r68k.execute1(); assert_cores_equal(&musashi, &r68k) } #[test] #[ignore] fn test_core_with_quickcheck() { for opcode in opcodes(0xf1f8, 0xc100) { println!("Will hammer {:b}", opcode); unsafe { opcode_under_test = opcode; } QuickCheck::new() .gen(StdGen::new(rand::thread_rng(), 256)) .tests(10) .quickcheck(hammer_cores as fn(Vec<(Register, Bitpattern)>) -> bool); } } fn get_ops() -> Vec<Operation> { let mut res: Vec<Operation> = vec![]; unsafe { for i in 0..musashi_opcount { res.push(musashi_ops[i]); } } res } macro_rules! core_eq { ($left:ident , $right:ident . $field:ident [ $index:expr ]) => ({ match (&($left.$field[$index]), &($right.$field[$index])) { (left_val, right_val) => { if !(*left_val == *right_val) { println!("core incoherence: `{}[{}]` differs \ ({}: `0x{:x}`, {}: `0x{:x}`)", stringify!($field), $index, stringify!($left), left_val, stringify!($right), right_val); return false; } } } }); ($left:ident , $right:ident . $field:ident () ?) => ({ match (&($left.$field()), &($right.$field())) { (left_val, right_val) => { if !(*left_val == *right_val) { println!("core incoherence: `{}()` differs \ ({}: `{:?}`, {}: `{:?}`)", stringify!($field), stringify!($left), left_val, stringify!($right), right_val); return false; } } } }); ($left:ident , $right:ident . $field:ident ()) => ({ match (&($left.$field()), &($right.$field())) { (left_val, right_val) => { if !(*left_val == *right_val) { println!("core incoherence: `{}()` differs \ ({}: `0x{:x}`, {}: `0x{:x}`)", stringify!($field), stringify!($left), left_val, stringify!($right), right_val); return false; } } } }); ($left:ident , $right:ident . 
$field:ident) => ({ match (&($left.$field), &($right.$field)) { (left_val, right_val) => { if !(*left_val == *right_val) { println!("core incoherence: `{}` differs \ ({}: `0x{:x}`, {}: `0x{:x}`)", stringify!($field), stringify!($left), left_val, stringify!($right), right_val); return false; } } } }) } fn assert_cores_equal(musashi: &Core, r68k: &Core) -> bool { // check memory accesses match up assert_equal(get_ops(), r68k.mem.logger.ops()); core_eq!(musashi, r68k.pc); core_eq!(musashi, r68k.inactive_usp); for i in (0..16).rev() { core_eq!(musashi, r68k.dar[i]); } core_eq!(musashi, r68k.flags() ?); core_eq!(musashi, r68k.status_register()); true } #[test] fn roundtrip_d0() { assert_eq!(256, roundtrip_register(Register::D0, 256)); } #[test] fn roundtrip_abcd_rr() { let pc = 0x40; let mut cpu = Core::new_mem(pc, &[0xc1, 0x01, 0x00, 0x00]); cpu.dar[0] = 0x17; cpu.dar[1] = 0x27; cpu.dar[5] = 0x55555; reset_and_execute1(&mut cpu); // 17 + 27 is 44 assert_eq!(0x44, cpu.dar[0]); assert_eq!(0x27, cpu.dar[1]); assert_eq!(0x55555, cpu.dar[5]); let ops = get_ops(); assert_eq!(1, ops.len()); assert_eq!(Operation::ReadLong(SUPERVISOR_PROGRAM, pc, 0xc1010000), ops[0]); } #[test] fn compare_abcd_rr() { let pc = 0x40; let mut musashi = Core::new_mem(pc, &[0xc3, 0x00]); musashi.dar[0] = 0x16; musashi.dar[1] = 0x26; let mut r68k = musashi.clone(); // so very self-aware! reset_and_execute1(&mut musashi); r68k.execute1(); assert_eq!(0x42, r68k.dar[1]); assert_cores_equal(&musashi, &r68k); } #[test] fn run_abcd_rr_twice() { let pc = 0x40; let mut musashi = Core::new_mem(pc, &[0xc3, 0x00, 0xc3, 0x02]); musashi.dar[0] = 0x16; musashi.dar[1] = 0x26; musashi.dar[2] = 0x31; let mut r68k = musashi.clone(); // so very self-aware! 
initialize_musashi(&mut musashi); execute1(&mut musashi); r68k.execute1(); assert_eq!(0x42, musashi.dar[1]); assert_eq!(0x42, r68k.dar[1]); execute1(&mut musashi); r68k.execute1(); assert_eq!(0x73, musashi.dar[1]); assert_eq!(0x73, r68k.dar[1]); assert_cores_equal(&musashi, &r68k); } } Clarify which instructions are tested Also mention we are now executing two instructions in a single test // Integration with Musashi extern crate libc; // Register enum copied from Musashi's m68k_register_t enum #[repr(C)] #[derive(Copy, Clone, Debug, PartialEq)] #[allow(dead_code)] pub enum Register { /* Real registers */ D0, /* Data registers */ D1, D2, D3, D4, D5, D6, D7, A0, /* Address registers */ A1, A2, A3, A4, A5, A6, A7, PC, /* Program Counter */ SR, /* Status Register */ SP, /* The current Stack Pointer (located in A7) */ USP, /* User Stack Pointer */ ISP, /* Interrupt Stack Pointer */ MSP, /* Master Stack Pointer */ SFC, /* Source Function Code */ DFC, /* Destination Function Code */ VBR, /* Vector Base Register */ CACR, /* Cache Control Register */ CAAR, /* Cache Address Register */ /* Assumed registers */ /* These are cheat registers which emulate the 1-longword prefetch * present in the 68000 and 68010. 
*/ PrefAddr, /* Last prefetch address */ PrefData, /* Last prefetch data */ /* Convenience registers */ PPC, /* Previous value in the program counter */ IR, /* Instruction register */ CpuType /* Type of CPU being run */ } #[repr(C)] #[derive(Copy, Clone)] #[allow(dead_code)] enum CpuType { Invalid, M68000, M68010, M68EC020, M68020, M68030, /* Supported by disassembler ONLY */ M68040 /* Supported by disassembler ONLY */ } #[link(name = "musashi", kind = "static")] extern { fn m68k_init(); fn m68k_set_cpu_type(cputype: CpuType); fn m68k_pulse_reset(); fn m68k_execute(num_cycles: i32) -> i32; fn m68k_get_reg(context: *mut libc::c_void, regnum: Register) -> u32; fn m68k_set_reg(regnum: Register, value: u32); } use ram::{Operation, AddressBus, AddressSpace, SUPERVISOR_PROGRAM, SUPERVISOR_DATA, USER_PROGRAM, USER_DATA}; static mut musashi_memory: [u8; 1024] = [0xff; 1024]; // as statics are not allowed to have destructors, allocate a // big enough array to hold the small number of operations // expected from executing a very limited number of opcodes static mut musashi_ops: [Operation; 128] = [Operation::None; 128]; static mut musashi_opcount: usize = 0; static mut musashi_address_space: AddressSpace = SUPERVISOR_PROGRAM; unsafe fn register_op(op: Operation) { if musashi_opcount < musashi_ops.len() { musashi_ops[musashi_opcount] = op; musashi_opcount += 1; } } // callbacks from Musashi #[no_mangle] pub extern fn cpu_read_byte(address: u32) -> u32 { unsafe { let addr = address as usize; let value = musashi_memory[addr]; let op = Operation::ReadByte(musashi_address_space, address, value); register_op(op); value as u32 } } #[no_mangle] pub extern fn cpu_read_word(address: u32) -> u32 { unsafe { let addr = address as usize; let value = (musashi_memory[addr+0] as u16) << 8 |(musashi_memory[addr+1] as u16) << 0; let op = Operation::ReadWord(musashi_address_space, address, value); register_op(op); value as u32 } } #[no_mangle] pub extern fn cpu_read_long(address: u32) -> u32 { 
unsafe { let addr = address as usize; let value = ((musashi_memory[addr+0] as u32) << 24 |(musashi_memory[addr+1] as u32) << 16 |(musashi_memory[addr+2] as u32) << 8 |(musashi_memory[addr+3] as u32) << 0) as u32; let op = Operation::ReadLong(musashi_address_space, address, value); register_op(op); value } } #[no_mangle] pub extern fn cpu_write_byte(address: u32, value: u32) { unsafe { let op = Operation::WriteByte(musashi_address_space, address, value); let address = address as usize; register_op(op); musashi_memory[address+0] = (value & 0xff) as u8; } } #[no_mangle] pub extern fn cpu_write_word(address: u32, value: u32) { unsafe { let op = Operation::WriteWord(musashi_address_space, address, value); let address = address as usize; register_op(op); musashi_memory[address+0] = (value & 0xff00 >> 8) as u8; musashi_memory[address+1] = (value & 0x00ff >> 0) as u8; } } #[no_mangle] pub extern fn cpu_write_long(address: u32, value: u32) { unsafe { let op = Operation::WriteLong(musashi_address_space, address, value); let address = address as usize; register_op(op); musashi_memory[address+0] = (value & 0xff000000 >> 24) as u8; musashi_memory[address+1] = (value & 0x00ff0000 >> 16) as u8; musashi_memory[address+2] = (value & 0x0000ff00 >> 8) as u8; musashi_memory[address+3] = (value & 0x000000ff >> 0) as u8; } } #[no_mangle] pub extern fn cpu_pulse_reset() {panic!("pr")} #[no_mangle] pub extern fn cpu_long_branch() {} #[no_mangle] pub extern fn cpu_set_fc(fc: u32) { unsafe { musashi_address_space = match fc { 1 => USER_DATA, 2 => USER_PROGRAM, 5 => SUPERVISOR_DATA, 6 => SUPERVISOR_PROGRAM, _ => panic!("unknown fc: {}", fc), }; // println!("set_fc {:?}", musashi_address_space); } } #[allow(unused_variables)] #[no_mangle] pub extern fn cpu_irq_ack(level: i32) -> i32 {panic!("ia")} #[no_mangle] pub extern fn cpu_instr_callback() {} use std::ptr; pub fn experimental_communication() { unsafe { m68k_init(); m68k_set_cpu_type(CpuType::M68000); m68k_set_reg(Register::D0, 123); 
println!("D0: {}", m68k_get_reg(ptr::null_mut(), Register::D0)); } } pub fn roundtrip_register(reg: Register, value: u32) -> u32 { unsafe { m68k_init(); m68k_set_cpu_type(CpuType::M68000); m68k_set_reg(reg, value); m68k_get_reg(ptr::null_mut(), reg) } } use cpu::Core; static REGS:[Register; 16] = [Register::D0, Register::D1, Register::D2, Register::D3, Register::D4, Register::D5, Register::D6, Register::D7, Register::A0, Register::A1, Register::A2, Register::A3, Register::A4, Register::A5, Register::A6, Register::A7]; pub fn initialize_musashi(core: &mut Core) { unsafe { m68k_init(); m68k_set_cpu_type(CpuType::M68000); m68k_pulse_reset(); // Resetting opcount, because m68k_pulse_reset causes irrelevant // reads from 0x00000000 to set PC/SP, a jump to PC and // resetting of state. But we don't want to test those ops. musashi_opcount = 0; m68k_set_reg(Register::PC, core.pc); m68k_set_reg(Register::USP, core.inactive_usp); m68k_set_reg(Register::SR, core.status_register()); for (i, &reg) in REGS.iter().enumerate() { m68k_set_reg(reg, core.dar[i]); } for i in 0..1024usize { musashi_memory[i] = core.mem.read_byte(SUPERVISOR_PROGRAM, i as u32) as u8; } } } pub fn execute1(core: &mut Core) { unsafe { m68k_execute(1); for (i, &reg) in REGS.iter().enumerate() { core.dar[i] = m68k_get_reg(ptr::null_mut(), reg); } core.pc = m68k_get_reg(ptr::null_mut(), Register::PC); core.inactive_usp = m68k_get_reg(ptr::null_mut(), Register::USP); core.sr_to_flags(m68k_get_reg(ptr::null_mut(), Register::SR)); } } pub fn reset_and_execute1(core: &mut Core) { initialize_musashi(core); execute1(core); } extern crate quickcheck; #[cfg(test)] mod tests { use super::*; use ram::SUPERVISOR_PROGRAM; use super::musashi_ops; use super::musashi_opcount; use ram::Operation; use cpu::Core; use musashi::quickcheck::*; #[derive(Copy, Clone, Debug, PartialEq)] struct Bitpattern(u32); impl Arbitrary for Bitpattern { fn arbitrary<G: Gen>(g: &mut G) -> Bitpattern { // let m : u32 = Arbitrary::arbitrary(g); // 
let mut mask: u32 = 0xF; //((m & 0xF) | (m >> 4) & 0xF) as u32; // let mut i : u32 = Arbitrary::arbitrary(g); // let mut sum: u32 = 0; // println!("{}/{} when {}", i, mask, g.size()); // // 0b11001100 => 0xFF00FF00 // while i > 0 { // sum += if i & 1 == 1 { mask } else { 0 }; // i >>= 1; // mask <<= 4; // } // when size 256, could generate any 32 bit pattern let i1: u32 = Arbitrary::arbitrary(g); let i2: u32 = Arbitrary::arbitrary(g); let i3: u32 = Arbitrary::arbitrary(g); let i4: u32 = Arbitrary::arbitrary(g); let sum: u32 = (i1 << 24) | (i2 << 16) | (i3 << 8) | i4; // println!("{:b} when {}", i4, g.size()); Bitpattern(sum) } fn shrink(&self) -> Box<Iterator<Item=Self>> { match *self { Bitpattern(x) => { let xs = x.shrink(); // should shrink Bitpattern by clearing bits, not setting new ones let tagged = xs //.inspect(|x|println!("{}", x)) .map(Bitpattern); Box::new(tagged) } } } } impl Arbitrary for Register { fn arbitrary<G: Gen>(g: &mut G) -> Register { let regs = [Register::D0, Register::D1, Register::D2, Register::D3, Register::D4, Register::D5, Register::D6, Register::D7, Register::A0, Register::A1, Register::A2, Register::A3, Register::A4, Register::A5, Register::A6, // Register::A7, Register::SP, Register::SR, Register::PC ]; //println!("{}",i); if let Some(&reg) = g.choose(&regs) { reg } else { unreachable!(); } } } extern crate rand; use itertools::{Itertools, assert_equal}; // struct OpSeq { // mask: u32, // matching: u32, // current_op: u32, // } // impl OpSeq { // fn new(mask: u32, matching: u32) -> OpSeq { // OpSeq { mask: mask, matching: matching, current_op: 0 } // } // } // impl Iterator for OpSeq { // type Item = u32; // fn next(&mut self) -> Option<u32> { // if self.current_op == 0x10000 { // None // } else { // while (self.current_op & self.mask) != self.matching && self.current_op < 0x10000 { // self.current_op += 1; // } // if self.current_op == 0x10000 { // return None; // } // let res = Some(self.current_op); // self.current_op += 1; // res 
// } // } // } fn opcodes(mask: u32, matching: u32) -> Vec<u16> { (0..0x10000u32) .filter(|opcode| (opcode & mask) == matching) .map(|v|v as u16).collect::<Vec<u16>>() } macro_rules! opcodes { ($mask:expr , $matching:expr) => {(0..0x10000).filter(|opcode| (opcode & $mask) == $matching)} } #[test] fn opcodes_from_mask_and_matching(){ let mut opseq = Vec::new(); opseq.extend(opcodes!(0xf1f8, 0xc100)); assert_eq!(64, opseq.len()); let ops = opseq.iter().unique(); assert_eq!(64, ops.count()); if let Some(&min) = opseq.iter().min() { assert_eq!(0b1100000100000000, min); } if let Some(&max) = opseq.iter().max() { assert_eq!(0b1100111100000111, max); } } static mut opcode_under_test: u16 = 0; fn hammer_cores(rs: Vec<(Register, Bitpattern)>) -> bool { let pc = 0x40; let mem = unsafe { [((opcode_under_test >> 8) & 0xff) as u8, (opcode_under_test & 0xff) as u8] }; let mut musashi = Core::new_mem(pc, &mem); for r in rs { match r { (Register::D0, Bitpattern(bp)) => musashi.dar[0] = bp, (Register::D1, Bitpattern(bp)) => musashi.dar[1] = bp, (Register::D2, Bitpattern(bp)) => musashi.dar[2] = bp, (Register::D3, Bitpattern(bp)) => musashi.dar[3] = bp, (Register::D4, Bitpattern(bp)) => musashi.dar[4] = bp, (Register::D5, Bitpattern(bp)) => musashi.dar[5] = bp, (Register::D6, Bitpattern(bp)) => musashi.dar[6] = bp, (Register::D7, Bitpattern(bp)) => musashi.dar[7] = bp, (Register::A0, Bitpattern(bp)) => musashi.dar[0+8] = bp, (Register::A1, Bitpattern(bp)) => musashi.dar[1+8] = bp, (Register::A2, Bitpattern(bp)) => musashi.dar[2+8] = bp, (Register::A3, Bitpattern(bp)) => musashi.dar[3+8] = bp, (Register::A4, Bitpattern(bp)) => musashi.dar[4+8] = bp, (Register::A5, Bitpattern(bp)) => musashi.dar[5+8] = bp, (Register::A6, Bitpattern(bp)) => musashi.dar[6+8] = bp, (Register::A7, Bitpattern(bp)) => musashi.dar[7+8] = bp, (Register::USP, Bitpattern(bp)) => musashi.inactive_usp = bp, (Register::SR, Bitpattern(bp)) => musashi.sr_to_flags(bp), _ => { panic!("No idea how to set {:?}", r.0) }, 
} } let mut r68k = musashi.clone(); // so very self-aware! reset_and_execute1(&mut musashi); r68k.execute1(); assert_cores_equal(&musashi, &r68k) } #[test] #[ignore] fn test_core_with_quickcheck() { for opcode in opcodes(0xf1f8, 0xc100) { println!("Will hammer {:b}", opcode); unsafe { opcode_under_test = opcode; } QuickCheck::new() .gen(StdGen::new(rand::thread_rng(), 256)) .tests(10) .quickcheck(hammer_cores as fn(Vec<(Register, Bitpattern)>) -> bool); } } fn get_ops() -> Vec<Operation> { let mut res: Vec<Operation> = vec![]; unsafe { for i in 0..musashi_opcount { res.push(musashi_ops[i]); } } res } macro_rules! core_eq { ($left:ident , $right:ident . $field:ident [ $index:expr ]) => ({ match (&($left.$field[$index]), &($right.$field[$index])) { (left_val, right_val) => { if !(*left_val == *right_val) { println!("core incoherence: `{}[{}]` differs \ ({}: `0x{:x}`, {}: `0x{:x}`)", stringify!($field), $index, stringify!($left), left_val, stringify!($right), right_val); return false; } } } }); ($left:ident , $right:ident . $field:ident () ?) => ({ match (&($left.$field()), &($right.$field())) { (left_val, right_val) => { if !(*left_val == *right_val) { println!("core incoherence: `{}()` differs \ ({}: `{:?}`, {}: `{:?}`)", stringify!($field), stringify!($left), left_val, stringify!($right), right_val); return false; } } } }); ($left:ident , $right:ident . $field:ident ()) => ({ match (&($left.$field()), &($right.$field())) { (left_val, right_val) => { if !(*left_val == *right_val) { println!("core incoherence: `{}()` differs \ ({}: `0x{:x}`, {}: `0x{:x}`)", stringify!($field), stringify!($left), left_val, stringify!($right), right_val); return false; } } } }); ($left:ident , $right:ident . 
$field:ident) => ({ match (&($left.$field), &($right.$field)) { (left_val, right_val) => { if !(*left_val == *right_val) { println!("core incoherence: `{}` differs \ ({}: `0x{:x}`, {}: `0x{:x}`)", stringify!($field), stringify!($left), left_val, stringify!($right), right_val); return false; } } } }) } fn assert_cores_equal(musashi: &Core, r68k: &Core) -> bool { // check memory accesses match up assert_equal(get_ops(), r68k.mem.logger.ops()); core_eq!(musashi, r68k.pc); core_eq!(musashi, r68k.inactive_usp); for i in (0..16).rev() { core_eq!(musashi, r68k.dar[i]); } core_eq!(musashi, r68k.flags() ?); core_eq!(musashi, r68k.status_register()); true } #[test] fn roundtrip_d0() { assert_eq!(256, roundtrip_register(Register::D0, 256)); } #[test] fn roundtrip_abcd_rr() { let pc = 0x40; // 0xc101: ABCD D0, D1 let mut cpu = Core::new_mem(pc, &[0xc1, 0x01, 0x00, 0x00]); cpu.dar[0] = 0x17; cpu.dar[1] = 0x27; cpu.dar[5] = 0x55555; reset_and_execute1(&mut cpu); // 17 + 27 is 44 assert_eq!(0x44, cpu.dar[0]); assert_eq!(0x27, cpu.dar[1]); assert_eq!(0x55555, cpu.dar[5]); let ops = get_ops(); assert_eq!(1, ops.len()); assert_eq!(Operation::ReadLong(SUPERVISOR_PROGRAM, pc, 0xc1010000), ops[0]); } #[test] fn compare_abcd_rr() { let pc = 0x40; // 0xc300: ABCD D1, D0 let mut musashi = Core::new_mem(pc, &[0xc3, 0x00]); musashi.dar[0] = 0x16; musashi.dar[1] = 0x26; let mut r68k = musashi.clone(); // so very self-aware! reset_and_execute1(&mut musashi); r68k.execute1(); assert_eq!(0x42, r68k.dar[1]); assert_cores_equal(&musashi, &r68k); } #[test] fn run_abcd_rr_twice() { let pc = 0x40; // 0xc300: ABCD D1, D0 // 0xc302: ABCD D1, D2 let mut musashi = Core::new_mem(pc, &[0xc3, 0x00, 0xc3, 0x02]); musashi.dar[0] = 0x16; musashi.dar[1] = 0x26; musashi.dar[2] = 0x31; let mut r68k = musashi.clone(); // so very self-aware! 
initialize_musashi(&mut musashi); // execute ABCD D1, D0 execute1(&mut musashi); r68k.execute1(); assert_eq!(0x42, musashi.dar[1]); assert_eq!(0x42, r68k.dar[1]); // then execute a second instruction (ABCD D1, D2) on the core execute1(&mut musashi); r68k.execute1(); assert_eq!(0x73, musashi.dar[1]); assert_eq!(0x73, r68k.dar[1]); assert_cores_equal(&musashi, &r68k); } }
// Copyright 2016 Walter Kuppens. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use nes::instruction::Instruction; use nes::memory::Memory; use std::fmt; // Flag constants that allow easy bitwise getting and setting of flag values. pub const CARRY_FLAG : u8 = 0x1; pub const ZERO_FLAG : u8 = 0x2; pub const INTERRUPT_DISABLE: u8 = 0x4; pub const DECIMAL_MODE : u8 = 0x8; pub const BREAK_COMMAND : u8 = 0x10; pub const OVERFLOW_FLAG : u8 = 0x40; pub const NEGATIVE_FLAG : u8 = 0x80; /// This is an implementation of 2A03 processor used in the NES. The 2A03 is /// based off the 6502 processor with some minor changes such as having no /// binary-coded decimal mode. Currently only the NTSC variant of the chip is /// planned to be implemented. /// /// Much of the information and comments are due credit to www.obelisk.me.uk, /// which has really good information about the 6502 processor. If you're /// interested in diving further, I recommend you give that site a visit. #[derive(Debug)] pub struct CPU { // The program counter is a 16-bit register which points to the next // instruction to be executed. The value of program counter is modified // automatically as instructions are executed. // // The value of the program counter can be modified by executing a jump, a // relative branch, a subroutine call to another memory address, by // returning from a subroutine, or by an interrupt. pub pc: u16, // The processor supports a 256 byte stack located between $0100 and $01FF. // The stack pointer is an 8-bit register and holds the next free location // on the stack. The location of the stack is fixed and cannot be moved and // grows downwards. 
pub sp: u8, // The 8-bit accumulator is used all arithmetic and logical operations (with // the exception of increments and decrements). The contents of the // accumulator can be stored and retrieved either from memory or the stack. pub a: u8, // The 8-bit X register can be used to control information, compare values // in memory, and be incremented or decremented. The X register is special // as it can be used to get a copy of the stack pointer or change its value. pub x: u8, // The 8-bit Y register like X, can be used to manage information and be // incremented or decremented; however it doesn't have any special functions // like the X register does. pub y: u8, // The Processor Status register contains a list of flags that are set and // cleared by instructions to record the results of operations. Each flag // has a special bit within the register (8 bits). Instructions exist to // set, clear, and read the various flags. One even allows pushing or // pulling the flags to the stack. // // Carry Flag: // // The carry flag is set if the last operation caused an overflow from bit 7 // of the result or an underflow from bit 0. This condition is set during // arithmetic, comparison and during logical shifts. It can be explicitly // set using the 'Set Carry Flag' (SEC) instruction and cleared with 'Clear // Carry Flag' (CLC). // // Zero Flag: // // The zero flag is set if the result of the last operation as was zero. // // Interrupt Disable: // // The interrupt disable flag is set if the program has executed a 'Set // Interrupt Disable' (SEI) instruction. While this flag is set the // processor will not respond to interrupts from devices until it is cleared // by a 'Clear Interrupt Disable' (CLI) instruction. // // Decimal Mode: (UNUSED in 2A03) // // While the decimal mode flag is set the processor will obey the rules of // Binary Coded Decimal (BCD) arithmetic during addition and subtraction. 
// The flag can be explicitly set using 'Set Decimal Flag' (SED) and cleared // with 'Clear Decimal Flag' (CLD). // // Break Command: // // The break command bit is set when a BRK instruction has been executed and // an interrupt has been generated to process it. // // Overflow Flag: // // The overflow flag is set during arithmetic operations if the result has // yielded an invalid 2's complement result (e.g. adding to positive numbers // and ending up with a negative result: 64 + 64 => -128). It is determined // by looking at the carry between bits 6 and 7 and between bit 7 and the // carry flag. // // Negative Flag: // // The negative flag is set if the result of the last operation had bit 7 // set to a one. pub p: u8, // The amount of cycles currently accumulated. A cycle represents a unit of // time (the time it takes for the CPU clock to fire). Different // instructions take a different amount of cycles to complete depending on // their complexity. pub cycles: u16 } impl CPU { pub fn new() -> CPU { CPU { pc: 0xC000, sp: 0xFD, a: 0, x: 0, y: 0, p: 0x24, cycles: 0 } } /// Sets the carry flag in the status register. #[inline(always)] pub fn set_carry_flag(&mut self) { self.p |= CARRY_FLAG; } /// Sets the zero flag in the status register. #[inline(always)] pub fn set_zero_flag(&mut self) { self.p |= ZERO_FLAG; } /// Sets the interrupt disable flag in the status register. #[inline(always)] pub fn set_interrupt_disable(&mut self) { self.p |= INTERRUPT_DISABLE; } /// Sets the decimal mode flag in the status register. /// NOTE: This flag is disabled in the 2A03 variation of the 6502. #[inline(always)] pub fn set_decimal_mode(&mut self) { self.p |= DECIMAL_MODE; } /// Sets the break command flag in the status register. #[inline(always)] pub fn set_break_command(&mut self) { self.p |= BREAK_COMMAND; } /// Sets the overflow flag in the status register. 
#[inline(always)] pub fn set_overflow_flag(&mut self) { self.p |= OVERFLOW_FLAG; } /// Sets the negative flag in the status register. #[inline(always)] pub fn set_negative_flag(&mut self) { self.p |= NEGATIVE_FLAG; } /// Sets the carry flag in the status register. #[inline(always)] pub fn carry_flag_set(&self) -> bool { self.p & CARRY_FLAG == CARRY_FLAG } /// Sets the zero flag in the status register. #[inline(always)] pub fn zero_flag_set(&self) -> bool { self.p & ZERO_FLAG == ZERO_FLAG } /// Sets the interrupt disable flag in the status register. #[inline(always)] pub fn interrupt_disable_set(&self) -> bool { self.p & INTERRUPT_DISABLE == INTERRUPT_DISABLE } /// Sets the decimal mode flag in the status register. /// NOTE: This flag is disabled in the 2A03 variation of the 6502. #[inline(always)] pub fn decimal_mode_set(&self) -> bool { self.p & DECIMAL_MODE == DECIMAL_MODE } /// Sets the break command flag in the status register. #[inline(always)] pub fn break_command_set(&self) -> bool { self.p & BREAK_COMMAND == BREAK_COMMAND } /// Sets the overflow flag in the status register. #[inline(always)] pub fn overflow_flag_set(&self) -> bool { self.p & OVERFLOW_FLAG == OVERFLOW_FLAG } /// Sets the negative flag in the status register. #[inline(always)] pub fn negative_flag_set(&self) -> bool { self.p & NEGATIVE_FLAG == NEGATIVE_FLAG } /// Parse an instruction from memory at the address the program counter /// currently points execute it. All instruction logic is in instruction.rs. pub fn execute(&mut self, memory: &mut Memory) { // NOTE: At this time, some parsing logic is done twice for the sake of // code simplicity. In the future I may rework the function arguments to // reuse as much data as possible since this is high-performance code. 
let instr = Instruction::parse(self.pc as usize, memory); instr.log(self); instr.execute(self, memory); } fn fmt_flag(&self, flag: bool) -> &'static str { if flag { "SET" } else { "UNSET" } } } impl fmt::Display for CPU { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { writeln!(f, "\nCPU Crash State:").unwrap(); writeln!(f, " Program Counter: {:#X}", self.pc).unwrap(); writeln!(f, " Stack Pointer: {:#X}", self.sp).unwrap(); writeln!(f, " Accumulator: {:#X}", self.a).unwrap(); writeln!(f, " X Register: {:#X}", self.x).unwrap(); writeln!(f, " Y Register: {:#X}", self.y).unwrap(); writeln!(f, "").unwrap(); writeln!(f, "Status Register: {:#X}", self.p).unwrap(); writeln!(f, " Carry Flag: {}", self.fmt_flag(self.carry_flag_set())).unwrap(); writeln!(f, " Zero Flag: {}", self.fmt_flag(self.zero_flag_set())).unwrap(); writeln!(f, " Interrupt Disable: {}", self.fmt_flag(self.interrupt_disable_set())).unwrap(); writeln!(f, " Decimal Mode: {}", self.fmt_flag(self.decimal_mode_set())).unwrap(); writeln!(f, " Break Command: {}", self.fmt_flag(self.break_command_set())).unwrap(); writeln!(f, " Overflow Flag: {}", self.fmt_flag(self.overflow_flag_set())).unwrap(); writeln!(f, " Negative Flag: {}", self.fmt_flag(self.negative_flag_set())) } } Added flag unsetting methods // Copyright 2016 Walter Kuppens. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use nes::instruction::Instruction; use nes::memory::Memory; use std::fmt; // Flag constants that allow easy bitwise getting and setting of flag values. 
pub const CARRY_FLAG : u8 = 0x1; pub const ZERO_FLAG : u8 = 0x2; pub const INTERRUPT_DISABLE: u8 = 0x4; pub const DECIMAL_MODE : u8 = 0x8; pub const BREAK_COMMAND : u8 = 0x10; pub const OVERFLOW_FLAG : u8 = 0x40; pub const NEGATIVE_FLAG : u8 = 0x80; /// This is an implementation of 2A03 processor used in the NES. The 2A03 is /// based off the 6502 processor with some minor changes such as having no /// binary-coded decimal mode. Currently only the NTSC variant of the chip is /// planned to be implemented. /// /// Much of the information and comments are due credit to www.obelisk.me.uk, /// which has really good information about the 6502 processor. If you're /// interested in diving further, I recommend you give that site a visit. #[derive(Debug)] pub struct CPU { // The program counter is a 16-bit register which points to the next // instruction to be executed. The value of program counter is modified // automatically as instructions are executed. // // The value of the program counter can be modified by executing a jump, a // relative branch, a subroutine call to another memory address, by // returning from a subroutine, or by an interrupt. pub pc: u16, // The processor supports a 256 byte stack located between $0100 and $01FF. // The stack pointer is an 8-bit register and holds the next free location // on the stack. The location of the stack is fixed and cannot be moved and // grows downwards. pub sp: u8, // The 8-bit accumulator is used all arithmetic and logical operations (with // the exception of increments and decrements). The contents of the // accumulator can be stored and retrieved either from memory or the stack. pub a: u8, // The 8-bit X register can be used to control information, compare values // in memory, and be incremented or decremented. The X register is special // as it can be used to get a copy of the stack pointer or change its value. 
pub x: u8, // The 8-bit Y register like X, can be used to manage information and be // incremented or decremented; however it doesn't have any special functions // like the X register does. pub y: u8, // The Processor Status register contains a list of flags that are set and // cleared by instructions to record the results of operations. Each flag // has a special bit within the register (8 bits). Instructions exist to // set, clear, and read the various flags. One even allows pushing or // pulling the flags to the stack. // // Carry Flag: // // The carry flag is set if the last operation caused an overflow from bit 7 // of the result or an underflow from bit 0. This condition is set during // arithmetic, comparison and during logical shifts. It can be explicitly // set using the 'Set Carry Flag' (SEC) instruction and cleared with 'Clear // Carry Flag' (CLC). // // Zero Flag: // // The zero flag is set if the result of the last operation as was zero. // // Interrupt Disable: // // The interrupt disable flag is set if the program has executed a 'Set // Interrupt Disable' (SEI) instruction. While this flag is set the // processor will not respond to interrupts from devices until it is cleared // by a 'Clear Interrupt Disable' (CLI) instruction. // // Decimal Mode: (UNUSED in 2A03) // // While the decimal mode flag is set the processor will obey the rules of // Binary Coded Decimal (BCD) arithmetic during addition and subtraction. // The flag can be explicitly set using 'Set Decimal Flag' (SED) and cleared // with 'Clear Decimal Flag' (CLD). // // Break Command: // // The break command bit is set when a BRK instruction has been executed and // an interrupt has been generated to process it. // // Overflow Flag: // // The overflow flag is set during arithmetic operations if the result has // yielded an invalid 2's complement result (e.g. adding to positive numbers // and ending up with a negative result: 64 + 64 => -128). 
It is determined // by looking at the carry between bits 6 and 7 and between bit 7 and the // carry flag. // // Negative Flag: // // The negative flag is set if the result of the last operation had bit 7 // set to a one. pub p: u8, // The amount of cycles currently accumulated. A cycle represents a unit of // time (the time it takes for the CPU clock to fire). Different // instructions take a different amount of cycles to complete depending on // their complexity. pub cycles: u16 } impl CPU { pub fn new() -> CPU { CPU { pc: 0xC000, sp: 0xFD, a: 0, x: 0, y: 0, p: 0x24, cycles: 0 } } /// Sets the carry flag in the status register. #[inline(always)] pub fn set_carry_flag(&mut self) { self.p |= CARRY_FLAG; } /// Sets the zero flag in the status register. #[inline(always)] pub fn set_zero_flag(&mut self) { self.p |= ZERO_FLAG; } /// Sets the interrupt disable flag in the status register. #[inline(always)] pub fn set_interrupt_disable(&mut self) { self.p |= INTERRUPT_DISABLE; } /// Sets the decimal mode flag in the status register. /// NOTE: This flag is disabled in the 2A03 variation of the 6502. #[inline(always)] pub fn set_decimal_mode(&mut self) { self.p |= DECIMAL_MODE; } /// Sets the break command flag in the status register. #[inline(always)] pub fn set_break_command(&mut self) { self.p |= BREAK_COMMAND; } /// Sets the overflow flag in the status register. #[inline(always)] pub fn set_overflow_flag(&mut self) { self.p |= OVERFLOW_FLAG; } /// Sets the negative flag in the status register. #[inline(always)] pub fn set_negative_flag(&mut self) { self.p |= NEGATIVE_FLAG; } /// Unsets the carry flag in the status register. #[inline(always)] pub fn unset_carry_flag(&mut self) { self.p &= ~CARRY_FLAG; } /// Unsets the zero flag in the status register. #[inline(always)] pub fn unset_zero_flag(&mut self) { self.p &= ~ZERO_FLAG; } /// Unsets the interrupt disable flag in the status register. 
#[inline(always)] pub fn unset_interrupt_disable(&mut self) { self.p &= ~INTERRUPT_DISABLE; } /// Unsets the decimal mode flag in the status register. /// NOTE: This flag is disabled in the 2A03 variation of the 6502. #[inline(always)] pub fn unset_decimal_mode(&mut self) { self.p &= ~DECIMAL_MODE; } /// Unsets the break command flag in the status register. #[inline(always)] pub fn unset_break_command(&mut self) { self.p &= ~BREAK_COMMAND; } /// Unsets the overflow flag in the status register. #[inline(always)] pub fn unset_overflow_flag(&mut self) { self.p &= ~OVERFLOW_FLAG; } /// Unsets the negative flag in the status register. #[inline(always)] pub fn unset_negative_flag(&mut self) { self.p &= ~NEGATIVE_FLAG; } /// Sets the carry flag in the status register. #[inline(always)] pub fn carry_flag_set(&self) -> bool { self.p & CARRY_FLAG == CARRY_FLAG } /// Sets the zero flag in the status register. #[inline(always)] pub fn zero_flag_set(&self) -> bool { self.p & ZERO_FLAG == ZERO_FLAG } /// Sets the interrupt disable flag in the status register. #[inline(always)] pub fn interrupt_disable_set(&self) -> bool { self.p & INTERRUPT_DISABLE == INTERRUPT_DISABLE } /// Sets the decimal mode flag in the status register. /// NOTE: This flag is disabled in the 2A03 variation of the 6502. #[inline(always)] pub fn decimal_mode_set(&self) -> bool { self.p & DECIMAL_MODE == DECIMAL_MODE } /// Sets the break command flag in the status register. #[inline(always)] pub fn break_command_set(&self) -> bool { self.p & BREAK_COMMAND == BREAK_COMMAND } /// Sets the overflow flag in the status register. #[inline(always)] pub fn overflow_flag_set(&self) -> bool { self.p & OVERFLOW_FLAG == OVERFLOW_FLAG } /// Sets the negative flag in the status register. #[inline(always)] pub fn negative_flag_set(&self) -> bool { self.p & NEGATIVE_FLAG == NEGATIVE_FLAG } /// Parse an instruction from memory at the address the program counter /// currently points execute it. 
All instruction logic is in instruction.rs. pub fn execute(&mut self, memory: &mut Memory) { // NOTE: At this time, some parsing logic is done twice for the sake of // code simplicity. In the future I may rework the function arguments to // reuse as much data as possible since this is high-performance code. let instr = Instruction::parse(self.pc as usize, memory); instr.log(self); instr.execute(self, memory); } fn fmt_flag(&self, flag: bool) -> &'static str { if flag { "SET" } else { "UNSET" } } } impl fmt::Display for CPU { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { writeln!(f, "\nCPU Crash State:").unwrap(); writeln!(f, " Program Counter: {:#X}", self.pc).unwrap(); writeln!(f, " Stack Pointer: {:#X}", self.sp).unwrap(); writeln!(f, " Accumulator: {:#X}", self.a).unwrap(); writeln!(f, " X Register: {:#X}", self.x).unwrap(); writeln!(f, " Y Register: {:#X}", self.y).unwrap(); writeln!(f, "").unwrap(); writeln!(f, "Status Register: {:#X}", self.p).unwrap(); writeln!(f, " Carry Flag: {}", self.fmt_flag(self.carry_flag_set())).unwrap(); writeln!(f, " Zero Flag: {}", self.fmt_flag(self.zero_flag_set())).unwrap(); writeln!(f, " Interrupt Disable: {}", self.fmt_flag(self.interrupt_disable_set())).unwrap(); writeln!(f, " Decimal Mode: {}", self.fmt_flag(self.decimal_mode_set())).unwrap(); writeln!(f, " Break Command: {}", self.fmt_flag(self.break_command_set())).unwrap(); writeln!(f, " Overflow Flag: {}", self.fmt_flag(self.overflow_flag_set())).unwrap(); writeln!(f, " Negative Flag: {}", self.fmt_flag(self.negative_flag_set())) } }
use std; const WORD_SIZE: usize = 63; const DISTANCE: usize = 23; // 2t+1 = 23 => t = 11 const ERRORS: usize = 11; const SYNDROMES: usize = 2 * ERRORS; // Maps α^i to codewords. const CODEWORDS: &'static [u8] = &[ 0b100000, 0b010000, 0b001000, 0b000100, 0b000010, 0b000001, 0b110000, 0b011000, 0b001100, 0b000110, 0b000011, 0b110001, 0b101000, 0b010100, 0b001010, 0b000101, 0b110010, 0b011001, 0b111100, 0b011110, 0b001111, 0b110111, 0b101011, 0b100101, 0b100010, 0b010001, 0b111000, 0b011100, 0b001110, 0b000111, 0b110011, 0b101001, 0b100100, 0b010010, 0b001001, 0b110100, 0b011010, 0b001101, 0b110110, 0b011011, 0b111101, 0b101110, 0b010111, 0b111011, 0b101101, 0b100110, 0b010011, 0b111001, 0b101100, 0b010110, 0b001011, 0b110101, 0b101010, 0b010101, 0b111010, 0b011101, 0b111110, 0b011111, 0b111111, 0b101111, 0b100111, 0b100011, 0b100001 ]; // Maps codewords to α^i. const POWERS: &'static [usize] = &[ 5, 4, 10, 3, 15, 9, 29, 2, 34, 14, 50, 8, 37, 28, 20, 1, 25, 33, 46, 13, 53, 49, 42, 7, 17, 36, 39, 27, 55, 19, 57, 0, 62, 24, 61, 32, 23, 45, 60, 12, 31, 52, 22, 48, 44, 41, 59, 6, 11, 16, 30, 35, 51, 38, 21, 26, 47, 54, 43, 18, 40, 56, 58, ]; const GEN: &'static [u16] = &[ 0b1000000000000000, 0b0100000000000000, 0b0010000000000000, 0b0001000000000000, 0b0000100000000000, 0b0000010000000000, 0b0000001000000000, 0b0000000100000000, 0b0000000010000000, 0b0000000001000000, 0b0000000000100000, 0b0000000000010000, 0b0000000000001000, 0b0000000000000100, 0b0000000000000010, 0b0000000000000001, 0b1110110001000111, 0b1001101001100100, 0b0100110100110010, 0b0010011010011001, 0b1111111100001011, 0b1001001111000010, 0b0100100111100001, 0b1100100010110111, 0b1000100000011100, 0b0100010000001110, 0b0010001000000111, 0b1111110101000100, 0b0111111010100010, 0b0011111101010001, 0b1111001111101111, 0b1001010110110000, 0b0100101011011000, 0b0010010101101100, 0b0001001010110110, 0b0000100101011011, 0b1110100011101010, 0b0111010001110101, 0b1101011001111101, 0b1000011101111001, 
0b1010111111111011,
0b1011101110111010,
0b0101110111011101,
0b1100001010101001,
0b1000110100010011,
0b1010101011001110,
0b0101010101100111,
0b1100011011110100,
0b0110001101111010,
0b0011000110111101,
0b1111010010011001,
0b1001011000001011,
0b1010011101000010,
0b0101001110100001,
0b1100010110010111,
0b1000111010001100,
0b0100011101000110,
0b0010001110100011,
0b1111110110010110,
0b0111111011001011,
0b1101001100100010,
0b0110100110010001,
0b1101100010001111,
0b0000000000000011,
];

/// Encodes a 16-bit word into a 63-bit BCH codeword (in the low bits of the
/// returned u64) by multiplying it with the generator matrix `GEN`: each
/// output bit is the parity of `word` masked with one generator row.
pub fn encode(word: u16) -> u64 {
    GEN.iter().fold(0, |accum, row| {
        accum << 1 | ((word & row).count_ones() % 2) as u64
    })
}

/// Tries to correct up to `ERRORS` bit errors in a received 63-bit word.
/// Returns the corrected word and the number of errors fixed, or `None` if
/// the error pattern is uncorrectable.
pub fn decode(word: u64) -> Option<(u64, usize)> {
    let poly = BCHDecoder::new(Syndromes::new(word)).decode();

    let errors = match poly.degree() {
        Some(deg) => deg,
        None => return None,
    };

    // Even if more than ERRORS bits were corrupted, the Berlekamp-Massey
    // iteration can only yield an error-locator polynomial of degree at most
    // ERRORS; a larger degree would indicate a bug in the decoder itself.
    assert!(errors <= ERRORS);

    let locs = ErrorLocations::new(poly.coefs().iter().cloned());

    // Flip each located bit and count how many roots the search found.
    let (word, count) = locs.take(errors).fold((word, 0), |(word, s), loc| {
        (word ^ 1 << loc, s + 1)
    });

    // "If the Chien Search fails to find v roots of a error locator polynomial of degree
    // v, then the error pattern is an uncorrectable error pattern" -- Lecture 17:
    // Berlekamp-Massey Algorithm for Binary BCH Codes
    if count == errors {
        Some((word, errors))
    } else {
        None
    }
}

/// Iterator producing the 2t syndrome values s_1 .. s_(DISTANCE-1) of a
/// received word: s_i = sum of α^(b*i) over every set bit position b.
struct Syndromes {
    // Next syndrome index to produce, running over 1..DISTANCE.
    pow: std::ops::Range<usize>,
    // The received 63-bit word (low bits).
    word: u64,
}

impl Syndromes {
    pub fn new(word: u64) -> Syndromes {
        Syndromes {
            pow: 1..DISTANCE,
            word: word,
        }
    }
}

impl Iterator for Syndromes {
    type Item = Codeword;

    fn next(&mut self) -> Option<Self::Item> {
        match self.pow.next() {
            Some(pow) => Some((0..WORD_SIZE).fold(Codeword::default(), |s, b| {
                if self.word >> b & 1 == 0 {
                    s
                } else {
                    s + Codeword::for_power(b * pow)
                }
            })),
            None => None,
        }
    }
}

/// An element of the decoder's 64-element field, stored as a raw 6-bit
/// codeword; `CODEWORDS`/`POWERS` translate between this representation and
/// powers of the primitive element α.
#[derive(Copy, Clone)]
struct Codeword(u8);

impl std::fmt::Debug for Codeword {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(fmt, "Codeword({:?})", self.power())
    }
}

impl Codeword {
    /// Wraps a raw 6-bit codeword.
    pub fn new(codeword: u8) -> Codeword {
        Codeword(codeword)
    }

    /// Whether this is the zero (additive identity) element.
    pub fn zero(&self) -> bool {
        self.0 == 0
    }
// Discrete log of this element: the power i such that it equals α^i, or
// `None` for the zero codeword, which has no such representation.
pub fn power(&self) -> Option<usize> {
    if self.zero() {
        None
    } else {
        Some(POWERS[self.0 as usize - 1])
    }
}

// α raised to `power`, reduced modulo the multiplicative group order
// (POWERS.len() == 63).
pub fn for_power(power: usize) -> Codeword {
    Codeword::new(CODEWORDS[power % POWERS.len()])
}

// Multiplicative inverse: α^(-p) == α^(63 - p). Panics on the zero
// codeword, which has no inverse.
pub fn invert(self) -> Codeword {
    match self.power() {
        Some(p) => Codeword::for_power(POWERS.len() - p),
        None => panic!("divide by zero"),
    }
}
}

// The zero element is the default.
impl Default for Codeword {
    fn default() -> Self {
        Codeword::new(0)
    }
}

// Multiplication via discrete logs: α^p * α^q == α^(p+q); anything times
// the zero codeword is zero.
impl std::ops::Mul for Codeword {
    type Output = Codeword;

    fn mul(self, rhs: Codeword) -> Self::Output {
        match (self.power(), rhs.power()) {
            (Some(p), Some(q)) => Codeword::for_power(p + q),
            _ => Codeword::default(),
        }
    }
}

// Division via discrete logs; zero divided by anything nonzero is zero, and
// dividing by zero panics.
impl std::ops::Div for Codeword {
    type Output = Codeword;

    fn div(self, rhs: Codeword) -> Self::Output {
        match (self.power(), rhs.power()) {
            // min(power) = -62 => 63+min(power) > 0
            (Some(p), Some(q)) => Codeword::for_power(p + POWERS.len() - q),
            (None, Some(_)) => Codeword::default(),
            (_, None) => panic!("divide by zero"),
        }
    }
}

// Addition is carryless in a field of characteristic 2: XOR of the raw
// codeword bits.
impl std::ops::Add for Codeword {
    type Output = Codeword;

    fn add(self, rhs: Codeword) -> Self::Output {
        Codeword::new(self.0 ^ rhs.0)
    }
}

// In characteristic 2, subtraction coincides with addition.
impl std::ops::Sub for Codeword {
    type Output = Codeword;

    fn sub(self, rhs: Codeword) -> Self::Output {
        self + rhs
    }
}

// Equality compares the raw codeword representation directly.
impl std::cmp::PartialEq for Codeword {
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}

impl std::cmp::Eq for Codeword {}

// Codewords are ordered by their power of α, with the zero codeword (which
// has no power) below every nonzero element.
impl std::cmp::PartialOrd for Codeword {
    fn partial_cmp(&self, rhs: &Self) -> Option<std::cmp::Ordering> {
        use std::cmp::Ordering::*;

        match (self.power(), rhs.power()) {
            (Some(p), Some(q)) => Some(p.cmp(&q)),
            (Some(_), None) => Some(Greater),
            (None, Some(_)) => Some(Less),
            (None, None) => Some(Equal),
        }
    }
}

// Total order: partial_cmp above never returns `None`, so the unwrap is
// safe.
impl std::cmp::Ord for Codeword {
    fn cmp(&self, rhs: &Self) -> std::cmp::Ordering {
        self.partial_cmp(rhs).unwrap()
    }
}

// A polynomial over the codeword field, stored densely in ascending-degree
// order with a logical start offset so division by x is O(1).
#[derive(Copy, Clone)]
struct Polynomial {
    /// Coefficients of the polynomial.
    coefs: [Codeword; SYNDROMES + 2],

    /// Index into `coefs` of the degree-0 coefficient.
start: usize,
}

impl Polynomial {
    // Builds a polynomial from coefficients given in ascending-degree order;
    // coefficients beyond those supplied stay zero.
    pub fn new<T: Iterator<Item = Codeword>>(coefs: T) -> Polynomial {
        let mut c = [Codeword::default(); SYNDROMES + 2];

        for (i, coef) in coefs.enumerate() {
            c[i] = c[i] + coef;
        }

        Polynomial {
            coefs: c,
            start: 0,
        }
    }

    // The degree-0 (constant) coefficient.
    pub fn constant(&self) -> Codeword {
        self.coefs[self.start]
    }

    // Coefficients in ascending-degree order, starting at degree 0.
    pub fn coefs(&self) -> &[Codeword] {
        &self.coefs[self.start..]
    }

    // Degree of the highest nonzero coefficient, or `None` for the zero
    // polynomial. Entries below `start` are zeroed by `shift`, so the first
    // nonzero entry found is always at index >= `start`.
    pub fn degree(&self) -> Option<usize> {
        for (deg, coef) in self.coefs.iter().enumerate().rev() {
            if !coef.zero() {
                return Some(deg - self.start);
            }
        }

        None
    }

    // Divides by x: zeroes the constant term and advances `start` so every
    // remaining coefficient drops one degree, without moving any data.
    pub fn shift(mut self) -> Polynomial {
        self.coefs[self.start] = Codeword::default();
        self.start += 1;
        self
    }

    // Raw (absolute-index) coefficient access; out-of-range reads as zero.
    fn get(&self, idx: usize) -> Codeword {
        match self.coefs.get(idx) {
            Some(&c) => c,
            None => Codeword::default(),
        }
    }

    // Coefficient of the x^deg term, relative to `start`.
    pub fn coef(&self, deg: usize) -> Codeword {
        self.get(self.start + deg)
    }
}

impl std::fmt::Display for Polynomial {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
        for (i, coef) in self.coefs().iter().enumerate() {
            match coef.power() {
                Some(p) => try!(write!(fmt, "a^{}*x^{} + ", p, i)),
                None => {},
            }
        }

        try!(write!(fmt, "0"));

        Ok(())
    }
}

// Coefficient-wise sum: reads both operands relative to their `start`
// offsets (via `coef`), writes the result at absolute positions, and
// renormalizes `start` to 0. Reads at `start + i` always happen before the
// loop overwrites that slot, so no temporary copy is needed.
impl std::ops::Add for Polynomial {
    type Output = Polynomial;

    fn add(mut self, rhs: Polynomial) -> Self::Output {
        for i in 0..self.coefs.len() {
            self.coefs[i] = self.coef(i) + rhs.coef(i);
        }

        self.start = 0;
        self
    }
}

// Scalar multiplication: scales every coefficient by `rhs`.
impl std::ops::Mul<Codeword> for Polynomial {
    type Output = Polynomial;

    fn mul(mut self, rhs: Codeword) -> Self::Output {
        for coef in self.coefs.iter_mut() {
            *coef = *coef * rhs;
        }

        self
    }
}

// State for the iterative (Berlekamp-Massey style) computation of the error
// locator polynomial from the syndromes. `q_*` hold the discrepancy
// polynomials, `p_*` the locator candidates, and `deg_*` degree bounds used
// to decide when the current pair should replace the saved pair.
// NOTE(review): variable roles inferred from the update rules in
// `step`/`reduce`/`transform` below — confirm against the referenced BM
// lecture notes before relying on this description.
struct BCHDecoder {
    p_cur: Polynomial,
    p_saved: Polynomial,
    q_cur: Polynomial,
    q_saved: Polynomial,
    deg_saved: usize,
    deg_cur: usize,
}

impl BCHDecoder {
    // Initializes with q(x) = 1 + s_1 x + s_2 x^2 + ... built from the
    // syndromes, and p(x) = x^(SYNDROMES+1); the "current" pair starts as
    // the shifted (divided-by-x) copies of the saved pair.
    pub fn new<T: Iterator<Item = Codeword>>(syndromes: T) -> BCHDecoder {
        let q = Polynomial::new(std::iter::once(Codeword::for_power(0))
                                    .chain(syndromes.into_iter()));
        let p = Polynomial::new((0..SYNDROMES+1).map(|_| Codeword::default())
                                    .chain(std::iter::once(Codeword::for_power(0))));

        BCHDecoder {
            q_saved: q,
            q_cur: q.shift(),
            p_saved: p,
            p_cur: p.shift(),
            deg_saved: 0,
            deg_cur: 1,
        }
    }

    // Runs one iteration per syndrome and returns the resulting error
    // locator polynomial.
    pub fn decode(mut self) -> Polynomial {
        for _ in 0..SYNDROMES {
            self.step();
        }

        self.p_cur
    }

    // One iteration: pick the update rule based on whether the current
    // discrepancy (constant term of q_cur) is zero, optionally saving the
    // current pair before replacing it.
    fn step(&mut self) {
        let (save, q, p, d) = if self.q_cur.constant().zero() {
            self.reduce()
        } else {
            self.transform()
        };

        if save {
            self.q_saved = self.q_cur;
            self.p_saved = self.p_cur;
            self.deg_saved = self.deg_cur;
        }

        self.q_cur = q;
        self.p_cur = p;
        self.deg_cur = d;
    }

    // Zero discrepancy: just divide both current polynomials by x and grow
    // the degree bound; nothing worth saving.
    fn reduce(&mut self) -> (bool, Polynomial, Polynomial, usize) {
        (
            false,
            self.q_cur.shift(),
            self.p_cur.shift(),
            2 + self.deg_cur,
        )
    }

    // Nonzero discrepancy: cancel the constant term of q_cur against the
    // saved pair (scaled by the ratio of constants), then divide by x. The
    // current pair is saved when its degree bound has caught up with the
    // saved one.
    fn transform(&mut self) -> (bool, Polynomial, Polynomial, usize) {
        let mult = self.q_cur.constant() / self.q_saved.constant();

        (
            self.deg_cur >= self.deg_saved,
            (self.q_cur + self.q_saved * mult).shift(),
            (self.p_cur + self.p_saved * mult).shift(),
            2 + std::cmp::min(self.deg_cur, self.deg_saved),
        )
    }
}

// Chien search over the error locator polynomial: iterates through all
// field elements, yielding the bit position of each error found.
struct ErrorLocations {
    // Running per-coefficient terms of the search; term j starts at
    // Λ_j / α^j and is multiplied by α^j on every probe (see `new` and
    // `update_terms` below).
    terms: Vec<Codeword>,
    pow: std::ops::Range<usize>,
}

impl ErrorLocations {
    // Λ(x) = coefs[0] + coefs[1]*x + coefs[2]*x^2 + ...
pub fn new<T: Iterator<Item = Codeword>>(coefs: T) -> ErrorLocations { ErrorLocations { terms: coefs.enumerate().map(|(p, c)| { c / Codeword::for_power(p) }).collect(), pow: 0..POWERS.len(), } } fn update_terms(&mut self) { for (j, term) in self.terms.iter_mut().enumerate() { *term = *term * Codeword::for_power(j); } } fn sum_terms(&self) -> Codeword { self.terms.iter().fold(Codeword::default(), |s, &x| { s + x }) } } impl Iterator for ErrorLocations { type Item = usize; fn next(&mut self) -> Option<Self::Item> { loop { let pow = match self.pow.next() { Some(pow) => pow, None => return None, }; self.update_terms(); if self.sum_terms().zero() { return Some(Codeword::for_power(pow).invert().power().unwrap()); } } } } #[cfg(test)] mod test { use super::{encode, Syndromes, Codeword, Polynomial, decode}; #[test] fn test_for_power() { assert_eq!(Codeword::for_power(0).0, 0b100000); assert_eq!(Codeword::for_power(62).0, 0b100001); assert_eq!(Codeword::for_power(63).0, 0b100000); } #[test] fn test_add_sub() { assert_eq!((Codeword::new(0b100000) + Codeword::new(0b010000)).0, 0b110000); assert_eq!((Codeword::new(0b100000) - Codeword::new(0b010000)).0, 0b110000); assert_eq!((Codeword::new(0b100001) + Codeword::new(0b100001)).0, 0b000000); assert_eq!((Codeword::new(0b100001) - Codeword::new(0b100001)).0, 0b000000); assert_eq!((Codeword::new(0b100001) + Codeword::new(0b110100)).0, 0b010101); assert_eq!((Codeword::new(0b100001) - Codeword::new(0b110100)).0, 0b010101); } #[test] fn test_mul() { assert_eq!((Codeword::new(0b011000) * Codeword::new(0b101000)).0, 0b011110); assert_eq!((Codeword::new(0b000000) * Codeword::new(0b101000)).0, 0b000000); assert_eq!((Codeword::new(0b011000) * Codeword::new(0b000000)).0, 0b000000); assert_eq!((Codeword::new(0b000000) * Codeword::new(0b000000)).0, 0b000000); assert_eq!((Codeword::new(0b100001) * Codeword::new(0b100000)).0, 0b100001); assert_eq!((Codeword::new(0b100001) * Codeword::new(0b010000)).0, 0b100000); 
assert_eq!((Codeword::new(0b110011) * Codeword::new(0b110011)).0, 0b100111); assert_eq!((Codeword::new(0b111101) * Codeword::new(0b111101)).0, 0b011001); } #[test] fn test_div() { assert_eq!((Codeword::new(0b000100) / Codeword::new(0b101000)).0, 0b111010); assert_eq!((Codeword::new(0b000000) / Codeword::new(0b101000)).0, 0b000000); assert_eq!((Codeword::new(0b011110) / Codeword::new(0b100000)).0, 0b011110); assert_eq!((Codeword::new(0b011110) / Codeword::new(0b011110)).0, 0b100000); } #[test] fn test_cmp() { assert!(Codeword::new(0b100000) > Codeword::new(0b000000)); assert!(Codeword::new(0b000000) == Codeword::new(0b000000)); assert!(Codeword::new(0b010000) > Codeword::new(0b100000)); assert!(Codeword::new(0b100001) > Codeword::new(0b100000)); } #[test] fn test_encode() { assert_eq!(encode(0b1111111100000000), 0b1111111100000000100100110001000011000010001100000110100001101000); assert_eq!(encode(0b0011)&1, 0); assert_eq!(encode(0b0101)&1, 1); assert_eq!(encode(0b1010)&1, 1); assert_eq!(encode(0b1100)&1, 0); assert_eq!(encode(0b1111)&1, 0); } #[test] fn test_syndromes() { let w = encode(0b1111111100000000)>>1; assert!(Syndromes::new(w).all(|s| s.zero())); assert!(!Syndromes::new(w ^ 1<<60).all(|s| s.zero())); } #[test] fn test_polynomial() { let p = Polynomial::new((0..23).map(|i| { Codeword::for_power(i) })); assert!(p.degree().unwrap() == 22); assert!(p.constant() == Codeword::for_power(0)); let p = p.shift(); assert!(p.degree().unwrap() == 21); assert!(p.constant() == Codeword::for_power(1)); let q = p.clone() * Codeword::for_power(0); assert!(q.degree().unwrap() == 21); assert!(q.constant() == Codeword::for_power(1)); let q = p.clone() * Codeword::for_power(2); assert!(q.degree().unwrap() == 21); assert!(q.constant() == Codeword::for_power(3)); let q = p.clone() + p.clone(); assert!(q.constant().zero()); for coef in q.coefs() { assert!(coef.zero()); } let p = Polynomial::new((4..27).map(|i| { Codeword::for_power(i) })); let q = Polynomial::new((3..26).map(|i| { 
Codeword::for_power(i) })); let r = p + q.shift(); assert!(r.coefs[0].zero()); assert!(r.coefs[1].zero()); assert!(r.coefs[2].zero()); assert!(r.coefs[3].zero()); assert!(r.coefs[4].zero()); assert!(!r.coefs[22].zero()); let p = Polynomial::new((0..2).map(|_| { Codeword::for_power(0) })); let q = Polynomial::new((0..4).map(|_| { Codeword::for_power(1) })); let r = p + q; assert!(r.coef(0) == Codeword::for_power(6)); } #[test] fn test_decode() { let w = encode(0b1111111100000000)>>1 ^ 0b11010011<<30; let d = decode(w); println!("{:?}", d); match d { Some((9187424089929167924, 5)) => {}, _ => panic!(), } } } Move only public exports to top of file use std; pub fn encode(word: u16) -> u64 { GEN.iter().fold(0, |accum, row| { accum << 1 | ((word & row).count_ones() % 2) as u64 }) } pub fn decode(word: u64) -> Option<(u64, usize)> { let poly = BCHDecoder::new(Syndromes::new(word)).decode(); let errors = match poly.degree() { Some(deg) => deg, None => return None, }; // Even if there are more errors, the BM algorithm produces a polynomial with degree // no greater than ERRORS. assert!(errors <= ERRORS); let locs = ErrorLocations::new(poly.coefs().iter().cloned()); let (word, count) = locs.take(errors).fold((word, 0), |(word, s), loc| { (word ^ 1 << loc, s + 1) }); // "If the Chien Search fails to find v roots of a error locator polynomial of degree // v, then the error pattern is an uncorrectable error pattern" -- Lecture 17: // Berlekamp-Massey Algorithm for Binary BCH Codes if count == errors { Some((word, errors)) } else { None } } const WORD_SIZE: usize = 63; const DISTANCE: usize = 23; // 2t+1 = 23 => t = 11 const ERRORS: usize = 11; const SYNDROMES: usize = 2 * ERRORS; // Maps α^i to codewords. 
// Antilog table for GF(2^6): CODEWORDS[i] is the 6-bit field element α^i.
const CODEWORDS: &'static [u8] = &[
    0b100000, 0b010000, 0b001000, 0b000100, 0b000010, 0b000001, 0b110000, 0b011000,
    0b001100, 0b000110, 0b000011, 0b110001, 0b101000, 0b010100, 0b001010, 0b000101,
    0b110010, 0b011001, 0b111100, 0b011110, 0b001111, 0b110111, 0b101011, 0b100101,
    0b100010, 0b010001, 0b111000, 0b011100, 0b001110, 0b000111, 0b110011, 0b101001,
    0b100100, 0b010010, 0b001001, 0b110100, 0b011010, 0b001101, 0b110110, 0b011011,
    0b111101, 0b101110, 0b010111, 0b111011, 0b101101, 0b100110, 0b010011, 0b111001,
    0b101100, 0b010110, 0b001011, 0b110101, 0b101010, 0b010101, 0b111010, 0b011101,
    0b111110, 0b011111, 0b111111, 0b101111, 0b100111, 0b100011, 0b100001
];

// Maps codewords to α^i.
// Log table: POWERS[c - 1] is the exponent i such that α^i has bit pattern c
// (the zero element has no logarithm and is handled separately in `power()`).
const POWERS: &'static [usize] = &[
    5, 4, 10, 3, 15, 9, 29, 2,
    34, 14, 50, 8, 37, 28, 20, 1,
    25, 33, 46, 13, 53, 49, 42, 7,
    17, 36, 39, 27, 55, 19, 57, 0,
    62, 24, 61, 32, 23, 45, 60, 12,
    31, 52, 22, 48, 44, 41, 59, 6,
    11, 16, 30, 35, 51, 38, 21, 26,
    47, 54, 43, 18, 40, 56, 58,
];

// Generator matrix for the code: row r (as a 16-bit mask over the data word)
// produces output bit r of the 64-bit encoded word (see `encode`). The first
// 16 rows are the identity, so the code is systematic; the last row is the
// even-parity bit.
const GEN: &'static [u16] = &[
    0b1000000000000000,
    0b0100000000000000,
    0b0010000000000000,
    0b0001000000000000,
    0b0000100000000000,
    0b0000010000000000,
    0b0000001000000000,
    0b0000000100000000,
    0b0000000010000000,
    0b0000000001000000,
    0b0000000000100000,
    0b0000000000010000,
    0b0000000000001000,
    0b0000000000000100,
    0b0000000000000010,
    0b0000000000000001,
    0b1110110001000111,
    0b1001101001100100,
    0b0100110100110010,
    0b0010011010011001,
    0b1111111100001011,
    0b1001001111000010,
    0b0100100111100001,
    0b1100100010110111,
    0b1000100000011100,
    0b0100010000001110,
    0b0010001000000111,
    0b1111110101000100,
    0b0111111010100010,
    0b0011111101010001,
    0b1111001111101111,
    0b1001010110110000,
    0b0100101011011000,
    0b0010010101101100,
    0b0001001010110110,
    0b0000100101011011,
    0b1110100011101010,
    0b0111010001110101,
    0b1101011001111101,
    0b1000011101111001,
    0b1010111111111011,
    0b1011101110111010,
    0b0101110111011101,
    0b1100001010101001,
    0b1000110100010011,
    0b1010101011001110,
    0b0101010101100111,
    0b1100011011110100,
    0b0110001101111010,
    0b0011000110111101,
    0b1111010010011001,
    0b1001011000001011,
    0b1010011101000010,
    0b0101001110100001,
    0b1100010110010111,
    0b1000111010001100,
    0b0100011101000110,
    0b0010001110100011,
    0b1111110110010110,
    0b0111111011001011,
    0b1101001100100010,
    0b0110100110010001,
    0b1101100010001111,
    0b0000000000000011,
];

// Lazily yields the 2t syndromes s_1..s_22 of a received word. Syndrome s_p is
// the received polynomial evaluated at α^p: the sum of α^(b*p) over set bits b.
struct Syndromes {
    // Remaining syndrome powers still to be produced.
    pow: std::ops::Range<usize>,
    // The received 63-bit word whose syndromes are computed.
    word: u64,
}

impl Syndromes {
    pub fn new(word: u64) -> Syndromes {
        Syndromes {
            pow: 1..DISTANCE,
            word: word,
        }
    }
}

impl Iterator for Syndromes {
    type Item = Codeword;

    fn next(&mut self) -> Option<Self::Item> {
        match self.pow.next() {
            // Sum α^(b*pow) over every set bit position b of the word.
            Some(pow) => Some((0..WORD_SIZE).fold(Codeword::default(), |s, b| {
                if self.word >> b & 1 == 0 {
                    s
                } else {
                    s + Codeword::for_power(b * pow)
                }
            })),
            None => None,
        }
    }
}

// A single GF(2^6) field element, stored as its 6-bit pattern (0 = the zero
// element). Arithmetic is done through the log/antilog tables above.
#[derive(Copy, Clone)]
struct Codeword(u8);

impl std::fmt::Debug for Codeword {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(fmt, "Codeword({:?})", self.power())
    }
}

impl Codeword {
    pub fn new(codeword: u8) -> Codeword {
        Codeword(codeword)
    }

    // Whether this is the additive identity (no discrete log exists).
    pub fn zero(&self) -> bool {
        self.0 == 0
    }

    // Discrete log: the i with self = α^i, or None for the zero element.
    pub fn power(&self) -> Option<usize> {
        if self.zero() {
            None
        } else {
            Some(POWERS[self.0 as usize - 1])
        }
    }

    // Antilog: α^power, with the exponent reduced mod 63 (the group order).
    pub fn for_power(power: usize) -> Codeword {
        Codeword::new(CODEWORDS[power % POWERS.len()])
    }

    // Multiplicative inverse; panics on the zero element.
    pub fn invert(self) -> Codeword {
        match self.power() {
            Some(p) => Codeword::for_power(POWERS.len() - p),
            None => panic!("divide by zero"),
        }
    }
}

impl Default for Codeword {
    fn default() -> Self {
        Codeword::new(0)
    }
}

impl std::ops::Mul for Codeword {
    type Output = Codeword;

    // α^p * α^q = α^(p+q); anything times zero is zero.
    fn mul(self, rhs: Codeword) -> Self::Output {
        match (self.power(), rhs.power()) {
            (Some(p), Some(q)) => Codeword::for_power(p + q),
            _ => Codeword::default(),
        }
    }
}

impl std::ops::Div for Codeword {
    type Output = Codeword;

    fn div(self, rhs: Codeword) -> Self::Output {
        match (self.power(), rhs.power()) {
            // min(power) = -62 => 63+min(power) > 0
            (Some(p), Some(q)) => Codeword::for_power(p + POWERS.len() - q),
            (None, Some(_)) => Codeword::default(),
            (_,
None) => panic!("divide by zero"),
        }
    }
}

impl std::ops::Add for Codeword {
    type Output = Codeword;

    // Addition in GF(2^m) is bitwise XOR of the representations.
    fn add(self, rhs: Codeword) -> Self::Output {
        Codeword::new(self.0 ^ rhs.0)
    }
}

impl std::ops::Sub for Codeword {
    type Output = Codeword;

    // In characteristic 2, subtraction is the same as addition.
    fn sub(self, rhs: Codeword) -> Self::Output {
        self + rhs
    }
}

impl std::cmp::PartialEq for Codeword {
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}

impl std::cmp::Eq for Codeword {}

impl std::cmp::PartialOrd for Codeword {
    // Ordering by discrete log (α^i), with zero below every nonzero element.
    fn partial_cmp(&self, rhs: &Self) -> Option<std::cmp::Ordering> {
        use std::cmp::Ordering::*;

        match (self.power(), rhs.power()) {
            (Some(p), Some(q)) => Some(p.cmp(&q)),
            (Some(_), None) => Some(Greater),
            (None, Some(_)) => Some(Less),
            (None, None) => Some(Equal),
        }
    }
}

impl std::cmp::Ord for Codeword {
    fn cmp(&self, rhs: &Self) -> std::cmp::Ordering {
        self.partial_cmp(rhs).unwrap()
    }
}

// Fixed-capacity polynomial over GF(2^6), sized for the BM iteration.
#[derive(Copy, Clone)]
struct Polynomial {
    /// Coefficients of the polynomial.
    coefs: [Codeword; SYNDROMES + 2],
    /// Index into `coefs` of the degree-0 coefficient.
    start: usize,
}

impl Polynomial {
    // Builds a polynomial from coefficients in ascending degree order; any
    // positions not supplied remain zero.
    pub fn new<T: Iterator<Item = Codeword>>(coefs: T) -> Polynomial {
        let mut c = [Codeword::default(); SYNDROMES + 2];

        for (i, coef) in coefs.enumerate() {
            c[i] = c[i] + coef;
        }

        Polynomial {
            coefs: c,
            start: 0,
        }
    }

    // The degree-0 (constant) coefficient.
    pub fn constant(&self) -> Codeword {
        self.coefs[self.start]
    }

    // Coefficients from degree 0 upward.
    pub fn coefs(&self) -> &[Codeword] {
        &self.coefs[self.start..]
    }

    // Degree of the highest nonzero coefficient, or None for the zero polynomial.
    pub fn degree(&self) -> Option<usize> {
        for (deg, coef) in self.coefs.iter().enumerate().rev() {
            if !coef.zero() {
                return Some(deg - self.start);
            }
        }

        None
    }

    // Divides by x: zeroes the constant term and moves the origin up one slot.
    pub fn shift(mut self) -> Polynomial {
        self.coefs[self.start] = Codeword::default();
        self.start += 1;
        self
    }

    // Raw storage access with zero past the end.
    fn get(&self, idx: usize) -> Codeword {
        match self.coefs.get(idx) {
            Some(&c) => c,
            None => Codeword::default(),
        }
    }

    // Coefficient of x^deg (zero if out of range).
    pub fn coef(&self, deg: usize) -> Codeword {
        self.get(self.start + deg)
    }
}

impl std::fmt::Display for Polynomial {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
        for (i, coef) in self.coefs().iter().enumerate() {
            match coef.power() {
                Some(p) => try!(write!(fmt, "a^{}*x^{} + ", p, i)),
                None => {},
            }
        }

        try!(write!(fmt, "0"));

        Ok(())
    }
}

impl std::ops::Add for Polynomial {
    type Output = Polynomial;

    // Coefficient-wise sum; note the result is re-anchored at start = 0, which
    // aligns both operands by degree regardless of their shifts.
    fn add(mut self, rhs: Polynomial) -> Self::Output {
        for i in 0..self.coefs.len() {
            self.coefs[i] = self.coef(i) + rhs.coef(i);
        }

        self.start = 0;

        self
    }
}

impl std::ops::Mul<Codeword> for Polynomial {
    type Output = Polynomial;

    // Scalar multiplication of every coefficient.
    fn mul(mut self, rhs: Codeword) -> Self::Output {
        for coef in self.coefs.iter_mut() {
            *coef = *coef * rhs;
        }

        self
    }
}

// Berlekamp-Massey state: current/saved error-locator (p) and discrepancy (q)
// polynomials with their tracked degrees.
struct BCHDecoder {
    p_cur: Polynomial,
    p_saved: Polynomial,
    q_cur: Polynomial,
    q_saved: Polynomial,
    deg_saved: usize,
    deg_cur: usize,
}

impl BCHDecoder {
    pub fn new<T: Iterator<Item = Codeword>>(syndromes: T) -> BCHDecoder {
        // q starts as 1 + s_1*x + s_2*x^2 + ...; p starts as x^(2t+1).
        let q = Polynomial::new(std::iter::once(Codeword::for_power(0))
                                    .chain(syndromes.into_iter()));
        let p = Polynomial::new((0..SYNDROMES+1).map(|_| Codeword::default())
                                    .chain(std::iter::once(Codeword::for_power(0))));

        BCHDecoder {
            q_saved: q,
            q_cur: q.shift(),
            p_saved: p,
            p_cur: p.shift(),
            deg_saved: 0,
            deg_cur: 1,
        }
    }

    // Runs the full 2t iterations and yields the error-locator polynomial.
    pub fn decode(mut self) -> Polynomial {
        for _ in 0..SYNDROMES {
            self.step();
        }

        self.p_cur
    }

    // One BM iteration: reduce when the current discrepancy constant is zero,
    // otherwise transform; optionally save the current state.
    fn step(&mut self) {
        let (save, q, p, d) = if self.q_cur.constant().zero() {
            self.reduce()
        } else {
            self.transform()
        };

        if save {
            self.q_saved = self.q_cur;
            self.p_saved = self.p_cur;
            self.deg_saved =
self.deg_cur;
        }

        self.q_cur = q;
        self.p_cur = p;
        self.deg_cur = d;
    }

    // Zero-discrepancy case: shift both polynomials and bump the degree bound.
    fn reduce(&mut self) -> (bool, Polynomial, Polynomial, usize) {
        (
            false,
            self.q_cur.shift(),
            self.p_cur.shift(),
            2 + self.deg_cur,
        )
    }

    // Nonzero-discrepancy case: cancel the constant term against the saved
    // state; save the current state when its degree has caught up.
    fn transform(&mut self) -> (bool, Polynomial, Polynomial, usize) {
        let mult = self.q_cur.constant() / self.q_saved.constant();

        (
            self.deg_cur >= self.deg_saved,
            (self.q_cur + self.q_saved * mult).shift(),
            (self.p_cur + self.p_saved * mult).shift(),
            2 + std::cmp::min(self.deg_cur, self.deg_saved),
        )
    }
}

// Chien search: iterates over the bit positions whose corresponding field
// element is a root of the error-locator polynomial.
struct ErrorLocations {
    // Per-degree terms Λ_j / α^j, updated multiplicatively each step.
    terms: Vec<Codeword>,
    // Remaining candidate powers of α to test.
    pow: std::ops::Range<usize>,
}

impl ErrorLocations {
    // Λ(x) = coefs[0] + coefs[1]*x + coefs[2]*x^2 + ...
    pub fn new<T: Iterator<Item = Codeword>>(coefs: T) -> ErrorLocations {
        ErrorLocations {
            terms: coefs.enumerate().map(|(p, c)| {
                c / Codeword::for_power(p)
            }).collect(),
            pow: 0..POWERS.len(),
        }
    }

    // Multiplies term j by α^j, advancing the evaluation point by one power.
    fn update_terms(&mut self) {
        for (j, term) in self.terms.iter_mut().enumerate() {
            *term = *term * Codeword::for_power(j);
        }
    }

    // Λ evaluated at the current power (sum of all terms).
    fn sum_terms(&self) -> Codeword {
        self.terms.iter().fold(Codeword::default(), |s, &x| {
            s + x
        })
    }
}

impl Iterator for ErrorLocations {
    type Item = usize;

    fn next(&mut self) -> Option<Self::Item> {
        loop {
            let pow = match self.pow.next() {
                Some(pow) => pow,
                None => return None,
            };

            self.update_terms();

            if self.sum_terms().zero() {
                // α^pow is a root; the error location is the inverse's power.
                return Some(Codeword::for_power(pow).invert().power().unwrap());
            }
        }
    }
}

#[cfg(test)]
mod test {
    use super::{encode, Syndromes, Codeword, Polynomial, decode};

    #[test]
    fn test_for_power() {
        assert_eq!(Codeword::for_power(0).0, 0b100000);
        assert_eq!(Codeword::for_power(62).0, 0b100001);
        assert_eq!(Codeword::for_power(63).0, 0b100000);
    }

    #[test]
    fn test_add_sub() {
        assert_eq!((Codeword::new(0b100000) + Codeword::new(0b010000)).0, 0b110000);
        assert_eq!((Codeword::new(0b100000) - Codeword::new(0b010000)).0, 0b110000);
        assert_eq!((Codeword::new(0b100001) + Codeword::new(0b100001)).0, 0b000000);
        assert_eq!((Codeword::new(0b100001) - Codeword::new(0b100001)).0, 0b000000);
        assert_eq!((Codeword::new(0b100001) + Codeword::new(0b110100)).0, 0b010101);
        assert_eq!((Codeword::new(0b100001) - Codeword::new(0b110100)).0, 0b010101);
    }

    #[test]
    fn test_mul() {
        assert_eq!((Codeword::new(0b011000) * Codeword::new(0b101000)).0, 0b011110);
        assert_eq!((Codeword::new(0b000000) * Codeword::new(0b101000)).0, 0b000000);
        assert_eq!((Codeword::new(0b011000) * Codeword::new(0b000000)).0, 0b000000);
        assert_eq!((Codeword::new(0b000000) * Codeword::new(0b000000)).0, 0b000000);
        assert_eq!((Codeword::new(0b100001) * Codeword::new(0b100000)).0, 0b100001);
        assert_eq!((Codeword::new(0b100001) * Codeword::new(0b010000)).0, 0b100000);
        assert_eq!((Codeword::new(0b110011) * Codeword::new(0b110011)).0, 0b100111);
        assert_eq!((Codeword::new(0b111101) * Codeword::new(0b111101)).0, 0b011001);
    }

    #[test]
    fn test_div() {
        assert_eq!((Codeword::new(0b000100) / Codeword::new(0b101000)).0, 0b111010);
        assert_eq!((Codeword::new(0b000000) / Codeword::new(0b101000)).0, 0b000000);
        assert_eq!((Codeword::new(0b011110) / Codeword::new(0b100000)).0, 0b011110);
        assert_eq!((Codeword::new(0b011110) / Codeword::new(0b011110)).0, 0b100000);
    }

    #[test]
    fn test_cmp() {
        assert!(Codeword::new(0b100000) > Codeword::new(0b000000));
        assert!(Codeword::new(0b000000) == Codeword::new(0b000000));
        assert!(Codeword::new(0b010000) > Codeword::new(0b100000));
        assert!(Codeword::new(0b100001) > Codeword::new(0b100000));
    }

    #[test]
    fn test_encode() {
        assert_eq!(encode(0b1111111100000000),
            0b1111111100000000100100110001000011000010001100000110100001101000);
        assert_eq!(encode(0b0011)&1, 0);
        assert_eq!(encode(0b0101)&1, 1);
        assert_eq!(encode(0b1010)&1, 1);
        assert_eq!(encode(0b1100)&1, 0);
        assert_eq!(encode(0b1111)&1, 0);
    }

    #[test]
    fn test_syndromes() {
        let w = encode(0b1111111100000000)>>1;
        assert!(Syndromes::new(w).all(|s| s.zero()));
        assert!(!Syndromes::new(w ^ 1<<60).all(|s| s.zero()));
    }

    #[test]
    fn test_polynomial() {
        let p = Polynomial::new((0..23).map(|i| { Codeword::for_power(i) }));
        assert!(p.degree().unwrap() == 22);
        assert!(p.constant() == Codeword::for_power(0));

        let p = p.shift();
        assert!(p.degree().unwrap() == 21);
        assert!(p.constant() == Codeword::for_power(1));

        let q = p.clone() * Codeword::for_power(0);
        assert!(q.degree().unwrap() == 21);
        assert!(q.constant() == Codeword::for_power(1));

        let q = p.clone() * Codeword::for_power(2);
        assert!(q.degree().unwrap() == 21);
        assert!(q.constant() == Codeword::for_power(3));

        let q = p.clone() + p.clone();
        assert!(q.constant().zero());
        for coef in q.coefs() {
            assert!(coef.zero());
        }

        let p = Polynomial::new((4..27).map(|i| { Codeword::for_power(i) }));
        let q = Polynomial::new((3..26).map(|i| { Codeword::for_power(i) }));
        let r = p + q.shift();
        assert!(r.coefs[0].zero());
        assert!(r.coefs[1].zero());
        assert!(r.coefs[2].zero());
        assert!(r.coefs[3].zero());
        assert!(r.coefs[4].zero());
        assert!(!r.coefs[22].zero());

        let p = Polynomial::new((0..2).map(|_| { Codeword::for_power(0) }));
        let q = Polynomial::new((0..4).map(|_| { Codeword::for_power(1) }));
        let r = p + q;
        assert!(r.coef(0) == Codeword::for_power(6));
    }

    #[test]
    fn test_decode() {
        let w = encode(0b1111111100000000)>>1 ^ 0b11010011<<30;
        let d = decode(w);
        println!("{:?}", d);

        match d {
            Some((9187424089929167924, 5)) => {},
            _ => panic!(),
        }
    }
}
#![allow(non_snake_case)]

use std::{
    io::{BufRead, BufReader, Read},
    ops::{Range, RangeFrom, RangeFull},
    vec,
};

use nom::{
    branch::alt,
    bytes::streaming::{escaped, is_not, tag, take_while, take_while1},
    character::{
        complete::{alphanumeric1 as alphanumeric, char, multispace1, none_of, one_of},
        is_digit, is_hex_digit,
        streaming::{alpha1, alphanumeric1, digit1, multispace0},
    },
    combinator::{cut, map, map_res, opt, recognize, value},
    error::{
        context, convert_error, dbg_dmp, ContextError, Error, ErrorKind, ParseError, VerboseError,
    },
    error_position,
    multi::{many0, many1, separated_list0},
    number::complete::double,
    sequence::{delimited, pair, preceded, separated_pair, terminated, tuple},
    AsChar, Err, IResult, InputIter, InputLength, Needed, Offset, Parser, Slice,
};

// Byte-length lookup for a UTF-8 sequence keyed by its first byte; 0 marks an
// invalid leading byte (continuation bytes and overlong/invalid prefixes).
// https://tools.ietf.org/html/rfc3629
static UTF8_CHAR_WIDTH: [u8; 256] = [
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, /* 0x1F */
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, /* 0x3F */
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, /* 0x5F */
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, /* 0x7F */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, /* 0x9F */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, /* 0xBF */
    0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, /* 0xDF */
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, /* 0xEF */
    4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xFF */
];

/// Given a first byte, determine how many bytes are in this UTF-8 character
#[inline]
fn utf8_char_width(b: u8) -> usize {
    return UTF8_CHAR_WIDTH[b as usize] as usize;
}

// enum CharResult {
//     Char(char, usize),
//     Err,
//     Eof,
// }

// #[inline]
// fn parse_char(input: &[u8]) -> CharResult {
//     if input.len() == 0 {
//         return CharResult::Eof;
//     }
//     let width = utf8_char_width(input[0]);
//     if input.len() < width {
//         return CharResult::Eof;
//     }
//     match std::str::from_utf8(&input[..width]).ok() {
//         Some(s) => CharResult::Char(s.chars().next().unwrap(), width),
//         None => CharResult::Err,
//     }
// }

// [^<&]
// none_of("<&")

// CdataEnd
// tag("]]>")

// [2] Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
// True when `chr` is a legal XML 1.0 `Char` per production [2] above.
#[inline]
pub fn is_xml_char_t(chr: char) -> bool {
    chr == '\u{9}'
        || (chr >= '\u{A}' && chr <= '\u{D}')
        || (chr >= '\u{20}' && chr <= '\u{D7FF}')
        || (chr >= '\u{E000}' && chr <= '\u{FFFD}')
        || (chr >= '\u{10000}' && chr <= '\u{10FFFF}')
}

// [4] NameStartChar ::= ":" | [A-Z] | "_" | [a-z] | [#xC0-#xD6] | [#xD8-#xF6] |
// [#xF8-#x2FF] | [#x370-#x37D] | [#x37F-#x1FFF] | [#x200C-#x200D] | [#x2070-#x218F] |
// [#x2C00-#x2FEF] | [#x3001-#xD7FF] | [#xF900-#xFDCF] | [#xFDF0-#xFFFD] | [#x10000-#xEFFFF]

// ('A', 'Z'), /* ('A', 'Z'), veya ('\u{0041}', '\u{005A}'), */
// ('a', 'z'), // ('a', 'z') veya ('\u{61}', '\u{7A}'),
// ('\u{C0}', '\u{D6}'),
// ('\u{D8}', '\u{F6}'),
// ('\u{F8}', '\u{2FF}'),
// ('\u{370}', '\u{37D}'),
// ('\u{37F}', '\u{1FFF}'),
// ('\u{200C}', '\u{200D}'),
// ('\u{2070}', '\u{218F}'),
// ('\u{2C00}', '\u{2FEF}'),
// ('\u{3001}', '\u{D7FF}'),
// ('\u{F900}', '\u{FDCF}'),
// ('\u{FDF0}', '\u{FFFD}'),
// ('\u{10000}', '\u{EFFFF}'),
// NameStartChar.expected_chars.push(':');
// NameStartChar.expected_chars.push('_');

// True when `chr` may begin an XML name (production [4] above).
#[inline]
pub fn is_namestart_char_t(chr: char) -> bool {
    (chr >= 'A' && chr <= 'Z')
        || (chr >= 'a' && chr <= 'z')
        || (chr >= '\u{C0}' && chr <= '\u{D6}')
        || (chr >= '\u{D8}' && chr <= '\u{F6}')
        || (chr >= '\u{F8}' && chr <= '\u{2FF}')
        || (chr >= '\u{370}' && chr <= '\u{37D}')
        || (chr >= '\u{37F}' && chr <= '\u{1FFF}')
        || (chr >= '\u{200C}' && chr <= '\u{200D}')
        || (chr >= '\u{2070}' && chr <= '\u{218F}')
        || (chr >= '\u{2C00}' && chr <= '\u{2FEF}')
        || (chr >= '\u{3001}' &&
chr <= '\u{D7FF}')
        || (chr >= '\u{F900}' && chr <= '\u{FDCF}')
        || (chr >= '\u{FDF0}' && chr <= '\u{FFFD}')
        || (chr >= '\u{10000}' && chr <= '\u{EFFFF}')
        || chr == ':'
        || chr == '_'
}

/// Consumes exactly one UTF-8 character that satisfies `is_namestart_char_t`,
/// returning it as a byte slice. Returns `Incomplete` when the input ends
/// inside a (possibly) multi-byte sequence, and an `ErrorKind::Char` error on
/// invalid UTF-8 or a non-NameStartChar.
pub fn namestart_char(input: &[u8]) -> IResult<&[u8], &[u8]> {
    if input.len() == 0 {
        return Err(Err::Incomplete(Needed::new(1)));
    }
    let width = utf8_char_width(input[0]);
    if input.len() < width {
        return Err(Err::Incomplete(Needed::new(width - input.len())));
    }
    let c = match std::str::from_utf8(&input[..width]).ok() {
        Some(s) => s.chars().next().unwrap(),
        None => return Err(Err::Error(Error::new(input, ErrorKind::Char))),
    };
    if is_namestart_char_t(c) {
        return Ok((&input[width..], &input[0..width]));
    } else {
        return Err(Err::Error(Error::new(input, ErrorKind::Char)));
    }
}

// [4a] NameChar ::= NameStartChar | "-" | "." | [0-9] | #xB7 | [#x0300-#x036F] | [#x203F-#x2040]
/// True when `chr` may appear inside an XML name (production [4a] above).
#[inline]
pub fn is_namechar_t(chr: char) -> bool {
    is_namestart_char_t(chr)
        || (chr >= '0' && chr <= '9')
        // FIX: the upper bound here was 'z' (U+007A), which is below U+0300 and
        // made this combining-character range unmatchable; the spec range in
        // production [4a] is #x0300-#x036F.
        || (chr >= '\u{0300}' && chr <= '\u{036F}')
        || (chr >= '\u{203F}' && chr <= '\u{2040}')
        || chr == '-'
        || chr == '.'
        || chr == '\u{B7}'
}

/// Consumes exactly one UTF-8 character that satisfies `is_namechar_t`; same
/// `Incomplete`/error behavior as `namestart_char`.
pub fn namechar(input: &[u8]) -> IResult<&[u8], &[u8]> {
    if input.len() == 0 {
        return Err(Err::Incomplete(Needed::new(1)));
    }
    let width = utf8_char_width(input[0]);
    if input.len() < width {
        return Err(Err::Incomplete(Needed::new(width - input.len())));
    }
    let c = match std::str::from_utf8(&input[..width]).ok() {
        Some(s) => s.chars().next().unwrap(),
        None => return Err(Err::Error(Error::new(input, ErrorKind::Char))),
    };
    if is_namechar_t(c) {
        return Ok((&input[width..], &input[0..width]));
    } else {
        return Err(Err::Error(Error::new(input, ErrorKind::Char)));
    }
}

/// Like nom's `many0`, but discards outputs and — deliberately — also stops
/// (returning success) on `Incomplete`, so a streaming sub-parser cannot stall
/// character data at a buffer boundary. See `ref#streamcut`.
pub fn many0_custom_chardata<I, O, E, F>(mut f: F) -> impl FnMut(I) -> IResult<I, (), E>
where
    I: Clone + InputLength,
    F: Parser<I, O, E>,
    E: ParseError<I>,
{
    move |mut i: I| {
        // let mut acc = crate::lib::std::vec::Vec::with_capacity(4);
        loop {
            let len = i.input_len();
            match f.parse(i.clone()) {
                Err(Err::Error(_)) => return Ok((i, ())),
                // Err(e) => return Err(e),
                // ref#streamcut
                Err(e) => return Ok((i, ())),
                Ok((i1, o)) => {
                    // infinite loop check: the parser must always consume
                    if i1.input_len() == len {
                        return Err(Err::Error(E::from_error_kind(i, ErrorKind::Many0)));
                    }
                    i = i1;
                    // acc.push(o);
                }
            }
        }
    }
}

/// Like nom's `many0`, but discards outputs and propagates `Incomplete`
/// (unlike `many0_custom_chardata`), so callers can request more input.
pub fn many0_custom_trycomplete<I, O, E, F>(mut f: F) -> impl FnMut(I) -> IResult<I, (), E>
where
    I: Clone + InputLength,
    F: Parser<I, O, E>,
    E: ParseError<I>,
{
    move |mut i: I| {
        // let mut acc = crate::lib::std::vec::Vec::with_capacity(4);
        loop {
            let len = i.input_len();
            match f.parse(i.clone()) {
                Err(Err::Error(_)) => return Ok((i, ())),
                Err(e) => return Err(e), //returns incomplete here
                // Err(e) => return Ok((i, ())),
                Ok((i1, o)) => {
                    // infinite loop check: the parser must always consume
                    if i1.input_len() == len {
                        return Err(Err::Error(E::from_error_kind(i, ErrorKind::Many0)));
                    }
                    i = i1;
                    // acc.push(o);
                }
            }
        }
    }
}

/// `many1` variant that discards outputs: requires at least one match, then
/// behaves like `many0_custom_trycomplete`.
fn many1_custom<I, O, E, F>(mut f: F) -> impl FnMut(I) -> IResult<I, (), E>
where
    I: Clone + InputLength,
    F: Parser<I, O, E>,
    E: ParseError<I>,
{
    move |mut i: I| match
f.parse(i.clone()) { Err(Err::Error(err)) => Err(Err::Error(E::append(i, ErrorKind::Many1, err))), Err(e) => Err(e), Ok((i1, o)) => { i = i1; loop { let len = i.input_len(); match f.parse(i.clone()) { Err(Err::Error(_)) => return Ok((i, ())), Err(e) => return Err(e), Ok((i1, o)) => { // infinite loop check: the parser must always consume if i1.input_len() == len { return Err(Err::Error(E::from_error_kind(i, ErrorKind::Many1))); } i = i1; } } } } } } fn name(input: &[u8]) -> IResult<&[u8], &[u8]> { recognize(pair(namestart_char, many0_custom_trycomplete(namechar)))(input) } // [66] CharRef ::= '&#' [0-9]+ ';' | '&#x' [0-9a-fA-F]+ ';' fn CharRef(input: &[u8]) -> IResult<&[u8], &[u8]> { alt(( recognize(tuple((tag("&#"), take_while1(is_digit), char(';')))), recognize(tuple((tag("&#x"), take_while1(is_hex_digit), char(';')))), ))(input) } // [68] EntityRef ::= '&' Name ';' fn EntityRef(input: &[u8]) -> IResult<&[u8], &[u8]> { recognize(tuple((tag("&"), name, char(';'))))(input) } // [67] Reference ::= EntityRef | CharRef fn Reference(input: &[u8]) -> IResult<&[u8], &[u8]> { alt((EntityRef, CharRef))(input) } #[derive(Clone, Debug, Eq, PartialEq)] struct Reference<'a> { initial: &'a str, // resolved: &'a str, } // [10] AttValue ::= '"' ([^<&"] | Reference)* '"' | "'" ([^<&'] | Reference)* "'" fn AttValue(input: &[u8]) -> IResult<&[u8], &[u8]> { alt(( delimited( char('"'), recognize(many0_custom_trycomplete(alt((is_not(r#"<&""#), Reference)))), char('"'), ), delimited( char('\''), recognize(many0_custom_trycomplete(alt((is_not(r#"<&'"#), Reference)))), char('\''), ), ))(input) } // [25] Eq ::= S? '=' S? 
fn Eq(input: &[u8]) -> IResult<&[u8], &[u8]> { recognize(tuple((multispace0, char('='), multispace0)))(input) } // [41] Attribute ::= Name Eq AttValue fn Attribute(input: &[u8]) -> IResult<&[u8], SAXAttribute> { match tuple((name, Eq, AttValue))(input) { Ok((i, o)) => { return Ok(( i, SAXAttribute { value: unsafe { std::str::from_utf8_unchecked(o.2) }, qualified_name: unsafe { std::str::from_utf8_unchecked(o.0) }, }, )); } Err(e) => Err(e), } } // let mut Attribute = ParsingRule::new("Attribute".to_owned(), RuleType::Sequence); // Attribute.children_names.push("Name".to_owned()); // Attribute.children_names.push("Eq".to_owned()); // Attribute.children_names.push("AttValue".to_owned()); // rule_nameRegistry.insert(Attribute.rule_name.clone(), Attribute); // [40] STag ::= '<' Name (S Attribute)* S? '>' #[derive(Clone, Debug, Eq, PartialEq)] struct SAXAttribute<'a> { pub value: &'a str, pub qualified_name: &'a str, // fn get_value(&self) -> &str; // fn get_local_name(&self) -> &str; // fn get_qualified_name(&self) -> &str; // fn get_uri(&self) -> &str; } #[derive(Clone, Debug, Eq, PartialEq)] struct SAXAttribute2 { pub value: std::ops::Range<usize>, pub qualified_name: std::ops::Range<usize>, // fn get_value(&self) -> &str; // fn get_local_name(&self) -> &str; // fn get_qualified_name(&self) -> &str; // fn get_uri(&self) -> &str; } #[derive(Clone, Debug, Eq, PartialEq)] struct StartElement<'a> { pub name: &'a str, pub attributes: Vec<SAXAttribute<'a>>, } #[derive(Clone, Debug, Eq, PartialEq)] struct EndElement<'a> { pub name: &'a str, } fn STag<'a>(input: &'a [u8]) -> IResult<&[u8], StartElement<'a>> { match tuple(( char('<'), name, many0(preceded(multispace0, Attribute)), multispace0, char('>'), ))(input) { Ok((i, o)) => { return Ok(( i, StartElement { name: unsafe { std::str::from_utf8_unchecked(o.1) }, attributes: o.2, }, )); } Err(e) => Err(e), } } // [44] EmptyElemTag ::= '<' Name (S Attribute)* S? 
// '/>'   (continuation of the [44] EmptyElemTag production comment above)
// Parses `<Name attr="v" .../>`; reported as a `StartElement` like `STag`.
fn EmptyElemTag(input: &[u8]) -> IResult<&[u8], StartElement> {
    match tuple((
        char('<'),
        name,
        many0(preceded(multispace0, Attribute)),
        multispace0,
        tag("/>"),
    ))(input) {
        Ok((i, o)) => Ok((
            i,
            StartElement {
                name: unsafe { std::str::from_utf8_unchecked(o.1) },
                attributes: o.2,
            },
        )),
        Err(e) => Err(e),
    }
}

// [3] S ::= (#x20 | #x9 | #xD | #xA)+
// multispace0 fits

// [42] ETag ::= '</' Name S? '>'
fn ETag(input: &[u8]) -> IResult<&[u8], EndElement> {
    match tuple((tag("</"), name, multispace0, char('>')))(input) {
        Ok((i, o)) => {
            // println!("{:?}", o);
            return Ok((
                i,
                EndElement {
                    name: unsafe { std::str::from_utf8_unchecked(o.1) },
                },
            ));
        }
        Err(e) => Err(e),
    }
}

#[test]
fn test_etag() {
    let data = r#"</A>"#.as_bytes();
    let res = ETag(&data);
    println!("{:?}", res);
}

#[test]
fn test_namestart_char_t() {
    let data = "<a.abc-ab1çroot><A/><B/><C/></root>".as_bytes();
    // fn parser(s: &[u8]) -> IResult<&[u8], &[u8]> {
    //     namestart_char_t(s)
    // }
    let res = STag(&data);
    println!("{:?}", res);
}

#[test]
fn test_stag() {
    let data = r#"<A a="b" c = "d"></A>"#.as_bytes();
    let res = STag(&data);
    println!("{:?}", res);

    let data = r#"<A a='x'>"#.as_bytes();
    let res = STag(&data);
    println!("{:?}", res);

    let data = r#"<B b="val" >"#.as_bytes();
    let res = STag(&data);
    println!("{:?}", res);
}

// [14] CharData ::= [^<&]* - ([^<&]* ']]>' [^<&]*)
// no '>' except ']]>'
// The spec is not clear but we also apply Char restrictions
#[inline]
pub fn is_CharData_single_pure_t(chr: char) -> bool {
    chr != '<' && chr != '&' && is_xml_char_t(chr)
}

// Consumes one character of character data (no ']]>' handling here).
pub fn CharData_single_pure(input: &[u8]) -> IResult<&[u8], &[u8]> {
    if input.len() == 0 {
        return Err(Err::Incomplete(Needed::new(1)));
    }
    let width = utf8_char_width(input[0]);
    if input.len() < width {
        return Err(Err::Incomplete(Needed::new(width - input.len())));
    }
    let c = match std::str::from_utf8(&input[..width]).ok() {
        Some(s) => s.chars().next().unwrap(),
        None => return Err(Err::Error(Error::new(input, ErrorKind::Char))),
    };
    if is_CharData_single_pure_t(c) {
        return Ok((&input[width..], &input[0..width]));
    } else {
        return Err(Err::Error(Error::new(input, ErrorKind::Char)));
    }
}

// Consumes one character of character data, rejecting a ']]>' sequence and
// requesting more input when the buffer ends inside a possible ']]>'.
fn CharData_single(input: &[u8]) -> IResult<&[u8], &[u8]> {
    //if input = 0 , don't send incomplete // ref#streamcut
    if input.len() == 0 {
        return Err(Err::Error(Error::new(input, ErrorKind::Char)));
    }
    // ']]>' should not appear in the chardata, if we can't be sure because input is eof, we should request more data.
    match tag::<&str, &[u8], Error<&[u8]>>("]]>")(input) {
        Ok(r) => return Err(Err::Error(Error::new(input, ErrorKind::Char))),
        Err(Err::Incomplete(n)) => return Err(Err::Incomplete(Needed::Unknown)),
        _ => (),
    };
    CharData_single_pure(input)
}

#[test]
fn test_chardata_single() {
    let data = "]]".as_bytes();

    assert_eq!(
        CharData_single("]".as_bytes()),
        Err(Err::Incomplete(Needed::Unknown))
    );
    assert_eq!(
        CharData_single("]]".as_bytes()),
        Err(Err::Incomplete(Needed::Unknown))
    );
    assert_eq!(
        CharData_single("]]>".as_bytes()),
        Err(Err::Error(error_position!(
            "]]>".as_bytes(),
            ErrorKind::Char
        )))
    );
    assert_eq!(
        CharData_single("]]<".as_bytes()),
        Ok((&b"]<"[..], &b"]"[..]))
    );
    assert_eq!(
        CharData_single("&".as_bytes()),
        Err(Err::Error(error_position!("&".as_bytes(), ErrorKind::Char)))
    );
    assert_eq!(
        CharData_single("<".as_bytes()),
        Err(Err::Error(error_position!("<".as_bytes(), ErrorKind::Char)))
    );
    assert_eq!(
        CharData_single("abc".as_bytes()),
        Ok((&b"bc"[..], &b"a"[..]))
    );
}

// [14] CharData ::= [^<&]* - ([^<&]* ']]>' [^<&]*)
//our implementation requires at least one char
fn CharData(input: &[u8]) -> IResult<&[u8], &[u8]> {
    recognize(tuple((
        CharData_single,
        many0_custom_chardata(CharData_single),
    )))(input)
}

#[test]
fn test_chardata() {
    assert_eq!(CharData("abc]".as_bytes()), Ok((&b"]"[..], &b"abc"[..])));
    assert_eq!(
        CharData("]]".as_bytes()),
        Err(Err::Incomplete(Needed::Unknown))
    );
    //since we want chardata to parse at least 1 char now:
    // assert_eq!(CharData("]]>".as_bytes()), Ok((&b"]]>"[..], &b""[..])));
    assert_eq!(
        CharData("]]>".as_bytes()),
        Err(Err::Error(error_position!(
            "]]>".as_bytes(),
            ErrorKind::Char
        )))
    );
    assert_eq!(CharData("]]<".as_bytes()), Ok((&b"<"[..], &b"]]"[..])));
    //since we want chardata to parse at least 1 char now:
    // assert_eq!(CharData("&".as_bytes()), Ok((&b"&"[..], &b""[..])));
    assert_eq!(CharData("a&".as_bytes()), Ok((&b"&"[..], &b"a"[..])));
    assert_eq!(CharData("a<".as_bytes()), Ok((&b"<"[..], &b"a"[..])));
    //this was returning incomplete since the next char can be the start of "]]>", but we plan to cut it off for streaming!
    //see ref#streamcut
    assert_eq!(CharData("abc".as_bytes()), Ok((&b""[..], &b"abc"[..])));

    let data: Vec<u8> = [
        65, 108, 99, 104, 101, 109, 121, 32, 40, 102, 114, 111, 109, 32, 65, 114, 97, 98, 105,
        99, 58, 32, 97, 108, 45, 107, 196, 171, 109, 105, 121, 196,
    ]
    .to_vec();
    let remainder: Vec<u8> = [196].to_vec();
    println!("try to read: {:?}", unsafe {
        std::str::from_utf8_unchecked(&data[0..31])
    });
    assert_eq!(
        CharData(&data),
        Ok((
            &remainder[0..1],
            &"Alchemy (from Arabic: al-kīmiy".as_bytes()[..]
        ))
    );
}

// [43] content ::= CharData?
// ((element | Reference | CDSect | PI | Comment) CharData?)*
// (continuation of the [43] content production comment above)
//we will use state machine instead of this rule to make it streamable

// One event produced while scanning element content.
enum ContentRelaxed<'a> {
    CharData(&'a [u8]),
    StartElement(StartElement<'a>),
    EmptyElemTag(StartElement<'a>),
    EndElement(EndElement<'a>),
    Reference(Reference<'a>),
    CdataStart,
    CommentStart,
}

// The following adapters wrap each sub-parser so all alternatives share the
// single `ContentRelaxed` result type for `alt`.

fn content_relaxed_CharData(input: &[u8]) -> IResult<&[u8], ContentRelaxed> {
    match CharData(input) {
        Ok(succ) => Ok((succ.0, ContentRelaxed::CharData(succ.1))),
        Err(err) => return Err(err),
    }
}

fn content_relaxed_STag(input: &[u8]) -> IResult<&[u8], ContentRelaxed> {
    match STag(input) {
        Ok(succ) => Ok((succ.0, ContentRelaxed::StartElement(succ.1))),
        Err(err) => return Err(err),
    }
}

fn content_relaxed_ETag(input: &[u8]) -> IResult<&[u8], ContentRelaxed> {
    match ETag(input) {
        Ok(succ) => Ok((succ.0, ContentRelaxed::EndElement(succ.1))),
        Err(err) => return Err(err),
    }
}

//todo add endelement as next step or inform it is an emptyelem tag via event api?
fn content_relaxed_EmptyElemTag(input: &[u8]) -> IResult<&[u8], ContentRelaxed> {
    match EmptyElemTag(input) {
        Ok(succ) => Ok((succ.0, ContentRelaxed::StartElement(succ.1))),
        Err(err) => return Err(err),
    }
}

fn content_relaxed_Reference(input: &[u8]) -> IResult<&[u8], ContentRelaxed> {
    match Reference(input) {
        Ok(succ) => Ok((
            succ.0,
            ContentRelaxed::Reference(Reference {
                initial: unsafe { std::str::from_utf8_unchecked(succ.1) },
            }),
        )),
        Err(err) => return Err(err),
    }
}

fn content_relaxed_CdataStart(input: &[u8]) -> IResult<&[u8], ContentRelaxed> {
    match CDATASection_start(input) {
        Ok(succ) => Ok((succ.0, ContentRelaxed::CdataStart)),
        Err(err) => return Err(err),
    }
}

fn content_relaxed_CommentStart(input: &[u8]) -> IResult<&[u8], ContentRelaxed> {
    match Comment_start(input) {
        Ok(succ) => Ok((succ.0, ContentRelaxed::CommentStart)),
        Err(err) => return Err(err),
    }
}

// [custom] relaxed ::= CharData | STag | EmptyElemTag | ETag | Reference | CDATA | Comment ... todo: add PI
fn content_relaxed(input: &[u8]) -> IResult<&[u8], ContentRelaxed> {
    alt((
        content_relaxed_CharData,
        content_relaxed_STag,
        content_relaxed_EmptyElemTag,
        content_relaxed_ETag,
        content_relaxed_Reference,
        content_relaxed_CdataStart,
        content_relaxed_CommentStart,
    ))(input)
}

#[test]
fn test_xml3() {
    let data = "<root><A/><B/><C/></root>".as_bytes();

    fn parser(s: &[u8]) -> IResult<&[u8], &[u8]> {
        tag("<root>")(s)
    }
    let res = parser(&data);
    println!("{:?}", res);
}

// Parser Rules organized by W3C Spec

// [26] VersionNum ::= '1.' [0-9]+
fn VersionNum(input: &[u8]) -> IResult<&[u8], &[u8]> {
    recognize(tuple((tag("1."), digit1)))(input)
}

#[test]
fn test_VersionNum() {
    let data = r#"1.123 "#.as_bytes();
    let res = VersionNum(&data);
    println!("{:?}", res);
}

// [24] VersionInfo ::= S 'version' Eq ("'" VersionNum "'" | '"' VersionNum '"')
fn VersionInfo(input: &[u8]) -> IResult<&[u8], &[u8]> {
    recognize(tuple((
        multispace1,
        tag("version"),
        Eq,
        alt((
            delimited(char('"'), VersionNum, char('"')),
            delimited(char('\''), VersionNum, char('\'')),
        )),
    )))(input)
}

#[test]
fn test_VersionInfo() {
    let data = r#" version="1.0" "#.as_bytes();
    let res = VersionInfo(&data);
    println!("{:?}", res);
}

// [81] EncName ::= [A-Za-z] ([A-Za-z0-9._] | '-')*
fn EncName(input: &[u8]) -> IResult<&[u8], &[u8]> {
    recognize(tuple((
        alpha1,
        many0_custom_trycomplete(alt((alphanumeric1, tag("-"), tag("."), tag("_")))),
    )))(input)
}

#[test]
fn test_EncName() {
    let data = r#"UTF-8 "#.as_bytes();
    let res = EncName(&data);
    println!("{:?}", res);
}

// [80] EncodingDecl ::= S 'encoding' Eq ('"' EncName '"' | "'" EncName "'" )
fn EncodingDecl(input: &[u8]) -> IResult<&[u8], &[u8]> {
    recognize(tuple((
        multispace1,
        tag("encoding"),
        Eq,
        alt((
            delimited(char('"'), EncName, char('"')),
            delimited(char('\''), EncName, char('\'')),
        )),
    )))(input)
}

#[test]
fn test_EncodingDecl() {
    let data = r#" encoding='EUC-JP' "#.as_bytes();
    let res = EncodingDecl(&data);
    println!("{:?}", res);
}

// [32] SDDecl ::= S 'standalone' Eq (("'" ('yes' | 'no') "'") | ('"' ('yes' | 'no') '"'))
fn yes_mi_no_mu(input: &[u8]) -> IResult<&[u8], &[u8]> {
    alt((tag("yes"), tag("no")))(input)
}

fn SDDecl(input: &[u8]) -> IResult<&[u8], &[u8]> {
    recognize(tuple((
        multispace1,
        tag("standalone"),
        Eq,
        alt((
            delimited(char('"'), yes_mi_no_mu, char('"')),
            delimited(char('\''), yes_mi_no_mu, char('\'')),
        )),
    )))(input)
}

#[test]
fn test_SDDecl() {
    let data = r#" standalone='yes' "#.as_bytes();
    let res = SDDecl(&data);
    println!("{:?}", res);
}

// [23] XMLDecl ::= '<?xml' VersionInfo EncodingDecl? SDDecl? S? '?>'
fn XMLDecl(input: &[u8]) -> IResult<&[u8], &[u8]> {
    recognize(tuple((
        tag("<?xml"),
        VersionInfo,
        opt(EncodingDecl),
        opt(SDDecl),
        multispace0,
        tag("?>"),
    )))(input)
}

// [27] Misc ::= Comment | PI | S

//todo: comment | PI, we may need to separate
// fn Misc(input: &[u8]) -> IResult<&[u8], &[u8]> {
//     recognize(alt((multispace1,)))(input)
// }

// XML declaration followed by optional whitespace — the start of a document.
fn docstart_custom(input: &[u8]) -> IResult<&[u8], &[u8]> {
    recognize(tuple((XMLDecl, multispace0)))(input)
}

#[test]
fn test_XMLDecl() {
    let data = r#"<?xml version="1.0" encoding="UTF-8" standalone='yes'?>"#.as_bytes();
    let res = XMLDecl(&data);
    println!("{:?}", res);
}

// [1] document ::= prolog element Misc*
// [22] prolog ::= XMLDecl? Misc* (doctypedecl Misc*)?

// [15] Comment ::= '<!--' ((Char - '-') | ('-' (Char - '-')))* '-->'
//spec seems to not allow empty comments? There are parsers that allow it.
/// Matches the literal comment opener `<!--`.
fn Comment_start(input: &[u8]) -> IResult<&[u8], &[u8]> {
    tag("<!--")(input)
}

/// Matches the literal comment closer `-->`.
fn Comment_end(input: &[u8]) -> IResult<&[u8], &[u8]> {
    tag("-->")(input)
}

// We don't need to exclude "-" we handle that in inside_Comment_single
// #[inline]
// fn is_CharData_single_pure_t(chr: char) -> bool {
//     chr != '<' && chr != '&' && is_xml_char_t(chr)
// }

/// Consumes a single valid XML `Char` (any code point accepted by
/// `is_xml_char_t`) and returns its UTF-8 bytes.
/// Returns `Incomplete` when the buffer is empty or ends in the middle of a
/// multi-byte UTF-8 sequence, so the caller can request more data.
fn inside_Comment_or_CDATA_single_pure(input: &[u8]) -> IResult<&[u8], &[u8]> {
    if input.len() == 0 {
        return Err(Err::Incomplete(Needed::new(1)));
    }
    // Width of the UTF-8 sequence starting at the first byte (0 if invalid).
    let width = utf8_char_width(input[0]);
    if input.len() < width {
        // Partial multi-byte character at the end of the buffer.
        return Err(Err::Incomplete(Needed::new(width - input.len())));
    }
    let c = match std::str::from_utf8(&input[..width]).ok() {
        Some(s) => s.chars().next().unwrap(),
        None => return Err(Err::Error(Error::new(input, ErrorKind::Char))),
    };
    if is_xml_char_t(c) {
        return Ok((&input[width..], &input[0..width]));
    } else {
        return Err(Err::Error(Error::new(input, ErrorKind::Char)));
    }
}

/// Consumes one character of comment interior, rejecting the position if the
/// forbidden sequence `--` starts here.
/// NOTE(review): on empty input this deliberately returns `Error` rather than
/// `Incomplete` (see `ref#streamcut`) so that enclosing `many0` loops can stop
/// at a buffer boundary and let `Comment_end` report what is still needed.
fn inside_Comment_single(input: &[u8]) -> IResult<&[u8], &[u8]> {
    //if input = 0 , don't send incomplete
    // ref#streamcut
    if input.len() == 0 {
        return Err(Err::Error(Error::new(input, ErrorKind::Char)));
    }
    // '--' should not appear in the comment, if we can't be sure because input is eof, we should request more data.
    match tag::<&str, &[u8], Error<&[u8]>>("--")(input) {
        // A full "--" here means this position cannot be comment interior.
        Ok(r) => return Err(Err::Error(Error::new(input, ErrorKind::Char))),
        // Buffer ends with a single '-': can't decide yet, ask for more input.
        Err(Err::Incomplete(n)) => return Err(Err::Incomplete(Needed::new(1))),
        _ => (),
    };
    inside_Comment_or_CDATA_single_pure(input)
}

/// [15] Comment ::= '<!--' ((Char - '-') | ('-' (Char - '-')))* '-->'
/// Recognizes a complete comment, returning the full `<!--...-->` slice.
fn Comment(input: &[u8]) -> IResult<&[u8], &[u8]> {
    recognize(tuple((
        Comment_start,
        many0_custom_chardata(inside_Comment_single),
        Comment_end,
    )))(input)
}

#[test]
fn test_comment() {
    assert_eq!(
        Comment("<!-- comment -->a".as_bytes()),
        Ok((&b"a"[..], &b"<!-- comment -->"[..]))
    );
    assert_eq!(
        Comment("<!---->cc".as_bytes()),
        Ok((&b"cc"[..], &b"<!---->"[..]))
    );
    assert_eq!(
        Comment("<!-- comment --->a".as_bytes()),
        Err(Err::Error(error_position!(
            "--->a".as_bytes(),
            ErrorKind::Tag
        )))
    );
    assert_eq!(
        Comment("<!-- com--ment -->a".as_bytes()),
        Err(Err::Error(error_position!(
            "--ment -->a".as_bytes(),
            ErrorKind::Tag
        )))
    );
    assert_eq!(
        Comment("<!--ok-".as_bytes()),
        Err(Err::Incomplete(Needed::new(2)))
    );
    assert_eq!(
        Comment("<!--ok--".as_bytes()),
        Err(Err::Incomplete(Needed::new(1)))
    );
}

/// Event emitted while streaming through a comment body: either a run of
/// interior characters or the closing `-->`.
enum InsideComment<'a> {
    Characters(&'a [u8]),
    CommentEnd,
}

/// Matches one or more comment-interior characters (at least one, so the
/// enclosing `alt` can fall through to `Comment_end` on `--`).
fn insidecomment_characters(input: &[u8]) -> IResult<&[u8], InsideComment> {
    match recognize(tuple((
        inside_Comment_single,
        many0_custom_chardata(inside_Comment_single),
    )))(input)
    {
        Ok(succ) => Ok((succ.0, InsideComment::Characters(succ.1))),
        Err(err) => return Err(err),
    }
}

/// Matches the closing `-->` and maps it to `InsideComment::CommentEnd`.
fn insidecomment_comment_end(input: &[u8]) -> IResult<&[u8], InsideComment> {
    match Comment_end(input) {
        Ok(succ) => Ok((succ.0, InsideComment::CommentEnd)),
        Err(err) => return Err(err),
    }
}

// [custom]
/// Streaming driver for comment interiors: characters or the terminator.
fn insidecomment(input: &[u8]) -> IResult<&[u8], InsideComment> {
    alt((insidecomment_characters, insidecomment_comment_end))(input)
}

// [18] CDSect ::= CDStart CData CDEnd
// [19] CDStart ::= '<![CDATA['
fn CDATASection_start(input: &[u8]) -> IResult<&[u8], &[u8]> {
    tag("<![CDATA[")(input)
}

// [21] CDEnd ::= ']]>'
fn CDATASection_end(input: &[u8]) -> IResult<&[u8], &[u8]> {
    tag("]]>")(input)
}

// [20] CData ::= (Char* - (Char* ']]>' Char*))
/// Consumes one character of CDATA interior, rejecting the position if the
/// terminator `]]>` starts here. Same empty-input convention as
/// `inside_Comment_single` (Error, not Incomplete — ref#streamcut).
fn inside_CDATASection_single(input: &[u8]) -> IResult<&[u8], &[u8]> {
    //if input = 0 , don't send incomplete
    // ref#streamcut
    if input.len() == 0 {
        return Err(Err::Error(Error::new(input, ErrorKind::Char)));
    }
    // ']]>' should not appear in the cdata section, if we can't be sure because input is eof, we should request more data.
    match tag::<&str, &[u8], Error<&[u8]>>("]]>")(input) {
        Ok(r) => return Err(Err::Error(Error::new(input, ErrorKind::Char))),
        Err(Err::Incomplete(n)) => return Err(Err::Incomplete(Needed::Unknown)),
        _ => (),
    };
    inside_Comment_or_CDATA_single_pure(input)
}

/// Recognizes a complete CDATA section, returning the full
/// `<![CDATA[...]]>` slice.
fn CDATASection(input: &[u8]) -> IResult<&[u8], &[u8]> {
    recognize(tuple((
        CDATASection_start,
        many0_custom_chardata(inside_CDATASection_single),
        CDATASection_end,
    )))(input)
}

#[test]
fn test_cdata() {
    assert_eq!(
        CDATASection("<![CDATA[abc]]>a".as_bytes()),
        Ok((&b"a"[..], &b"<![CDATA[abc]]>"[..]))
    );
    assert_eq!(
        CDATASection("<![CDATA[]]>".as_bytes()),
        Ok((&b""[..], &b"<![CDATA[]]>"[..]))
    );
    assert_eq!(
        CDATASection("<![CDATA[ ]]".as_bytes()),
        Err(Err::Incomplete(Needed::new(1)))
    );
    assert_eq!(
        CDATASection("<![CDATA[ ]".as_bytes()),
        Err(Err::Incomplete(Needed::new(2)))
    );
}

//only parsed without checking well-formedness inside
// [16] PI ::= '<?' PITarget (S (Char* - (Char* '?>' Char*)))? '?>'
// [17] PITarget ::= Name - (('X' | 'x') ('M' | 'm') ('L' | 'l'))
fn PI_start(input: &[u8]) -> IResult<&[u8], &[u8]> {
    tag("<?")(input)
}

fn PI_end(input: &[u8]) -> IResult<&[u8], &[u8]> {
    tag("?>")(input)
}

/// Consumes one character of processing-instruction interior, rejecting the
/// position if the terminator `?>` starts here. Same empty-input convention
/// as `inside_Comment_single` (Error, not Incomplete — ref#streamcut).
fn inside_PI_single(input: &[u8]) -> IResult<&[u8], &[u8]> {
    //if input = 0 , don't send incomplete
    // ref#streamcut
    if input.len() == 0 {
        return Err(Err::Error(Error::new(input, ErrorKind::Char)));
    }
    // ']]>' should not appear in the cdata section, if we can't be sure because input is eof, we should request more data.
    match tag::<&str, &[u8], Error<&[u8]>>("?>")(input) {
        Ok(r) => return Err(Err::Error(Error::new(input, ErrorKind::Char))),
        Err(Err::Incomplete(n)) => return Err(Err::Incomplete(Needed::Unknown)),
        _ => (),
    };
    inside_Comment_or_CDATA_single_pure(input)
}

/// Recognizes a complete processing instruction `<?...?>` without validating
/// the PITarget (see the "only parsed without checking well-formedness" note).
fn PI(input: &[u8]) -> IResult<&[u8], &[u8]> {
    recognize(tuple((
        PI_start,
        many0_custom_chardata(inside_PI_single),
        PI_end,
    )))(input)
}

#[test]
fn test_pi() {
    assert_eq!(PI("<??>a".as_bytes()), Ok((&b"a"[..], &b"<??>"[..])));
    assert_eq!(
        PI("<?dummmy?>".as_bytes()),
        Ok((&b""[..], &b"<?dummmy?>"[..]))
    );
}

//only parsed without checking well-formedness inside
// [28] doctypedecl ::= '<!DOCTYPE' S Name (S ExternalID)? S? ('[' intSubset ']' S?)? '>'
fn doctypedecl_start(input: &[u8]) -> IResult<&[u8], &[u8]> {
    tag("<!DOCTYPE")(input)
}

fn doctypedecl_end(input: &[u8]) -> IResult<&[u8], &[u8]> {
    tag(">")(input)
}

// fn inside_doctypedecl_single(input: &[u8]) -> IResult<&[u8], &[u8]> {
//     //if input = 0 , don't send incomplete
//     // ref#streamcut
//     if input.len() == 0 {
//         return Err(Err::Error(Error::new(input, ErrorKind::Char)));
//     }
//     // ']]>' should not appear in the cdata section, if we can't be sure because input is eof, we should request more data.
// match tag::<&str, &[u8], Error<&[u8]>>(">")(input) { // Ok(r) => return Err(Err::Error(Error::new(input, ErrorKind::Char))), // Err(Err::Incomplete(n)) => return Err(Err::Incomplete(Needed::Unknown)), // _ => (), // }; // inside_Comment_or_CDATA_single_pure(input) // } //char that is not > or < fn inside_doctypedecl_single_pure(input: &[u8]) -> IResult<&[u8], &[u8]> { if input.len() == 0 { return Err(Err::Incomplete(Needed::new(1))); } let width = utf8_char_width(input[0]); if input.len() < width { return Err(Err::Incomplete(Needed::new(width - input.len()))); } let c = match std::str::from_utf8(&input[..width]).ok() { Some(s) => s.chars().next().unwrap(), None => return Err(Err::Error(Error::new(input, ErrorKind::Char))), }; if is_xml_char_t(c) && c != '<' && c != '>' { return Ok((&input[width..], &input[0..width])); } else { return Err(Err::Error(Error::new(input, ErrorKind::Char))); } } fn doctypedecl_dummy_internal(input: &[u8]) -> IResult<&[u8], &[u8]> { recognize(tuple(( tag("<"), many0_custom_trycomplete(alt(( recognize(many1_custom(inside_doctypedecl_single_pure)), Comment, doctypedecl_dummy_internal, ))), tag(">"), )))(input) } // can contain nested < and > for attlist and internal comments fn doctypedecl(input: &[u8]) -> IResult<&[u8], &[u8]> { recognize(tuple(( doctypedecl_start, many0_custom_trycomplete(alt(( recognize(many1_custom(inside_doctypedecl_single_pure)), Comment, doctypedecl_dummy_internal, ))), doctypedecl_end, )))(input) } #[test] fn test_doctypedecl() { assert_eq!( doctypedecl(r#"<!DOCTYPE>a"#.as_bytes()), Ok((&b"a"[..], &br#"<!DOCTYPE>"#[..])) ); assert_eq!( doctypedecl(r#"<!DOCTYPE greeting SYSTEM "hello.dtd">a"#.as_bytes()), Ok((&b"a"[..], &br#"<!DOCTYPE greeting SYSTEM "hello.dtd">"#[..])) ); assert_eq!( doctypedecl(r#"<!DOCTYPE dummy>"#.as_bytes()), Ok((&b""[..], &br#"<!DOCTYPE dummy>"#[..])) ); assert_eq!( doctypedecl(r#"<!DOCTYPE <!-- --> <[]>dummy>"#.as_bytes()), Ok((&b""[..], &br#"<!DOCTYPE <!-- --> <[]>dummy>"#[..])) ); //also 
works > inside comment assert_eq!( doctypedecl(r#"<!DOCTYPE <!-- > --> <[]>dummy>"#.as_bytes()), Ok((&b""[..], &br#"<!DOCTYPE <!-- > --> <[]>dummy>"#[..])) ); } enum InsideCdata<'a> { Characters(&'a [u8]), CdataEnd, } fn insidecdata_characters(input: &[u8]) -> IResult<&[u8], InsideCdata> { match recognize(tuple(( inside_CDATASection_single, many0_custom_chardata(inside_CDATASection_single), )))(input) { Ok(succ) => Ok((succ.0, InsideCdata::Characters(succ.1))), Err(err) => return Err(err), } } fn insidecdata_cdata_end(input: &[u8]) -> IResult<&[u8], InsideCdata> { match CDATASection_end(input) { Ok(succ) => Ok((succ.0, InsideCdata::CdataEnd)), Err(err) => return Err(err), } } // [custom] fn insidecdata(input: &[u8]) -> IResult<&[u8], InsideCdata> { alt((insidecdata_characters, insidecdata_cdata_end))(input) } enum MiscBeforeXmlDecl<'a> { PI(&'a [u8]), Whitespace(&'a [u8]), CommentStart, DocType(&'a [u8]), XmlDecl(&'a [u8]), } enum MiscBeforeDoctype<'a> { PI(&'a [u8]), Whitespace(&'a [u8]), CommentStart, DocType(&'a [u8]), } enum Misc<'a> { PI(&'a [u8]), Whitespace(&'a [u8]), CommentStart, } // using map combinator... 
// fn misc_pi(input: &[u8]) -> IResult<&[u8], Misc> { // map(PI, |a| Misc::PI(a))(input) // // match recognize(tuple(( // // inside_CDATASection_single, // // many0_custom_chardata(inside_CDATASection_single), // // )))(input) // // { // // Ok(succ) => Ok((succ.0, InsideCdata::Characters(succ.1))), // // Err(err) => return Err(err), // // } // } // [custom] fn misc(input: &[u8]) -> IResult<&[u8], Misc> { alt(( map(PI, |a| Misc::PI(a)), map(multispace1, |a| Misc::Whitespace(a)), map(Comment_start, |a| Misc::CommentStart), ))(input) } fn misc_before_doctype(input: &[u8]) -> IResult<&[u8], MiscBeforeDoctype> { alt(( map(PI, |a| MiscBeforeDoctype::PI(a)), map(multispace1, |a| MiscBeforeDoctype::Whitespace(a)), map(Comment_start, |a| MiscBeforeDoctype::CommentStart), map(doctypedecl, |a| MiscBeforeDoctype::DocType(a)), ))(input) } fn misc_before_xmldecl(input: &[u8]) -> IResult<&[u8], MiscBeforeXmlDecl> { alt(( map(XMLDecl, |a| MiscBeforeXmlDecl::XmlDecl(a)), // currently PI can also match XMLDecl so this is first choice map(PI, |a| MiscBeforeXmlDecl::PI(a)), map(multispace1, |a| MiscBeforeXmlDecl::Whitespace(a)), map(Comment_start, |a| MiscBeforeXmlDecl::CommentStart), map(doctypedecl, |a| MiscBeforeXmlDecl::DocType(a)), ))(input) } // Namespaces in XML 1.0 https://www.w3.org/TR/xml-names/ // [1] NSAttName ::= PrefixedAttName | DefaultAttName // [2] PrefixedAttName ::= 'xmlns:' // [3] DefaultAttName ::= 'xmlns' // [4] NCName ::= Name - (Char* ':' Char*) /* An XML Name, minus the ":" */ // [5] NCNameChar ::= NameChar - ':' /* An XML NameChar, minus the ":" */ // [6] NCNameStartChar ::= NCName - ( Char Char Char* ) /* The first letter of an NCName */ // [7] QName ::= PrefixedName | UnprefixedName // [8] PrefixedName ::= Prefix ':' LocalPart // [9] UnprefixedName ::= LocalPart // [10] Prefix ::= NCName // [11] LocalPart ::= NCName #[derive(Clone, Debug, Eq, PartialEq)] enum ParserState { DocStartBeforeXmlDecl, // when xmldecl parsed move to DocStartBeforeDocType, if 
something else parsed(including whitespace) the same! // DocStartBeforeXmlDeclInsideComment, // not possible - this means that doc doesn't have xmldecl, move to DocStartBeforeDocType DocStartBeforeDocType, //when doctype parsed move to docstart DocStartBeforeDocTypeInsideComment, // this doesn't mean that doc doesn't have doctype, move to DocStartBeforeDocType DocStart, DocStartInsideComment, Content, InsideCdata, InsideComment, //can be at the start or end of the document? specified all DocEnd, //misc DocEndInsideComment, } struct Namespace { level: usize, prefix: Range<usize>, value: Range<usize>, } pub struct OxideParser<R: Read> { state: ParserState, bufreader: BufReader<R>, buffer2: Vec<u8>, strbuffer: String, offset: usize, element_level: usize, element_strbuffer: String, element_list: Vec<Range<usize>>, is_namespace_aware: bool, namespace_strbuffer: String, namespace_list: Vec<Namespace>, } fn convert_start_element<'a>( strbuffer: &'a mut String, event1: StartElement, ) -> xml_sax::StartElement<'a> { let start = strbuffer.len(); let size = event1.name.len(); strbuffer.push_str(event1.name); let mut attributes2: Vec<SAXAttribute2> = vec![]; for att in event1.attributes { let start = strbuffer.len(); let size = att.qualified_name.len(); strbuffer.push_str(att.qualified_name); let qualified_name_range = Range { start: start, end: start + size, }; let start = strbuffer.len(); let size = att.value.len(); strbuffer.push_str(att.value); let value_range = Range { start: start, end: start + size, }; // let qualified_name = &self.strbuffer[start..(start + size)]; // let value = &self.strbuffer[start..(start + size)]; attributes2.push(SAXAttribute2 { value: value_range, qualified_name: qualified_name_range, }); } let mut attributes: Vec<xml_sax::Attribute> = vec![]; for att in attributes2 { // let qualified_name = &self.strbuffer[start..(start + size)]; // let value = &self.strbuffer[start..(start + size)]; attributes.push(xml_sax::Attribute { value: 
&strbuffer[att.value], name: &strbuffer[att.qualified_name], }); } xml_sax::StartElement { name: &strbuffer[start..(start + size)], attributes: attributes, is_empty: false, } } fn push_str_get_range(strbuffer: &mut String, addition: &str) -> Range<usize> { let start = strbuffer.len(); let size = addition.len(); let range = Range { start: start, end: start + size, }; strbuffer.push_str(addition); range } impl<R: Read> OxideParser<R> { // This method "consumes" the resources of the caller object // `self` desugars to `self: Self` pub fn start(reader: R) -> OxideParser<R> { OxideParser { state: ParserState::DocStartBeforeXmlDecl, bufreader: BufReader::with_capacity(8192, reader), offset: 0, buffer2: vec![], strbuffer: String::new(), element_level: 0, element_list: vec![], element_strbuffer: String::new(), is_namespace_aware: true, namespace_list: vec![], namespace_strbuffer: String::new(), } } fn read_data(&mut self) { self.bufreader.fill_buf().unwrap(); let data2 = self.bufreader.buffer(); self.buffer2.extend_from_slice(data2); self.bufreader.consume(data2.len()); } // , buf: &'b [u8] pub fn read_event<'a, 'b, 'c>(&'a mut self) -> xml_sax::Event<'a> { // self.bufreader.consume(self.offset); self.buffer2.drain(0..self.offset); self.offset = 0; self.strbuffer.clear(); if self.bufreader.capacity() > self.buffer2.len() { self.read_data(); } // let mut event: StartElement = StartElement { // name: "", // attributes: vec![], // }; // let mut event1: StartElement; //<'b>; //&'a let event2: xml_sax::Event; match self.state { ParserState::DocStartBeforeXmlDecl => { let res = misc_before_xmldecl(&self.buffer2); match res { Ok(parseresult) => { self.offset = self.buffer2.offset(parseresult.0); self.state = ParserState::DocStartBeforeDocType; match parseresult.1 { MiscBeforeXmlDecl::XmlDecl(a) => { let str = unsafe { std::str::from_utf8_unchecked(a) }; let range = push_str_get_range(&mut self.strbuffer, &str); event2 = xml_sax::Event::XmlDeclaration(&self.strbuffer[range]) } 
MiscBeforeXmlDecl::PI(a) => { let str = unsafe { std::str::from_utf8_unchecked(a) }; let range = push_str_get_range(&mut self.strbuffer, &str); event2 = xml_sax::Event::ProcessingInstruction(&self.strbuffer[range]) } MiscBeforeXmlDecl::Whitespace(a) => { let str = unsafe { std::str::from_utf8_unchecked(a) }; let range = push_str_get_range(&mut self.strbuffer, &str); event2 = xml_sax::Event::Whitespace(&self.strbuffer[range]) } MiscBeforeXmlDecl::CommentStart => { self.state = ParserState::DocStartBeforeDocTypeInsideComment; event2 = xml_sax::Event::StartComment; } MiscBeforeXmlDecl::DocType(a) => { self.state = ParserState::DocStart; let str = unsafe { std::str::from_utf8_unchecked(a) }; let range = push_str_get_range(&mut self.strbuffer, &str); event2 = xml_sax::Event::DocumentTypeDeclaration(&self.strbuffer[range]); } } } Err(err) => { //try content! self.state = ParserState::Content; event2 = self.read_event(); } } } ParserState::DocStartBeforeDocType => { let res = misc_before_doctype(&self.buffer2); match res { Ok(parseresult) => { self.offset = self.buffer2.offset(parseresult.0); // self.state = ParserState::DocStartBeforeDocType; match parseresult.1 { MiscBeforeDoctype::PI(a) => { let str = unsafe { std::str::from_utf8_unchecked(a) }; let range = push_str_get_range(&mut self.strbuffer, &str); event2 = xml_sax::Event::ProcessingInstruction(&self.strbuffer[range]) } MiscBeforeDoctype::Whitespace(a) => { let str = unsafe { std::str::from_utf8_unchecked(a) }; let range = push_str_get_range(&mut self.strbuffer, &str); event2 = xml_sax::Event::Whitespace(&self.strbuffer[range]) } MiscBeforeDoctype::CommentStart => { self.state = ParserState::DocStartBeforeDocTypeInsideComment; event2 = xml_sax::Event::StartComment; } MiscBeforeDoctype::DocType(a) => { self.state = ParserState::DocStart; let str = unsafe { std::str::from_utf8_unchecked(a) }; let range = push_str_get_range(&mut self.strbuffer, &str); event2 = 
xml_sax::Event::DocumentTypeDeclaration(&self.strbuffer[range]); } } } Err(err) => { //try content! self.state = ParserState::Content; event2 = self.read_event(); } } } ParserState::DocStartBeforeDocTypeInsideComment => { //expect comment or comment-end let res = insidecomment(&self.buffer2); match res { Ok(parseresult) => { self.offset = self.buffer2.offset(parseresult.0); match parseresult.1 { InsideComment::Characters(characters) => { let start = self.strbuffer.len(); let size = characters.len(); self.strbuffer .push_str(unsafe { std::str::from_utf8_unchecked(characters) }); event2 = xml_sax::Event::Comment(&self.strbuffer[start..(start + size)]) } InsideComment::CommentEnd => { self.state = ParserState::DocStartBeforeDocType; event2 = xml_sax::Event::EndComment; } } } Err(err) => panic!(), } } ParserState::DocStart => { let res = misc(&self.buffer2); match res { Ok(parseresult) => { self.offset = self.buffer2.offset(parseresult.0); // self.state = ParserState::DocStartBeforeDocType; match parseresult.1 { Misc::PI(a) => { let str = unsafe { std::str::from_utf8_unchecked(a) }; let range = push_str_get_range(&mut self.strbuffer, &str); event2 = xml_sax::Event::ProcessingInstruction(&self.strbuffer[range]) } Misc::Whitespace(a) => { let str = unsafe { std::str::from_utf8_unchecked(a) }; let range = push_str_get_range(&mut self.strbuffer, &str); event2 = xml_sax::Event::Whitespace(&self.strbuffer[range]) } Misc::CommentStart => { self.state = ParserState::DocStartInsideComment; event2 = xml_sax::Event::StartComment; } } } Err(err) => { //try content! 
self.state = ParserState::Content; event2 = self.read_event(); } } } ParserState::DocStartInsideComment => { //expect comment or comment-end let res = insidecomment(&self.buffer2); match res { Ok(parseresult) => { self.offset = self.buffer2.offset(parseresult.0); match parseresult.1 { InsideComment::Characters(characters) => { let start = self.strbuffer.len(); let size = characters.len(); self.strbuffer .push_str(unsafe { std::str::from_utf8_unchecked(characters) }); event2 = xml_sax::Event::Comment(&self.strbuffer[start..(start + size)]) } InsideComment::CommentEnd => { self.state = ParserState::DocStart; event2 = xml_sax::Event::EndComment; } } } Err(err) => panic!(), } } ParserState::Content => { let res = content_relaxed(&self.buffer2); match res { Ok(parseresult) => { self.offset = self.buffer2.offset(parseresult.0); match parseresult.1 { ContentRelaxed::CharData(event1) => { let start = self.strbuffer.len(); let size = event1.len(); self.strbuffer .push_str(unsafe { std::str::from_utf8_unchecked(event1) }); event2 = xml_sax::Event::Characters( &self.strbuffer[start..(start + size)], ) } ContentRelaxed::StartElement(event1) => { //todo decode event2 = xml_sax::Event::StartElement(convert_start_element( &mut self.strbuffer, event1, )); } ContentRelaxed::EmptyElemTag(event1) => { //todo decode let mut start_elem = convert_start_element(&mut self.strbuffer, event1); start_elem.is_empty = true; event2 = xml_sax::Event::StartElement(start_elem); //todo add endelement after this? 
} ContentRelaxed::EndElement(event1) => { let start = self.strbuffer.len(); let size = event1.name.len(); self.strbuffer.push_str(event1.name); event2 = xml_sax::Event::EndElement(xml_sax::EndElement { name: &self.strbuffer[start..(start + size)], }) } ContentRelaxed::Reference(event1) => { // let start = self.strbuffer.len(); // let size = event1.initial.len(); // let range_initial = Range { // start: start, // end: start + size, // }; // self.strbuffer.push_str(event1.initial); let range: Range<usize> = push_str_get_range(&mut self.strbuffer, event1.initial); let range_resolved = match event1.initial { "&amp;" => push_str_get_range(&mut self.strbuffer, "&"), "&lt" => push_str_get_range(&mut self.strbuffer, "<"), "&gt;" => push_str_get_range(&mut self.strbuffer, ">"), "&quot;" => push_str_get_range(&mut self.strbuffer, "\""), "&apos;" => push_str_get_range(&mut self.strbuffer, "'"), _ => push_str_get_range(&mut self.strbuffer, event1.initial), }; //todo resolve char refs //we are ignoring DTD entity refs event2 = xml_sax::Event::Reference(xml_sax::Reference { raw: &self.strbuffer[range], resolved: &self.strbuffer[range_resolved], }) } ContentRelaxed::CdataStart => { event2 = xml_sax::Event::StartCdataSection; self.state = ParserState::InsideCdata; } ContentRelaxed::CommentStart => { event2 = xml_sax::Event::StartComment; self.state = ParserState::InsideComment; } } } Err(Err::Incomplete(e)) => { // panic!() // self.read_data(); // if read bytes are 0 then return eof, otherwise return dummy event if self.buffer2.len() == 0 { return xml_sax::Event::EndDocument; } println!("try to read bytes: {:?}", unsafe { &self.buffer2 }); println!("try to read: {:?}", unsafe { std::str::from_utf8_unchecked(&self.buffer2) }); println!("err: {:?}", e); panic!() } Err(e) => { println!("try to read bytes: {:?}", unsafe { &self.buffer2 }); println!("try to read: {:?}", unsafe { std::str::from_utf8_unchecked(&self.buffer2) }); println!("err: {:?}", e); panic!() } } } 
ParserState::InsideCdata => { //expect cdata or cdata-end let res = insidecdata(&self.buffer2); match res { Ok(parseresult) => { self.offset = self.buffer2.offset(parseresult.0); match parseresult.1 { InsideCdata::Characters(characters) => { let start = self.strbuffer.len(); let size = characters.len(); self.strbuffer .push_str(unsafe { std::str::from_utf8_unchecked(characters) }); event2 = xml_sax::Event::Cdata(&self.strbuffer[start..(start + size)]) } InsideCdata::CdataEnd => { self.state = ParserState::Content; event2 = xml_sax::Event::EndCdataSection; } } } Err(err) => panic!(), } } ParserState::InsideComment => { //expect comment or comment-end let res = insidecomment(&self.buffer2); match res { Ok(parseresult) => { self.offset = self.buffer2.offset(parseresult.0); match parseresult.1 { InsideComment::Characters(characters) => { let start = self.strbuffer.len(); let size = characters.len(); self.strbuffer .push_str(unsafe { std::str::from_utf8_unchecked(characters) }); event2 = xml_sax::Event::Comment(&self.strbuffer[start..(start + size)]) } InsideComment::CommentEnd => { self.state = ParserState::Content; event2 = xml_sax::Event::EndComment; } } } Err(err) => panic!(), } } ParserState::DocEnd => { // EOF if self.buffer2.len() == 0 { return xml_sax::Event::EndDocument; } let res = misc(&self.buffer2); match res { Ok(parseresult) => { self.offset = self.buffer2.offset(parseresult.0); match parseresult.1 { Misc::PI(a) => { let str = unsafe { std::str::from_utf8_unchecked(a) }; let range = push_str_get_range(&mut self.strbuffer, &str); event2 = xml_sax::Event::ProcessingInstruction(&self.strbuffer[range]) } Misc::Whitespace(a) => { let str = unsafe { std::str::from_utf8_unchecked(a) }; let range = push_str_get_range(&mut self.strbuffer, &str); event2 = xml_sax::Event::Whitespace(&self.strbuffer[range]) } Misc::CommentStart => { self.state = ParserState::DocEndInsideComment; event2 = xml_sax::Event::StartComment; } } } Err(err) => { panic!() } } } 
ParserState::DocEndInsideComment => { //expect comment or comment-end let res = insidecomment(&self.buffer2); match res { Ok(parseresult) => { self.offset = self.buffer2.offset(parseresult.0); match parseresult.1 { InsideComment::Characters(characters) => { let start = self.strbuffer.len(); let size = characters.len(); self.strbuffer .push_str(unsafe { std::str::from_utf8_unchecked(characters) }); event2 = xml_sax::Event::Comment(&self.strbuffer[start..(start + size)]) } InsideComment::CommentEnd => { self.state = ParserState::DocEnd; event2 = xml_sax::Event::EndComment; } } } Err(err) => panic!(), } } } event2 } } #[test] fn test_parser1() { let data = r#"<root><A a='x'> <B b="val" a:12='val2' ><C/></B></A></root>"# .as_bytes(); // let mut buf = vec![]; let mut p = OxideParser::start(data); loop { let res = p.read_event(); println!("{:?}", res); match res { xml_sax::Event::StartDocument => todo!(), xml_sax::Event::EndDocument => todo!(), xml_sax::Event::StartElement(el) => { if el.name == "C" { break; } } xml_sax::Event::EndElement(_) => todo!(), xml_sax::Event::Characters(c) => {} xml_sax::Event::Reference(c) => {} _ => {} } } // let res = p.read_event(); // println!("{:?}", res); // let res = p.read_event(); // println!("{:?}", res); // let res = p.read_event(); // println!("{:?}", res); } check matching start and end element tags #![allow(non_snake_case)] use std::{ io::{BufRead, BufReader, Read}, ops::{Range, RangeFrom, RangeFull}, vec, }; use nom::{ branch::alt, bytes::streaming::{escaped, is_not, tag, take_while, take_while1}, character::{ complete::{alphanumeric1 as alphanumeric, char, multispace1, none_of, one_of}, is_digit, is_hex_digit, streaming::{alpha1, alphanumeric1, digit1, multispace0}, }, combinator::{cut, map, map_res, opt, recognize, value}, error::{ context, convert_error, dbg_dmp, ContextError, Error, ErrorKind, ParseError, VerboseError, }, error_position, multi::{many0, many1, separated_list0}, number::complete::double, sequence::{delimited, 
pair, preceded, separated_pair, terminated, tuple}, AsChar, Err, IResult, InputIter, InputLength, Needed, Offset, Parser, Slice, }; // https://tools.ietf.org/html/rfc3629 static UTF8_CHAR_WIDTH: [u8; 256] = [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 0x1F */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 0x3F */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 0x5F */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 0x7F */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x9F */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xBF */ 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, /* 0xDF */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, /* 0xEF */ 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xFF */ ]; /// Given a first byte, determine how many bytes are in this UTF-8 character #[inline] fn utf8_char_width(b: u8) -> usize { return UTF8_CHAR_WIDTH[b as usize] as usize; } // enum CharResult { // Char(char, usize), // Err, // Eof, // } // #[inline] // fn parse_char(input: &[u8]) -> CharResult { // if input.len() == 0 { // return CharResult::Eof; // } // let width = utf8_char_width(input[0]); // if input.len() < width { // return CharResult::Eof; // } // match std::str::from_utf8(&input[..width]).ok() { // Some(s) => CharResult::Char(s.chars().next().unwrap(), width), // None => CharResult::Err, // } // } // [^<&] // none_of("<&") // CdataEnd // tag("]]>") // [2] Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF] #[inline] pub fn is_xml_char_t(chr: char) -> bool { chr == '\u{9}' || (chr >= '\u{A}' && chr <= '\u{D}') || (chr >= '\u{20}' && chr <= '\u{D7FF}') || (chr >= '\u{E000}' 
&& chr <= '\u{FFFD}') || (chr >= '\u{10000}' && chr <= '\u{10FFFF}') } // [4] NameStartChar ::= ":" | [A-Z] | "_" | [a-z] | [#xC0-#xD6] | [#xD8-#xF6] | // [#xF8-#x2FF] | [#x370-#x37D] | [#x37F-#x1FFF] | [#x200C-#x200D] | [#x2070-#x218F] | // [#x2C00-#x2FEF] | [#x3001-#xD7FF] | [#xF900-#xFDCF] | [#xFDF0-#xFFFD] | [#x10000-#xEFFFF] // ('A', 'Z'), /* ('A', 'Z'), veya ('\u{0041}', '\u{005A}'), */ // ('a', 'z'), // ('a', 'z') veya ('\u{61}', '\u{7A}'), // ('\u{C0}', '\u{D6}'), // ('\u{D8}', '\u{F6}'), // ('\u{F8}', '\u{2FF}'), // ('\u{370}', '\u{37D}'), // ('\u{37F}', '\u{1FFF}'), // ('\u{200C}', '\u{200D}'), // ('\u{2070}', '\u{218F}'), // ('\u{2C00}', '\u{2FEF}'), // ('\u{3001}', '\u{D7FF}'), // ('\u{F900}', '\u{FDCF}'), // ('\u{FDF0}', '\u{FFFD}'), // ('\u{10000}', '\u{EFFFF}'), // NameStartChar.expected_chars.push(':'); // NameStartChar.expected_chars.push('_'); #[inline] pub fn is_namestart_char_t(chr: char) -> bool { (chr >= 'A' && chr <= 'Z') || (chr >= 'a' && chr <= 'z') || (chr >= '\u{C0}' && chr <= '\u{D6}') || (chr >= '\u{D8}' && chr <= '\u{F6}') || (chr >= '\u{F8}' && chr <= '\u{2FF}') || (chr >= '\u{370}' && chr <= '\u{37D}') || (chr >= '\u{37F}' && chr <= '\u{1FFF}') || (chr >= '\u{200C}' && chr <= '\u{200D}') || (chr >= '\u{2070}' && chr <= '\u{218F}') || (chr >= '\u{2C00}' && chr <= '\u{2FEF}') || (chr >= '\u{3001}' && chr <= '\u{D7FF}') || (chr >= '\u{F900}' && chr <= '\u{FDCF}') || (chr >= '\u{FDF0}' && chr <= '\u{FFFD}') || (chr >= '\u{10000}' && chr <= '\u{EFFFF}') || chr == ':' || chr == '_' } pub fn namestart_char(input: &[u8]) -> IResult<&[u8], &[u8]> { if input.len() == 0 { return Err(Err::Incomplete(Needed::new(1))); } let width = utf8_char_width(input[0]); if input.len() < width { return Err(Err::Incomplete(Needed::new(width - input.len()))); } let c = match std::str::from_utf8(&input[..width]).ok() { Some(s) => s.chars().next().unwrap(), None => return Err(Err::Error(Error::new(input, ErrorKind::Char))), }; if is_namestart_char_t(c) { return 
Ok((&input[width..], &input[0..width])); } else { return Err(Err::Error(Error::new(input, ErrorKind::Char))); } } // [4a] NameChar ::= NameStartChar | "-" | "." | [0-9] | #xB7 | [#x0300-#x036F] | [#x203F-#x2040] #[inline] pub fn is_namechar_t(chr: char) -> bool { is_namestart_char_t(chr) || (chr >= '0' && chr <= '9') || (chr >= '\u{0300}' && chr <= 'z') || (chr >= '\u{203F}' && chr <= '\u{2040}') || chr == '-' || chr == '.' || chr == '\u{B7}' } pub fn namechar(input: &[u8]) -> IResult<&[u8], &[u8]> { if input.len() == 0 { return Err(Err::Incomplete(Needed::new(1))); } let width = utf8_char_width(input[0]); if input.len() < width { return Err(Err::Incomplete(Needed::new(width - input.len()))); } let c = match std::str::from_utf8(&input[..width]).ok() { Some(s) => s.chars().next().unwrap(), None => return Err(Err::Error(Error::new(input, ErrorKind::Char))), }; if is_namechar_t(c) { return Ok((&input[width..], &input[0..width])); } else { return Err(Err::Error(Error::new(input, ErrorKind::Char))); } } pub fn many0_custom_chardata<I, O, E, F>(mut f: F) -> impl FnMut(I) -> IResult<I, (), E> where I: Clone + InputLength, F: Parser<I, O, E>, E: ParseError<I>, { move |mut i: I| { // let mut acc = crate::lib::std::vec::Vec::with_capacity(4); loop { let len = i.input_len(); match f.parse(i.clone()) { Err(Err::Error(_)) => return Ok((i, ())), // Err(e) => return Err(e), // ref#streamcut Err(e) => return Ok((i, ())), Ok((i1, o)) => { // infinite loop check: the parser must always consume if i1.input_len() == len { return Err(Err::Error(E::from_error_kind(i, ErrorKind::Many0))); } i = i1; // acc.push(o); } } } } } pub fn many0_custom_trycomplete<I, O, E, F>(mut f: F) -> impl FnMut(I) -> IResult<I, (), E> where I: Clone + InputLength, F: Parser<I, O, E>, E: ParseError<I>, { move |mut i: I| { // let mut acc = crate::lib::std::vec::Vec::with_capacity(4); loop { let len = i.input_len(); match f.parse(i.clone()) { Err(Err::Error(_)) => return Ok((i, ())), Err(e) => return Err(e), 
//returns incomplete here // Err(e) => return Ok((i, ())), Ok((i1, o)) => { // infinite loop check: the parser must always consume if i1.input_len() == len { return Err(Err::Error(E::from_error_kind(i, ErrorKind::Many0))); } i = i1; // acc.push(o); } } } } } fn many1_custom<I, O, E, F>(mut f: F) -> impl FnMut(I) -> IResult<I, (), E> where I: Clone + InputLength, F: Parser<I, O, E>, E: ParseError<I>, { move |mut i: I| match f.parse(i.clone()) { Err(Err::Error(err)) => Err(Err::Error(E::append(i, ErrorKind::Many1, err))), Err(e) => Err(e), Ok((i1, o)) => { i = i1; loop { let len = i.input_len(); match f.parse(i.clone()) { Err(Err::Error(_)) => return Ok((i, ())), Err(e) => return Err(e), Ok((i1, o)) => { // infinite loop check: the parser must always consume if i1.input_len() == len { return Err(Err::Error(E::from_error_kind(i, ErrorKind::Many1))); } i = i1; } } } } } } fn name(input: &[u8]) -> IResult<&[u8], &[u8]> { recognize(pair(namestart_char, many0_custom_trycomplete(namechar)))(input) } // [66] CharRef ::= '&#' [0-9]+ ';' | '&#x' [0-9a-fA-F]+ ';' fn CharRef(input: &[u8]) -> IResult<&[u8], &[u8]> { alt(( recognize(tuple((tag("&#"), take_while1(is_digit), char(';')))), recognize(tuple((tag("&#x"), take_while1(is_hex_digit), char(';')))), ))(input) } // [68] EntityRef ::= '&' Name ';' fn EntityRef(input: &[u8]) -> IResult<&[u8], &[u8]> { recognize(tuple((tag("&"), name, char(';'))))(input) } // [67] Reference ::= EntityRef | CharRef fn Reference(input: &[u8]) -> IResult<&[u8], &[u8]> { alt((EntityRef, CharRef))(input) } #[derive(Clone, Debug, Eq, PartialEq)] struct Reference<'a> { initial: &'a str, // resolved: &'a str, } // [10] AttValue ::= '"' ([^<&"] | Reference)* '"' | "'" ([^<&'] | Reference)* "'" fn AttValue(input: &[u8]) -> IResult<&[u8], &[u8]> { alt(( delimited( char('"'), recognize(many0_custom_trycomplete(alt((is_not(r#"<&""#), Reference)))), char('"'), ), delimited( char('\''), recognize(many0_custom_trycomplete(alt((is_not(r#"<&'"#), Reference)))), 
char('\''), ), ))(input) } // [25] Eq ::= S? '=' S? fn Eq(input: &[u8]) -> IResult<&[u8], &[u8]> { recognize(tuple((multispace0, char('='), multispace0)))(input) } // [41] Attribute ::= Name Eq AttValue fn Attribute(input: &[u8]) -> IResult<&[u8], SAXAttribute> { match tuple((name, Eq, AttValue))(input) { Ok((i, o)) => { return Ok(( i, SAXAttribute { value: unsafe { std::str::from_utf8_unchecked(o.2) }, qualified_name: unsafe { std::str::from_utf8_unchecked(o.0) }, }, )); } Err(e) => Err(e), } } // let mut Attribute = ParsingRule::new("Attribute".to_owned(), RuleType::Sequence); // Attribute.children_names.push("Name".to_owned()); // Attribute.children_names.push("Eq".to_owned()); // Attribute.children_names.push("AttValue".to_owned()); // rule_nameRegistry.insert(Attribute.rule_name.clone(), Attribute); // [40] STag ::= '<' Name (S Attribute)* S? '>' #[derive(Clone, Debug, Eq, PartialEq)] struct SAXAttribute<'a> { pub value: &'a str, pub qualified_name: &'a str, // fn get_value(&self) -> &str; // fn get_local_name(&self) -> &str; // fn get_qualified_name(&self) -> &str; // fn get_uri(&self) -> &str; } #[derive(Clone, Debug, Eq, PartialEq)] struct SAXAttribute2 { pub value: std::ops::Range<usize>, pub qualified_name: std::ops::Range<usize>, // fn get_value(&self) -> &str; // fn get_local_name(&self) -> &str; // fn get_qualified_name(&self) -> &str; // fn get_uri(&self) -> &str; } #[derive(Clone, Debug, Eq, PartialEq)] struct StartElement<'a> { pub name: &'a str, pub attributes: Vec<SAXAttribute<'a>>, } #[derive(Clone, Debug, Eq, PartialEq)] struct EndElement<'a> { pub name: &'a str, } fn STag<'a>(input: &'a [u8]) -> IResult<&[u8], StartElement<'a>> { match tuple(( char('<'), name, many0(preceded(multispace0, Attribute)), multispace0, char('>'), ))(input) { Ok((i, o)) => { return Ok(( i, StartElement { name: unsafe { std::str::from_utf8_unchecked(o.1) }, attributes: o.2, }, )); } Err(e) => Err(e), } } // [44] EmptyElemTag ::= '<' Name (S Attribute)* S? 
'/>' fn EmptyElemTag(input: &[u8]) -> IResult<&[u8], StartElement> { match tuple(( char('<'), name, many0(preceded(multispace0, Attribute)), multispace0, tag("/>"), ))(input) { Ok((i, o)) => Ok(( i, StartElement { name: unsafe { std::str::from_utf8_unchecked(o.1) }, attributes: o.2, }, )), Err(e) => Err(e), } } // [3] S ::= (#x20 | #x9 | #xD | #xA)+ // multispace0 fits // [42] ETag ::= '</' Name S? '>' fn ETag(input: &[u8]) -> IResult<&[u8], EndElement> { match tuple((tag("</"), name, multispace0, char('>')))(input) { Ok((i, o)) => { // println!("{:?}", o); return Ok(( i, EndElement { name: unsafe { std::str::from_utf8_unchecked(o.1) }, }, )); } Err(e) => Err(e), } } #[test] fn test_etag() { let data = r#"</A>"#.as_bytes(); let res = ETag(&data); println!("{:?}", res); } #[test] fn test_namestart_char_t() { let data = "<a.abc-ab1çroot><A/><B/><C/></root>".as_bytes(); // fn parser(s: &[u8]) -> IResult<&[u8], &[u8]> { // namestart_char_t(s) // } let res = STag(&data); println!("{:?}", res); } #[test] fn test_stag() { let data = r#"<A a="b" c = "d"></A>"#.as_bytes(); let res = STag(&data); println!("{:?}", res); let data = r#"<A a='x'>"#.as_bytes(); let res = STag(&data); println!("{:?}", res); let data = r#"<B b="val" >"#.as_bytes(); let res = STag(&data); println!("{:?}", res); } // [14] CharData ::= [^<&]* - ([^<&]* ']]>' [^<&]*) // no '>' except ']]>' // The spec is not clear but we also apply Char restrictions #[inline] pub fn is_CharData_single_pure_t(chr: char) -> bool { chr != '<' && chr != '&' && is_xml_char_t(chr) } pub fn CharData_single_pure(input: &[u8]) -> IResult<&[u8], &[u8]> { if input.len() == 0 { return Err(Err::Incomplete(Needed::new(1))); } let width = utf8_char_width(input[0]); if input.len() < width { return Err(Err::Incomplete(Needed::new(width - input.len()))); } let c = match std::str::from_utf8(&input[..width]).ok() { Some(s) => s.chars().next().unwrap(), None => return Err(Err::Error(Error::new(input, ErrorKind::Char))), }; if 
is_CharData_single_pure_t(c) {
        return Ok((&input[width..], &input[0..width]));
    } else {
        return Err(Err::Error(Error::new(input, ErrorKind::Char)));
    }
}

/// One character of CharData, enforcing the ']]>' exclusion.
fn CharData_single(input: &[u8]) -> IResult<&[u8], &[u8]> {
    // An empty buffer is an Error here, NOT Incomplete — ref#streamcut.
    if input.len() == 0 {
        return Err(Err::Error(Error::new(input, ErrorKind::Char)));
    }
    // ']]>' must not appear in character data; when the buffer ends while
    // the sequence could still complete, request more input.
    match tag::<&str, &[u8], Error<&[u8]>>("]]>")(input) {
        Ok(_) => return Err(Err::Error(Error::new(input, ErrorKind::Char))),
        Err(Err::Incomplete(_)) => return Err(Err::Incomplete(Needed::Unknown)),
        _ => (),
    };
    CharData_single_pure(input)
}

#[test]
fn test_chardata_single() {
    let data = "]]".as_bytes();
    assert_eq!(
        CharData_single("]".as_bytes()),
        Err(Err::Incomplete(Needed::Unknown))
    );
    assert_eq!(
        CharData_single("]]".as_bytes()),
        Err(Err::Incomplete(Needed::Unknown))
    );
    assert_eq!(
        CharData_single("]]>".as_bytes()),
        Err(Err::Error(error_position!(
            "]]>".as_bytes(),
            ErrorKind::Char
        )))
    );
    assert_eq!(
        CharData_single("]]<".as_bytes()),
        Ok((&b"]<"[..], &b"]"[..]))
    );
    assert_eq!(
        CharData_single("&".as_bytes()),
        Err(Err::Error(error_position!("&".as_bytes(), ErrorKind::Char)))
    );
    assert_eq!(
        CharData_single("<".as_bytes()),
        Err(Err::Error(error_position!("<".as_bytes(), ErrorKind::Char)))
    );
    assert_eq!(
        CharData_single("abc".as_bytes()),
        Ok((&b"bc"[..], &b"a"[..]))
    );
}

// [14] CharData ::= [^<&]* - ([^<&]* ']]>' [^<&]*)
// NOTE: unlike the grammar, this implementation requires at least one char.
fn CharData(input: &[u8]) -> IResult<&[u8], &[u8]> {
    recognize(tuple((
        CharData_single,
        many0_custom_chardata(CharData_single),
    )))(input)
}

#[test]
fn test_chardata() {
    assert_eq!(CharData("abc]".as_bytes()), Ok((&b"]"[..], &b"abc"[..])));
    assert_eq!(
        CharData("]]".as_bytes()),
        Err(Err::Incomplete(Needed::Unknown))
    );
    // since CharData now has to match at least one char:
    // assert_eq!(CharData("]]>".as_bytes()), Ok((&b"]]>"[..], &b""[..])));
assert_eq!( CharData("]]>".as_bytes()), Err(Err::Error(error_position!( "]]>".as_bytes(), ErrorKind::Char ))) ); assert_eq!(CharData("]]<".as_bytes()), Ok((&b"<"[..], &b"]]"[..]))); //since we want chardata to parse at least 1 char now: // assert_eq!(CharData("&".as_bytes()), Ok((&b"&"[..], &b""[..]))); assert_eq!(CharData("a&".as_bytes()), Ok((&b"&"[..], &b"a"[..]))); assert_eq!(CharData("a<".as_bytes()), Ok((&b"<"[..], &b"a"[..]))); //this was returning incomplete since the next char can be the start of "]]>", but we plan to cut it off for streaming! //see ref#streamcut assert_eq!(CharData("abc".as_bytes()), Ok((&b""[..], &b"abc"[..]))); let data: Vec<u8> = [ 65, 108, 99, 104, 101, 109, 121, 32, 40, 102, 114, 111, 109, 32, 65, 114, 97, 98, 105, 99, 58, 32, 97, 108, 45, 107, 196, 171, 109, 105, 121, 196, ] .to_vec(); let remainder: Vec<u8> = [196].to_vec(); println!("try to read: {:?}", unsafe { std::str::from_utf8_unchecked(&data[0..31]) }); assert_eq!( CharData(&data), Ok(( &remainder[0..1], &"Alchemy (from Arabic: al-kīmiy".as_bytes()[..] )) ); } // [43] content ::= CharData? 
((element | Reference | CDSect | PI | Comment) CharData?)* //we will use state machine instead of this rule to make it streamable enum ContentRelaxed<'a> { CharData(&'a [u8]), StartElement(StartElement<'a>), EmptyElemTag(StartElement<'a>), EndElement(EndElement<'a>), Reference(Reference<'a>), CdataStart, CommentStart, } fn content_relaxed_CharData(input: &[u8]) -> IResult<&[u8], ContentRelaxed> { match CharData(input) { Ok(succ) => Ok((succ.0, ContentRelaxed::CharData(succ.1))), Err(err) => return Err(err), } } fn content_relaxed_STag(input: &[u8]) -> IResult<&[u8], ContentRelaxed> { match STag(input) { Ok(succ) => Ok((succ.0, ContentRelaxed::StartElement(succ.1))), Err(err) => return Err(err), } } fn content_relaxed_ETag(input: &[u8]) -> IResult<&[u8], ContentRelaxed> { match ETag(input) { Ok(succ) => Ok((succ.0, ContentRelaxed::EndElement(succ.1))), Err(err) => return Err(err), } } //todo add endelement as next step or inform it is an emptyelem tag via event api? fn content_relaxed_EmptyElemTag(input: &[u8]) -> IResult<&[u8], ContentRelaxed> { match EmptyElemTag(input) { Ok(succ) => Ok((succ.0, ContentRelaxed::EmptyElemTag(succ.1))), Err(err) => return Err(err), } } fn content_relaxed_Reference(input: &[u8]) -> IResult<&[u8], ContentRelaxed> { match Reference(input) { Ok(succ) => Ok(( succ.0, ContentRelaxed::Reference(Reference { initial: unsafe { std::str::from_utf8_unchecked(succ.1) }, }), )), Err(err) => return Err(err), } } fn content_relaxed_CdataStart(input: &[u8]) -> IResult<&[u8], ContentRelaxed> { match CDATASection_start(input) { Ok(succ) => Ok((succ.0, ContentRelaxed::CdataStart)), Err(err) => return Err(err), } } fn content_relaxed_CommentStart(input: &[u8]) -> IResult<&[u8], ContentRelaxed> { match Comment_start(input) { Ok(succ) => Ok((succ.0, ContentRelaxed::CommentStart)), Err(err) => return Err(err), } } // [custom] relaxed ::= CharData | STag | EmptyElemTag | ETag | Reference | CDATA | Comment ... 
todo: add PI fn content_relaxed(input: &[u8]) -> IResult<&[u8], ContentRelaxed> { alt(( content_relaxed_CharData, content_relaxed_STag, content_relaxed_EmptyElemTag, content_relaxed_ETag, content_relaxed_Reference, content_relaxed_CdataStart, content_relaxed_CommentStart, ))(input) } #[test] fn test_xml3() { let data = "<root><A/><B/><C/></root>".as_bytes(); fn parser(s: &[u8]) -> IResult<&[u8], &[u8]> { tag("<root>")(s) } let res = parser(&data); println!("{:?}", res); } // Parser Rules organized by W3C Spec // [26] VersionNum ::= '1.' [0-9]+ fn VersionNum(input: &[u8]) -> IResult<&[u8], &[u8]> { recognize(tuple((tag("1."), digit1)))(input) } #[test] fn test_VersionNum() { let data = r#"1.123 "#.as_bytes(); let res = VersionNum(&data); println!("{:?}", res); } // [24] VersionInfo ::= S 'version' Eq ("'" VersionNum "'" | '"' VersionNum '"') fn VersionInfo(input: &[u8]) -> IResult<&[u8], &[u8]> { recognize(tuple(( multispace1, tag("version"), Eq, alt(( delimited(char('"'), VersionNum, char('"')), delimited(char('\''), VersionNum, char('\'')), )), )))(input) } #[test] fn test_VersionInfo() { let data = r#" version="1.0" "#.as_bytes(); let res = VersionInfo(&data); println!("{:?}", res); } // [81] EncName ::= [A-Za-z] ([A-Za-z0-9._] | '-')* fn EncName(input: &[u8]) -> IResult<&[u8], &[u8]> { recognize(tuple(( alpha1, many0_custom_trycomplete(alt((alphanumeric1, tag("-"), tag("."), tag("_")))), )))(input) } #[test] fn test_EncName() { let data = r#"UTF-8 "#.as_bytes(); let res = EncName(&data); println!("{:?}", res); } // [80] EncodingDecl ::= S 'encoding' Eq ('"' EncName '"' | "'" EncName "'" ) fn EncodingDecl(input: &[u8]) -> IResult<&[u8], &[u8]> { recognize(tuple(( multispace1, tag("encoding"), Eq, alt(( delimited(char('"'), EncName, char('"')), delimited(char('\''), EncName, char('\'')), )), )))(input) } #[test] fn test_EncodingDecl() { let data = r#" encoding='EUC-JP' "#.as_bytes(); let res = EncodingDecl(&data); println!("{:?}", res); } // [32] SDDecl ::= S 
'standalone' Eq (("'" ('yes' | 'no') "'") | ('"' ('yes' | 'no') '"')) fn yes_mi_no_mu(input: &[u8]) -> IResult<&[u8], &[u8]> { alt((tag("yes"), tag("no")))(input) } fn SDDecl(input: &[u8]) -> IResult<&[u8], &[u8]> { recognize(tuple(( multispace1, tag("standalone"), Eq, alt(( delimited(char('"'), yes_mi_no_mu, char('"')), delimited(char('\''), yes_mi_no_mu, char('\'')), )), )))(input) } #[test] fn test_SDDecl() { let data = r#" standalone='yes' "#.as_bytes(); let res = SDDecl(&data); println!("{:?}", res); } // [23] XMLDecl ::= '<?xml' VersionInfo EncodingDecl? SDDecl? S? '?>' fn XMLDecl(input: &[u8]) -> IResult<&[u8], &[u8]> { recognize(tuple(( tag("<?xml"), VersionInfo, opt(EncodingDecl), opt(SDDecl), multispace0, tag("?>"), )))(input) } // [27] Misc ::= Comment | PI | S //todo: comment | PI, we may need to separate // fn Misc(input: &[u8]) -> IResult<&[u8], &[u8]> { // recognize(alt((multispace1,)))(input) // } fn docstart_custom(input: &[u8]) -> IResult<&[u8], &[u8]> { recognize(tuple((XMLDecl, multispace0)))(input) } #[test] fn test_XMLDecl() { let data = r#"<?xml version="1.0" encoding="UTF-8" standalone='yes'?>"#.as_bytes(); let res = XMLDecl(&data); println!("{:?}", res); } // [1] document ::= prolog element Misc* // [22] prolog ::= XMLDecl? Misc* (doctypedecl Misc*)? // [15] Comment ::= '<!--' ((Char - '-') | ('-' (Char - '-')))* '-->' //spec seems to not allow empty comments? There are parsers that allow it. 
fn Comment_start(input: &[u8]) -> IResult<&[u8], &[u8]> { tag("<!--")(input) } fn Comment_end(input: &[u8]) -> IResult<&[u8], &[u8]> { tag("-->")(input) } // We don't need to exclude "-" we handle that in inside_Comment_single // #[inline] // fn is_CharData_single_pure_t(chr: char) -> bool { // chr != '<' && chr != '&' && is_xml_char_t(chr) // } fn inside_Comment_or_CDATA_single_pure(input: &[u8]) -> IResult<&[u8], &[u8]> { if input.len() == 0 { return Err(Err::Incomplete(Needed::new(1))); } let width = utf8_char_width(input[0]); if input.len() < width { return Err(Err::Incomplete(Needed::new(width - input.len()))); } let c = match std::str::from_utf8(&input[..width]).ok() { Some(s) => s.chars().next().unwrap(), None => return Err(Err::Error(Error::new(input, ErrorKind::Char))), }; if is_xml_char_t(c) { return Ok((&input[width..], &input[0..width])); } else { return Err(Err::Error(Error::new(input, ErrorKind::Char))); } } fn inside_Comment_single(input: &[u8]) -> IResult<&[u8], &[u8]> { //if input = 0 , don't send incomplete // ref#streamcut if input.len() == 0 { return Err(Err::Error(Error::new(input, ErrorKind::Char))); } // '--' should not appear in the comment, if we can't be sure because input is eof, we should request more data. 
match tag::<&str, &[u8], Error<&[u8]>>("--")(input) { Ok(r) => return Err(Err::Error(Error::new(input, ErrorKind::Char))), Err(Err::Incomplete(n)) => return Err(Err::Incomplete(Needed::new(1))), _ => (), }; inside_Comment_or_CDATA_single_pure(input) } fn Comment(input: &[u8]) -> IResult<&[u8], &[u8]> { recognize(tuple(( Comment_start, many0_custom_chardata(inside_Comment_single), Comment_end, )))(input) } #[test] fn test_comment() { assert_eq!( Comment("<!-- comment -->a".as_bytes()), Ok((&b"a"[..], &b"<!-- comment -->"[..])) ); assert_eq!( Comment("<!---->cc".as_bytes()), Ok((&b"cc"[..], &b"<!---->"[..])) ); assert_eq!( Comment("<!-- comment --->a".as_bytes()), Err(Err::Error(error_position!( "--->a".as_bytes(), ErrorKind::Tag ))) ); assert_eq!( Comment("<!-- com--ment -->a".as_bytes()), Err(Err::Error(error_position!( "--ment -->a".as_bytes(), ErrorKind::Tag ))) ); assert_eq!( Comment("<!--ok-".as_bytes()), Err(Err::Incomplete(Needed::new(2))) ); assert_eq!( Comment("<!--ok--".as_bytes()), Err(Err::Incomplete(Needed::new(1))) ); } enum InsideComment<'a> { Characters(&'a [u8]), CommentEnd, } fn insidecomment_characters(input: &[u8]) -> IResult<&[u8], InsideComment> { match recognize(tuple(( inside_Comment_single, many0_custom_chardata(inside_Comment_single), )))(input) { Ok(succ) => Ok((succ.0, InsideComment::Characters(succ.1))), Err(err) => return Err(err), } } fn insidecomment_comment_end(input: &[u8]) -> IResult<&[u8], InsideComment> { match Comment_end(input) { Ok(succ) => Ok((succ.0, InsideComment::CommentEnd)), Err(err) => return Err(err), } } // [custom] fn insidecomment(input: &[u8]) -> IResult<&[u8], InsideComment> { alt((insidecomment_characters, insidecomment_comment_end))(input) } // [18] CDSect ::= CDStart CData CDEnd // [19] CDStart ::= '<![CDATA[' fn CDATASection_start(input: &[u8]) -> IResult<&[u8], &[u8]> { tag("<![CDATA[")(input) } // [21] CDEnd ::= ']]>' fn CDATASection_end(input: &[u8]) -> IResult<&[u8], &[u8]> { tag("]]>")(input) } // [20] 
CData ::= (Char* - (Char* ']]>' Char*)) fn inside_CDATASection_single(input: &[u8]) -> IResult<&[u8], &[u8]> { //if input = 0 , don't send incomplete // ref#streamcut if input.len() == 0 { return Err(Err::Error(Error::new(input, ErrorKind::Char))); } // ']]>' should not appear in the cdata section, if we can't be sure because input is eof, we should request more data. match tag::<&str, &[u8], Error<&[u8]>>("]]>")(input) { Ok(r) => return Err(Err::Error(Error::new(input, ErrorKind::Char))), Err(Err::Incomplete(n)) => return Err(Err::Incomplete(Needed::Unknown)), _ => (), }; inside_Comment_or_CDATA_single_pure(input) } fn CDATASection(input: &[u8]) -> IResult<&[u8], &[u8]> { recognize(tuple(( CDATASection_start, many0_custom_chardata(inside_CDATASection_single), CDATASection_end, )))(input) } #[test] fn test_cdata() { assert_eq!( CDATASection("<![CDATA[abc]]>a".as_bytes()), Ok((&b"a"[..], &b"<![CDATA[abc]]>"[..])) ); assert_eq!( CDATASection("<![CDATA[]]>".as_bytes()), Ok((&b""[..], &b"<![CDATA[]]>"[..])) ); assert_eq!( CDATASection("<![CDATA[ ]]".as_bytes()), Err(Err::Incomplete(Needed::new(1))) ); assert_eq!( CDATASection("<![CDATA[ ]".as_bytes()), Err(Err::Incomplete(Needed::new(2))) ); } //only parsed without checking well-formedness inside // [16] PI ::= '<?' PITarget (S (Char* - (Char* '?>' Char*)))? '?>' // [17] PITarget ::= Name - (('X' | 'x') ('M' | 'm') ('L' | 'l')) fn PI_start(input: &[u8]) -> IResult<&[u8], &[u8]> { tag("<?")(input) } fn PI_end(input: &[u8]) -> IResult<&[u8], &[u8]> { tag("?>")(input) } fn inside_PI_single(input: &[u8]) -> IResult<&[u8], &[u8]> { //if input = 0 , don't send incomplete // ref#streamcut if input.len() == 0 { return Err(Err::Error(Error::new(input, ErrorKind::Char))); } // ']]>' should not appear in the cdata section, if we can't be sure because input is eof, we should request more data. 
match tag::<&str, &[u8], Error<&[u8]>>("?>")(input) { Ok(r) => return Err(Err::Error(Error::new(input, ErrorKind::Char))), Err(Err::Incomplete(n)) => return Err(Err::Incomplete(Needed::Unknown)), _ => (), }; inside_Comment_or_CDATA_single_pure(input) } fn PI(input: &[u8]) -> IResult<&[u8], &[u8]> { recognize(tuple(( PI_start, many0_custom_chardata(inside_PI_single), PI_end, )))(input) } #[test] fn test_pi() { assert_eq!(PI("<??>a".as_bytes()), Ok((&b"a"[..], &b"<??>"[..]))); assert_eq!( PI("<?dummmy?>".as_bytes()), Ok((&b""[..], &b"<?dummmy?>"[..])) ); } //only parsed without checking well-formedness inside // [28] doctypedecl ::= '<!DOCTYPE' S Name (S ExternalID)? S? ('[' intSubset ']' S?)? '>' fn doctypedecl_start(input: &[u8]) -> IResult<&[u8], &[u8]> { tag("<!DOCTYPE")(input) } fn doctypedecl_end(input: &[u8]) -> IResult<&[u8], &[u8]> { tag(">")(input) } // fn inside_doctypedecl_single(input: &[u8]) -> IResult<&[u8], &[u8]> { // //if input = 0 , don't send incomplete // // ref#streamcut // if input.len() == 0 { // return Err(Err::Error(Error::new(input, ErrorKind::Char))); // } // // ']]>' should not appear in the cdata section, if we can't be sure because input is eof, we should request more data. 
// match tag::<&str, &[u8], Error<&[u8]>>(">")(input) { // Ok(r) => return Err(Err::Error(Error::new(input, ErrorKind::Char))), // Err(Err::Incomplete(n)) => return Err(Err::Incomplete(Needed::Unknown)), // _ => (), // }; // inside_Comment_or_CDATA_single_pure(input) // } //char that is not > or < fn inside_doctypedecl_single_pure(input: &[u8]) -> IResult<&[u8], &[u8]> { if input.len() == 0 { return Err(Err::Incomplete(Needed::new(1))); } let width = utf8_char_width(input[0]); if input.len() < width { return Err(Err::Incomplete(Needed::new(width - input.len()))); } let c = match std::str::from_utf8(&input[..width]).ok() { Some(s) => s.chars().next().unwrap(), None => return Err(Err::Error(Error::new(input, ErrorKind::Char))), }; if is_xml_char_t(c) && c != '<' && c != '>' { return Ok((&input[width..], &input[0..width])); } else { return Err(Err::Error(Error::new(input, ErrorKind::Char))); } } fn doctypedecl_dummy_internal(input: &[u8]) -> IResult<&[u8], &[u8]> { recognize(tuple(( tag("<"), many0_custom_trycomplete(alt(( recognize(many1_custom(inside_doctypedecl_single_pure)), Comment, doctypedecl_dummy_internal, ))), tag(">"), )))(input) } // can contain nested < and > for attlist and internal comments fn doctypedecl(input: &[u8]) -> IResult<&[u8], &[u8]> { recognize(tuple(( doctypedecl_start, many0_custom_trycomplete(alt(( recognize(many1_custom(inside_doctypedecl_single_pure)), Comment, doctypedecl_dummy_internal, ))), doctypedecl_end, )))(input) } #[test] fn test_doctypedecl() { assert_eq!( doctypedecl(r#"<!DOCTYPE>a"#.as_bytes()), Ok((&b"a"[..], &br#"<!DOCTYPE>"#[..])) ); assert_eq!( doctypedecl(r#"<!DOCTYPE greeting SYSTEM "hello.dtd">a"#.as_bytes()), Ok((&b"a"[..], &br#"<!DOCTYPE greeting SYSTEM "hello.dtd">"#[..])) ); assert_eq!( doctypedecl(r#"<!DOCTYPE dummy>"#.as_bytes()), Ok((&b""[..], &br#"<!DOCTYPE dummy>"#[..])) ); assert_eq!( doctypedecl(r#"<!DOCTYPE <!-- --> <[]>dummy>"#.as_bytes()), Ok((&b""[..], &br#"<!DOCTYPE <!-- --> <[]>dummy>"#[..])) ); //also 
works > inside comment assert_eq!( doctypedecl(r#"<!DOCTYPE <!-- > --> <[]>dummy>"#.as_bytes()), Ok((&b""[..], &br#"<!DOCTYPE <!-- > --> <[]>dummy>"#[..])) ); } enum InsideCdata<'a> { Characters(&'a [u8]), CdataEnd, } fn insidecdata_characters(input: &[u8]) -> IResult<&[u8], InsideCdata> { match recognize(tuple(( inside_CDATASection_single, many0_custom_chardata(inside_CDATASection_single), )))(input) { Ok(succ) => Ok((succ.0, InsideCdata::Characters(succ.1))), Err(err) => return Err(err), } } fn insidecdata_cdata_end(input: &[u8]) -> IResult<&[u8], InsideCdata> { match CDATASection_end(input) { Ok(succ) => Ok((succ.0, InsideCdata::CdataEnd)), Err(err) => return Err(err), } } // [custom] fn insidecdata(input: &[u8]) -> IResult<&[u8], InsideCdata> { alt((insidecdata_characters, insidecdata_cdata_end))(input) } enum MiscBeforeXmlDecl<'a> { PI(&'a [u8]), Whitespace(&'a [u8]), CommentStart, DocType(&'a [u8]), XmlDecl(&'a [u8]), } enum MiscBeforeDoctype<'a> { PI(&'a [u8]), Whitespace(&'a [u8]), CommentStart, DocType(&'a [u8]), } enum Misc<'a> { PI(&'a [u8]), Whitespace(&'a [u8]), CommentStart, } // using map combinator... 
// fn misc_pi(input: &[u8]) -> IResult<&[u8], Misc> { // map(PI, |a| Misc::PI(a))(input) // // match recognize(tuple(( // // inside_CDATASection_single, // // many0_custom_chardata(inside_CDATASection_single), // // )))(input) // // { // // Ok(succ) => Ok((succ.0, InsideCdata::Characters(succ.1))), // // Err(err) => return Err(err), // // } // } // [custom] fn misc(input: &[u8]) -> IResult<&[u8], Misc> { alt(( map(PI, |a| Misc::PI(a)), map(multispace1, |a| Misc::Whitespace(a)), map(Comment_start, |a| Misc::CommentStart), ))(input) } fn misc_before_doctype(input: &[u8]) -> IResult<&[u8], MiscBeforeDoctype> { alt(( map(PI, |a| MiscBeforeDoctype::PI(a)), map(multispace1, |a| MiscBeforeDoctype::Whitespace(a)), map(Comment_start, |a| MiscBeforeDoctype::CommentStart), map(doctypedecl, |a| MiscBeforeDoctype::DocType(a)), ))(input) } fn misc_before_xmldecl(input: &[u8]) -> IResult<&[u8], MiscBeforeXmlDecl> { alt(( map(XMLDecl, |a| MiscBeforeXmlDecl::XmlDecl(a)), // currently PI can also match XMLDecl so this is first choice map(PI, |a| MiscBeforeXmlDecl::PI(a)), map(multispace1, |a| MiscBeforeXmlDecl::Whitespace(a)), map(Comment_start, |a| MiscBeforeXmlDecl::CommentStart), map(doctypedecl, |a| MiscBeforeXmlDecl::DocType(a)), ))(input) } // Namespaces in XML 1.0 https://www.w3.org/TR/xml-names/ // [1] NSAttName ::= PrefixedAttName | DefaultAttName // [2] PrefixedAttName ::= 'xmlns:' // [3] DefaultAttName ::= 'xmlns' // [4] NCName ::= Name - (Char* ':' Char*) /* An XML Name, minus the ":" */ // [5] NCNameChar ::= NameChar - ':' /* An XML NameChar, minus the ":" */ // [6] NCNameStartChar ::= NCName - ( Char Char Char* ) /* The first letter of an NCName */ // [7] QName ::= PrefixedName | UnprefixedName // [8] PrefixedName ::= Prefix ':' LocalPart // [9] UnprefixedName ::= LocalPart // [10] Prefix ::= NCName // [11] LocalPart ::= NCName #[derive(Clone, Debug, Eq, PartialEq)] enum ParserState { Initial, DocStartBeforeXmlDecl, // when xmldecl parsed move to DocStartBeforeDocType, 
if something else parsed(including whitespace) the same! // DocStartBeforeXmlDeclInsideComment, // not possible - this means that doc doesn't have xmldecl, move to DocStartBeforeDocType DocStartBeforeDocType, //when doctype parsed move to docstart DocStartBeforeDocTypeInsideComment, // this doesn't mean that doc doesn't have doctype, move to DocStartBeforeDocType DocStart, DocStartInsideComment, Content, InsideCdata, InsideComment, //can be at the start or end of the document? specified all DocEnd, //misc DocEndInsideComment, } struct Namespace { level: usize, prefix: Range<usize>, value: Range<usize>, } pub struct OxideParser<R: Read> { state: ParserState, bufreader: BufReader<R>, buffer2: Vec<u8>, strbuffer: String, offset: usize, // document_complete: bool, //if element_level reaches 0 again , we control this via state element_level: usize, element_strbuffer: String, element_list: Vec<Range<usize>>, is_namespace_aware: bool, namespace_strbuffer: String, namespace_list: Vec<Namespace>, } fn convert_start_element<'a>( strbuffer: &'a mut String, event1: StartElement, ) -> xml_sax::StartElement<'a> { let start = strbuffer.len(); let size = event1.name.len(); strbuffer.push_str(event1.name); let mut attributes2: Vec<SAXAttribute2> = vec![]; for att in event1.attributes { let start = strbuffer.len(); let size = att.qualified_name.len(); strbuffer.push_str(att.qualified_name); let qualified_name_range = Range { start: start, end: start + size, }; let start = strbuffer.len(); let size = att.value.len(); strbuffer.push_str(att.value); let value_range = Range { start: start, end: start + size, }; // let qualified_name = &self.strbuffer[start..(start + size)]; // let value = &self.strbuffer[start..(start + size)]; attributes2.push(SAXAttribute2 { value: value_range, qualified_name: qualified_name_range, }); } let mut attributes: Vec<xml_sax::Attribute> = vec![]; for att in attributes2 { // let qualified_name = &self.strbuffer[start..(start + size)]; // let value = 
&self.strbuffer[start..(start + size)]; attributes.push(xml_sax::Attribute { value: &strbuffer[att.value], name: &strbuffer[att.qualified_name], }); } xml_sax::StartElement { name: &strbuffer[start..(start + size)], attributes: attributes, is_empty: false, } } fn push_str_get_range(strbuffer: &mut String, addition: &str) -> Range<usize> { let start = strbuffer.len(); let size = addition.len(); let range = Range { start: start, end: start + size, }; strbuffer.push_str(addition); range } impl<R: Read> OxideParser<R> { // This method "consumes" the resources of the caller object // `self` desugars to `self: Self` pub fn start(reader: R) -> OxideParser<R> { OxideParser { state: ParserState::DocStartBeforeXmlDecl, bufreader: BufReader::with_capacity(8192, reader), offset: 0, buffer2: vec![], strbuffer: String::new(), element_level: 0, element_list: vec![], element_strbuffer: String::new(), is_namespace_aware: true, namespace_list: vec![], namespace_strbuffer: String::new(), } } fn read_data(&mut self) { self.bufreader.fill_buf().unwrap(); let data2 = self.bufreader.buffer(); self.buffer2.extend_from_slice(data2); self.bufreader.consume(data2.len()); } // , buf: &'b [u8] pub fn read_event<'a, 'b, 'c>(&'a mut self) -> xml_sax::Event<'a> { // self.bufreader.consume(self.offset); self.buffer2.drain(0..self.offset); self.offset = 0; self.strbuffer.clear(); if self.bufreader.capacity() > self.buffer2.len() { self.read_data(); } // let mut event: StartElement = StartElement { // name: "", // attributes: vec![], // }; // let mut event1: StartElement; //<'b>; //&'a let event2: xml_sax::Event; match self.state { ParserState::Initial => { self.state = ParserState::DocStartBeforeXmlDecl; return xml_sax::Event::StartDocument; } ParserState::DocStartBeforeXmlDecl => { let res = misc_before_xmldecl(&self.buffer2); match res { Ok(parseresult) => { self.offset = self.buffer2.offset(parseresult.0); self.state = ParserState::DocStartBeforeDocType; match parseresult.1 { 
MiscBeforeXmlDecl::XmlDecl(a) => { let str = unsafe { std::str::from_utf8_unchecked(a) }; let range = push_str_get_range(&mut self.strbuffer, &str); event2 = xml_sax::Event::XmlDeclaration(&self.strbuffer[range]) } MiscBeforeXmlDecl::PI(a) => { let str = unsafe { std::str::from_utf8_unchecked(a) }; let range = push_str_get_range(&mut self.strbuffer, &str); event2 = xml_sax::Event::ProcessingInstruction(&self.strbuffer[range]) } MiscBeforeXmlDecl::Whitespace(a) => { let str = unsafe { std::str::from_utf8_unchecked(a) }; let range = push_str_get_range(&mut self.strbuffer, &str); event2 = xml_sax::Event::Whitespace(&self.strbuffer[range]) } MiscBeforeXmlDecl::CommentStart => { self.state = ParserState::DocStartBeforeDocTypeInsideComment; event2 = xml_sax::Event::StartComment; } MiscBeforeXmlDecl::DocType(a) => { self.state = ParserState::DocStart; let str = unsafe { std::str::from_utf8_unchecked(a) }; let range = push_str_get_range(&mut self.strbuffer, &str); event2 = xml_sax::Event::DocumentTypeDeclaration(&self.strbuffer[range]); } } } Err(err) => { //try content! 
self.state = ParserState::Content; event2 = self.read_event(); } } } ParserState::DocStartBeforeDocType => { let res = misc_before_doctype(&self.buffer2); match res { Ok(parseresult) => { self.offset = self.buffer2.offset(parseresult.0); // self.state = ParserState::DocStartBeforeDocType; match parseresult.1 { MiscBeforeDoctype::PI(a) => { let str = unsafe { std::str::from_utf8_unchecked(a) }; let range = push_str_get_range(&mut self.strbuffer, &str); event2 = xml_sax::Event::ProcessingInstruction(&self.strbuffer[range]) } MiscBeforeDoctype::Whitespace(a) => { let str = unsafe { std::str::from_utf8_unchecked(a) }; let range = push_str_get_range(&mut self.strbuffer, &str); event2 = xml_sax::Event::Whitespace(&self.strbuffer[range]) } MiscBeforeDoctype::CommentStart => { self.state = ParserState::DocStartBeforeDocTypeInsideComment; event2 = xml_sax::Event::StartComment; } MiscBeforeDoctype::DocType(a) => { self.state = ParserState::DocStart; let str = unsafe { std::str::from_utf8_unchecked(a) }; let range = push_str_get_range(&mut self.strbuffer, &str); event2 = xml_sax::Event::DocumentTypeDeclaration(&self.strbuffer[range]); } } } Err(err) => { //try content! 
self.state = ParserState::Content; event2 = self.read_event(); } } } ParserState::DocStartBeforeDocTypeInsideComment => { //expect comment or comment-end let res = insidecomment(&self.buffer2); match res { Ok(parseresult) => { self.offset = self.buffer2.offset(parseresult.0); match parseresult.1 { InsideComment::Characters(characters) => { let start = self.strbuffer.len(); let size = characters.len(); self.strbuffer .push_str(unsafe { std::str::from_utf8_unchecked(characters) }); event2 = xml_sax::Event::Comment(&self.strbuffer[start..(start + size)]) } InsideComment::CommentEnd => { self.state = ParserState::DocStartBeforeDocType; event2 = xml_sax::Event::EndComment; } } } Err(err) => panic!(), } } ParserState::DocStart => { let res = misc(&self.buffer2); match res { Ok(parseresult) => { self.offset = self.buffer2.offset(parseresult.0); // self.state = ParserState::DocStartBeforeDocType; match parseresult.1 { Misc::PI(a) => { let str = unsafe { std::str::from_utf8_unchecked(a) }; let range = push_str_get_range(&mut self.strbuffer, &str); event2 = xml_sax::Event::ProcessingInstruction(&self.strbuffer[range]) } Misc::Whitespace(a) => { let str = unsafe { std::str::from_utf8_unchecked(a) }; let range = push_str_get_range(&mut self.strbuffer, &str); event2 = xml_sax::Event::Whitespace(&self.strbuffer[range]) } Misc::CommentStart => { self.state = ParserState::DocStartInsideComment; event2 = xml_sax::Event::StartComment; } } } Err(err) => { //try content! 
self.state = ParserState::Content; event2 = self.read_event(); } } } ParserState::DocStartInsideComment => { //expect comment or comment-end let res = insidecomment(&self.buffer2); match res { Ok(parseresult) => { self.offset = self.buffer2.offset(parseresult.0); match parseresult.1 { InsideComment::Characters(characters) => { let start = self.strbuffer.len(); let size = characters.len(); self.strbuffer .push_str(unsafe { std::str::from_utf8_unchecked(characters) }); event2 = xml_sax::Event::Comment(&self.strbuffer[start..(start + size)]) } InsideComment::CommentEnd => { self.state = ParserState::DocStart; event2 = xml_sax::Event::EndComment; } } } Err(err) => panic!(), } } ParserState::Content => { let res = content_relaxed(&self.buffer2); match res { Ok(parseresult) => { self.offset = self.buffer2.offset(parseresult.0); match parseresult.1 { ContentRelaxed::CharData(event1) => { let start = self.strbuffer.len(); let size = event1.len(); self.strbuffer .push_str(unsafe { std::str::from_utf8_unchecked(event1) }); event2 = xml_sax::Event::Characters( &self.strbuffer[start..(start + size)], ) } ContentRelaxed::StartElement(event1) => { //todo decode let start_element = convert_start_element(&mut self.strbuffer, event1); self.element_level += 1; //add element to list for expected tags check let range = push_str_get_range( &mut self.element_strbuffer, start_element.name, ); self.element_list.push(range); //todo: add namespaces event2 = xml_sax::Event::StartElement(start_element); } ContentRelaxed::EmptyElemTag(event1) => { //todo decode let mut start_elem = convert_start_element(&mut self.strbuffer, event1); start_elem.is_empty = true; event2 = xml_sax::Event::StartElement(start_elem); //todo: add & clear up namespaces //add endelement after this? no..? 
} ContentRelaxed::EndElement(event1) => { //todo: check if it is the expected tag match self.element_list.pop() { Some(r) => { if &self.element_strbuffer[r.clone()] == event1.name { self.element_strbuffer.truncate(r.start); } else { panic!( "Expected closing tag: {} ,found: {}", &self.element_strbuffer[r.clone()], event1.name ); // TODO Expected closing tag: ... &self.element_strbuffer[r.clone()] found event1.name } } None => { panic!() } } // let range = push_str_get_range( // &mut self.element_strbuffer, // start_element.name, // ); // self.element_list.push(range); let start = self.strbuffer.len(); let size = event1.name.len(); self.strbuffer.push_str(event1.name); event2 = xml_sax::Event::EndElement(xml_sax::EndElement { name: &self.strbuffer[start..(start + size)], }); self.element_level -= 1; if self.element_level == 0 { self.state = ParserState::DocEnd; } //todo: clear up namespaces } ContentRelaxed::Reference(event1) => { // let start = self.strbuffer.len(); // let size = event1.initial.len(); // let range_initial = Range { // start: start, // end: start + size, // }; // self.strbuffer.push_str(event1.initial); let range: Range<usize> = push_str_get_range(&mut self.strbuffer, event1.initial); let range_resolved = match event1.initial { "&amp;" => push_str_get_range(&mut self.strbuffer, "&"), "&lt" => push_str_get_range(&mut self.strbuffer, "<"), "&gt;" => push_str_get_range(&mut self.strbuffer, ">"), "&quot;" => push_str_get_range(&mut self.strbuffer, "\""), "&apos;" => push_str_get_range(&mut self.strbuffer, "'"), _ => push_str_get_range(&mut self.strbuffer, event1.initial), }; //todo resolve char refs //we are ignoring DTD entity refs event2 = xml_sax::Event::Reference(xml_sax::Reference { raw: &self.strbuffer[range], resolved: &self.strbuffer[range_resolved], }) } ContentRelaxed::CdataStart => { event2 = xml_sax::Event::StartCdataSection; self.state = ParserState::InsideCdata; } ContentRelaxed::CommentStart => { event2 = xml_sax::Event::StartComment; 
self.state = ParserState::InsideComment; } } } Err(Err::Incomplete(e)) => { // panic!() // self.read_data(); // if read bytes are 0 then return eof, otherwise return dummy event if self.buffer2.len() == 0 { return xml_sax::Event::EndDocument; } println!("try to read bytes: {:?}", unsafe { &self.buffer2 }); println!("try to read: {:?}", unsafe { std::str::from_utf8_unchecked(&self.buffer2) }); println!("err: {:?}", e); panic!() } Err(e) => { println!("try to read bytes: {:?}", unsafe { &self.buffer2 }); println!("try to read: {:?}", unsafe { std::str::from_utf8_unchecked(&self.buffer2) }); println!("err: {:?}", e); panic!() } } } ParserState::InsideCdata => { //expect cdata or cdata-end let res = insidecdata(&self.buffer2); match res { Ok(parseresult) => { self.offset = self.buffer2.offset(parseresult.0); match parseresult.1 { InsideCdata::Characters(characters) => { let start = self.strbuffer.len(); let size = characters.len(); self.strbuffer .push_str(unsafe { std::str::from_utf8_unchecked(characters) }); event2 = xml_sax::Event::Cdata(&self.strbuffer[start..(start + size)]) } InsideCdata::CdataEnd => { self.state = ParserState::Content; event2 = xml_sax::Event::EndCdataSection; } } } Err(err) => panic!(), } } ParserState::InsideComment => { //expect comment or comment-end let res = insidecomment(&self.buffer2); match res { Ok(parseresult) => { self.offset = self.buffer2.offset(parseresult.0); match parseresult.1 { InsideComment::Characters(characters) => { let start = self.strbuffer.len(); let size = characters.len(); self.strbuffer .push_str(unsafe { std::str::from_utf8_unchecked(characters) }); event2 = xml_sax::Event::Comment(&self.strbuffer[start..(start + size)]) } InsideComment::CommentEnd => { self.state = ParserState::Content; event2 = xml_sax::Event::EndComment; } } } Err(err) => panic!(), } } ParserState::DocEnd => { // EOF if self.buffer2.len() == 0 { return xml_sax::Event::EndDocument; } let res = misc(&self.buffer2); match res { Ok(parseresult) => { 
self.offset = self.buffer2.offset(parseresult.0); match parseresult.1 { Misc::PI(a) => { let str = unsafe { std::str::from_utf8_unchecked(a) }; let range = push_str_get_range(&mut self.strbuffer, &str); event2 = xml_sax::Event::ProcessingInstruction(&self.strbuffer[range]) } Misc::Whitespace(a) => { let str = unsafe { std::str::from_utf8_unchecked(a) }; let range = push_str_get_range(&mut self.strbuffer, &str); event2 = xml_sax::Event::Whitespace(&self.strbuffer[range]) } Misc::CommentStart => { self.state = ParserState::DocEndInsideComment; event2 = xml_sax::Event::StartComment; } } } Err(err) => { panic!() } } } ParserState::DocEndInsideComment => { //expect comment or comment-end let res = insidecomment(&self.buffer2); match res { Ok(parseresult) => { self.offset = self.buffer2.offset(parseresult.0); match parseresult.1 { InsideComment::Characters(characters) => { let start = self.strbuffer.len(); let size = characters.len(); self.strbuffer .push_str(unsafe { std::str::from_utf8_unchecked(characters) }); event2 = xml_sax::Event::Comment(&self.strbuffer[start..(start + size)]) } InsideComment::CommentEnd => { self.state = ParserState::DocEnd; event2 = xml_sax::Event::EndComment; } } } Err(err) => panic!(), } } } event2 } } #[test] fn test_parser1() { let data = r#"<root><A a='x'> <B b="val" a:12='val2' ><C/></B> </A> </root>"# .as_bytes(); // let mut buf = vec![]; let mut p = OxideParser::start(data); loop { let res = p.read_event(); println!("{:?}", res); match res { xml_sax::Event::StartDocument => {} xml_sax::Event::EndDocument => { break; } xml_sax::Event::StartElement(el) => {} xml_sax::Event::EndElement(_) => {} xml_sax::Event::Characters(c) => {} xml_sax::Event::Reference(c) => {} _ => {} } } // let res = p.read_event(); // println!("{:?}", res); // let res = p.read_event(); // println!("{:?}", res); // let res = p.read_event(); // println!("{:?}", res); }
//! # Parsing
//!
//! A Rocket League game replay is a little endian encoded binary file. As an example of
//! endianness, the number 100 would be represented as the four byte sequence:
//!
//! ```plain
//! 0x64 0x00 0x00 0x00
//! ```
//!
//! This is in contrast to big-endian, which would represent the number as:
//!
//! ```plain
//! 0x00 0x00 0x00 0x64
//! ```
//!
//! A replay is split into three major sections, a header, body, and footer.
//!
//! ## Header
//!
//! The first four bytes of a replay is the number of bytes that comprises the header. A length
//! prefixed integer is very common throughout a replay. This prefix may either be in reference to
//! the number of bytes an element takes up, as just seen, or the number of elements in a list.
//!
//! The next four bytes make up the [cyclic redundancy check
//! (CRC)](https://en.wikipedia.org/wiki/Cyclic_redundancy_check) for the header. The check ensures
//! that the data has not been tampered with or, more likely, corrupted.
//!
//! The game's major and minor version follow, each a 32-bit integer.
//!
//! Subsequently, the game type is encoded as a string. Strings in Rocket League Replay files are
//! length prefixed and null terminated.
//!
//! The properties section is where all the good nuggets of info reside. Visualize the properties
//! as a map of strings to various types (number, string, array) that continues until a "None" key
//! is found.
//!
//! ## Body
//!
//! Out of the body we get:
//!
//! - Levels (what level did the match take place)
//! - `KeyFrames`
//! - The body's crc. This check is actually for the rest of the content (including the footer).
//!
//! Since everything is length prefixed, we're able to skip the network stream data. This would be
//! 90% of the file. Most of the interesting bits like player stats and goals are contained in the
//! header, so it's not a tremendous loss if we can't parse the network data.
//!
//! ## Footer
//!
//! After the network stream, we see:
//!
//! - Debug info
- Tickmarks //! - Packages //! - Etc use encoding_rs::{UTF_16LE, WINDOWS_1252}; use models::*; use crc::calc_crc; use errors::{AttributeError, NetworkError, ParseError}; use std::borrow::Cow; use failure::{Error, ResultExt}; use byteorder::{ByteOrder, LittleEndian}; use bitter::BitGet; use hashes::{ATTRIBUTES, OBJECT_CLASSES, PARENT_CLASSES, SPAWN_STATS}; use network::{normalize_object, Frame, NewActor, SpawnTrajectory, Trajectory, UpdatedAttribute}; use attributes::{AttributeDecoder, AttributeTag}; use std::collections::HashMap; use fnv::FnvHashMap; use std::ops::Deref; use multimap::MultiMap; /// Determines under what circumstances the parser should perform the crc check for replay /// corruption. Since the crc check is the most time consuming check for parsing (causing /// microseconds to turn into milliseconds), clients should choose under what circumstances a crc /// check is performed. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum CrcCheck { /// Always perform the crc check. Useful when the replay has had its contents modified. This /// will catch a user that increased the number of goals they scored (easy) but only if they /// didn't update the crc as well (not as easy). Always, /// Never perform the crc check. Useful only when it doesn't matter to know if a replay is /// corrupt or not, you either want the data or the parsing error. Never, /// Only perform the crc check when parsing a section fails. This option gets the best of both /// worlds. If parsing fails, the crc check will determine if it is a programming error or the /// replay is corrupt. If parsing succeeds it won't precious time performing the check. This /// option is the default for parsing. OnError, } /// Determines how the parser should handle the network data, which is the most /// intensive and volatile section of the replay. 
#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum NetworkParse { /// If the network data fails parse return an error Always, /// Skip parsing the network data Never, /// Attempt to parse the network data, but if unsuccessful ignore the error /// and continue parsing IgnoreOnError, } /// Intermediate parsing structure for the header #[derive(Debug, PartialEq)] pub struct Header<'a> { pub major_version: i32, pub minor_version: i32, pub net_version: Option<i32>, pub game_type: Cow<'a, str>, pub properties: Vec<(&'a str, HeaderProp<'a>)>, } impl<'a> Header<'a> { fn num_frames(&self) -> Option<i32> { self.properties .iter() .find(|&&(key, _)| key == "NumFrames") .and_then(|&(_, ref prop)| { if let HeaderProp::Int(v) = *prop { Some(v) } else { None } }) } fn max_channels(&self) -> Option<i32> { self.properties .iter() .find(|&&(key, _)| key == "MaxChannels") .and_then(|&(_, ref prop)| { if let HeaderProp::Int(v) = *prop { Some(v) } else { None } }) } } /// Intermediate parsing structure for the body / footer #[derive(Debug, PartialEq)] struct ReplayBody<'a> { levels: Vec<Cow<'a, str>>, keyframes: Vec<KeyFrame>, debug_info: Vec<DebugInfo<'a>>, tick_marks: Vec<TickMark<'a>>, packages: Vec<Cow<'a, str>>, objects: Vec<Cow<'a, str>>, names: Vec<Cow<'a, str>>, class_indices: Vec<ClassIndex<'a>>, net_cache: Vec<ClassNetCache>, network_data: &'a [u8], } /// The main entry point to parsing replays in boxcars. Allows one to customize parsing options, /// such as only parsing the header and forgoing crc (corruption) checks. 
#[derive(Debug, Clone, PartialEq)]
pub struct ParserBuilder<'a> {
    // Raw bytes of the entire replay file
    data: &'a [u8],
    // `None` means "use the default" (CrcCheck::OnError)
    crc_check: Option<CrcCheck>,
    // `None` means "use the default" (NetworkParse::IgnoreOnError)
    network_parse: Option<NetworkParse>,
}

impl<'a> ParserBuilder<'a> {
    /// Creates a builder over the given replay bytes with all options unset.
    pub fn new(data: &'a [u8]) -> Self {
        ParserBuilder {
            data,
            crc_check: None,
            network_parse: None,
        }
    }

    /// Sets the crc behavior to `CrcCheck::Always`.
    pub fn always_check_crc(mut self) -> ParserBuilder<'a> {
        self.crc_check = Some(CrcCheck::Always);
        self
    }

    /// Sets the crc behavior to `CrcCheck::Never`.
    pub fn never_check_crc(mut self) -> ParserBuilder<'a> {
        self.crc_check = Some(CrcCheck::Never);
        self
    }

    /// Sets the crc behavior to `CrcCheck::OnError`.
    pub fn on_error_check_crc(mut self) -> ParserBuilder<'a> {
        self.crc_check = Some(CrcCheck::OnError);
        self
    }

    /// Sets the crc behavior explicitly.
    pub fn with_crc_check(mut self, check: CrcCheck) -> ParserBuilder<'a> {
        self.crc_check = Some(check);
        self
    }

    /// Sets the network behavior to `NetworkParse::Always`.
    pub fn must_parse_network_data(mut self) -> ParserBuilder<'a> {
        self.network_parse = Some(NetworkParse::Always);
        self
    }

    /// Sets the network behavior to `NetworkParse::Never`.
    pub fn never_parse_network_data(mut self) -> ParserBuilder<'a> {
        self.network_parse = Some(NetworkParse::Never);
        self
    }

    /// Sets the network behavior to `NetworkParse::IgnoreOnError`.
    pub fn ignore_network_data_on_error(mut self) -> ParserBuilder<'a> {
        self.network_parse = Some(NetworkParse::IgnoreOnError);
        self
    }

    /// Sets the network behavior explicitly.
    pub fn with_network_parse(mut self, parse: NetworkParse) -> ParserBuilder<'a> {
        self.network_parse = Some(parse);
        self
    }

    /// Consumes the builder and parses the replay, falling back to `CrcCheck::OnError` and
    /// `NetworkParse::IgnoreOnError` for any option left unset.
    pub fn parse(self) -> Result<Replay<'a>, Error> {
        let mut parser = Parser::new(
            self.data,
            self.crc_check.unwrap_or(CrcCheck::OnError),
            self.network_parse.unwrap_or(NetworkParse::IgnoreOnError),
        );
        parser.parse()
    }
}

// How a single network attribute is decoded: the decoder tag plus the index of the object
// that declared it (the index is used when generating error messages).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ObjectAttribute {
    attribute: AttributeTag,
    object_index: i32,
}

// Pre-computed per-object-type info consulted while decoding frames.
struct CacheInfo {
    // Exclusive upper bound on the property ids of this type
    max_prop_id: i32,
    // Number of bits needed to read a property id below `max_prop_id`
    prop_id_bits: i32,
    // Property id -> attribute decoder tag
    attributes: HashMap<i32, AttributeTag>,
}

// Everything needed to decode the network frame stream; borrows the header / body plus the
// lookup tables built in `Parser::parse_network`.
struct FrameDecoder<'a, 'b: 'a> {
    frames_len: usize,
    color_ind: u32,
    painted_ind: u32,
    channel_bits: i32,
    header: &'a Header<'b>,
    body: &'a ReplayBody<'b>,
    spawns: &'a Vec<SpawnTrajectory>,
    object_ind_attributes: FnvHashMap<i32, CacheInfo>,
    object_ind_attrs: HashMap<i32, HashMap<i32, ObjectAttribute>>,
}

impl<'a, 'b> FrameDecoder<'a, 'b> {
    // Best-effort lookup of an object's name for diagnostics; never fails.
    fn object_ind_to_string(&self, ind: i32) -> String {
        String::from(
            self.body
                .objects
                .get(ind as usize)
                .map(Deref::deref)
                .unwrap_or("Out of bounds"),
        )
    }

    // Builds the error for a property id that has no registered attribute on this type,
    // listing every known property id to aid debugging.
    fn missing_attribute(
        &self,
        cache_info: &CacheInfo,
        actor_id: i32,
        type_id: i32,
        prop_id: i32,
    ) -> NetworkError {
        NetworkError::MissingAttribute(
            actor_id,
            type_id,
            self.object_ind_to_string(type_id),
            prop_id,
            cache_info
                .attributes
                .keys()
                .map(|x| x.to_string())
                .collect::<Vec<_>>()
                .join(","),
        )
    }

    // Builds the error for an attribute whose decoder is not implemented.
    fn unimplemented_attribute(&self, actor_id: i32, type_id: i32, prop_id: i32) -> NetworkError {
        NetworkError::UnimplementedAttribute(
            actor_id,
            type_id,
            self.object_ind_to_string(type_id),
            prop_id,
            self.object_ind_attrs
                .get(&type_id)
                .and_then(|x| x.get(&prop_id))
                .map(|x| self.object_ind_to_string(x.object_index))
                .unwrap_or_else(|| "type id not recognized".to_string()),
        )
    }

    // Decodes a newly spawned actor from the bit stream. The optional name id is only read
    // for sufficiently new replays (> 868, or 868 with minor >= 14).
    fn parse_new_actor(
        &self,
        mut bits: &mut BitGet,
        actor_id: i32,
    ) -> Result<NewActor, NetworkError> {
        if_chain! {
            if let Some(name_id) =
                if self.header.major_version > 868 ||
                    (self.header.major_version == 868 && self.header.minor_version >= 14) {
                    bits.read_i32().map(Some)
                } else {
                    Some(None)
                };
            if let Some(_) = bits.read_bit();
            if let Some(type_id) = bits.read_i32();
            let spawn = self.spawns.get(type_id as usize)
                .ok_or_else(|| NetworkError::TypeIdOutOfRange(type_id))?;
            if let Some(traj) = Trajectory::from_spawn(&mut bits, *spawn);
            then {
                Ok(NewActor {
                    actor_id,
                    name_id,
                    object_ind: type_id,
                    initial_trajectory: traj
                })
            } else {
                Err(NetworkError::NotEnoughDataFor("New Actor"))
            }
        }
    }

    // Decodes a single frame: a sequence of (actor id, alive?, new-or-updated) records that
    // runs until the stream yields a zero "more actors" bit.
    fn decode_frame(
        &self,
        attr_decoder: &AttributeDecoder,
        mut bits: &mut BitGet,
        actors: &mut FnvHashMap<i32, i32>,
        time: f32,
        delta: f32,
    ) -> Result<Frame, NetworkError> {
        let mut new_actors = Vec::new();
        let mut updated_actors = Vec::new();
        let mut deleted_actors = Vec::new();

        while bits.read_bit()
            .ok_or_else(|| NetworkError::NotEnoughDataFor("Actor data"))?
        {
            let actor_id = bits.read_i32_bits(self.channel_bits)
                .ok_or_else(|| NetworkError::NotEnoughDataFor("Actor Id"))?;

            // alive
            if bits.read_bit()
                .ok_or_else(|| NetworkError::NotEnoughDataFor("Is actor alive"))?
            {
                // new
                if bits.read_bit()
                    .ok_or_else(|| NetworkError::NotEnoughDataFor("Is new actor"))?
                {
                    let actor = self.parse_new_actor(&mut bits, actor_id)?;

                    // Insert the new actor so we can keep track of it for attribute
                    // updates. It's common for an actor id to already exist, so we
                    // overwrite it.
                    actors.insert(actor.actor_id, actor.object_ind);
                    new_actors.push(actor);
                } else {
                    // We'll be updating an existing actor with some attributes so we need
                    // to track down what the actor's type is
                    let type_id = actors
                        .get(&actor_id)
                        .ok_or_else(|| NetworkError::MissingActor(actor_id))?;

                    // Once we have the type we need to look up what attributes are
                    // available for said type
                    let cache_info = self.object_ind_attributes.get(type_id).ok_or_else(|| {
                        NetworkError::MissingCache(
                            actor_id,
                            *type_id,
                            self.object_ind_to_string(*type_id),
                        )
                    })?;

                    // While there are more attributes to update for our actor:
                    while bits.read_bit()
                        .ok_or_else(|| NetworkError::NotEnoughDataFor("Is prop present"))?
                    {
                        // We've previously calculated the max the property id can be for a
                        // given type and how many bits that it encompasses so use those
                        // values now
                        let prop_id = bits.read_bits_max(cache_info.prop_id_bits, cache_info.max_prop_id)
                            .map(|x| x as i32)
                            .ok_or_else(|| NetworkError::NotEnoughDataFor("Prop id"))?;

                        // Look the property id up and find the corresponding attribute
                        // decoding function. Experience has told me replays that fail to
                        // parse, fail to do so here, so a large chunk is dedicated to
                        // generating an error message with context
                        let attr = cache_info.attributes.get(&prop_id).ok_or_else(|| {
                            self.missing_attribute(cache_info, actor_id, *type_id, prop_id)
                        })?;

                        let attribute = attr_decoder.decode(*attr, &mut bits).map_err(|e| match e {
                            AttributeError::Unimplemented => {
                                self.unimplemented_attribute(actor_id, *type_id, prop_id)
                            }
                            _ => NetworkError::AttributeError(e),
                        })?;

                        updated_actors.push(UpdatedAttribute {
                            actor_id,
                            attribute_id: prop_id,
                            attribute,
                        });
                    }
                }
            } else {
                // Not alive: the actor is deleted and drops out of the tracking map
                deleted_actors.push(actor_id);
                actors.remove(&actor_id);
            }
        }

        Ok(Frame {
            time,
            delta,
            new_actors,
            deleted_actors,
            updated_actors,
        })
    }

    // Decodes frames until the declared frame count is reached or the bit stream runs dry.
    pub fn decode_frames(&self) -> Result<Vec<Frame>, Error> {
        let attr_decoder = AttributeDecoder::new(self.header, self.color_ind, self.painted_ind);
        let mut frames: Vec<Frame> = Vec::with_capacity(self.frames_len);
        let mut actors = FnvHashMap::default();
        let mut bits = BitGet::new(self.body.network_data);
        while !bits.is_empty() && frames.len() < self.frames_len {
            let time = bits.read_f32()
                .ok_or_else(|| NetworkError::NotEnoughDataFor("Time"))?;

            // NOTE(review): negative or denormally-tiny positive values are rejected as
            // corruption — presumably artifacts of a desynced bit stream; confirm thresholds
            // against known-good replays.
            if time < 0.0 || (time > 0.0 && time < 1e-10) {
                return Err(NetworkError::TimeOutOfRange(time))?;
            }

            let delta = bits.read_f32()
                .ok_or_else(|| NetworkError::NotEnoughDataFor("Delta"))?;

            if delta < 0.0 || (delta > 0.0 && delta < 1e-10) {
                return Err(NetworkError::DeltaOutOfRange(delta))?;
            }

            // A zero time and delta marks the end of the frame data
            if time == 0.0 && delta == 0.0 {
                break;
            }

            let frame = self.decode_frame(&attr_decoder, &mut bits, &mut actors, time, delta)?;
            frames.push(frame);
        }

        Ok(frames)
    }
}

/// Holds the current state of parsing a replay
#[derive(Debug, Clone, PartialEq)]
pub struct Parser<'a> {
    /// A slice (not the whole) view of the replay. Bytes are popped off as data is read.
    data: &'a [u8],
    /// Current offset in regards to the whole view of the replay
    col: i32,
    crc_check: CrcCheck,
    network_parse: NetworkParse,
}

impl<'a> Parser<'a> {
    fn new(data: &'a [u8], crc_check: CrcCheck, network_parse: NetworkParse) -> Self {
        Parser {
            data,
            col: 0,
            crc_check,
            network_parse,
        }
    }

    // Formats a parse error together with the byte offset where it occurred.
    fn err_str(&self, desc: &'static str, e: &ParseError) -> String {
        format!(
            "Could not decode replay {} at offset ({}): {}",
            desc, self.col, e
        )
    }

    // Top-level driver: header then body (each crc-checked as configured), then optionally the
    // network frames, all assembled into the final `Replay`.
    fn parse(&mut self) -> Result<Replay<'a>, Error> {
        let header_size = self.take(4, le_i32)
            .with_context(|e| self.err_str("header size", e))?;
        let header_crc = self.take(4, le_i32)
            .with_context(|e| self.err_str("header crc", e))?;

        let header_data = self.view_data(header_size as usize)
            .with_context(|e| self.err_str("header data", e))?;

        let header =
            self.crc_section(header_data, header_crc as u32, "header", Self::parse_header)?;

        let content_size = self.take(4, le_i32)
            .with_context(|e| self.err_str("content size", e))?;
        let content_crc = self.take(4, le_i32)
            .with_context(|e| self.err_str("content crc", e))?;

        let content_data = self.view_data(content_size as usize)
            .with_context(|e| self.err_str("content data", e))?;

        let body = self.crc_section(content_data, content_crc as u32, "body", Self::parse_body)?;

        let mut network: Option<NetworkFrames> = None;
        match self.network_parse {
            NetworkParse::Always => {
                network = Some(self.parse_network(&header, &body)?);
            }
            // Best effort: a failed network parse is silently dropped
            NetworkParse::IgnoreOnError => {
                if let Ok(v) = self.parse_network(&header, &body) {
                    network = Some(v);
                }
            }
            NetworkParse::Never => network = None,
        }

        Ok(Replay {
            header_size,
            header_crc,
            major_version: header.major_version,
            minor_version: header.minor_version,
            net_version: header.net_version,
            game_type: header.game_type,
            properties: header.properties,
            content_size,
            content_crc,
            network_frames: network,
            levels: body.levels,
            keyframes: body.keyframes,
            debug_info: body.debug_info,
            tick_marks: body.tick_marks,
            packages: body.packages,
            objects: body.objects,
            names: body.names,
            class_indices: body.class_indices,
            net_cache: body.net_cache,
        })
    }

    // Builds the lookup tables needed to decode the network stream (normalized names,
    // spawn trajectories, attribute decoders, parent-class attribute inheritance) and then
    // decodes the frames.
    fn parse_network(
        &mut self,
        header: &Header,
        body: &ReplayBody,
    ) -> Result<NetworkFrames, Error> {
        // Create a parallel vector where each object has it's name normalized
        let normalized_objects: Vec<&str> = body.objects
            .iter()
            .map(|x| normalize_object(x.deref()))
            .collect();

        // Create a parallel vector where we lookup how to decode an object's initial trajectory
        // when they spawn as a new actor
        let spawns: Vec<SpawnTrajectory> = body.objects
            .iter()
            .map(|x| {
                SPAWN_STATS
                    .get(x.deref())
                    .cloned()
                    .unwrap_or(SpawnTrajectory::None)
            })
            .collect();

        // Parallel vector of attribute decoder tags, keyed by normalized object name
        let attrs: Vec<_> = normalized_objects
            .iter()
            .map(|x| {
                ATTRIBUTES
                    .get(x.deref())
                    .cloned()
                    .unwrap_or(AttributeTag::NotImplemented)
            })
            .collect();

        // Create a map of an object's normalized name to a list of indices in the object
        // vector that have that same normalized name
        let normalized_name_obj_ind: MultiMap<&str, usize> = normalized_objects
            .iter()
            .enumerate()
            .map(|(i, x)| (*x, i))
            .collect();

        // Map each object's name to it's index
        let name_obj_ind: HashMap<&str, usize> = body.objects
            .iter()
            .enumerate()
            .map(|(ind, name)| (name.deref(), ind))
            .collect();

        let mut object_ind_attrs: HashMap<i32, HashMap<i32, ObjectAttribute>> = HashMap::new();
        for cache in &body.net_cache {
            let mut all_props: HashMap<i32, ObjectAttribute> = cache
                .properties
                .iter()
                .map(|x| {
                    let attr = attrs.get(x.object_ind as usize).ok_or_else(|| {
                        NetworkError::StreamTooLargeIndex(x.stream_id, x.object_ind)
                    })?;
                    Ok((
                        x.stream_id,
                        ObjectAttribute {
                            attribute: *attr,
                            object_index: x.object_ind,
                        },
                    ))
                })
                .collect::<Result<HashMap<_, _>, NetworkError>>()?;

            let mut had_parent = false;

            // We are going to recursively resolve an object's name to find their direct parent.
            // Parents have parents as well (etc), so we repeatedly walk up the chain picking up
            // attributes on parent objects until we reach an object with no parent (`Core.Object`)
            let mut object_name: &str = &*body.objects[cache.object_ind as usize];
            while let Some(parent_name) = PARENT_CLASSES.get(object_name) {
                had_parent = true;
                if let Some(parent_ind) = name_obj_ind.get(parent_name) {
                    if let Some(parent_attrs) = object_ind_attrs.get(&(*parent_ind as i32)) {
                        all_props.extend(parent_attrs.iter());
                    }
                }

                object_name = parent_name;
            }

            // Sometimes our hierarchy set up in build.rs isn't perfect so if we don't find a
            // parent and a parent cache id is set, try and find this parent id and carry down
            // their props.
            if !had_parent && cache.parent_id != 0 {
                if let Some(parent) = body.net_cache
                    .iter()
                    .find(|x| x.cache_id == cache.parent_id)
                {
                    if let Some(parent_attrs) = object_ind_attrs.get(&parent.object_ind) {
                        all_props.extend(parent_attrs.iter());
                    }
                }
            }

            object_ind_attrs.insert(cache.object_ind, all_props);
        }

        for (obj, parent) in OBJECT_CLASSES.entries() {
            // It's ok if an object class doesn't appear in our replay. For instance, basketball
            // objects don't appear in a soccer replay.
            if let Some(indices) = normalized_name_obj_ind.get_vec(obj) {
                let parent_ind = name_obj_ind.get(parent).ok_or_else(|| {
                    NetworkError::MissingParentClass(String::from(*obj), String::from(*parent))
                })?;

                for i in indices {
                    let parent_attrs: HashMap<_, _> = object_ind_attrs
                        .get(&(*parent_ind as i32))
                        .ok_or_else(|| {
                            NetworkError::ParentIndexHasNoAttributes(*parent_ind as i32, *i as i32)
                        })?
                        .clone();
                    object_ind_attrs.insert(*i as i32, parent_attrs);
                }
            }
        }

        // For each type, pre-compute how many bits a property id needs when decoding frames
        let object_ind_attributes: FnvHashMap<i32, CacheInfo> = object_ind_attrs
            .iter()
            .map(|(obj_ind, attrs)| {
                let key = *obj_ind;
                let max = *attrs.keys().max().unwrap_or(&2) + 1;
                let next_max = (max as u32)
                    .checked_next_power_of_two()
                    .ok_or_else(|| NetworkError::PropIdsTooLarge(max, key))?;
                Ok((
                    key,
                    CacheInfo {
                        max_prop_id: max,
                        prop_id_bits: log2(next_max) as i32,
                        attributes: attrs.iter().map(|(k, o)| (*k, o.attribute)).collect(),
                    },
                ))
            })
            .collect::<Result<FnvHashMap<_, _>, NetworkError>>()?;

        let color_ind = *name_obj_ind
            .get("TAGame.ProductAttribute_UserColor_TA")
            .unwrap_or(&0) as u32;
        let painted_ind = *name_obj_ind
            .get("TAGame.ProductAttribute_Painted_TA")
            .unwrap_or(&0) as u32;

        // 1023 stolen from rattletrap
        let channels = header.max_channels().unwrap_or(1023);
        let channels = (channels as u32)
            .checked_next_power_of_two()
            .ok_or_else(|| NetworkError::ChannelsTooLarge(channels))?;
        let channel_bits = log2(channels as u32) as i32;

        let num_frames = header.num_frames();
        if let Some(frame_len) = num_frames {
            let frame_decoder = FrameDecoder {
                frames_len: frame_len as usize,
                color_ind,
                painted_ind,
                channel_bits,
                header,
                body,
                spawns: &spawns,
                object_ind_attributes,
                object_ind_attrs,
            };
            Ok(NetworkFrames {
                frames: frame_decoder.decode_frames()?,
            })
        } else {
            // No frame count in the header: nothing to decode
            Ok(NetworkFrames { frames: Vec::new() })
        }
    }

    // Parses the replay header: versions, game type, and the property map.
    fn parse_header(&mut self) -> Result<Header<'a>, Error> {
        let major_version = self.take(4, le_i32)
            .with_context(|e| self.err_str("major version", e))?;
        let minor_version = self.take(4, le_i32)
            .with_context(|e| self.err_str("minor version", e))?;

        // The net version field is only present past this version threshold
        let net_version = if major_version > 865 && minor_version > 17 {
            Some(self.take(4, le_i32)
                .with_context(|e| self.err_str("net version", e))?)
        } else {
            None
        };

        let game_type = self.parse_text()
            .with_context(|e| self.err_str("game type", e))?;
        let properties = self.parse_rdict()
            .with_context(|e| self.err_str("header properties", e))?;

        Ok(Header {
            major_version,
            minor_version,
            net_version,
            game_type,
            properties,
        })
    }

    /// Parses a section and performs a crc check as configured
    fn crc_section<T, F>(
        &mut self,
        data: &[u8],
        crc: u32,
        section: &str,
        mut f: F,
    ) -> Result<T, Error>
    where
        F: FnMut(&mut Self) -> Result<T, Error>,
    {
        match (self.crc_check, f(self)) {
            // Always: check regardless of whether parsing succeeded
            (CrcCheck::Always, res) => {
                let actual = calc_crc(data);
                if actual != crc as u32 {
                    Err(Error::from(ParseError::CrcMismatch(crc, actual)))
                } else {
                    res
                }
            }
            // OnError: only pay for the crc when parsing failed, to distinguish replay
            // corruption from a parser bug
            (CrcCheck::OnError, Err(e)) => {
                let actual = calc_crc(data);
                if actual != crc as u32 {
                    Err(e.context(format!(
                        "Failed to parse {} and crc check failed. Replay is corrupt",
                        section
                    )).into())
                } else {
                    Err(e)
                }
            }
            (CrcCheck::OnError, Ok(s)) => Ok(s),
            (CrcCheck::Never, res) => res,
        }
    }

    // Parses everything after the header: levels, keyframes, the (unparsed here) network
    // stream bytes, and the footer lists.
    fn parse_body(&mut self) -> Result<ReplayBody<'a>, Error> {
        let levels = self.text_list()
            .with_context(|e| self.err_str("levels", e))?;
        let keyframes = self.parse_keyframe()
            .with_context(|e| self.err_str("keyframes", e))?;

        // The network stream is length prefixed, so it can be captured raw and decoded later
        let network_size = self.take(4, le_i32)
            .with_context(|e| self.err_str("network size", e))?;
        let network_data = self.take(network_size as usize, |d| d)
            .with_context(|e| self.err_str("network data", e))?;

        let debug_infos = self.parse_debuginfo()
            .with_context(|e| self.err_str("debug info", e))?;
        let tickmarks = self.parse_tickmarks()
            .with_context(|e| self.err_str("tickmarks", e))?;
        let packages = self.text_list()
            .with_context(|e| self.err_str("packages", e))?;
        let objects = self.text_list()
            .with_context(|e| self.err_str("objects", e))?;
        let names = self.text_list().with_context(|e| self.err_str("names", e))?;
        let class_index = self.parse_classindex()
            .with_context(|e| self.err_str("class index", e))?;
        let net_cache = self.parse_classcache()
            .with_context(|e| self.err_str("net cache", e))?;

        Ok(ReplayBody {
            levels,
            keyframes,
            debug_info: debug_infos,
            tick_marks: tickmarks,
            packages,
            objects,
            names,
            class_indices: class_index,
            net_cache,
            network_data,
        })
    }

    /// Used for skipping some amount of data
    fn advance(&mut self, ind: usize) {
        self.col += ind as i32;
        self.data = &self.data[ind..];
    }

    /// Returns a slice of the replay after ensuring there is enough space for the requested slice
    fn view_data(&self, size: usize) -> Result<&'a [u8], ParseError> {
        if size > self.data.len() {
            Err(ParseError::InsufficientData(
                size as i32,
                self.data.len() as i32,
            ))
        } else {
            Ok(&self.data[..size])
        }
    }

    /// Take the next `size` of bytes and interpret them in an infallible fashion
    #[inline]
    fn take<F, T>(&mut self, size: usize, mut f: F) -> Result<T, ParseError>
    where
        F: FnMut(&'a [u8]) -> T,
    {
        let res = f(self.view_data(size)?);
        self.advance(size);
        Ok(res)
    }

    /// Take the next `size` of bytes and interpret them, but this interpretation can fail
    fn take_res<F, T>(&mut self, size: usize, mut f: F) -> Result<T, ParseError>
    where
        F: FnMut(&'a [u8]) -> Result<T, ParseError>,
    {
        let res = f(self.view_data(size)?)?;
        self.advance(size);
        Ok(res)
    }

    /// Repeatedly parse the same elements from replay until `size` elements parsed
    fn repeat<F, T>(&mut self, size: usize, mut f: F) -> Result<Vec<T>, ParseError>
    where
        F: FnMut(&mut Self) -> Result<T, ParseError>,
    {
        // Guard against absurd length prefixes so a corrupt replay can't force an
        // unbounded allocation
        if size > 25_000 {
            return Err(ParseError::ListTooLarge(size));
        }

        let mut res = Vec::with_capacity(size);
        for _ in 0..size {
            res.push(f(self)?);
        }
        Ok(res)
    }

    // Reads a 4-byte element count and then parses that many elements
    fn list_of<F, T>(&mut self, f: F) -> Result<Vec<T>, ParseError>
    where
        F: FnMut(&mut Self) -> Result<T, ParseError>,
    {
        let size = self.take(4, le_i32)?;
        self.repeat(size as usize, f)
    }

    fn text_list(&mut self) -> Result<Vec<Cow<'a, str>>, ParseError> {
        self.list_of(|s| s.parse_text())
    }

    /// Parses UTF-8 string from replay
    fn parse_str(&mut self) -> Result<&'a str, ParseError> {
        let mut size = self.take(4, le_i32)?
            as usize;

        // Replay 6688 has a property name that is listed as having a length of 0x5000000, but it's
        // really the `\0\0\0None` property. I'm guessing at some point in Rocket League, this was a
        // bug that was fixed. What's interesting is that I couldn't find this constant in
        // `RocketLeagueReplayParser`, only rattletrap.
        if size == 0x5_000_000 {
            size = 8;
        }

        self.take_res(size, decode_str)
    }

    /// Parses either UTF-16 or Windows-1252 encoded strings
    fn parse_text(&mut self) -> Result<Cow<'a, str>, ParseError> {
        // The number of bytes that the string is composed of. If negative, the string is UTF-16,
        // else the string is windows 1252 encoded.
        let characters = self.take(4, le_i32)?;

        // size.abs() will panic at min_value, so we eschew it for manual checking
        if characters == 0 {
            Err(ParseError::ZeroSize)
        } else if characters > 10_000 || characters < -10_000 {
            Err(ParseError::TextTooLarge(characters))
        } else if characters < 0 {
            // We're dealing with UTF-16 and each character is two bytes, we
            // multiply the size by 2. The last two bytes included in the count are
            // null terminators
            let size = characters * -2;
            self.take_res(size as usize, |d| decode_utf16(d))
        } else {
            self.take_res(characters as usize, |d| decode_windows1252(d))
        }
    }

    fn parse_rdict(&mut self) -> Result<Vec<(&'a str, HeaderProp<'a>)>, ParseError> {
        // Other than the actual network data, the header property associative array is the
        // hardest to parse. The format is to:
        // - Read string
        // - If string is "None", we're done
        // - else we're dealing with a property, and the string just read is the key. Now
        //   deserialize the value.
        // The return type of this function is a key value vector because since there is no format
        // specification, we can't rule out duplicate keys. Possibly consider a multi-map in the
        // future.
        let mut res: Vec<_> = Vec::new();
        loop {
            let key = self.parse_str()?;
            if key == "None" || key == "\0\0\0None" {
                break;
            }

            // The second string names the property type and selects the value decoder
            let a = self.parse_str()?;
            let val = match a {
                "ArrayProperty" => self.array_property(),
                "BoolProperty" => self.bool_property(),
                "ByteProperty" => self.byte_property(),
                "FloatProperty" => self.float_property(),
                "IntProperty" => self.int_property(),
                "NameProperty" => self.name_property(),
                "QWordProperty" => self.qword_property(),
                "StrProperty" => self.str_property(),
                x => Err(ParseError::UnexpectedProperty(String::from(x))),
            }?;

            res.push((key, val));
        }

        Ok(res)
    }

    // Header properties are encoded in a pretty simple format, with some oddities. The first
    // 64bits is data that can be discarded, some people think that the 64bits is the length of
    // the data while others think that the first 32bits is the header length in bytes with the
    // subsequent 32bits unknown. Doesn't matter to us, we throw it out anyways. The rest of the
    // bytes are decoded property type specific.

    fn byte_property(&mut self) -> Result<HeaderProp<'a>, ParseError> {
        // It's unknown (to me at least) why the byte property has two strings in it.
        self.take(8, |_d| ())?;
        // NOTE(review): only "OnlinePlatform_Steam" skips the second string read — presumably
        // that value carries no accompanying payload; confirm against replays from other
        // platforms.
        if self.parse_str()?.deref() != "OnlinePlatform_Steam" {
            self.parse_str()?;
        }
        Ok(HeaderProp::Byte)
    }

    fn str_property(&mut self) -> Result<HeaderProp<'a>, ParseError> {
        self.take(8, |_d| ())?;
        Ok(HeaderProp::Str(self.parse_text()?))
    }

    fn name_property(&mut self) -> Result<HeaderProp<'a>, ParseError> {
        self.take(8, |_d| ())?;
        Ok(HeaderProp::Name(self.parse_text()?))
    }

    // The fixed-width properties read their value right after the discarded 8-byte preamble
    fn int_property(&mut self) -> Result<HeaderProp<'a>, ParseError> {
        self.take(12, |d| HeaderProp::Int(le_i32(&d[8..])))
    }

    fn bool_property(&mut self) -> Result<HeaderProp<'a>, ParseError> {
        self.take(9, |d| HeaderProp::Bool(d[8] == 1))
    }

    fn float_property(&mut self) -> Result<HeaderProp<'a>, ParseError> {
        self.take(12, |d| HeaderProp::Float(le_f32(&d[8..])))
    }

    fn qword_property(&mut self) -> Result<HeaderProp<'a>, ParseError> {
        self.take(16, |d| HeaderProp::QWord(le_i64(&d[8..])))
    }

    // An array property is a length-prefixed list of nested property dictionaries
    fn array_property(&mut self) -> Result<HeaderProp<'a>, ParseError> {
        let size = self.take(12, |d| le_i32(&d[8..]))?;
        let arr = self.repeat(size as usize, |s| s.parse_rdict())?;
        Ok(HeaderProp::Array(arr))
    }

    fn parse_tickmarks(&mut self) -> Result<Vec<TickMark<'a>>, ParseError> {
        self.list_of(|s| {
            Ok(TickMark {
                description: s.parse_text()?,
                frame: s.take(4, le_i32)?,
            })
        })
    }

    fn parse_keyframe(&mut self) -> Result<Vec<KeyFrame>, ParseError> {
        self.list_of(|s| {
            Ok(KeyFrame {
                time: s.take(4, le_f32)?,
                frame: s.take(4, le_i32)?,
                position: s.take(4, le_i32)?,
            })
        })
    }

    fn parse_debuginfo(&mut self) -> Result<Vec<DebugInfo<'a>>, ParseError> {
        self.list_of(|s| {
            Ok(DebugInfo {
                frame: s.take(4, le_i32)?,
                user: s.parse_text()?,
                text: s.parse_text()?,
            })
        })
    }

    fn parse_classindex(&mut self) -> Result<Vec<ClassIndex<'a>>, ParseError> {
        self.list_of(|s| {
            Ok(ClassIndex {
                class: s.parse_str()?,
                index: s.take(4, le_i32)?,
            })
        })
    }

    fn parse_cacheprop(&mut self) -> Result<Vec<CacheProp>, ParseError> {
        self.list_of(|s| {
            Ok(CacheProp {
                object_ind: s.take(4, le_i32)?,
                stream_id: s.take(4, le_i32)?,
            })
        })
    }

    fn
parse_classcache(&mut self) -> Result<Vec<ClassNetCache>, ParseError> { self.list_of(|x| { Ok(ClassNetCache { object_ind: x.take(4, le_i32)?, parent_id: x.take(4, le_i32)?, cache_id: x.take(4, le_i32)?, properties: x.parse_cacheprop()?, }) }) } } const MULTIPLY_DE_BRUIJN_BIT_POSITION2: [u32; 32] = [ 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9, ]; // https://graphics.stanford.edu/~seander/bithacks.html#IntegerLogDeBruijn fn log2(v: u32) -> u32 { MULTIPLY_DE_BRUIJN_BIT_POSITION2[((v.wrapping_mul(0x077C_B531)) >> 27) as usize] } /// Reads a string of a given size from the data. The size includes a null /// character as the last character, so we drop it in the returned string /// slice. It may seem redundant to store this information, but stackoverflow /// contains a nice reasoning for why it may have been done this way: /// <http://stackoverflow.com/q/6293457/433785> fn decode_str(input: &[u8]) -> Result<&str, ParseError> { if input.is_empty() { Err(ParseError::ZeroSize) } else { Ok(::std::str::from_utf8(&input[..input.len() - 1])?) 
} }

/// Decodes a little endian UTF-16 string, dropping the two-byte null
/// terminator that is included in the byte count.
pub fn decode_utf16(input: &[u8]) -> Result<Cow<str>, ParseError> {
    if input.len() < 2 {
        Err(ParseError::ZeroSize)
    } else {
        let (s, _) = UTF_16LE.decode_without_bom_handling(&input[..input.len() - 2]);
        Ok(s)
    }
}

/// Decodes a Windows-1252 string, dropping the trailing null terminator.
pub fn decode_windows1252(input: &[u8]) -> Result<Cow<str>, ParseError> {
    if input.is_empty() {
        Err(ParseError::ZeroSize)
    } else {
        let (s, _) = WINDOWS_1252.decode_without_bom_handling(&input[..input.len() - 1]);
        Ok(s)
    }
}

// Little endian primitive readers, shaped to be passed to `Parser::take`.
#[inline]
fn le_i32(d: &[u8]) -> i32 {
    LittleEndian::read_i32(d)
}

#[inline]
fn le_f32(d: &[u8]) -> f32 {
    LittleEndian::read_f32(d)
}

#[inline]
fn le_i64(d: &[u8]) -> i64 {
    LittleEndian::read_i64(d)
}

#[cfg(test)]
mod tests {
    use super::{CrcCheck, NetworkParse, Parser};
    use errors::ParseError;
    use models::{HeaderProp, TickMark};
    use std::borrow::Cow;

    #[test]
    fn parse_text_encoding() {
        // dd skip=16 count=28 if=rumble.replay of=text.replay bs=1
        let data = include_bytes!("../assets/text.replay");
        let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never);
        assert_eq!(parser.parse_str().unwrap(), "TAGame.Replay_Soccar_TA");
    }

    #[test]
    fn parse_text_encoding_bad() {
        // dd skip=16 count=28 if=rumble.replay of=text.replay bs=1
        let data = include_bytes!("../assets/text.replay");
        let mut parser = Parser::new(
            &data[..data.len() - 1],
            CrcCheck::Never,
            NetworkParse::Never,
        );
        let res = parser.parse_str();
        assert!(res.is_err());
        let error = res.unwrap_err();
        assert_eq!(error, ParseError::InsufficientData(24, 23));
    }

    #[test]
    fn parse_text_zero_size() {
        let mut parser = Parser::new(&[0, 0, 0, 0, 0], CrcCheck::Never, NetworkParse::Never);
        let res = parser.parse_str();
        assert!(res.is_err());
        let error = res.unwrap_err();
        assert_eq!(error, ParseError::ZeroSize);
    }

    #[test]
    fn parse_text_encoding_bad_2() {
        // Test for when there is not enough data to decode text length
        // dd skip=16 count=28 if=rumble.replay of=text.replay bs=1
        let data = include_bytes!("../assets/text.replay");
        let mut parser = Parser::new(&data[..2], CrcCheck::Never, NetworkParse::Never);
        let res = parser.parse_str();
        assert!(res.is_err());
        let error = res.unwrap_err();
        assert_eq!(error, ParseError::InsufficientData(4, 2));
    }

    #[test]
    fn parse_utf16_string() {
        // dd skip=((0x120)) count=28 if=utf-16.replay of=utf-16-text.replay bs=1
        let data = include_bytes!("../assets/utf-16-text.replay");
        let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never);
        let res = parser.parse_text().unwrap();
        assert_eq!(res, "\u{2623}D[e]!v1zz\u{2623}");
    }

    #[test]
    fn test_windows1252_string() {
        let data = include_bytes!("../assets/windows_1252.replay");
        let mut parser = Parser::new(&data[0x1ad..0x1c4], CrcCheck::Never, NetworkParse::Never);
        let res = parser.parse_text().unwrap();
        assert_eq!(res, "caudillman6000\u{b3}(2)");
    }

    /// Define behavior on invalid UTF-16 sequences.
    #[test]
    fn parse_invalid_utf16_string() {
        // Lone surrogate bytes: expect replacement characters, not a panic.
        let data = [0xfd, 0xff, 0xff, 0xff, 0xd8, 0xd8, 0x00, 0x00, 0x00, 0x00];
        let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never);
        let res = parser.parse_text().unwrap();
        assert_eq!(res, "�\u{0}");
    }

    #[test]
    fn rdict_no_elements() {
        let data = [0x05, 0x00, 0x00, 0x00, b'N', b'o', b'n', b'e', 0x00];
        let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never);
        let res = parser.parse_rdict().unwrap();
        assert_eq!(res, Vec::new());
    }

    #[test]
    fn rdict_one_element() {
        // dd skip=$((0x1269)) count=$((0x12a8 - 0x1269)) if=rumble.replay of=rdict_one.replay bs=1
        let data = include_bytes!("../assets/rdict_one.replay");
        let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never);
        let res = parser.parse_rdict().unwrap();
        assert_eq!(
            res,
            vec![("PlayerName", HeaderProp::Str(Cow::Borrowed("comagoosie")))]
        );
    }

    #[test]
    fn rdict_one_int_element() {
        // dd skip=$((0x250)) count=$((0x284 - 0x250)) if=rumble.replay of=rdict_int.replay bs=1
        let data = include_bytes!("../assets/rdict_int.replay");
        let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never);
        let res = parser.parse_rdict().unwrap();
        assert_eq!(res, vec![("PlayerTeam", HeaderProp::Int(0))]);
    }

    #[test]
    fn rdict_one_bool_element() {
        // dd skip=$((0xa0f)) count=$((0xa3b - 0xa0f)) if=rumble.replay of=rdict_bool.replay bs=1
        let data = include_bytes!("../assets/rdict_bool.replay");
        let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never);
        let res = parser.parse_rdict().unwrap();
        assert_eq!(res, vec![("bBot", HeaderProp::Bool(false))]);
    }

    /// Appends the "None" terminator so a single-property fixture parses as a
    /// complete rdict.
    fn append_none(input: &[u8]) -> Vec<u8> {
        let append = [0x05, 0x00, 0x00, 0x00, b'N', b'o', b'n', b'e', 0x00];
        let mut v = Vec::new();
        v.extend_from_slice(input);
        v.extend_from_slice(&append);
        v
    }

    #[test]
    fn rdict_one_name_element() {
        // dd skip=$((0x1237)) count=$((0x1269 - 0x1237)) if=rumble.replay of=rdict_name.replay bs=1
        let data = append_none(include_bytes!("../assets/rdict_name.replay"));
        let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never);
        let res = parser.parse_rdict().unwrap();
        assert_eq!(
            res,
            vec![("MatchType", HeaderProp::Name(Cow::Borrowed("Online")))]
        );
    }

    #[test]
    fn rdict_one_float_element() {
        // dd skip=$((0x10a2)) count=$((0x10ce - 0x10a2)) if=rumble.replay of=rdict_float.replay bs=1
        let data = append_none(include_bytes!("../assets/rdict_float.replay"));
        let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never);
        let res = parser.parse_rdict().unwrap();
        assert_eq!(res, vec![("RecordFPS", HeaderProp::Float(30.0))]);
    }

    #[test]
    fn rdict_one_qword_element() {
        // dd skip=$((0x576)) count=$((0x5a5 - 0x576)) if=rumble.replay of=rdict_qword.replay bs=1
        let data = append_none(include_bytes!("../assets/rdict_qword.replay"));
        let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never);
        let res = parser.parse_rdict().unwrap();
        assert_eq!(
            res,
            vec![("OnlineID", HeaderProp::QWord(76561198101748375))]
        );
    }

    #[test]
    fn rdict_one_array_element() {
        // dd skip=$((0xab)) count=$((0x3f7 + 36)) if=rumble.replay of=rdict_array.replay bs=1
        let data = append_none(include_bytes!("../assets/rdict_array.replay"));
        let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never);
        let res = parser.parse_rdict().unwrap();
        let expected = vec![
            vec![
                ("frame", HeaderProp::Int(441)),
                ("PlayerName", HeaderProp::Str(Cow::Borrowed("Cakeboss"))),
                ("PlayerTeam", HeaderProp::Int(1)),
            ],
            vec![
                ("frame", HeaderProp::Int(1738)),
                ("PlayerName", HeaderProp::Str(Cow::Borrowed("Sasha Kaun"))),
                ("PlayerTeam", HeaderProp::Int(0)),
            ],
            vec![
                ("frame", HeaderProp::Int(3504)),
                (
                    "PlayerName",
                    HeaderProp::Str(Cow::Borrowed("SilentWarrior")),
                ),
                ("PlayerTeam", HeaderProp::Int(0)),
            ],
            vec![
                ("frame", HeaderProp::Int(5058)),
                ("PlayerName", HeaderProp::Str(Cow::Borrowed("jeffreyj1"))),
                ("PlayerTeam", HeaderProp::Int(1)),
            ],
            vec![
                ("frame", HeaderProp::Int(5751)),
                ("PlayerName", HeaderProp::Str(Cow::Borrowed("GOOSE LORD"))),
                ("PlayerTeam", HeaderProp::Int(0)),
            ],
            vec![
                ("frame", HeaderProp::Int(6083)),
                ("PlayerName", HeaderProp::Str(Cow::Borrowed("GOOSE LORD"))),
                ("PlayerTeam", HeaderProp::Int(0)),
            ],
            vec![
                ("frame", HeaderProp::Int(7021)),
                (
                    "PlayerName",
                    HeaderProp::Str(Cow::Borrowed("SilentWarrior")),
                ),
                ("PlayerTeam", HeaderProp::Int(0)),
            ],
        ];
        assert_eq!(res, vec![("Goals", HeaderProp::Array(expected))]);
    }

    #[test]
    fn rdict_one_byte_element() {
        // dd skip=$((0xdf0)) count=$((0xe41 - 0xdf0)) if=rumble.replay of=rdict_byte.replay bs=1
        let data = append_none(include_bytes!("../assets/rdict_byte.replay"));
        let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never);
        let res = parser.parse_rdict().unwrap();
        assert_eq!(res, vec![("Platform", HeaderProp::Byte)]);
    }

    #[test]
    fn key_frame_list() {
        let data = include_bytes!("../assets/rumble.replay");

        // List is 2A long, each keyframe is 12 bytes. Then add four for list length = 508
        let mut parser = Parser::new(
            &data[0x12ca..0x12ca + 508],
            CrcCheck::Never,
            NetworkParse::Never,
        );
        let frames = parser.parse_keyframe().unwrap();
        assert_eq!(frames.len(), 42);
    }

    #[test]
    fn tickmark_list() {
        let data = include_bytes!("../assets/rumble.replay");

        // 7 tick marks at 8 bytes + size of tick list
        let mut parser = Parser::new(
            &data[0xf6cce..0xf6d50],
            CrcCheck::Never,
            NetworkParse::Never,
        );
        let ticks = parser.parse_tickmarks().unwrap();
        assert_eq!(ticks.len(), 7);
        assert_eq!(
            ticks[0],
            TickMark {
                description: Cow::Borrowed("Team1Goal"),
                frame: 396,
            }
        );
    }

    #[test]
    fn test_the_parsing_empty() {
        let mut parser = Parser::new(&[], CrcCheck::Never, NetworkParse::Never);
        assert!(parser.parse().is_err());
    }

    #[test]
    fn test_the_parsing_text_too_long() {
        let data = include_bytes!("../assets/fuzz-string-too-long.replay");
        let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never);
        assert!(parser.parse().is_err())
    }

    #[test]
    fn test_fuzz_corpus_slice_index() {
        let data = include_bytes!("../assets/fuzz-slice-index.replay");
        let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never);
        assert!(parser.parse().is_err())
    }

    #[test]
    fn test_the_fuzz_corpus_abs_panic() {
        let data = include_bytes!("../assets/fuzz-corpus.replay");
        let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never);
        assert!(parser.parse().is_err())
    }

    #[test]
    fn test_the_fuzz_corpus_large_list() {
        let data = include_bytes!("../assets/fuzz-list-too-large.replay");
        let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never);
        let err = parser.parse().unwrap_err();
        assert!(
            format!("{}", err).starts_with(
                "Could not decode replay debug info at offset (1010894): list of size"
            )
        );
    }

    #[test]
    fn test_the_fuzz_corpus_large_list_on_error_crc() {
        let data = include_bytes!("../assets/fuzz-list-too-large.replay");
        let mut parser = Parser::new(&data[..], CrcCheck::OnError, NetworkParse::Never);
        let err =
parser.parse().unwrap_err();
        assert_eq!(
            "Failed to parse body and crc check failed. Replay is corrupt",
            format!("{}", err)
        );
        assert!(
            format!("{}", err.cause().cause().unwrap()).starts_with(
                "Could not decode replay debug info at offset (1010894): list of size"
            )
        );
    }

    #[test]
    fn test_the_fuzz_corpus_large_list_always_crc() {
        let data = include_bytes!("../assets/fuzz-list-too-large.replay");
        let mut parser = Parser::new(&data[..], CrcCheck::Always, NetworkParse::Never);
        let err = parser.parse().unwrap_err();
        assert_eq!(
            "Crc mismatch. Expected 3765941959 but received 1314727725",
            format!("{}", err)
        );
        // With `Always`, the crc failure replaces the underlying parse error.
        assert!(err.cause().cause().is_none());
    }

    #[test]
    fn test_crc_check_with_bad() {
        let mut data = include_bytes!("../assets/rumble.replay").to_vec();

        // Changing this byte won't make the parsing fail but will make the crc check fail
        data[4775] = 100;
        let mut parser = Parser::new(&data[..], CrcCheck::Always, NetworkParse::Never);
        let res = parser.parse();
        assert!(res.is_err());
        assert_eq!(
            "Crc mismatch. Expected 337843175 but received 2877465516",
            format!("{}", res.unwrap_err())
        );

        // OnError only runs the crc check when parsing fails, so this succeeds.
        parser = Parser::new(&data[..], CrcCheck::OnError, NetworkParse::Never);
        assert!(parser.parse().is_ok());
    }
}

// NOTE(review): a bare (non-comment) line reading
//     10% speedup by using Fnv hash in hot loop
// sat here in the original file. It looks like a commit title left behind when
// two revisions of this file were concatenated, and it is not valid Rust, so it
// has been preserved as this comment instead. The `//!` module docs that follow
// also only belong at the very top of a file — TODO: confirm the concatenation
// and split the revisions apart.

//! # Parsing
//!
//! A Rocket League game replay is a little endian binary encoded file with an emphasis. The number
//! 100 would be represented as the four byte sequence:
//!
//! ```plain
//! 0x64 0x00 0x00 0x00
//! ```
//!
//! This in contrast to big-endian, which would represent the number as:
//!
//! ```plain
//! 0x00 0x00 0x00 0x64
//! ```
//!
//! A replay is split into three major sections, a header, body, and footer.
//!
//! ## Header
//!
//! The first four bytes of a replay is the number of bytes that comprises the header. A length
//! prefixed integer is very common throughout a replay. This prefix may either be in reference to
//! the number of bytes an elements takes up, as just seen, or the number of elements in a list.
//!
//! The next four bytes make up the [cyclic redundancy check
//! (CRC)](https://en.wikipedia.org/wiki/Cyclic_redundancy_check) for the header. The check ensures
//! that the data has not been tampered with or, more likely, corrupted.
//!
//! The game's major and minor version follow, each 32bit integers.
//!
//! Subsequently, the game type is encoded as a string. Strings in Rocket League Replay files are
//! length prefixed and null terminated.
//!
//! The properties is where all the good nuggets of info reside. Visualize the properties as a map
//! of strings to various types (number, string, array) that continues until a "None" key is found.
//!
//! ## Body
//!
//! Out of the body we get:
//!
//! - Levels (what level did the match take place)
//! - `KeyFrames`
//! - The body's crc. This check is actually for the rest of the content (including the footer).
//!
//! Since everything is length prefixed, we're able to skip the network stream data. This would be
//! 90% of the file. Most of the interesting bits like player stats and goals are contained in the
//! header, so it's not a tremendous loss if we can't parse the network data.
//!
//! ## Footer
//!
//! After the network stream there we see:
//!
//! - Debug info
//! - Tickmarks
//! - Packages
//! - Etc

use encoding_rs::{UTF_16LE, WINDOWS_1252};
use models::*;
use crc::calc_crc;
use errors::{AttributeError, NetworkError, ParseError};
use std::borrow::Cow;
use failure::{Error, ResultExt};
use byteorder::{ByteOrder, LittleEndian};
use bitter::BitGet;
use hashes::{ATTRIBUTES, OBJECT_CLASSES, PARENT_CLASSES, SPAWN_STATS};
use network::{normalize_object, Frame, NewActor, SpawnTrajectory, Trajectory, UpdatedAttribute};
use attributes::{AttributeDecoder, AttributeTag};
use std::collections::HashMap;
use fnv::FnvHashMap;
use std::ops::Deref;
use multimap::MultiMap;

/// Determines under what circumstances the parser should perform the crc check for replay
/// corruption. Since the crc check is the most time consuming check for parsing (causing
/// microseconds to turn into milliseconds), clients should choose under what circumstances a crc
/// check is performed.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CrcCheck {
    /// Always perform the crc check. Useful when the replay has had its contents modified. This
    /// will catch a user that increased the number of goals they scored (easy) but only if they
    /// didn't update the crc as well (not as easy).
    Always,

    /// Never perform the crc check. Useful only when it doesn't matter to know if a replay is
    /// corrupt or not, you either want the data or the parsing error.
    Never,

    /// Only perform the crc check when parsing a section fails. This option gets the best of both
    /// worlds. If parsing fails, the crc check will determine if it is a programming error or the
    /// replay is corrupt. If parsing succeeds it won't precious time performing the check. This
    /// option is the default for parsing.
    OnError,
}

/// Determines how the parser should handle the network data, which is the most
/// intensive and volatile section of the replay.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum NetworkParse {
    /// If the network data fails parse return an error
    Always,

    /// Skip parsing the network data
    Never,

    /// Attempt to parse the network data, but if unsuccessful ignore the error
    /// and continue parsing
    IgnoreOnError,
}

/// Intermediate parsing structure for the header
#[derive(Debug, PartialEq)]
pub struct Header<'a> {
    pub major_version: i32,
    pub minor_version: i32,
    pub net_version: Option<i32>,
    pub game_type: Cow<'a, str>,
    pub properties: Vec<(&'a str, HeaderProp<'a>)>,
}

impl<'a> Header<'a> {
    /// Looks up an integer valued property from the header by key. Shared by
    /// the accessors below, which previously duplicated this scan verbatim.
    fn int_property(&self, key: &str) -> Option<i32> {
        self.properties
            .iter()
            .find(|&&(k, _)| k == key)
            .and_then(|&(_, ref prop)| {
                if let HeaderProp::Int(v) = *prop {
                    Some(v)
                } else {
                    None
                }
            })
    }

    /// Number of network frames recorded in the replay, when present.
    fn num_frames(&self) -> Option<i32> {
        self.int_property("NumFrames")
    }

    /// Maximum number of actor channels, when present.
    fn max_channels(&self) -> Option<i32> {
        self.int_property("MaxChannels")
    }
}

/// Intermediate parsing structure for the body / footer
#[derive(Debug, PartialEq)]
struct ReplayBody<'a> {
    levels: Vec<Cow<'a, str>>,
    keyframes: Vec<KeyFrame>,
    debug_info: Vec<DebugInfo<'a>>,
    tick_marks: Vec<TickMark<'a>>,
    packages: Vec<Cow<'a, str>>,
    objects: Vec<Cow<'a, str>>,
    names: Vec<Cow<'a, str>>,
    class_indices: Vec<ClassIndex<'a>>,
    net_cache: Vec<ClassNetCache>,
    network_data: &'a [u8],
}

/// The main entry point to parsing replays in boxcars. Allows one to customize parsing options,
/// such as only parsing the header and forgoing crc (corruption) checks.
#[derive(Debug, Clone, PartialEq)]
pub struct ParserBuilder<'a> {
    data: &'a [u8],
    crc_check: Option<CrcCheck>,
    network_parse: Option<NetworkParse>,
}

impl<'a> ParserBuilder<'a> {
    /// Creates a builder over the full replay byte slice with no options set.
    pub fn new(data: &'a [u8]) -> Self {
        ParserBuilder {
            data,
            crc_check: None,
            network_parse: None,
        }
    }

    /// Sets the crc check to `CrcCheck::Always`.
    pub fn always_check_crc(mut self) -> ParserBuilder<'a> {
        self.crc_check = Some(CrcCheck::Always);
        self
    }

    /// Sets the crc check to `CrcCheck::Never`.
    pub fn never_check_crc(mut self) -> ParserBuilder<'a> {
        self.crc_check = Some(CrcCheck::Never);
        self
    }

    /// Sets the crc check to `CrcCheck::OnError`.
    pub fn on_error_check_crc(mut self) -> ParserBuilder<'a> {
        self.crc_check = Some(CrcCheck::OnError);
        self
    }

    /// Sets the crc check to an explicit value.
    pub fn with_crc_check(mut self, check: CrcCheck) -> ParserBuilder<'a> {
        self.crc_check = Some(check);
        self
    }

    /// Sets network parsing to `NetworkParse::Always`.
    pub fn must_parse_network_data(mut self) -> ParserBuilder<'a> {
        self.network_parse = Some(NetworkParse::Always);
        self
    }

    /// Sets network parsing to `NetworkParse::Never`.
    pub fn never_parse_network_data(mut self) -> ParserBuilder<'a> {
        self.network_parse = Some(NetworkParse::Never);
        self
    }

    /// Sets network parsing to `NetworkParse::IgnoreOnError`.
    pub fn ignore_network_data_on_error(mut self) -> ParserBuilder<'a> {
        self.network_parse = Some(NetworkParse::IgnoreOnError);
        self
    }

    /// Sets network parsing to an explicit value.
    pub fn with_network_parse(mut self, parse: NetworkParse) -> ParserBuilder<'a> {
        self.network_parse = Some(parse);
        self
    }

    /// Runs the parse with the configured options. Defaults: `CrcCheck::OnError`
    /// and `NetworkParse::IgnoreOnError`.
    pub fn parse(self) -> Result<Replay<'a>, Error> {
        let mut parser = Parser::new(
            self.data,
            self.crc_check.unwrap_or(CrcCheck::OnError),
            self.network_parse.unwrap_or(NetworkParse::IgnoreOnError),
        );
        parser.parse()
    }
}

/// Pairs an attribute decoding tag with the index of the object it came from,
/// so errors can name the originating object.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ObjectAttribute {
    attribute: AttributeTag,
    object_index: i32,
}

/// Per-object-type lookup info precomputed before frame decoding: the max
/// property id, how many bits encode a property id, and the id -> tag map.
struct CacheInfo {
    max_prop_id: i32,
    prop_id_bits: i32,
    attributes: FnvHashMap<i32, AttributeTag>,
}

/// Decodes the network frame stream using lookup tables prepared in
/// `parse_network`. Borrows the header/body for the duration of decoding.
struct FrameDecoder<'a, 'b: 'a> {
    frames_len: usize,
    color_ind: u32,
    painted_ind: u32,
    channel_bits: i32,
    header: &'a Header<'b>,
    body: &'a ReplayBody<'b>,
    spawns: &'a Vec<SpawnTrajectory>,
    object_ind_attributes: FnvHashMap<i32, CacheInfo>,
    object_ind_attrs: HashMap<i32, HashMap<i32, ObjectAttribute>>,
}

impl<'a, 'b> FrameDecoder<'a, 'b> {
    /// Best-effort lookup of an object's name for error messages.
    fn object_ind_to_string(&self, ind: i32) -> String {
        String::from(
            self.body
                .objects
                .get(ind as usize)
                .map(Deref::deref)
                .unwrap_or("Out of bounds"),
        )
    }

    /// Builds the error for a property id with no known attribute decoder,
    /// listing the ids that are known for the actor's type.
    fn missing_attribute(
        &self,
        cache_info: &CacheInfo,
        actor_id: i32,
        type_id: i32,
        prop_id: i32,
    ) -> NetworkError {
        NetworkError::MissingAttribute(
            actor_id,
            type_id,
            self.object_ind_to_string(type_id),
            prop_id,
            cache_info
                .attributes
                .keys()
                .map(|x| x.to_string())
                .collect::<Vec<_>>()
                .join(","),
        )
    }

    /// Builds the error for an attribute whose decoder is not implemented,
    /// naming the object the attribute belongs to when it can be resolved.
    fn unimplemented_attribute(&self, actor_id: i32, type_id: i32, prop_id: i32) -> NetworkError {
        NetworkError::UnimplementedAttribute(
            actor_id,
            type_id,
            self.object_ind_to_string(type_id),
            prop_id,
            self.object_ind_attrs
                .get(&type_id)
                .and_then(|x| x.get(&prop_id))
                .map(|x| self.object_ind_to_string(x.object_index))
                .unwrap_or_else(|| "type id not recognized".to_string()),
        )
    }

    /// Reads a newly spawned actor from the bit stream: optional name id (only
    /// for versions > 868 or == 868.14+, per the condition below), a skipped
    /// bit, the type id, and the spawn trajectory for that type.
    fn parse_new_actor(
        &self,
        mut bits: &mut BitGet,
        actor_id: i32,
    ) -> Result<NewActor, NetworkError> {
        if_chain! {
            if let Some(name_id) =
                if self.header.major_version > 868 || (self.header.major_version == 868 && self.header.minor_version >= 14) {
                    bits.read_i32().map(Some)
                } else {
                    Some(None)
                };
            if let Some(_) = bits.read_bit();
            if let Some(type_id) = bits.read_i32();
            let spawn = self.spawns.get(type_id as usize)
                .ok_or_else(|| NetworkError::TypeIdOutOfRange(type_id))?;
            if let Some(traj) = Trajectory::from_spawn(&mut bits, *spawn);
            then {
                Ok(NewActor {
                    actor_id,
                    name_id,
                    object_ind: type_id,
                    initial_trajectory: traj
                })
            } else {
                Err(NetworkError::NotEnoughDataFor("New Actor"))
            }
        }
    }

    /// Decodes a single frame: a run of actor records (new / updated /
    /// deleted), each prefixed by a continuation bit.
    fn decode_frame(
        &self,
        attr_decoder: &AttributeDecoder,
        mut bits: &mut BitGet,
        actors: &mut FnvHashMap<i32, i32>,
        time: f32,
        delta: f32,
    ) -> Result<Frame, NetworkError> {
        let mut new_actors = Vec::new();
        let mut updated_actors = Vec::new();
        let mut deleted_actors = Vec::new();

        while bits
            .read_bit()
            .ok_or_else(|| NetworkError::NotEnoughDataFor("Actor data"))?
        {
            let actor_id = bits
                .read_i32_bits(self.channel_bits)
                .ok_or_else(|| NetworkError::NotEnoughDataFor("Actor Id"))?;

            // alive
            if bits
                .read_bit()
                .ok_or_else(|| NetworkError::NotEnoughDataFor("Is actor alive"))?
            {
                // new
                if bits
                    .read_bit()
                    .ok_or_else(|| NetworkError::NotEnoughDataFor("Is new actor"))?
                {
                    let actor = self.parse_new_actor(&mut bits, actor_id)?;

                    // Insert the new actor so we can keep track of it for attribute
                    // updates. It's common for an actor id to already exist, so we
                    // overwrite it.
                    actors.insert(actor.actor_id, actor.object_ind);
                    new_actors.push(actor);
                } else {
                    // We'll be updating an existing actor with some attributes so we need
                    // to track down what the actor's type is
                    let type_id = actors
                        .get(&actor_id)
                        .ok_or_else(|| NetworkError::MissingActor(actor_id))?;

                    // Once we have the type we need to look up what attributes are
                    // available for said type
                    let cache_info = self.object_ind_attributes.get(type_id).ok_or_else(|| {
                        NetworkError::MissingCache(
                            actor_id,
                            *type_id,
                            self.object_ind_to_string(*type_id),
                        )
                    })?;

                    // While there are more attributes to update for our actor:
                    while bits
                        .read_bit()
                        .ok_or_else(|| NetworkError::NotEnoughDataFor("Is prop present"))?
                    {
                        // We've previously calculated the max the property id can be for a
                        // given type and how many bits that it encompasses so use those
                        // values now
                        let prop_id = bits
                            .read_bits_max(cache_info.prop_id_bits, cache_info.max_prop_id)
                            .map(|x| x as i32)
                            .ok_or_else(|| NetworkError::NotEnoughDataFor("Prop id"))?;

                        // Look the property id up and find the corresponding attribute
                        // decoding function. Experience has told me replays that fail to
                        // parse, fail to do so here, so a large chunk is dedicated to
                        // generating an error message with context
                        let attr = cache_info.attributes.get(&prop_id).ok_or_else(|| {
                            self.missing_attribute(cache_info, actor_id, *type_id, prop_id)
                        })?;

                        let attribute = attr_decoder.decode(*attr, &mut bits).map_err(|e| match e {
                            AttributeError::Unimplemented => {
                                self.unimplemented_attribute(actor_id, *type_id, prop_id)
                            }
                            _ => NetworkError::AttributeError(e),
                        })?;

                        updated_actors.push(UpdatedAttribute {
                            actor_id,
                            attribute_id: prop_id,
                            attribute,
                        });
                    }
                }
            } else {
                // Not alive: the actor is being deleted this frame.
                deleted_actors.push(actor_id);
                actors.remove(&actor_id);
            }
        }

        Ok(Frame {
            time,
            delta,
            new_actors,
            deleted_actors,
            updated_actors,
        })
    }

    /// Decodes frames until the bit stream is exhausted, the expected frame
    /// count is reached, or a zero time/delta pair marks the end of data.
    pub fn decode_frames(&self) -> Result<Vec<Frame>, Error> {
        let attr_decoder = AttributeDecoder::new(self.header, self.color_ind, self.painted_ind);
        let mut frames: Vec<Frame> = Vec::with_capacity(self.frames_len);
        let mut actors = FnvHashMap::default();
        let mut bits = BitGet::new(self.body.network_data);
        while !bits.is_empty() && frames.len() < self.frames_len {
            let time = bits
                .read_f32()
                .ok_or_else(|| NetworkError::NotEnoughDataFor("Time"))?;

            // Negative or denormal-small values indicate a desynced/corrupt stream.
            if time < 0.0 || (time > 0.0 && time < 1e-10) {
                return Err(NetworkError::TimeOutOfRange(time))?;
            }

            let delta = bits
                .read_f32()
                .ok_or_else(|| NetworkError::NotEnoughDataFor("Delta"))?;

            if delta < 0.0 || (delta > 0.0 && delta < 1e-10) {
                return Err(NetworkError::DeltaOutOfRange(delta))?;
            }

            if time == 0.0 && delta == 0.0 {
                break;
            }

            let frame = self.decode_frame(&attr_decoder, &mut bits, &mut actors, time, delta)?;
            frames.push(frame);
        }

        Ok(frames)
    }
}

/// Holds the current state of parsing a replay
#[derive(Debug, Clone, PartialEq)]
pub struct Parser<'a> {
    /// A slice (not the whole) view of the replay. Bytes are popped off as data is read.
data: &'a [u8],
    /// Current offset in regards to the whole view of the replay
    col: i32,
    crc_check: CrcCheck,
    network_parse: NetworkParse,
}

impl<'a> Parser<'a> {
    /// Constructs a parser over the full replay slice with the given options.
    fn new(data: &'a [u8], crc_check: CrcCheck, network_parse: NetworkParse) -> Self {
        Parser {
            data,
            col: 0,
            crc_check,
            network_parse,
        }
    }

    /// Formats a parse error with the current absolute offset for context.
    fn err_str(&self, desc: &'static str, e: &ParseError) -> String {
        format!(
            "Could not decode replay {} at offset ({}): {}",
            desc, self.col, e
        )
    }

    /// Top-level parse: header size + crc, header, content size + crc, body,
    /// then (depending on `network_parse`) the network frames.
    fn parse(&mut self) -> Result<Replay<'a>, Error> {
        let header_size = self
            .take(4, le_i32)
            .with_context(|e| self.err_str("header size", e))?;
        let header_crc = self
            .take(4, le_i32)
            .with_context(|e| self.err_str("header crc", e))?;
        let header_data = self
            .view_data(header_size as usize)
            .with_context(|e| self.err_str("header data", e))?;
        // `crc_section` applies the configured `CrcCheck` policy around the
        // section parser.
        let header =
            self.crc_section(header_data, header_crc as u32, "header", Self::parse_header)?;

        let content_size = self
            .take(4, le_i32)
            .with_context(|e| self.err_str("content size", e))?;
        let content_crc = self
            .take(4, le_i32)
            .with_context(|e| self.err_str("content crc", e))?;
        let content_data = self
            .view_data(content_size as usize)
            .with_context(|e| self.err_str("content data", e))?;
        let body = self.crc_section(content_data, content_crc as u32, "body", Self::parse_body)?;

        let mut network: Option<NetworkFrames> = None;
        match self.network_parse {
            NetworkParse::Always => {
                network = Some(self.parse_network(&header, &body)?);
            }
            NetworkParse::IgnoreOnError => {
                // Best effort: a failed network parse is silently dropped.
                if let Ok(v) = self.parse_network(&header, &body) {
                    network = Some(v);
                }
            }
            NetworkParse::Never => network = None,
        }

        Ok(Replay {
            header_size,
            header_crc,
            major_version: header.major_version,
            minor_version: header.minor_version,
            net_version: header.net_version,
            game_type: header.game_type,
            properties: header.properties,
            content_size,
            content_crc,
            network_frames: network,
            levels: body.levels,
            keyframes: body.keyframes,
            debug_info: body.debug_info,
            tick_marks: body.tick_marks,
            packages: body.packages,
            objects: body.objects,
            names: body.names,
            class_indices: body.class_indices,
            net_cache: body.net_cache,
        })
    }

    /// Builds the attribute lookup tables needed to decode the network stream
    /// (per-object spawn/attribute info, class hierarchy resolution), then
    /// hands off to `FrameDecoder`.
    fn parse_network(
        &mut self,
        header: &Header,
        body: &ReplayBody,
    ) -> Result<NetworkFrames, Error> {
        // Create a parallel vector where each object has it's name normalized
        let normalized_objects: Vec<&str> = body
            .objects
            .iter()
            .map(|x| normalize_object(x.deref()))
            .collect();

        // Create a parallel vector where we lookup how to decode an object's initial trajectory
        // when they spawn as a new actor
        let spawns: Vec<SpawnTrajectory> = body
            .objects
            .iter()
            .map(|x| {
                SPAWN_STATS
                    .get(x.deref())
                    .cloned()
                    .unwrap_or(SpawnTrajectory::None)
            })
            .collect();

        // Parallel vector: how to decode each object's attribute data.
        let attrs: Vec<_> = normalized_objects
            .iter()
            .map(|x| {
                ATTRIBUTES
                    .get(x.deref())
                    .cloned()
                    .unwrap_or(AttributeTag::NotImplemented)
            })
            .collect();

        // Create a map of an object's normalized name to a list of indices in the object
        // vector that have that same normalized name
        let normalized_name_obj_ind: MultiMap<&str, usize> = normalized_objects
            .iter()
            .enumerate()
            .map(|(i, x)| (*x, i))
            .collect();

        // Map each object's name to it's index
        let name_obj_ind: HashMap<&str, usize> = body
            .objects
            .iter()
            .enumerate()
            .map(|(ind, name)| (name.deref(), ind))
            .collect();

        let mut object_ind_attrs: HashMap<i32, HashMap<i32, ObjectAttribute>> = HashMap::new();
        for cache in &body.net_cache {
            let mut all_props: HashMap<i32, ObjectAttribute> = cache
                .properties
                .iter()
                .map(|x| {
                    let attr = attrs.get(x.object_ind as usize).ok_or_else(|| {
                        NetworkError::StreamTooLargeIndex(x.stream_id, x.object_ind)
                    })?;
                    Ok((
                        x.stream_id,
                        ObjectAttribute {
                            attribute: *attr,
                            object_index: x.object_ind,
                        },
                    ))
                })
                .collect::<Result<HashMap<_, _>, NetworkError>>()?;

            let mut had_parent = false;

            // We are going to recursively resolve an object's name to find their direct parent.
            // Parents have parents as well (etc), so we repeatedly walk up the chain picking up
            // attributes on parent objects until we reach an object with no parent (`Core.Object`)
            let mut object_name: &str = &*body.objects[cache.object_ind as usize];
            while let Some(parent_name) = PARENT_CLASSES.get(object_name) {
                had_parent = true;
                if let Some(parent_ind) = name_obj_ind.get(parent_name) {
                    if let Some(parent_attrs) = object_ind_attrs.get(&(*parent_ind as i32)) {
                        all_props.extend(parent_attrs.iter());
                    }
                }

                object_name = parent_name;
            }

            // Sometimes our hierarchy set up in build.rs isn't perfect so if we don't find a
            // parent and a parent cache id is set, try and find this parent id and carry down
            // their props.
            if !had_parent && cache.parent_id != 0 {
                if let Some(parent) = body
                    .net_cache
                    .iter()
                    .find(|x| x.cache_id == cache.parent_id)
                {
                    if let Some(parent_attrs) = object_ind_attrs.get(&parent.object_ind) {
                        all_props.extend(parent_attrs.iter());
                    }
                }
            }

            object_ind_attrs.insert(cache.object_ind, all_props);
        }

        for (obj, parent) in OBJECT_CLASSES.entries() {
            // It's ok if an object class doesn't appear in our replay. For instance, basketball
            // objects don't appear in a soccer replay.
            if let Some(indices) = normalized_name_obj_ind.get_vec(obj) {
                let parent_ind = name_obj_ind.get(parent).ok_or_else(|| {
                    NetworkError::MissingParentClass(String::from(*obj), String::from(*parent))
                })?;

                for i in indices {
                    let parent_attrs: HashMap<_, _> = object_ind_attrs
                        .get(&(*parent_ind as i32))
                        .ok_or_else(|| {
                            NetworkError::ParentIndexHasNoAttributes(*parent_ind as i32, *i as i32)
                        })?
                        .clone();
                    object_ind_attrs.insert(*i as i32, parent_attrs);
                }
            }
        }

        // Precompute, per object type, how many bits a property id occupies
        // (log2 of the next power of two above the max id).
        let object_ind_attributes: FnvHashMap<i32, CacheInfo> = object_ind_attrs
            .iter()
            .map(|(obj_ind, attrs)| {
                let key = *obj_ind;
                let max = *attrs.keys().max().unwrap_or(&2) + 1;
                let next_max = (max as u32)
                    .checked_next_power_of_two()
                    .ok_or_else(|| NetworkError::PropIdsTooLarge(max, key))?;
                Ok((
                    key,
                    CacheInfo {
                        max_prop_id: max,
                        prop_id_bits: log2(next_max) as i32,
                        attributes: attrs.iter().map(|(k, o)| (*k, o.attribute)).collect(),
                    },
                ))
            })
            .collect::<Result<FnvHashMap<_, _>, NetworkError>>()?;

        let color_ind = *name_obj_ind
            .get("TAGame.ProductAttribute_UserColor_TA")
            .unwrap_or(&0) as u32;
        let painted_ind = *name_obj_ind
            .get("TAGame.ProductAttribute_Painted_TA")
            .unwrap_or(&0) as u32;

        // 1023 stolen from rattletrap
        let channels = header.max_channels().unwrap_or(1023);
        let channels = (channels as u32)
            .checked_next_power_of_two()
            .ok_or_else(|| NetworkError::ChannelsTooLarge(channels))?;
        let channel_bits = log2(channels as u32) as i32;

        let num_frames = header.num_frames();
        if let Some(frame_len) = num_frames {
            let frame_decoder = FrameDecoder {
                frames_len: frame_len as usize,
                color_ind,
                painted_ind,
                channel_bits,
                header,
                body,
                spawns: &spawns,
                object_ind_attributes,
                object_ind_attrs,
            };
            Ok(NetworkFrames {
                frames: frame_decoder.decode_frames()?,
            })
        } else {
            // No frame count in the header: nothing to decode.
            Ok(NetworkFrames { frames: Vec::new() })
        }
    }

    /// Parses the header section: versions, an optional net version (only for
    /// sufficiently new replays, per the condition below), game type, and the
    /// property map.
    fn parse_header(&mut self) -> Result<Header<'a>, Error> {
        let major_version = self
            .take(4, le_i32)
            .with_context(|e| self.err_str("major version", e))?;
        let minor_version = self
            .take(4, le_i32)
            .with_context(|e| self.err_str("minor version", e))?;
        let net_version = if major_version > 865 && minor_version > 17 {
            Some(self
                .take(4, le_i32)
                .with_context(|e| self.err_str("net version", e))?)
} else { None }; let game_type = self.parse_text() .with_context(|e| self.err_str("game type", e))?; let properties = self.parse_rdict() .with_context(|e| self.err_str("header properties", e))?; Ok(Header { major_version, minor_version, net_version, game_type, properties, }) } /// Parses a section and performs a crc check as configured fn crc_section<T, F>( &mut self, data: &[u8], crc: u32, section: &str, mut f: F, ) -> Result<T, Error> where F: FnMut(&mut Self) -> Result<T, Error>, { match (self.crc_check, f(self)) { (CrcCheck::Always, res) => { let actual = calc_crc(data); if actual != crc as u32 { Err(Error::from(ParseError::CrcMismatch(crc, actual))) } else { res } } (CrcCheck::OnError, Err(e)) => { let actual = calc_crc(data); if actual != crc as u32 { Err(e.context(format!( "Failed to parse {} and crc check failed. Replay is corrupt", section )).into()) } else { Err(e) } } (CrcCheck::OnError, Ok(s)) => Ok(s), (CrcCheck::Never, res) => res, } } fn parse_body(&mut self) -> Result<ReplayBody<'a>, Error> { let levels = self.text_list() .with_context(|e| self.err_str("levels", e))?; let keyframes = self.parse_keyframe() .with_context(|e| self.err_str("keyframes", e))?; let network_size = self.take(4, le_i32) .with_context(|e| self.err_str("network size", e))?; let network_data = self.take(network_size as usize, |d| d) .with_context(|e| self.err_str("network data", e))?; let debug_infos = self.parse_debuginfo() .with_context(|e| self.err_str("debug info", e))?; let tickmarks = self.parse_tickmarks() .with_context(|e| self.err_str("tickmarks", e))?; let packages = self.text_list() .with_context(|e| self.err_str("packages", e))?; let objects = self.text_list() .with_context(|e| self.err_str("objects", e))?; let names = self.text_list().with_context(|e| self.err_str("names", e))?; let class_index = self.parse_classindex() .with_context(|e| self.err_str("class index", e))?; let net_cache = self.parse_classcache() .with_context(|e| self.err_str("net cache", e))?; 
Ok(ReplayBody {
    levels,
    keyframes,
    debug_info: debug_infos,
    tick_marks: tickmarks,
    packages,
    objects,
    names,
    class_indices: class_index,
    net_cache,
    network_data,
})
}

/// Used for skipping some amount of data
fn advance(&mut self, ind: usize) {
    // `col` is the absolute byte offset into the replay, kept only for error messages.
    self.col += ind as i32;
    self.data = &self.data[ind..];
}

/// Returns a slice of the replay after ensuring there is enough space for the requested slice
// Pure bounds check + view: does not consume input (`take`/`take_res` advance afterwards).
fn view_data(&self, size: usize) -> Result<&'a [u8], ParseError> {
    if size > self.data.len() {
        Err(ParseError::InsufficientData(
            size as i32,
            self.data.len() as i32,
        ))
    } else {
        Ok(&self.data[..size])
    }
}

/// Take the next `size` of bytes and interpret them in an infallible fashion
// The cursor advances only after `view_data` succeeds, so a short read leaves
// the parser position untouched.
#[inline]
fn take<F, T>(&mut self, size: usize, mut f: F) -> Result<T, ParseError>
where
    F: FnMut(&'a [u8]) -> T,
{
    let res = f(self.view_data(size)?);
    self.advance(size);
    Ok(res)
}

/// Take the next `size` of bytes and interpret them, but this interpretation can fail
// Like `take`, but the interpreting closure itself returns a `Result`; the
// cursor does not advance when the closure fails.
fn take_res<F, T>(&mut self, size: usize, mut f: F) -> Result<T, ParseError>
where
    F: FnMut(&'a [u8]) -> Result<T, ParseError>,
{
    let res = f(self.view_data(size)?)?;
    self.advance(size);
    Ok(res)
}

/// Repeatedly parse the same elements from replay until `size` elements parsed
// The 25_000 cap rejects absurd length prefixes from corrupt or fuzzed replays
// before we try to allocate (see the fuzz-list-too-large tests in this file).
fn repeat<F, T>(&mut self, size: usize, mut f: F) -> Result<Vec<T>, ParseError>
where
    F: FnMut(&mut Self) -> Result<T, ParseError>,
{
    if size > 25_000 {
        return Err(ParseError::ListTooLarge(size));
    }
    let mut res = Vec::with_capacity(size);
    for _ in 0..size {
        res.push(f(self)?);
    }
    Ok(res)
}

// Parses a length-prefixed list: a little-endian i32 element count followed by
// that many elements, each decoded by `f`.
fn list_of<F, T>(&mut self, f: F) -> Result<Vec<T>, ParseError>
where
    F: FnMut(&mut Self) -> Result<T, ParseError>,
{
    let size = self.take(4, le_i32)?;
    self.repeat(size as usize, f)
}

// A length-prefixed list of strings (UTF-16 or Windows-1252; see `parse_text`).
fn text_list(&mut self) -> Result<Vec<Cow<'a, str>>, ParseError> {
    self.list_of(|s| s.parse_text())
}

/// Parses UTF-8 string from replay
fn parse_str(&mut self) -> Result<&'a str, ParseError> {
    let mut size = self.take(4, le_i32)?
as usize; // Replay 6688 has a property name that is listed as having a length of 0x5000000, but it's // really the `\0\0\0None` property. I'm guess at some point in Rocket League, this was a // bug that was fixed. What's interesting is that I couldn't find this constant in // `RocketLeagueReplayParser`, only rattletrap. if size == 0x5_000_000 { size = 8; } self.take_res(size, decode_str) } /// Parses either UTF-16 or Windows-1252 encoded strings fn parse_text(&mut self) -> Result<Cow<'a, str>, ParseError> { // The number of bytes that the string is composed of. If negative, the string is UTF-16, // else the string is windows 1252 encoded. let characters = self.take(4, le_i32)?; // size.abs() will panic at min_value, so we eschew it for manual checking if characters == 0 { Err(ParseError::ZeroSize) } else if characters > 10_000 || characters < -10_000 { Err(ParseError::TextTooLarge(characters)) } else if characters < 0 { // We're dealing with UTF-16 and each character is two bytes, we // multiply the size by 2. The last two bytes included in the count are // null terminators let size = characters * -2; self.take_res(size as usize, |d| decode_utf16(d)) } else { self.take_res(characters as usize, |d| decode_windows1252(d)) } } fn parse_rdict(&mut self) -> Result<Vec<(&'a str, HeaderProp<'a>)>, ParseError> { // Other the actual network data, the header property associative array is the hardest to parse. // The format is to: // - Read string // - If string is "None", we're done // - else we're dealing with a property, and the string just read is the key. Now deserialize the // value. // The return type of this function is a key value vector because since there is no format // specification, we can't rule out duplicate keys. Possibly consider a multi-map in the future. 
let mut res: Vec<_> = Vec::new(); loop { let key = self.parse_str()?; if key == "None" || key == "\0\0\0None" { break; } let a = self.parse_str()?; let val = match a { "ArrayProperty" => self.array_property(), "BoolProperty" => self.bool_property(), "ByteProperty" => self.byte_property(), "FloatProperty" => self.float_property(), "IntProperty" => self.int_property(), "NameProperty" => self.name_property(), "QWordProperty" => self.qword_property(), "StrProperty" => self.str_property(), x => Err(ParseError::UnexpectedProperty(String::from(x))), }?; res.push((key, val)); } Ok(res) } // Header properties are encoded in a pretty simple format, with some oddities. The first 64bits // is data that can be discarded, some people think that the 64bits is the length of the data // while others think that the first 32bits is the header length in bytes with the subsequent // 32bits unknown. Doesn't matter to us, we throw it out anyways. The rest of the bytes are // decoded property type specific. fn byte_property(&mut self) -> Result<HeaderProp<'a>, ParseError> { // It's unknown (to me at least) why the byte property has two strings in it. 
// Header props all start with an 8 byte chunk (see comment above) that is
// discarded before reading the type-specific payload.
self.take(8, |_d| ())?;
// Byte properties carry two strings, except when the first string is
// "OnlinePlatform_Steam": then no second string follows.
if self.parse_str()?.deref() != "OnlinePlatform_Steam" {
    self.parse_str()?;
}
Ok(HeaderProp::Byte)
}

fn str_property(&mut self) -> Result<HeaderProp<'a>, ParseError> {
    self.take(8, |_d| ())?;
    Ok(HeaderProp::Str(self.parse_text()?))
}

fn name_property(&mut self) -> Result<HeaderProp<'a>, ParseError> {
    self.take(8, |_d| ())?;
    Ok(HeaderProp::Name(self.parse_text()?))
}

// 8 discarded bytes + 4 byte little-endian integer payload.
fn int_property(&mut self) -> Result<HeaderProp<'a>, ParseError> {
    self.take(12, |d| HeaderProp::Int(le_i32(&d[8..])))
}

// 8 discarded bytes + a single byte interpreted as a boolean.
fn bool_property(&mut self) -> Result<HeaderProp<'a>, ParseError> {
    self.take(9, |d| HeaderProp::Bool(d[8] == 1))
}

// 8 discarded bytes + 4 byte little-endian float payload.
fn float_property(&mut self) -> Result<HeaderProp<'a>, ParseError> {
    self.take(12, |d| HeaderProp::Float(le_f32(&d[8..])))
}

// 8 discarded bytes + 8 byte little-endian integer payload.
fn qword_property(&mut self) -> Result<HeaderProp<'a>, ParseError> {
    self.take(16, |d| HeaderProp::QWord(le_i64(&d[8..])))
}

// 8 discarded bytes + element count, then `size` nested dictionaries.
fn array_property(&mut self) -> Result<HeaderProp<'a>, ParseError> {
    let size = self.take(12, |d| le_i32(&d[8..]))?;
    let arr = self.repeat(size as usize, |s| s.parse_rdict())?;
    Ok(HeaderProp::Array(arr))
}

// Tick marks pair a text description (e.g. "Team1Goal") with the frame it
// occurred on.
fn parse_tickmarks(&mut self) -> Result<Vec<TickMark<'a>>, ParseError> {
    self.list_of(|s| {
        Ok(TickMark {
            description: s.parse_text()?,
            frame: s.take(4, le_i32)?,
        })
    })
}

fn parse_keyframe(&mut self) -> Result<Vec<KeyFrame>, ParseError> {
    self.list_of(|s| {
        Ok(KeyFrame {
            time: s.take(4, le_f32)?,
            frame: s.take(4, le_i32)?,
            position: s.take(4, le_i32)?,
        })
    })
}

fn parse_debuginfo(&mut self) -> Result<Vec<DebugInfo<'a>>, ParseError> {
    self.list_of(|s| {
        Ok(DebugInfo {
            frame: s.take(4, le_i32)?,
            user: s.parse_text()?,
            text: s.parse_text()?,
        })
    })
}

fn parse_classindex(&mut self) -> Result<Vec<ClassIndex<'a>>, ParseError> {
    self.list_of(|s| {
        Ok(ClassIndex {
            class: s.parse_str()?,
            index: s.take(4, le_i32)?,
        })
    })
}

// Stream id -> object id pairs that make up a class's network cache entry.
fn parse_cacheprop(&mut self) -> Result<Vec<CacheProp>, ParseError> {
    self.list_of(|s| {
        Ok(CacheProp {
            object_ind: s.take(4, le_i32)?,
            stream_id: s.take(4, le_i32)?,
        })
    })
}

fn
parse_classcache(&mut self) -> Result<Vec<ClassNetCache>, ParseError> {
    // Network cache entries tie an object to its parent cache id and to the
    // attribute properties streamed for that object.
    self.list_of(|x| {
        Ok(ClassNetCache {
            object_ind: x.take(4, le_i32)?,
            parent_id: x.take(4, le_i32)?,
            cache_id: x.take(4, le_i32)?,
            properties: x.parse_cacheprop()?,
        })
    })
}
}

// Lookup table for the De Bruijn-based integer log2 below.
const MULTIPLY_DE_BRUIJN_BIT_POSITION2: [u32; 32] = [
    0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, 31, 27, 13, 23, 21, 19, 16, 7, 26,
    12, 18, 6, 11, 5, 10, 9,
];

// https://graphics.stanford.edu/~seander/bithacks.html#IntegerLogDeBruijn
// NOTE: this De Bruijn variant is exact only when `v` is a power of two; the
// call sites in this file feed it the result of `checked_next_power_of_two`.
fn log2(v: u32) -> u32 {
    MULTIPLY_DE_BRUIJN_BIT_POSITION2[((v.wrapping_mul(0x077C_B531)) >> 27) as usize]
}

/// Reads a string of a given size from the data. The size includes a null
/// character as the last character, so we drop it in the returned string
/// slice. It may seem redundant to store this information, but stackoverflow
/// contains a nice reasoning for why it may have been done this way:
/// <http://stackoverflow.com/q/6293457/433785>
fn decode_str(input: &[u8]) -> Result<&str, ParseError> {
    if input.is_empty() {
        Err(ParseError::ZeroSize)
    } else {
        Ok(::std::str::from_utf8(&input[..input.len() - 1])?)
} } pub fn decode_utf16(input: &[u8]) -> Result<Cow<str>, ParseError> { if input.len() < 2 { Err(ParseError::ZeroSize) } else { let (s, _) = UTF_16LE.decode_without_bom_handling(&input[..input.len() - 2]); Ok(s) } } pub fn decode_windows1252(input: &[u8]) -> Result<Cow<str>, ParseError> { if input.is_empty() { Err(ParseError::ZeroSize) } else { let (s, _) = WINDOWS_1252.decode_without_bom_handling(&input[..input.len() - 1]); Ok(s) } } #[inline] fn le_i32(d: &[u8]) -> i32 { LittleEndian::read_i32(d) } #[inline] fn le_f32(d: &[u8]) -> f32 { LittleEndian::read_f32(d) } #[inline] fn le_i64(d: &[u8]) -> i64 { LittleEndian::read_i64(d) } #[cfg(test)] mod tests { use super::{CrcCheck, NetworkParse, Parser}; use errors::ParseError; use models::{HeaderProp, TickMark}; use std::borrow::Cow; #[test] fn parse_text_encoding() { // dd skip=16 count=28 if=rumble.replay of=text.replay bs=1 let data = include_bytes!("../assets/text.replay"); let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never); assert_eq!(parser.parse_str().unwrap(), "TAGame.Replay_Soccar_TA"); } #[test] fn parse_text_encoding_bad() { // dd skip=16 count=28 if=rumble.replay of=text.replay bs=1 let data = include_bytes!("../assets/text.replay"); let mut parser = Parser::new( &data[..data.len() - 1], CrcCheck::Never, NetworkParse::Never, ); let res = parser.parse_str(); assert!(res.is_err()); let error = res.unwrap_err(); assert_eq!(error, ParseError::InsufficientData(24, 23)); } #[test] fn parse_text_zero_size() { let mut parser = Parser::new(&[0, 0, 0, 0, 0], CrcCheck::Never, NetworkParse::Never); let res = parser.parse_str(); assert!(res.is_err()); let error = res.unwrap_err(); assert_eq!(error, ParseError::ZeroSize); } #[test] fn parse_text_encoding_bad_2() { // Test for when there is not enough data to decode text length // dd skip=16 count=28 if=rumble.replay of=text.replay bs=1 let data = include_bytes!("../assets/text.replay"); let mut parser = Parser::new(&data[..2], CrcCheck::Never, 
NetworkParse::Never); let res = parser.parse_str(); assert!(res.is_err()); let error = res.unwrap_err(); assert_eq!(error, ParseError::InsufficientData(4, 2)); } #[test] fn parse_utf16_string() { // dd skip=((0x120)) count=28 if=utf-16.replay of=utf-16-text.replay bs=1 let data = include_bytes!("../assets/utf-16-text.replay"); let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never); let res = parser.parse_text().unwrap(); assert_eq!(res, "\u{2623}D[e]!v1zz\u{2623}"); } #[test] fn test_windows1252_string() { let data = include_bytes!("../assets/windows_1252.replay"); let mut parser = Parser::new(&data[0x1ad..0x1c4], CrcCheck::Never, NetworkParse::Never); let res = parser.parse_text().unwrap(); assert_eq!(res, "caudillman6000\u{b3}(2)"); } /// Define behavior on invalid UTF-16 sequences. #[test] fn parse_invalid_utf16_string() { let data = [0xfd, 0xff, 0xff, 0xff, 0xd8, 0xd8, 0x00, 0x00, 0x00, 0x00]; let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never); let res = parser.parse_text().unwrap(); assert_eq!(res, "�\u{0}"); } #[test] fn rdict_no_elements() { let data = [0x05, 0x00, 0x00, 0x00, b'N', b'o', b'n', b'e', 0x00]; let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never); let res = parser.parse_rdict().unwrap(); assert_eq!(res, Vec::new()); } #[test] fn rdict_one_element() { // dd skip=$((0x1269)) count=$((0x12a8 - 0x1269)) if=rumble.replay of=rdict_one.replay bs=1 let data = include_bytes!("../assets/rdict_one.replay"); let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never); let res = parser.parse_rdict().unwrap(); assert_eq!( res, vec![("PlayerName", HeaderProp::Str(Cow::Borrowed("comagoosie")))] ); } #[test] fn rdict_one_int_element() { // dd skip=$((0x250)) count=$((0x284 - 0x250)) if=rumble.replay of=rdict_int.replay bs=1 let data = include_bytes!("../assets/rdict_int.replay"); let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never); let res = 
parser.parse_rdict().unwrap(); assert_eq!(res, vec![("PlayerTeam", HeaderProp::Int(0))]); } #[test] fn rdict_one_bool_element() { // dd skip=$((0xa0f)) count=$((0xa3b - 0xa0f)) if=rumble.replay of=rdict_bool.replay bs=1 let data = include_bytes!("../assets/rdict_bool.replay"); let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never); let res = parser.parse_rdict().unwrap(); assert_eq!(res, vec![("bBot", HeaderProp::Bool(false))]); } fn append_none(input: &[u8]) -> Vec<u8> { let append = [0x05, 0x00, 0x00, 0x00, b'N', b'o', b'n', b'e', 0x00]; let mut v = Vec::new(); v.extend_from_slice(input); v.extend_from_slice(&append); v } #[test] fn rdict_one_name_element() { // dd skip=$((0x1237)) count=$((0x1269 - 0x1237)) if=rumble.replay of=rdict_name.replay bs=1 let data = append_none(include_bytes!("../assets/rdict_name.replay")); let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never); let res = parser.parse_rdict().unwrap(); assert_eq!( res, vec![("MatchType", HeaderProp::Name(Cow::Borrowed("Online")))] ); } #[test] fn rdict_one_float_element() { // dd skip=$((0x10a2)) count=$((0x10ce - 0x10a2)) if=rumble.replay of=rdict_float.replay bs=1 let data = append_none(include_bytes!("../assets/rdict_float.replay")); let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never); let res = parser.parse_rdict().unwrap(); assert_eq!(res, vec![("RecordFPS", HeaderProp::Float(30.0))]); } #[test] fn rdict_one_qword_element() { // dd skip=$((0x576)) count=$((0x5a5 - 0x576)) if=rumble.replay of=rdict_qword.replay bs=1 let data = append_none(include_bytes!("../assets/rdict_qword.replay")); let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never); let res = parser.parse_rdict().unwrap(); assert_eq!( res, vec![("OnlineID", HeaderProp::QWord(76561198101748375))] ); } #[test] fn rdict_one_array_element() { // dd skip=$((0xab)) count=$((0x3f7 + 36)) if=rumble.replay of=rdict_array.replay bs=1 let data = 
append_none(include_bytes!("../assets/rdict_array.replay")); let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never); let res = parser.parse_rdict().unwrap(); let expected = vec![ vec![ ("frame", HeaderProp::Int(441)), ("PlayerName", HeaderProp::Str(Cow::Borrowed("Cakeboss"))), ("PlayerTeam", HeaderProp::Int(1)), ], vec![ ("frame", HeaderProp::Int(1738)), ("PlayerName", HeaderProp::Str(Cow::Borrowed("Sasha Kaun"))), ("PlayerTeam", HeaderProp::Int(0)), ], vec![ ("frame", HeaderProp::Int(3504)), ( "PlayerName", HeaderProp::Str(Cow::Borrowed("SilentWarrior")), ), ("PlayerTeam", HeaderProp::Int(0)), ], vec![ ("frame", HeaderProp::Int(5058)), ("PlayerName", HeaderProp::Str(Cow::Borrowed("jeffreyj1"))), ("PlayerTeam", HeaderProp::Int(1)), ], vec![ ("frame", HeaderProp::Int(5751)), ("PlayerName", HeaderProp::Str(Cow::Borrowed("GOOSE LORD"))), ("PlayerTeam", HeaderProp::Int(0)), ], vec![ ("frame", HeaderProp::Int(6083)), ("PlayerName", HeaderProp::Str(Cow::Borrowed("GOOSE LORD"))), ("PlayerTeam", HeaderProp::Int(0)), ], vec![ ("frame", HeaderProp::Int(7021)), ( "PlayerName", HeaderProp::Str(Cow::Borrowed("SilentWarrior")), ), ("PlayerTeam", HeaderProp::Int(0)), ], ]; assert_eq!(res, vec![("Goals", HeaderProp::Array(expected))]); } #[test] fn rdict_one_byte_element() { // dd skip=$((0xdf0)) count=$((0xe41 - 0xdf0)) if=rumble.replay of=rdict_byte.replay bs=1 let data = append_none(include_bytes!("../assets/rdict_byte.replay")); let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never); let res = parser.parse_rdict().unwrap(); assert_eq!(res, vec![("Platform", HeaderProp::Byte)]); } #[test] fn key_frame_list() { let data = include_bytes!("../assets/rumble.replay"); // List is 2A long, each keyframe is 12 bytes. 
Then add four for list length = 508 let mut parser = Parser::new( &data[0x12ca..0x12ca + 508], CrcCheck::Never, NetworkParse::Never, ); let frames = parser.parse_keyframe().unwrap(); assert_eq!(frames.len(), 42); } #[test] fn tickmark_list() { let data = include_bytes!("../assets/rumble.replay"); // 7 tick marks at 8 bytes + size of tick list let mut parser = Parser::new( &data[0xf6cce..0xf6d50], CrcCheck::Never, NetworkParse::Never, ); let ticks = parser.parse_tickmarks().unwrap(); assert_eq!(ticks.len(), 7); assert_eq!( ticks[0], TickMark { description: Cow::Borrowed("Team1Goal"), frame: 396, } ); } #[test] fn test_the_parsing_empty() { let mut parser = Parser::new(&[], CrcCheck::Never, NetworkParse::Never); assert!(parser.parse().is_err()); } #[test] fn test_the_parsing_text_too_long() { let data = include_bytes!("../assets/fuzz-string-too-long.replay"); let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never); assert!(parser.parse().is_err()) } #[test] fn test_fuzz_corpus_slice_index() { let data = include_bytes!("../assets/fuzz-slice-index.replay"); let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never); assert!(parser.parse().is_err()) } #[test] fn test_the_fuzz_corpus_abs_panic() { let data = include_bytes!("../assets/fuzz-corpus.replay"); let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never); assert!(parser.parse().is_err()) } #[test] fn test_the_fuzz_corpus_large_list() { let data = include_bytes!("../assets/fuzz-list-too-large.replay"); let mut parser = Parser::new(&data[..], CrcCheck::Never, NetworkParse::Never); let err = parser.parse().unwrap_err(); assert!( format!("{}", err).starts_with( "Could not decode replay debug info at offset (1010894): list of size" ) ); } #[test] fn test_the_fuzz_corpus_large_list_on_error_crc() { let data = include_bytes!("../assets/fuzz-list-too-large.replay"); let mut parser = Parser::new(&data[..], CrcCheck::OnError, NetworkParse::Never); let err = 
parser.parse().unwrap_err(); assert_eq!( "Failed to parse body and crc check failed. Replay is corrupt", format!("{}", err) ); assert!( format!("{}", err.cause().cause().unwrap()).starts_with( "Could not decode replay debug info at offset (1010894): list of size" ) ); } #[test] fn test_the_fuzz_corpus_large_list_always_crc() { let data = include_bytes!("../assets/fuzz-list-too-large.replay"); let mut parser = Parser::new(&data[..], CrcCheck::Always, NetworkParse::Never); let err = parser.parse().unwrap_err(); assert_eq!( "Crc mismatch. Expected 3765941959 but received 1314727725", format!("{}", err) ); assert!(err.cause().cause().is_none()); } #[test] fn test_crc_check_with_bad() { let mut data = include_bytes!("../assets/rumble.replay").to_vec(); // Changing this byte won't make the parsing fail but will make the crc check fail data[4775] = 100; let mut parser = Parser::new(&data[..], CrcCheck::Always, NetworkParse::Never); let res = parser.parse(); assert!(res.is_err()); assert_eq!( "Crc mismatch. Expected 337843175 but received 2877465516", format!("{}", res.unwrap_err()) ); parser = Parser::new(&data[..], CrcCheck::OnError, NetworkParse::Never); assert!(parser.parse().is_ok()); } }
use byteorder::ReadBytesExt; use std::error::Error; use std::io; use std::io::prelude::*; use std::fmt; use std::net::TcpStream; use std::time::Duration; use bufstream::BufStream; #[cfg(feature = "with-unix_socket")] use unix_socket::UnixStream; #[cfg(all(not(feature = "with-unix_socket"), all(unix, feature = "nightly")))] use std::os::unix::net::UnixStream; #[cfg(unix)] use std::os::unix::io::{AsRawFd, RawFd}; #[cfg(windows)] use std::os::windows::io::{AsRawSocket, RawSocket}; use {TlsMode, ConnectParams, ConnectTarget}; use error::ConnectError; use io::TlsStream; use message::{self, WriteMessage}; use message::Frontend; const DEFAULT_PORT: u16 = 5432; #[doc(hidden)] pub trait StreamOptions { fn set_read_timeout(&self, timeout: Option<Duration>) -> io::Result<()>; fn set_nonblocking(&self, nonblock: bool) -> io::Result<()>; } impl StreamOptions for BufStream<Box<TlsStream>> { fn set_read_timeout(&self, timeout: Option<Duration>) -> io::Result<()> { match self.get_ref().get_ref().0 { InternalStream::Tcp(ref s) => s.set_read_timeout(timeout), #[cfg(any(feature = "with-unix_socket", all(unix, feature = "nightly")))] InternalStream::Unix(ref s) => s.set_read_timeout(timeout), } } fn set_nonblocking(&self, nonblock: bool) -> io::Result<()> { match self.get_ref().get_ref().0 { InternalStream::Tcp(ref s) => s.set_nonblocking(nonblock), #[cfg(any(feature = "with-unix_socket", all(unix, feature = "nightly")))] InternalStream::Unix(ref s) => s.set_nonblocking(nonblock), } } } /// A connection to the Postgres server. /// /// It implements `Read`, `Write` and `TlsStream`, as well as `AsRawFd` on /// Unix platforms and `AsRawSocket` on Windows platforms. 
pub struct Stream(InternalStream); impl fmt::Debug for Stream { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { match self.0 { InternalStream::Tcp(ref s) => fmt::Debug::fmt(s, fmt), #[cfg(any(feature = "with-unix_socket", all(unix, feature = "nightly")))] InternalStream::Unix(ref s) => fmt::Debug::fmt(s, fmt), } } } impl Read for Stream { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { self.0.read(buf) } } impl Write for Stream { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { self.0.write(buf) } fn flush(&mut self) -> io::Result<()> { self.0.flush() } } impl TlsStream for Stream { fn get_ref(&self) -> &Stream { self } fn get_mut(&mut self) -> &mut Stream { self } } #[cfg(unix)] impl AsRawFd for Stream { fn as_raw_fd(&self) -> RawFd { match self.0 { InternalStream::Tcp(ref s) => s.as_raw_fd(), #[cfg(any(feature = "with-unix_socket", all(unix, feature = "nightly")))] InternalStream::Unix(ref s) => s.as_raw_fd(), } } } #[cfg(windows)] impl AsRawSocket for Stream { fn as_raw_socket(&self) -> RawSocket { // Unix sockets aren't supported on windows, so no need to match match self.0 { InternalStream::Tcp(ref s) => s.as_raw_socket(), } } } enum InternalStream { Tcp(TcpStream), #[cfg(any(feature = "with-unix_socket", all(unix, feature = "nightly")))] Unix(UnixStream), } impl Read for InternalStream { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { match *self { InternalStream::Tcp(ref mut s) => s.read(buf), #[cfg(any(feature = "with-unix_socket", all(unix, feature = "nightly")))] InternalStream::Unix(ref mut s) => s.read(buf), } } } impl Write for InternalStream { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { match *self { InternalStream::Tcp(ref mut s) => s.write(buf), #[cfg(any(feature = "with-unix_socket", all(unix, feature = "nightly")))] InternalStream::Unix(ref mut s) => s.write(buf), } } fn flush(&mut self) -> io::Result<()> { match *self { InternalStream::Tcp(ref mut s) => s.flush(), #[cfg(any(feature = 
"with-unix_socket", all(unix, feature = "nightly")))] InternalStream::Unix(ref mut s) => s.flush(), } } } fn open_socket(params: &ConnectParams) -> Result<InternalStream, ConnectError> { let port = params.port.unwrap_or(DEFAULT_PORT); match params.target { ConnectTarget::Tcp(ref host) => { Ok(try!(TcpStream::connect(&(&**host, port)).map(InternalStream::Tcp))) } #[cfg(any(feature = "with-unix_socket", all(unix, feature = "nightly")))] ConnectTarget::Unix(ref path) => { let path = path.join(&format!(".s.PGSQL.{}", port)); Ok(try!(UnixStream::connect(&path).map(InternalStream::Unix))) } } } pub fn initialize_stream(params: &ConnectParams, ssl: TlsMode) -> Result<Box<TlsStream>, ConnectError> { let mut socket = Stream(try!(open_socket(params))); let (ssl_required, negotiator) = match ssl { TlsMode::None => return Ok(Box::new(socket)), TlsMode::Prefer(negotiator) => (false, negotiator), TlsMode::Require(negotiator) => (true, negotiator), }; try!(socket.write_message(&Frontend::SslRequest { code: message::SSL_CODE })); try!(socket.flush()); if try!(socket.read_u8()) == b'N' { if ssl_required { let err: Box<Error + Sync + Send> = "The server does not support SSL".into(); return Err(ConnectError::Ssl(err)); } else { return Ok(Box::new(socket)); } } // Postgres doesn't support SSL over unix sockets let host = match params.target { ConnectTarget::Tcp(ref host) => host, #[cfg(any(feature = "with-unix_socket", all(unix, feature = "nightly")))] ConnectTarget::Unix(_) => return Err(ConnectError::Io(::bad_response())), }; negotiator.tls_handshake(host, socket).map_err(ConnectError::Ssl) } Cleanup use byteorder::ReadBytesExt; use std::io; use std::io::prelude::*; use std::fmt; use std::net::TcpStream; use std::time::Duration; use bufstream::BufStream; #[cfg(feature = "with-unix_socket")] use unix_socket::UnixStream; #[cfg(all(not(feature = "with-unix_socket"), all(unix, feature = "nightly")))] use std::os::unix::net::UnixStream; #[cfg(unix)] use std::os::unix::io::{AsRawFd, 
RawFd}; #[cfg(windows)] use std::os::windows::io::{AsRawSocket, RawSocket}; use {TlsMode, ConnectParams, ConnectTarget}; use error::ConnectError; use io::TlsStream; use message::{self, WriteMessage}; use message::Frontend; const DEFAULT_PORT: u16 = 5432; #[doc(hidden)] pub trait StreamOptions { fn set_read_timeout(&self, timeout: Option<Duration>) -> io::Result<()>; fn set_nonblocking(&self, nonblock: bool) -> io::Result<()>; } impl StreamOptions for BufStream<Box<TlsStream>> { fn set_read_timeout(&self, timeout: Option<Duration>) -> io::Result<()> { match self.get_ref().get_ref().0 { InternalStream::Tcp(ref s) => s.set_read_timeout(timeout), #[cfg(any(feature = "with-unix_socket", all(unix, feature = "nightly")))] InternalStream::Unix(ref s) => s.set_read_timeout(timeout), } } fn set_nonblocking(&self, nonblock: bool) -> io::Result<()> { match self.get_ref().get_ref().0 { InternalStream::Tcp(ref s) => s.set_nonblocking(nonblock), #[cfg(any(feature = "with-unix_socket", all(unix, feature = "nightly")))] InternalStream::Unix(ref s) => s.set_nonblocking(nonblock), } } } /// A connection to the Postgres server. /// /// It implements `Read`, `Write` and `TlsStream`, as well as `AsRawFd` on /// Unix platforms and `AsRawSocket` on Windows platforms. 
pub struct Stream(InternalStream); impl fmt::Debug for Stream { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { match self.0 { InternalStream::Tcp(ref s) => fmt::Debug::fmt(s, fmt), #[cfg(any(feature = "with-unix_socket", all(unix, feature = "nightly")))] InternalStream::Unix(ref s) => fmt::Debug::fmt(s, fmt), } } } impl Read for Stream { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { self.0.read(buf) } } impl Write for Stream { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { self.0.write(buf) } fn flush(&mut self) -> io::Result<()> { self.0.flush() } } impl TlsStream for Stream { fn get_ref(&self) -> &Stream { self } fn get_mut(&mut self) -> &mut Stream { self } } #[cfg(unix)] impl AsRawFd for Stream { fn as_raw_fd(&self) -> RawFd { match self.0 { InternalStream::Tcp(ref s) => s.as_raw_fd(), #[cfg(any(feature = "with-unix_socket", all(unix, feature = "nightly")))] InternalStream::Unix(ref s) => s.as_raw_fd(), } } } #[cfg(windows)] impl AsRawSocket for Stream { fn as_raw_socket(&self) -> RawSocket { // Unix sockets aren't supported on windows, so no need to match match self.0 { InternalStream::Tcp(ref s) => s.as_raw_socket(), } } } enum InternalStream { Tcp(TcpStream), #[cfg(any(feature = "with-unix_socket", all(unix, feature = "nightly")))] Unix(UnixStream), } impl Read for InternalStream { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { match *self { InternalStream::Tcp(ref mut s) => s.read(buf), #[cfg(any(feature = "with-unix_socket", all(unix, feature = "nightly")))] InternalStream::Unix(ref mut s) => s.read(buf), } } } impl Write for InternalStream { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { match *self { InternalStream::Tcp(ref mut s) => s.write(buf), #[cfg(any(feature = "with-unix_socket", all(unix, feature = "nightly")))] InternalStream::Unix(ref mut s) => s.write(buf), } } fn flush(&mut self) -> io::Result<()> { match *self { InternalStream::Tcp(ref mut s) => s.flush(), #[cfg(any(feature = 
"with-unix_socket", all(unix, feature = "nightly")))] InternalStream::Unix(ref mut s) => s.flush(), } } } fn open_socket(params: &ConnectParams) -> Result<InternalStream, ConnectError> { let port = params.port.unwrap_or(DEFAULT_PORT); match params.target { ConnectTarget::Tcp(ref host) => { Ok(try!(TcpStream::connect(&(&**host, port)).map(InternalStream::Tcp))) } #[cfg(any(feature = "with-unix_socket", all(unix, feature = "nightly")))] ConnectTarget::Unix(ref path) => { let path = path.join(&format!(".s.PGSQL.{}", port)); Ok(try!(UnixStream::connect(&path).map(InternalStream::Unix))) } } } pub fn initialize_stream(params: &ConnectParams, tls: TlsMode) -> Result<Box<TlsStream>, ConnectError> { let mut socket = Stream(try!(open_socket(params))); let (tls_required, handshaker) = match tls { TlsMode::None => return Ok(Box::new(socket)), TlsMode::Prefer(handshaker) => (false, handshaker), TlsMode::Require(handshaker) => (true, handshaker), }; try!(socket.write_message(&Frontend::SslRequest { code: message::SSL_CODE })); try!(socket.flush()); if try!(socket.read_u8()) == b'N' { if tls_required { return Err(ConnectError::Ssl("the server does not support TLS".into())); } else { return Ok(Box::new(socket)); } } let host = match params.target { ConnectTarget::Tcp(ref host) => host, // Postgres doesn't support TLS over unix sockets #[cfg(any(feature = "with-unix_socket", all(unix, feature = "nightly")))] ConnectTarget::Unix(_) => return Err(ConnectError::Io(::bad_response())), }; handshaker.tls_handshake(host, socket).map_err(ConnectError::Ssl) }
use command::Command;
use vm::VM;

/// A Brainfuck-style program: the instruction tape plus the bookkeeping
/// needed to resume execution as further instructions are appended.
pub struct Program {
    // The full instruction tape accumulated via `append`.
    instructions: Vec<Command>,
    // Index of the next instruction to run; None until the first `append`.
    instruction_pointer: Option<usize>,
    // Current loop-nesting depth at the instruction pointer.
    current_depth: u64,
    pub status: ProgramStatus,
}

/// Whether execution proceeds normally or is skipping forward until loop
/// nesting returns to the recorded goal depth.
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum ProgramStatus {
    Normal,
    Seeking(u64),
}

impl Program {
    /// Creates an empty program with no instructions loaded.
    pub fn new() -> Program {
        Program {
            instructions: Vec::new(),
            instruction_pointer: None,
            current_depth: 0,
            status: ProgramStatus::Normal,
        }
    }

    /// Appends instructions to the tape. The first append also initializes
    /// the instruction pointer to the start of the tape.
    pub fn append(&mut self, instructions: &[Command]) {
        self.instructions.extend(instructions.iter().cloned());
        if self.instruction_pointer.is_none() {
            self.instruction_pointer = Some(0);
        }
    }

    /// Runs the tape against `vm`, resuming a pending forward seek first if
    /// one was left unfinished by a previous call.
    pub fn execute(&mut self, vm: &mut VM) {
        match (self.instruction_pointer, self.status) {
            // Resume an in-progress forward seek, then (if the tape isn't
            // exhausted) recurse to continue normal execution.
            (Some(index), ProgramStatus::Seeking(_)) => {
                let (new_index, new_depth, new_status) = self.handle_jump_forward(vm, index);
                self.instruction_pointer = Some(new_index);
                self.current_depth = new_depth;
                self.status = new_status;
                if new_index < self.instructions.len() {
                    self.execute(vm);
                }
            }
            (Some(mut index), ProgramStatus::Normal) => {
                while index < self.instructions.len() {
                    match self.instructions[index] {
                        Command::JumpBackward => self.handle_jump_backward(vm, &mut index),
                        Command::JumpForward => {
                            // NOTE(review): when the current cell is 0 and
                            // status is Normal, handle_jump_forward returns
                            // the same index unchanged (see seek_forward's
                            // Normal arm), so this loop appears unable to
                            // make progress — looks unfinished; confirm.
                            let (new_index, new_depth, new_status) =
                                self.handle_jump_forward(vm, index);
                            index = new_index;
                            self.current_depth = new_depth;
                            self.status = new_status;
                        }
                        command => {
                            vm.apply(command);
                            index += 1;
                        }
                    }
                }
                self.instruction_pointer = Some(index);
            }
            // No instructions appended yet: nothing to do.
            _ => {}
        }
    }

    /// Handles `]`: if the current cell is 0 the loop exits (depth drops and
    /// the pointer moves past it); otherwise scan backwards for the matching
    /// `[` at the same depth and jump to the instruction after it.
    fn handle_jump_backward(&mut self, vm: &VM, index_ref: &mut usize) {
        match vm.cells[vm.data_pointer] {
            0 => {
                self.current_depth -= 1;
                *index_ref += 1;
            }
            _ => {
                let goal_depth = self.current_depth;
                // Walk backwards, tracking depth, until the matching
                // JumpForward at goal depth is found.
                for index in (0..*index_ref).rev() {
                    match self.instructions[index] {
                        Command::JumpBackward => self.current_depth += 1,
                        Command::JumpForward => {
                            if self.current_depth == goal_depth {
                                *index_ref = index + 1;
                                return;
                            }
                            self.current_depth -= 1;
                        }
                        _ => {}
                    }
                }
                // Unbalanced tape: a `]` with no matching `[`.
                panic!("No starting brace found!");
            }
        }
    }

    /// While Seeking, scans forward from `starting_index` until nesting depth
    /// returns to the goal; returns (next index, depth, new status). If the
    /// tape ends first, returns the tape length with Seeking preserved so a
    /// later `execute` can resume after more instructions are appended.
    fn seek_forward(&self, starting_index: usize) -> (usize, u64, ProgramStatus) {
        match self.status {
            ProgramStatus::Seeking(goal_depth) => {
                let mut depth = self.current_depth;
                for index in starting_index..self.instructions.len() {
                    match self.instructions[index] {
                        Command::JumpForward => depth += 1,
                        Command::JumpBackward => depth -= 1,
                        _ => {}
                    }
                    if depth == goal_depth {
                        // NOTE(review): returns self.current_depth (the
                        // depth on entry), not the scanned `depth` — verify
                        // this is intended.
                        return (index + 1, self.current_depth, ProgramStatus::Normal);
                    }
                }
                return (self.instructions.len(),
                        self.current_depth,
                        ProgramStatus::Seeking(goal_depth));
            }
            // NOTE(review): in the Normal state this is a no-op (same index,
            // same status), which is what makes the (0, Normal) arm of
            // handle_jump_forward spin — presumably an unfinished branch.
            ProgramStatus::Normal => return (starting_index, self.current_depth, self.status),
        }
    }

    /// Handles `[`: a non-zero cell enters the loop (depth + 1); a zero cell,
    /// or an already-in-progress seek, delegates to seek_forward to skip to
    /// the matching `]`.
    fn handle_jump_forward(&self, vm: &VM, index: usize) -> (usize, u64, ProgramStatus) {
        match (vm.cells[vm.data_pointer], self.status) {
            (0, ProgramStatus::Normal) => self.seek_forward(index),
            (_, ProgramStatus::Normal) => (index + 1, self.current_depth + 1, ProgramStatus::Normal),
            (_, ProgramStatus::Seeking(_)) => self.seek_forward(index),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // Balanced tape: the seek finds the matching close and returns Normal.
    #[test]
    pub fn test_seek_forward_normal() {
        let program = Program {
            instruction_pointer: None,
            current_depth: 0,
            status: ProgramStatus::Seeking(0),
            instructions: vec![
                Command::JumpForward,
                Command::JumpForward,
                Command::JumpBackward,
                Command::JumpBackward,
            ],
        };
        let (new_index, new_depth, new_status) = program.seek_forward(0);
        assert_eq!(new_depth, 0);
        assert_eq!(new_index, 4);
        assert_eq!(new_status, ProgramStatus::Normal);
    }

    // Unbalanced tape: the seek runs off the end and stays Seeking.
    // NOTE(review): this test looks inconsistent with seek_forward, which
    // returns self.current_depth (0 here) in the ran-off-the-end path, not 1
    // — confirm whether the test or the function is wrong.
    #[test]
    pub fn test_seek_forward_seeking() {
        let program = Program {
            instruction_pointer: None,
            current_depth: 0,
            status: ProgramStatus::Seeking(0),
            instructions: vec![
                Command::JumpForward,
                Command::JumpForward,
                Command::JumpBackward,
            ],
        };
        let (new_index, new_depth, new_status) = program.seek_forward(0);
        assert_eq!(new_depth, 1);
        assert_eq!(new_index, 3);
        assert_eq!(new_status, ProgramStatus::Seeking(0));
    }
}
Start from scratch on execute logic.. I did not like ANY of the directions it was going. It is not enough logic to keep finicking with. A new approach will be explored. 
use command::Command;
use vm::VM;

/// A Brainfuck-style program: an instruction tape plus the state required to
/// resume execution as more instructions arrive.
pub struct Program {
    instructions: Vec<Command>,
    instruction_pointer: Option<usize>,
    current_depth: u64,
    pub status: ProgramStatus,
}

/// Execution mode: running normally, or skipping forward until loop nesting
/// returns to the recorded depth.
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum ProgramStatus {
    Normal,
    Seeking(u64),
}

impl Program {
    /// Builds an empty program with nothing on the tape yet.
    pub fn new() -> Program {
        Program {
            status: ProgramStatus::Normal,
            current_depth: 0,
            instruction_pointer: None,
            instructions: Vec::new(),
        }
    }

    /// Extends the tape with `instructions`; the very first append also
    /// points the instruction pointer at the start of the tape.
    pub fn append(&mut self, instructions: &[Command]) {
        for command in instructions {
            self.instructions.push(command.clone());
        }
        self.instruction_pointer = self.instruction_pointer.or(Some(0));
    }

    /// Runs the program against `vm`. Intentionally a stub for now — the
    /// execution strategy is being redesigned.
    pub fn execute(&mut self, vm: &mut VM) {
    }
}
use std::collections::HashMap;
use std::ffi::OsStr;
use std::fs;
use std::fmt;
use std::path::{Path, PathBuf};
use std::result::Result as StdResult;
use std::str;

use java_properties;
use toml;
use walkdir::{DirEntry, WalkDir, WalkDirIterator};

use super::errors::*;
use super::format::Style;
use super::fsutils;
use super::template::Template;

// Template context: placeholder name -> replacement value.
type Context = HashMap<String, String>;

/// A template project to render: where its root lives, how its default
/// context file is formatted, and which template syntax it uses.
#[derive(Debug)]
pub struct Project {
    pub root_path: Option<String>,
    pub config_format: ConfigFormat,
    pub style: Style,
}

/// The on-disk format of the project's default-context file.
#[derive(Copy, Clone, Debug)]
pub enum ConfigFormat {
    JavaProps,
    Toml,
}

impl fmt::Display for ConfigFormat {
    // Displays as the context file's filename: "default.properties" or
    // "default.toml".
    fn fmt(&self, f: &mut fmt::Formatter) -> StdResult<(), fmt::Error> {
        let name = match *self {
            ConfigFormat::JavaProps => "properties",
            ConfigFormat::Toml => "toml",
        };
        write!(f, "default.{}", &name)
    }
}

impl Default for Project {
    fn default() -> Project {
        Project {
            root_path: None,
            config_format: ConfigFormat::Toml,
            style: Style::Simple,
        }
    }
}

impl Project {
    /// Creates a giter8-flavored project (Java-properties context, g8 syntax).
    pub fn new_g8(root: Option<&str>) -> Project {
        Project {
            root_path: root.map(|v| v.to_string()),
            config_format: ConfigFormat::JavaProps,
            style: Style::Giter8,
        }
    }

    /// Sets the project-root subdirectory; chainable.
    pub fn set_root_dir(&mut self, root: &str) -> &mut Project {
        self.root_path = Some(root.into());
        self
    }

    /// Resolves the effective template root: `clone_root` itself, or the
    /// configured subdirectory when it actually exists in the clone.
    pub fn resolve_root_dir(&self, clone_root: &Path) -> PathBuf {
        let mut buf = clone_root.to_path_buf();
        if let Some(ref inner) = self.root_path {
            if fsutils::exists(clone_root.join(inner)) {
                buf.push(inner);
            }
        }
        buf
    }

    /// Loads the default placeholder context from the template root.
    pub fn default_context(&self, clone_root: &Path) -> Result<Context> {
        let root = self.resolve_root_dir(clone_root);
        get_default_context(self, &root)
    }

    // TODO: give clear `Err` type
    // TODO: make it run async
    /// Walks the template tree, expands placeholders in both path names and
    /// file contents, and writes the result under `dest` (unless `dry_run`).
    /// `file_map` records renamed path components so children of a renamed
    /// directory land under the new name.
    pub fn generate(&self,
                    context: &Context,
                    clone_root: &Path,
                    dest: &Path,
                    dry_run: bool)
                    -> Result<()> {
        let root = self.resolve_root_dir(clone_root);
        let walker = WalkDir::new(&root).into_iter();
        // original path segment -> rendered replacement
        let mut file_map: HashMap<String, String> = HashMap::new();
        let default_ctx = root.join(format!("{}", &self.config_format));
        if !dry_run {
            fs::create_dir_all(dest).unwrap();
        }
        for entry in walker.filter_entry(|e| !is_git_metadata(e)) {
            let entry = entry.unwrap();
            // Skip the root itself and the default-context file.
            if entry.path() == &root || entry.path() == &default_ctx {
                debug!("skipping {:?}", entry.file_name());
                continue;
            }
            // Collect the path components between the template root and this
            // entry (deepest-first), to rebuild the relative path under dest.
            let mut segment: Vec<&OsStr> = Vec::new();
            let mut rel_path_up = entry.path().parent();
            let mut upwards = 1;
            while let Some(parent) = rel_path_up {
                if upwards >= entry.depth() {
                    break;
                } else {
                    segment.push(parent.file_name().unwrap_or("".as_ref()));
                }
                upwards += 1;
                rel_path_up = parent.parent();
            }
            let base = entry.file_name().to_string_lossy().to_string();
            let mut dest = dest.to_path_buf();
            if !segment.is_empty() {
                segment.reverse();
                for part in segment {
                    // Apply any rename a parent directory received earlier.
                    if let Some(rep) = file_map.get(&part.to_string_lossy().to_string()) {
                        debug!("File tree altered: {:?} => {:?}", &part, rep);
                        dest.push(rep);
                    } else {
                        dest.push(part);
                    }
                }
            }
            // Expand placeholders in the entry's own name.
            let mut writer = Vec::new();
            Template::compile_inline(&mut writer, Style::Pathname, &base, context)
                .unwrap();
            let name = str::from_utf8(&writer).unwrap();
            if name != &base {
                file_map.insert(base.clone(), name.to_string());
            }
            dest.push(name);
            debug!("Destination entry: {:?}", dest);
            // TODO:
            if !dry_run {
                if entry.file_type().is_file() {
                    // Render the file body through the template engine.
                    let mut tpl = Template::read_file(self.style.clone(), &entry.path()).unwrap();
                    let mut f = fs::OpenOptions::new()
                        .write(true)
                        .truncate(true)
                        .create(true)
                        .open(dest.as_path())
                        .unwrap();
                    tpl.write(&mut f, context).unwrap();
                    f.sync_data().unwrap();
                    // fs::copy(&entry.path(), dest.as_path()).expect("Failed to copy file");
                } else if entry.file_type().is_dir() {
                    fs::create_dir_all(dest.as_path()).expect("Failed to copy directory");
                }
            }
        }
        debug!("{:?}", &file_map);
        Ok(())
    }
}

// True when the entry is a `.git` directory (excluded from the walk).
fn is_git_metadata(entry: &DirEntry) -> bool {
    let is_git = entry.file_name().to_str().map(|s| s == ".git").unwrap_or(false);
    fsutils::is_directory(entry.path()) && is_git
}

// Parses the default-context file in whichever format the project declares.
fn get_default_context(project: &Project, root_dir: &Path) -> Result<Context> {
    let default_ctx = root_dir.join(format!("{}", project.config_format));
    match project.config_format {
        ConfigFormat::JavaProps => {
            fs::File::open(&default_ctx)
                .map(|f| java_properties::read(f).unwrap())
                .map_err(|e| ErrorKind::Io(e).into())
        }
        ConfigFormat::Toml => {
            fsutils::read_file(&default_ctx)
                .map(|s| toml::decode_str::<HashMap<String, String>>(&s).unwrap())
                .chain_err(|| ErrorKind::TomlDecodeFailure)
        }
    }
}
project: changed key type of cache to OsString from String, used in renaming plaeholders as pathnames use std::collections::HashMap;
use std::ffi::{OsStr, OsString};
use std::fs;
use std::fmt;
use std::path::{Path, PathBuf};
use std::result::Result as StdResult;
use std::str;

use java_properties;
use toml;
use walkdir::{DirEntry, WalkDir, WalkDirIterator};

use super::errors::*;
use super::format::Style;
use super::fsutils;
use super::template::Template;

// Template context: placeholder name -> replacement value.
type Context = HashMap<String, String>;

/// A template project to render: where its root lives, how its default
/// context file is formatted, and which template syntax it uses.
#[derive(Debug)]
pub struct Project {
    pub root_path: Option<String>,
    pub config_format: ConfigFormat,
    pub style: Style,
}

/// The on-disk format of the project's default-context file.
#[derive(Copy, Clone, Debug)]
pub enum ConfigFormat {
    JavaProps,
    Toml,
}

impl fmt::Display for ConfigFormat {
    // Displays as the context file's filename: "default.properties" or
    // "default.toml".
    fn fmt(&self, f: &mut fmt::Formatter) -> StdResult<(), fmt::Error> {
        let name = match *self {
            ConfigFormat::JavaProps => "properties",
            ConfigFormat::Toml => "toml",
        };
        write!(f, "default.{}", &name)
    }
}

impl Default for Project {
    fn default() -> Project {
        Project {
            root_path: None,
            config_format: ConfigFormat::Toml,
            style: Style::Simple,
        }
    }
}

impl Project {
    /// Creates a giter8-flavored project (Java-properties context, g8 syntax).
    pub fn new_g8(root: Option<&str>) -> Project {
        Project {
            root_path: root.map(|v| v.to_string()),
            config_format: ConfigFormat::JavaProps,
            style: Style::Giter8,
        }
    }

    /// Sets the project-root subdirectory; chainable.
    pub fn set_root_dir(&mut self, root: &str) -> &mut Project {
        self.root_path = Some(root.into());
        self
    }

    /// Resolves the effective template root: `clone_root` itself, or the
    /// configured subdirectory when it actually exists in the clone.
    pub fn resolve_root_dir(&self, clone_root: &Path) -> PathBuf {
        let mut buf = clone_root.to_path_buf();
        if let Some(ref inner) = self.root_path {
            if fsutils::exists(clone_root.join(inner)) {
                buf.push(inner);
            }
        }
        buf
    }

    /// Loads the default placeholder context from the template root.
    pub fn default_context(&self, clone_root: &Path) -> Result<Context> {
        let root = self.resolve_root_dir(clone_root);
        get_default_context(self, &root)
    }

    // TODO: give clear `Err` type
    // TODO: make it run async
    /// Walks the template tree, expands placeholders in both path names and
    /// file contents, and writes the result under `dest` (unless `dry_run`).
    /// The rename cache is now keyed by `OsString`, so renamed components
    /// can be looked up without lossy re-conversion of each path segment.
    pub fn generate(&self,
                    context: &Context,
                    clone_root: &Path,
                    dest: &Path,
                    dry_run: bool)
                    -> Result<()> {
        let root = self.resolve_root_dir(clone_root);
        let walker = WalkDir::new(&root).into_iter();
        // original path segment -> rendered replacement
        let mut file_map: HashMap<OsString, String> = HashMap::new();
        let default_ctx = root.join(format!("{}", &self.config_format));
        if !dry_run {
            fs::create_dir_all(dest).unwrap();
        }
        for entry in walker.filter_entry(|e| !is_git_metadata(e)) {
            let entry = entry.unwrap();
            // Skip the root itself and the default-context file.
            if entry.path() == &root || entry.path() == &default_ctx {
                debug!("skipping {:?}", entry.file_name());
                continue;
            }
            // Collect the path components between the template root and this
            // entry (deepest-first), to rebuild the relative path under dest.
            let mut segment: Vec<&OsStr> = Vec::new();
            let mut rel_path_up = entry.path().parent();
            let mut upwards = 1;
            while let Some(parent) = rel_path_up {
                if upwards >= entry.depth() {
                    break;
                } else {
                    segment.push(parent.file_name().unwrap_or("".as_ref()));
                }
                upwards += 1;
                rel_path_up = parent.parent();
            }
            let base = entry.file_name();
            let mut dest = dest.to_path_buf();
            if !segment.is_empty() {
                segment.reverse();
                for part in segment {
                    // Apply any rename a parent directory received earlier;
                    // &OsStr borrows directly as the OsString key.
                    if let Some(rep) = file_map.get(part) {
                        debug!("File tree altered: {:?} => {:?}", part, rep);
                        dest.push(rep);
                    } else {
                        dest.push(part);
                    }
                }
            }
            // Expand placeholders in the entry's own name.
            let mut buf = Vec::new();
            Template::compile_inline(&mut buf, Style::Pathname, &base.to_string_lossy(), context)
                .unwrap();
            let name = String::from_utf8(buf).unwrap();
            if &name != base.to_string_lossy().as_ref() {
                file_map.insert(base.to_os_string(), name.clone());
            }
            dest.push(&name);
            debug!("Destination entry: {:?}", dest);
            // TODO:
            if !dry_run {
                if entry.file_type().is_file() {
                    // Render the file body through the template engine.
                    let mut tpl = Template::read_file(self.style.clone(), &entry.path()).unwrap();
                    let mut f = fs::OpenOptions::new()
                        .write(true)
                        .truncate(true)
                        .create(true)
                        .open(dest.as_path())
                        .unwrap();
                    tpl.write(&mut f, context).unwrap();
                    f.sync_data().unwrap();
                    // fs::copy(&entry.path(), dest.as_path()).expect("Failed to copy file");
                } else if entry.file_type().is_dir() {
                    fs::create_dir_all(dest.as_path()).expect("Failed to copy directory");
                }
            }
        }
        debug!("{:?}", &file_map);
        Ok(())
    }
}

// True when the entry is a `.git` directory (excluded from the walk).
fn is_git_metadata(entry: &DirEntry) -> bool {
    let is_git = entry.file_name().to_str().map(|s| s == ".git").unwrap_or(false);
    fsutils::is_directory(entry.path()) && is_git
}

// Parses the default-context file in whichever format the project declares.
fn get_default_context(project: &Project, root_dir: &Path) -> Result<Context> {
    let default_ctx = root_dir.join(format!("{}", project.config_format));
    match project.config_format {
        ConfigFormat::JavaProps => {
            fs::File::open(&default_ctx)
                .map(|f| java_properties::read(f).unwrap())
                .map_err(|e| ErrorKind::Io(e).into())
        }
        ConfigFormat::Toml => {
            fsutils::read_file(&default_ctx)
                .map(|s| toml::decode_str::<HashMap<String, String>>(&s).unwrap())
                .chain_err(|| ErrorKind::TomlDecodeFailure)
        }
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

//! Reading and writing LVM on-disk labels and metadata.

//
// label is at start of sectors 0-3, usually 1
// label includes offset of pvheader (also within 1st 4 sectors)
// pvheader includes ptrs to data (1), metadata(0-2), and boot(0-1) areas
// metadata area (MDA), located anywhere, starts with 512b mda header, then
// large text area
// mda header has 40b of stuff, then rlocns[].
// rlocns point into mda text area. rlocn 0 used for text metadata, rlocn 1
// points to precommitted data (not currently supported by Melvin)
// text metadata written aligned to sector-size; text area treated as circular
// and text may wrap across end to beginning
// text metadata contains vg metadata in lvm config text format. Each write
// increments seqno.
//

use std::io;
use std::io::{Read, Write, Result, Error, Seek, SeekFrom};
use std::io::ErrorKind::Other;
use std::path::{Path, PathBuf};
use std::fs::{File, read_dir, OpenOptions};
use std::cmp::min;
use std::slice::bytes::copy_memory;

use byteorder::{LittleEndian, ByteOrder};
use nix::sys::stat;

use parser::{LvmTextMap, textmap_to_buf, buf_to_textmap};
use util::{align_to, crc32_calc};

// How many leading sectors may contain the label.
const LABEL_SCAN_SECTORS: usize = 4;
// Length of the PV uuid field in the pvheader.
const ID_LEN: usize = 32;
// Magic bytes at offset 4 of every MDA header.
const MDA_MAGIC: &'static [u8] = b"\x20\x4c\x56\x4d\x32\x20\x78\x5b\x35\x41\x25\x72\x30\x4e\x2a\x3e";
const SECTOR_SIZE: usize = 512;
const MDA_HEADER_SIZE: usize = 512;

/// The on-disk label sector: identifies the device as an LVM PV and points
/// at the pvheader within the first four sectors.
#[derive(Debug)]
struct LabelHeader {
    id: String,
    sector: u64,
    crc: u32,
    offset: u32,
    label: String,
}

impl LabelHeader {
    /// Scans the first LABEL_SCAN_SECTORS sectors of `buf` for a "LABELONE"
    /// label, verifying its CRC and that its recorded sector number matches
    /// where it was actually found.
    fn from_buf(buf: &[u8]) -> Result<LabelHeader> {
        for x in 0..LABEL_SCAN_SECTORS {
            let sec_buf = &buf[x*SECTOR_SIZE..x*SECTOR_SIZE+SECTOR_SIZE];
            if &sec_buf[..8] == b"LABELONE" {
                let crc = LittleEndian::read_u32(&sec_buf[16..20]);
                if crc != crc32_calc(&sec_buf[20..SECTOR_SIZE]) {
                    return Err(Error::new(Other, "Label CRC error"));
                }
                let sector = LittleEndian::read_u64(&sec_buf[8..16]);
                if sector != x as u64 {
                    return Err(Error::new(Other, "Sector field should equal sector count"));
                }
                return Ok(LabelHeader{
                    id: String::from_utf8_lossy(&sec_buf[..8]).into_owned(),
                    sector: sector,
                    crc: crc,
                    // switch from "offset from label" to "offset from start", more convenient.
                    offset: LittleEndian::read_u32(&sec_buf[20..24]) + (x*SECTOR_SIZE as usize) as u32,
                    label: String::from_utf8_lossy(&sec_buf[24..32]).into_owned(),
                })
            }
        }
        Err(Error::new(Other, "Label not found"))
    }

    /// Serializes this label into its sector on `device`, recomputing the
    /// CRC over bytes 20..512 before writing.
    fn write(&self, device: &Path) -> Result<()> {
        let mut sec_buf = [0u8; SECTOR_SIZE];
        copy_memory(self.id.as_bytes(), &mut sec_buf[..8]); // b"LABELONE"
        LittleEndian::write_u64(&mut sec_buf[8..16], self.sector);
        // switch back to "offset from label" from the more convenient "offset from start".
        LittleEndian::write_u32(
            &mut sec_buf[20..24],
            self.offset - (self.sector * SECTOR_SIZE as u64) as u32);
        copy_memory(self.label.as_bytes(), &mut sec_buf[24..32]);

        // CRC covers everything after the crc field itself.
        let crc_val = crc32_calc(&sec_buf[20..]);
        LittleEndian::write_u32(&mut sec_buf[16..20], crc_val);

        let mut f = try!(OpenOptions::new().write(true).open(device));
        try!(f.seek(SeekFrom::Start(self.sector * SECTOR_SIZE as u64)));
        f.write_all(&mut sec_buf)
    }
}

/// Describes an area within a PV
#[derive(Debug, PartialEq, Clone, Copy)]
pub struct PvArea {
    /// The offset from the start of the device in bytes
    pub offset: u64,
    /// The size in bytes
    pub size: u64,
}

#[derive(Debug)]
struct PvAreaIter<'a> {
    area: &'a[u8],
}

// Iterates the 16-byte (offset, size) pv_area entries in `buf`; a zero
// offset marks the terminating blank entry.
fn iter_pv_area<'a>(buf: &'a[u8]) -> PvAreaIter<'a> {
    PvAreaIter { area: buf }
}

impl<'a> Iterator for PvAreaIter<'a> {
    type Item = PvArea;

    fn next (&mut self) -> Option<PvArea> {
        let off = LittleEndian::read_u64(&self.area[..8]);
        let size = LittleEndian::read_u64(&self.area[8..16]);

        if off == 0 {
            None
        }
        else {
            self.area = &self.area[16..];
            Some(PvArea {
                offset: off,
                size: size,
            })
        }
    }
}

// An rlocn: a pointer into the MDA text area (offset/size relative to the
// MDA start) plus the text's checksum and an "ignored" flag bit.
#[derive(Debug, PartialEq, Clone, Copy)]
struct RawLocn {
    offset: u64,
    size: u64,
    checksum: u32,
    ignored: bool,
}

#[derive(Debug)]
struct RawLocnIter<'a> {
    area: &'a[u8],
}

// Iterates 24-byte rlocn entries; a zero offset terminates the list.
fn iter_raw_locn<'a>(buf: &'a[u8]) -> RawLocnIter<'a> {
    RawLocnIter { area: buf }
}

impl<'a> Iterator for RawLocnIter<'a> {
    type Item = RawLocn;

    fn next (&mut self) -> Option<RawLocn> {
        let off = LittleEndian::read_u64(&self.area[..8]);
        let size = LittleEndian::read_u64(&self.area[8..16]);
        let checksum = LittleEndian::read_u32(&self.area[16..20]);
        let flags = LittleEndian::read_u32(&self.area[20..24]);

        if off == 0 {
            None
        }
        else {
            self.area = &self.area[24..];
            Some(RawLocn {
                offset: off,
                size: size,
                checksum: checksum,
                ignored: (flags & 1) > 0,
            })
        }
    }
}

/// A struct containing the values in the PV header. It contains pointers to
/// the data area, and possibly metadata areas and bootloader area.
#[derive(Debug)]
pub struct PvHeader {
    /// The unique identifier.
    pub uuid: String,
    /// Size in bytes of the entire PV
    pub size: u64,
    /// Extension version. If 1, we look for an extension header that may contain a reference
    /// to a bootloader area.
    pub ext_version: u32,
    /// Extension flags, of which there are none.
    pub ext_flags: u32,
    /// A list of the data areas.
    pub data_areas: Vec<PvArea>,
    /// A list of the metadata areas.
    pub metadata_areas: Vec<PvArea>,
    /// A list of the bootloader areas.
    pub bootloader_areas: Vec<PvArea>,
    /// The device this pvheader is for
    pub dev_path: PathBuf,
}

impl PvHeader {
    //
    // PV HEADER LAYOUT:
    // - static header (uuid and size)
    // - 0+ data areas (actually max 1, usually 1; size 0 == "rest of blkdev")
    // - blank entry
    // - 0+ metadata areas (max 2, usually 1)
    // - blank entry
    // - 8 bytes of pvextension header
    // - if version > 0
    //   - 0+ bootloader areas (usually 0)
    //

    /// Parse a buf containing the on-disk pvheader and create a struct
    /// representing it.
    pub fn from_buf(buf: &[u8], path: &Path) -> Result<PvHeader> {
        // Skip uuid + size; the area lists follow.
        let mut da_buf = &buf[ID_LEN+8..];

        let da_vec: Vec<_> = iter_pv_area(da_buf).collect();

        // move slice past any actual entries plus blank
        // terminating entry
        da_buf = &da_buf[(da_vec.len()+1)*16..];

        let md_vec: Vec<_> = iter_pv_area(da_buf).collect();
        da_buf = &da_buf[(md_vec.len()+1)*16..];

        let ext_version = LittleEndian::read_u32(&da_buf[..4]);
        let mut ext_flags = 0;
        let mut ba_vec = Vec::new();
        if ext_version != 0 {
            ext_flags = LittleEndian::read_u32(&da_buf[4..8]);
            da_buf = &da_buf[8..];
            ba_vec = iter_pv_area(da_buf).collect();
        }

        Ok(PvHeader{
            uuid: String::from_utf8_lossy(&buf[..ID_LEN]).into_owned(),
            size: LittleEndian::read_u64(&buf[ID_LEN..ID_LEN+8]),
            ext_version: ext_version,
            ext_flags: ext_flags,
            data_areas: da_vec,
            metadata_areas: md_vec,
            bootloader_areas: ba_vec,
            dev_path: path.to_owned(),
        })
    }

    /// Find the PvHeader struct in a given device.
    pub fn find_in_dev(path: &Path) -> Result<PvHeader> {
        let mut f = try!(File::open(path));

        // Only the first four sectors can contain the label.
        let mut buf = [0u8; LABEL_SCAN_SECTORS * SECTOR_SIZE];
        try!(f.read(&mut buf));

        let label_header = try!(LabelHeader::from_buf(&buf));
        let pvheader = try!(PvHeader::from_buf(&buf[label_header.offset as usize..], path));

        return Ok(pvheader);
    }

    // Returns rlocn 0 (the text-metadata pointer) from an MDA header buffer,
    // or None if the rlocn list is empty. rlocns start at byte 40.
    fn get_rlocn0(buf: &[u8]) -> Option<RawLocn> {
        iter_raw_locn(&buf[40..]).next()
    }

    // Writes `rl` into the rlocn 0 slot of an MDA header buffer.
    fn set_rlocn0(buf: &mut [u8], rl: &RawLocn) -> () {
        let mut raw_locn = &mut buf[40..];
        LittleEndian::write_u64(&mut raw_locn[..8], rl.offset);
        LittleEndian::write_u64(&mut raw_locn[8..16], rl.size);
        LittleEndian::write_u32(&mut raw_locn[16..20], rl.checksum);
        let flags = rl.ignored as u32;
        LittleEndian::write_u32(&mut raw_locn[20..24], flags);
    }

    /// Read the metadata contained in the metadata area.
    /// In the case of multiple metadata areas, return the information
    /// from the first valid one.
    pub fn read_metadata(&self) -> io::Result<LvmTextMap> {
        let mut f = try!(OpenOptions::new().read(true).open(&self.dev_path));

        for pvarea in &self.metadata_areas {
            let hdr = try!(Self::read_mda_header(&pvarea, &mut f));

            let rl = match Self::get_rlocn0(&hdr) {
                None => continue,
                Some(x) => x,
            };

            if rl.ignored {
                continue
            }

            // The text area is circular: the text may wrap from the end of
            // the area back to just after the MDA header.
            let mut text = vec![0; rl.size as usize];
            let first_read = min(pvarea.size - rl.offset, rl.size) as usize;

            try!(f.seek(SeekFrom::Start(pvarea.offset + rl.offset)));
            try!(f.read(&mut text[..first_read]));

            if first_read != rl.size as usize {
                // NOTE(review): the slice index here is
                // `rl.size - first_read..` — reading the wrapped tail into
                // the *end* of the buffer rather than continuing at
                // `first_read..` looks suspicious; confirm against lvm2's
                // wrap handling.
                try!(f.seek(SeekFrom::Start(
                    pvarea.offset + MDA_HEADER_SIZE as u64)));
                try!(f.read(&mut text[rl.size as usize - first_read..]));
            }

            if rl.checksum != crc32_calc(&text) {
                return Err(Error::new(Other, "MDA text checksum failure"));
            }

            return buf_to_textmap(&text);
        }

        return Err(Error::new(Other, "No valid metadata found"));
    }

    /// Write the given metadata to all active metadata areas in the PV.
    pub fn write_metadata(&mut self, map: &LvmTextMap) -> io::Result<()> {
        let mut f = try!(OpenOptions::new().read(true).write(true)
                         .open(&self.dev_path));

        for pvarea in &self.metadata_areas {
            let mut hdr = try!(Self::read_mda_header(&pvarea, &mut f));

            // If this is the first write, supply an initial RawLocn template
            let rl = match Self::get_rlocn0(&hdr) {
                None => RawLocn {
                    offset: MDA_HEADER_SIZE as u64,
                    size: 0,
                    checksum: 0,
                    ignored: false,
                },
                Some(x) => x,
            };

            if rl.ignored {
                continue
            }

            let mut text = textmap_to_buf(map);
            // Ends with one null
            text.push(b'\0');

            // start at next sector in loop, but skip 0th sector
            // NOTE(review): `min(MDA_HEADER_SIZE, ...)` clamps the start
            // offset *down* to the header size; `max` would be the usual way
            // to keep the write past the header — verify intent.
            let start_off = min(MDA_HEADER_SIZE as u64,
                                (align_to((rl.offset + rl.size) as usize, SECTOR_SIZE)
                                 % pvarea.size as usize) as u64);
            let tail_space = pvarea.size as u64 - start_off;

            assert_eq!(start_off % SECTOR_SIZE as u64, 0);
            assert_eq!(tail_space % SECTOR_SIZE as u64, 0);

            // Write what fits before the end of the (circular) area...
            let written = if tail_space != 0 {
                try!(f.seek(SeekFrom::Start(pvarea.offset + start_off)));
                try!(f.write_all(&text[..min(tail_space as usize, text.len())]));
                min(tail_space as usize, text.len())
            } else {
                0
            };

            // ...and wrap the remainder to just after the MDA header.
            if written != text.len() {
                try!(f.seek(SeekFrom::Start(
                    pvarea.offset + MDA_HEADER_SIZE as u64)));
                try!(f.write_all(&text[written as usize..]));
            }

            Self::set_rlocn0(&mut hdr, &RawLocn {
                offset: start_off,
                size: text.len() as u64,
                checksum: crc32_calc(&text),
                ignored: rl.ignored,
            });

            try!(Self::write_mda_header(&pvarea, &mut hdr, &mut f));
        }
        Ok(())
    }

    // Reads and validates one MDA header: checksum over bytes 4..512, the
    // magic bytes, and format version 1.
    fn read_mda_header(area: &PvArea, file: &mut File)
        -> io::Result<[u8; MDA_HEADER_SIZE]> {
        assert!(area.size as usize > MDA_HEADER_SIZE);
        try!(file.seek(SeekFrom::Start(area.offset)));
        let mut hdr = [0u8; MDA_HEADER_SIZE];
        try!(file.read(&mut hdr));

        if LittleEndian::read_u32(&hdr[..4]) != crc32_calc(&hdr[4..MDA_HEADER_SIZE]) {
            return Err(Error::new(Other, "MDA header checksum failure"));
        }
        if &hdr[4..20] != MDA_MAGIC {
            return Err(Error::new(
                Other, format!("'{}' doesn't match MDA_MAGIC",
                               String::from_utf8_lossy(&hdr[4..20]))));
        }
        let ver = LittleEndian::read_u32(&hdr[20..24]);
        if ver != 1 {
            return Err(Error::new(Other, "Bad version, expected 1"));
        }

        // TODO: validate these somehow
        //println!("mdah start {}", LittleEndian::read_u64(&buf[24..32]));
        //println!("mdah size {}", LittleEndian::read_u64(&buf[32..40]));

        Ok(hdr)
    }

    // Recomputes the header checksum and writes the header back to the
    // start of its metadata area.
    fn write_mda_header(area: &PvArea, hdr: &mut [u8; MDA_HEADER_SIZE], file: &mut File)
        -> io::Result<()> {
        let csum = crc32_calc(&hdr[4..]);
        LittleEndian::write_u32(&mut hdr[..4], csum);

        try!(file.seek(SeekFrom::Start(area.offset)));
        try!(file.write_all(hdr));
        Ok(())
    }
}

/// Scan a list of directories for block devices containing LVM PV labels.
pub fn scan_for_pvs(dirs: &[&Path]) -> Result<Vec<PathBuf>> { let mut ret_vec = Vec::new(); for dir in dirs { ret_vec.extend(try!(read_dir(dir)) .into_iter() .filter_map(|dir_e| if dir_e.is_ok() { Some(dir_e.unwrap().path()) } else {None} ) .filter(|path| { (stat::stat(path).unwrap().st_mode & 0x6000) == 0x6000 }) // S_IFBLK .filter(|path| { PvHeader::find_in_dev(&path).is_ok() }) .collect::<Vec<_>>()); } Ok(ret_vec) } Add PvHeader::initialize() Writes out the label and pvheader to a block device. Signed-off-by: Andy Grover <b7d524d2f5cc5aebadb6b92b08d3ab26911cde33@redhat.com> // This Source Code Form is subject to the terms of the Mozilla Public // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at http://mozilla.org/MPL/2.0/. //! Reading and writing LVM on-disk labels and metadata. // // label is at start of sectors 0-3, usually 1 // label includes offset of pvheader (also within 1st 4 sectors) // pvheader includes ptrs to data (1), metadata(0-2), and boot(0-1) areas // metadata area (MDA), located anywhere, starts with 512b mda header, then // large text area // mda header has 40b of stuff, then rlocns[]. // rlocns point into mda text area. rlocn 0 used for text metadata, rlocn 1 // points to precommitted data (not currently supported by Melvin) // text metadata written aligned to sector-size; text area treated as circular // and text may wrap across end to beginning // text metadata contains vg metadata in lvm config text format. Each write // increments seqno. 
//
use std::io;
use std::io::{Read, Write, Result, Error, Seek, SeekFrom};
use std::io::ErrorKind::Other;
use std::path::{Path, PathBuf};
use std::fs::{File, read_dir, OpenOptions};
use std::cmp::min;
use std::slice::bytes::copy_memory;
use std::os::unix::io::AsRawFd;

use byteorder::{LittleEndian, ByteOrder};
use nix::sys::{stat, ioctl};
use uuid::Uuid;

use parser::{LvmTextMap, textmap_to_buf, buf_to_textmap};
use util::{align_to, crc32_calc};
use pv::Device;

// How many leading sectors of a device are searched for the label.
const LABEL_SCAN_SECTORS: usize = 4;
// Length in bytes of a PV uuid as stored on disk.
const ID_LEN: usize = 32;
// Magic bytes identifying an MDA header (" LVM2 x[5A%r0N*>").
const MDA_MAGIC: &'static [u8] = b"\x20\x4c\x56\x4d\x32\x20\x78\x5b\x35\x41\x25\x72\x30\x4e\x2a\x3e";
// Size in bytes of the on-disk label header.
const LABEL_SIZE: usize = 32;
// Sector the label is written to by initialize().
const LABEL_SECTOR: usize = 1;
const SECTOR_SIZE: usize = 512;
// The MDA header occupies the first 512 bytes of a metadata area.
const MDA_HEADER_SIZE: usize = 512;
// Default size of each metadata area created by PvHeader::initialize().
const DEFAULT_MDA_SIZE: u64 = (1024 * 1024);
// PV extension header version written by initialize().
const EXTENSION_VERSION: usize = 1;

/// The on-disk "LABELONE" label header, which locates the PV header.
#[derive(Debug)]
struct LabelHeader {
    id: String,       // "LABELONE"
    sector: u64,      // sector the label was found in
    crc: u32,         // CRC over bytes 20..512 of the label sector
    offset: u32,      // offset of the pvheader from the start of the device
    label: String,    // label type, e.g. "LVM2 001"
}

impl LabelHeader {
    /// Search the first LABEL_SCAN_SECTORS sectors of `buf` for a
    /// "LABELONE" label, validating its CRC and sector field.
    fn from_buf(buf: &[u8]) -> Result<LabelHeader> {
        for x in 0..LABEL_SCAN_SECTORS {
            let sec_buf = &buf[x*SECTOR_SIZE..x*SECTOR_SIZE+SECTOR_SIZE];
            if &sec_buf[..8] == b"LABELONE" {
                // CRC covers the remainder of the sector after the crc field.
                let crc = LittleEndian::read_u32(&sec_buf[16..20]);
                if crc != crc32_calc(&sec_buf[20..SECTOR_SIZE]) {
                    return Err(Error::new(Other, "Label CRC error"));
                }

                // The stored sector number must match where we found it.
                let sector = LittleEndian::read_u64(&sec_buf[8..16]);
                if sector != x as u64 {
                    return Err(Error::new(Other, "Sector field should equal sector count"));
                }

                return Ok(LabelHeader{
                    id: String::from_utf8_lossy(&sec_buf[..8]).into_owned(),
                    sector: sector,
                    crc: crc,
                    // switch from "offset from label" to "offset from start", more convenient.
                    offset: LittleEndian::read_u32(&sec_buf[20..24]) + (x*SECTOR_SIZE as usize) as u32,
                    label: String::from_utf8_lossy(&sec_buf[24..32]).into_owned(),
                })
            }
        }
        Err(Error::new(Other, "Label not found"))
    }

    /// Initialize a device with a label header.
    // Fills the first LABEL_SIZE bytes of `sec_buf`. The CRC is computed over
    // everything from byte 20 to the end of the sector, so the caller must
    // fill in the pvheader portion of `sec_buf` *before* calling this.
    fn initialize(sec_buf: &mut [u8; SECTOR_SIZE]) -> () {
        copy_memory(b"LABELONE", &mut sec_buf[..8]);
        LittleEndian::write_u64(&mut sec_buf[8..16], LABEL_SECTOR as u64);
        LittleEndian::write_u32(&mut sec_buf[20..24], LABEL_SIZE as u32);
        copy_memory(b"LVM2 001", &mut sec_buf[24..32]);

        // CRC is written last since it covers the rest of the sector.
        let crc_val = crc32_calc(&sec_buf[20..]);
        LittleEndian::write_u32(&mut sec_buf[16..20], crc_val);
    }
}

/// Describes an area within a PV
#[derive(Debug, PartialEq, Clone, Copy)]
pub struct PvArea {
    /// The offset from the start of the device in bytes
    pub offset: u64,
    /// The size in bytes
    pub size: u64,
}

/// Iterator over on-disk (offset, size) pairs; terminated by a zero offset.
#[derive(Debug)]
struct PvAreaIter<'a> {
    area: &'a[u8],
}

/// Iterate the 16-byte disk_locn entries starting at `buf`.
fn iter_pv_area<'a>(buf: &'a[u8]) -> PvAreaIter<'a> {
    PvAreaIter { area: buf }
}

impl<'a> Iterator for PvAreaIter<'a> {
    type Item = PvArea;

    fn next(&mut self) -> Option<PvArea> {
        let off = LittleEndian::read_u64(&self.area[..8]);
        let size = LittleEndian::read_u64(&self.area[8..16]);

        // A zero offset marks the blank terminating entry.
        if off == 0 {
            None
        } else {
            self.area = &self.area[16..];
            Some(PvArea {
                offset: off,
                size: size,
            })
        }
    }
}

/// An rlocn entry in the MDA header: where the current metadata text lives
/// within the metadata area, and its checksum.
#[derive(Debug, PartialEq, Clone, Copy)]
struct RawLocn {
    offset: u64,    // offset of the text within the metadata area
    size: u64,      // size of the text in bytes
    checksum: u32,  // CRC over the text
    ignored: bool,  // bit 0 of the on-disk flags word
}

/// Iterator over 24-byte rlocn entries; terminated by a zero offset.
#[derive(Debug)]
struct RawLocnIter<'a> {
    area: &'a[u8],
}

/// Iterate the rlocn entries starting at `buf`.
fn iter_raw_locn<'a>(buf: &'a[u8]) -> RawLocnIter<'a> {
    RawLocnIter { area: buf }
}

impl<'a> Iterator for RawLocnIter<'a> {
    type Item = RawLocn;

    fn next(&mut self) -> Option<RawLocn> {
        let off = LittleEndian::read_u64(&self.area[..8]);
        let size = LittleEndian::read_u64(&self.area[8..16]);
        let checksum = LittleEndian::read_u32(&self.area[16..20]);
        let flags = LittleEndian::read_u32(&self.area[20..24]);

        // A zero offset marks the blank terminating entry.
        if off == 0 {
            None
        } else {
            self.area = &self.area[24..];
            Some(RawLocn {
                offset: off,
                size: size,
                checksum: checksum,
                ignored: (flags & 1) > 0,
            })
        }
    }
}

/// A struct containing the values in the PV header. It contains pointers to
/// the data area, and possibly metadata areas and bootloader area.
#[derive(Debug)]
pub struct PvHeader {
    /// The unique identifier.
    pub uuid: String,
    /// Size in bytes of the entire PV
    pub size: u64,
    /// Extension version. If 1, we look for an extension header that may contain a reference
    /// to a bootloader area.
    pub ext_version: u32,
    /// Extension flags, of which there are none.
    pub ext_flags: u32,
    /// A list of the data areas.
    pub data_areas: Vec<PvArea>,
    /// A list of the metadata areas.
    pub metadata_areas: Vec<PvArea>,
    /// A list of the bootloader areas.
    pub bootloader_areas: Vec<PvArea>,
    /// The device this pvheader is for
    pub dev_path: PathBuf,
}

impl PvHeader {
    //
    // PV HEADER LAYOUT:
    // - static header (uuid and size)
    // - 0+ data areas (actually max 1, usually 1; size 0 == "rest of blkdev")
    //   Remember to subtract mda1 size if present.
    // - blank entry
    // - 0+ metadata areas (max 2, usually 1)
    // - blank entry
    // - 8 bytes of pvextension header
    // - if version > 0
    //   - 0+ bootloader areas (usually 0)
    //

    /// Parse a buf containing the on-disk pvheader and create a struct
    /// representing it.
    pub fn from_buf(buf: &[u8], path: &Path) -> Result<PvHeader> {

        // Skip the static header: uuid (ID_LEN) + size (8 bytes).
        let mut da_buf = &buf[ID_LEN+8..];

        let da_vec: Vec<_> = iter_pv_area(da_buf).collect();

        // move slice past any actual entries plus blank
        // terminating entry
        da_buf = &da_buf[(da_vec.len()+1)*16..];

        let md_vec: Vec<_> = iter_pv_area(da_buf).collect();

        da_buf = &da_buf[(md_vec.len()+1)*16..];

        let ext_version = LittleEndian::read_u32(&da_buf[..4]);
        let mut ext_flags = 0;
        let mut ba_vec = Vec::new();

        // Bootloader areas only exist if an extension header is present.
        if ext_version != 0 {
            ext_flags = LittleEndian::read_u32(&da_buf[4..8]);
            da_buf = &da_buf[8..];
            ba_vec = iter_pv_area(da_buf).collect();
        }

        Ok(PvHeader{
            uuid: String::from_utf8_lossy(&buf[..ID_LEN]).into_owned(),
            size: LittleEndian::read_u64(&buf[ID_LEN..ID_LEN+8]),
            ext_version: ext_version,
            ext_flags: ext_flags,
            data_areas: da_vec,
            metadata_areas: md_vec,
            bootloader_areas: ba_vec,
            dev_path: path.to_owned(),
        })
    }

    /// Find the PvHeader struct in a given device.
    pub fn find_in_dev(path: &Path) -> Result<PvHeader> {
        let mut f = try!(File::open(path));

        let mut buf = [0u8; LABEL_SCAN_SECTORS * SECTOR_SIZE];
        // NOTE(review): read() may return fewer bytes than requested without
        // error; a short read here would make the label scan below see stale
        // zeroes. Consider read_exact-style looping — confirm.
        try!(f.read(&mut buf));

        let label_header = try!(LabelHeader::from_buf(&buf));
        let pvheader = try!(PvHeader::from_buf(&buf[label_header.offset as usize..], path));

        return Ok(pvheader);
    }

    /// Query the kernel for the size in bytes of a block device.
    fn blkdev_size(file: &File) -> Result<u64> {
        // BLKGETSIZE64: ioctl 0x12/114, reads an 8-byte value.
        let op = ioctl::op_read(0x12, 114, 8);
        let mut val: u64 = 0;
        match unsafe { ioctl::read_into(file.as_raw_fd(), op, &mut val) } {
            Err(_) => return Err((io::Error::last_os_error())),
            Ok(_) => Ok(val),
        }
    }

    /// Initialize a device as a PV with reasonable defaults: two metadata
    /// areas, no bootsector area, and size based on the device's size.
    pub fn initialize(dev: &Device) -> Result<()> {
        let pathbuf = match dev.path() {
            Some(x) => x,
            None => return Err(Error::new(Other, "Could not get path from Device")),
        };

        let mut f = try!(OpenOptions::new().write(true).open(&pathbuf));

        // One sector holds both the label (first LABEL_SIZE bytes) and the
        // pvheader (the rest).
        let mut sec_buf = [0u8; SECTOR_SIZE];

        // Fill in pvheader
        {
            let mut pvh = &mut sec_buf[LABEL_SIZE..];
            copy_memory(Uuid::new_v4().to_simple_string().as_bytes(), &mut pvh[..ID_LEN]);

            let dev_size = try!(Self::blkdev_size(&f));
            LittleEndian::write_u64(&mut pvh[ID_LEN..ID_LEN+8], dev_size);

            let mut pvh = &mut pvh[ID_LEN+8..];

            // define two mdas of 1024K and one data area
            // First mda starts at 5th sector
            let mda0_offset = (LABEL_SCAN_SECTORS * SECTOR_SIZE) as u64;
            let mda0_length = DEFAULT_MDA_SIZE;

            // Device must have room for both mdas plus the label sectors.
            if dev_size < ((DEFAULT_MDA_SIZE * 2) + mda0_offset) {
                return Err(Error::new(Other, "Device too small"));
            }

            // mda0:da0:mda1
            // da0 starts immediately after mda0.
            LittleEndian::write_u64(pvh, mda0_offset + mda0_length);
            let mut pvh = &mut pvh[8..];
            LittleEndian::write_u64(pvh, 0); // da0 length is not used
            let mut pvh = &mut pvh[8..];
            // skip 16 bytes to indicate end of da list
            let mut pvh = &mut pvh[16..];
            // mda0 at start of PV
            LittleEndian::write_u64(pvh, mda0_offset);
            let mut pvh = &mut pvh[8..];
            LittleEndian::write_u64(pvh, mda0_length);
            let mut pvh = &mut pvh[8..];
            // mda1 at end of PV
            LittleEndian::write_u64(pvh, dev_size - DEFAULT_MDA_SIZE);
            let mut pvh = &mut pvh[8..];
            LittleEndian::write_u64(pvh, DEFAULT_MDA_SIZE);
            let mut pvh = &mut pvh[8..];
            // skip 16 bytes to indicate end of mda list
            let mut pvh = &mut pvh[16..];
            // Extension header
            LittleEndian::write_u32(pvh, EXTENSION_VERSION as u32);
            // everything else is 0 so we're finished
        }

        // Must do label last since it calcs crc over everything
        LabelHeader::initialize(&mut sec_buf);

        try!(f.seek(SeekFrom::Start(LABEL_SECTOR as u64 * SECTOR_SIZE as u64)));
        f.write_all(&mut sec_buf)
    }

    /// Read rlocn 0 from an MDA header buffer. The rlocn array starts at
    /// byte offset 40 of the header.
    fn get_rlocn0(buf: &[u8]) -> Option<RawLocn> {
        iter_raw_locn(&buf[40..]).next()
    }

    /// Write rlocn 0 into an MDA header buffer (at byte offset 40).
    fn set_rlocn0(buf: &mut [u8], rl: &RawLocn) -> () {
        let mut raw_locn = &mut buf[40..];

        LittleEndian::write_u64(&mut raw_locn[..8], rl.offset);
        LittleEndian::write_u64(&mut raw_locn[8..16], rl.size);
        LittleEndian::write_u32(&mut raw_locn[16..20], rl.checksum);

        // Only flag bit 0 ("ignored") is represented.
        let flags = rl.ignored as u32;

        LittleEndian::write_u32(&mut raw_locn[20..24], flags);
    }

    /// Read the metadata contained in the metadata area.
    /// In the case of multiple metadata areas, return the information
    /// from the first valid one.
    pub fn read_metadata(&self) -> io::Result<LvmTextMap> {
        let mut f = try!(OpenOptions::new().read(true).open(&self.dev_path));

        for pvarea in &self.metadata_areas {
            let hdr = try!(Self::read_mda_header(&pvarea, &mut f));

            let rl = match Self::get_rlocn0(&hdr) {
                None => continue,
                Some(x) => x,
            };

            if rl.ignored {
                continue
            }

            let mut text = vec![0; rl.size as usize];
            // The text area is circular: the text may wrap from the end of
            // the metadata area back to just after the MDA header.
            let first_read = min(pvarea.size - rl.offset, rl.size) as usize;

            try!(f.seek(SeekFrom::Start(pvarea.offset + rl.offset)));
            try!(f.read(&mut text[..first_read]));

            if first_read != rl.size as usize {
                try!(f.seek(SeekFrom::Start(
                    pvarea.offset + MDA_HEADER_SIZE as u64)));
                // NOTE(review): the wrapped tail should land in
                // text[first_read..]; the index used here
                // (rl.size - first_read) gives a slice of the wrong length
                // unless first_read == size - first_read. Confirm against
                // lvm2's circular-buffer read.
                try!(f.read(&mut text[rl.size as usize - first_read..]));
            }

            if rl.checksum != crc32_calc(&text) {
                return Err(Error::new(Other, "MDA text checksum failure"));
            }

            return buf_to_textmap(&text);
        }

        return Err(Error::new(Other, "No valid metadata found"));
    }

    /// Write the given metadata to all active metadata areas in the PV.
    pub fn write_metadata(&mut self, map: &LvmTextMap) -> io::Result<()> {
        let mut f = try!(OpenOptions::new().read(true).write(true)
                         .open(&self.dev_path));

        for pvarea in &self.metadata_areas {

            let mut hdr = try!(Self::read_mda_header(&pvarea, &mut f));

            // If this is the first write, supply an initial RawLocn template
            let rl = match Self::get_rlocn0(&hdr) {
                None => RawLocn {
                    offset: MDA_HEADER_SIZE as u64,
                    size: 0,
                    checksum: 0,
                    ignored: false,
                },
                Some(x) => x,
            };

            if rl.ignored {
                continue
            }

            let mut text = textmap_to_buf(map);
            // Ends with one null
            text.push(b'\0');

            // start at next sector in loop, but skip 0th sector
            // NOTE(review): min() *caps* the start offset at
            // MDA_HEADER_SIZE; the comment suggests the intent is to keep
            // the offset at least past the header, which would be max().
            // Confirm intended behavior.
            let start_off = min(MDA_HEADER_SIZE as u64,
                                (align_to((rl.offset + rl.size) as usize, SECTOR_SIZE)
                                 % pvarea.size as usize) as u64);
            let tail_space = pvarea.size as u64 - start_off;

            assert_eq!(start_off % SECTOR_SIZE as u64, 0);
            assert_eq!(tail_space % SECTOR_SIZE as u64, 0);

            // Write as much as fits before the end of the area...
            let written = if tail_space != 0 {
                try!(f.seek(
                    SeekFrom::Start(pvarea.offset + start_off)));
                try!(f.write_all(&text[..min(tail_space as usize,
                                             text.len())]));
                min(tail_space as usize, text.len())
            } else {
                0
            };

            // ...then wrap the remainder to just after the MDA header.
            if written != text.len() {
                try!(f.seek(
                    SeekFrom::Start(pvarea.offset + MDA_HEADER_SIZE as u64)));
                try!(f.write_all(&text[written as usize..]));
            }

            Self::set_rlocn0(&mut hdr, &RawLocn {
                offset: start_off,
                size: text.len() as u64,
                checksum: crc32_calc(&text),
                ignored: rl.ignored,
            });

            try!(Self::write_mda_header(&pvarea, &mut hdr, &mut f));
        }
        Ok(())
    }

    /// Read and validate (checksum, magic, version) the 512-byte MDA header
    /// at the start of a metadata area.
    fn read_mda_header(area: &PvArea, file: &mut File)
                       -> io::Result<[u8; MDA_HEADER_SIZE]> {
        assert!(area.size as usize > MDA_HEADER_SIZE);
        try!(file.seek(SeekFrom::Start(area.offset)));
        let mut hdr = [0u8; MDA_HEADER_SIZE];
        try!(file.read(&mut hdr));

        // Header checksum covers everything after the checksum field itself.
        if LittleEndian::read_u32(&hdr[..4]) != crc32_calc(&hdr[4..MDA_HEADER_SIZE]) {
            return Err(Error::new(Other, "MDA header checksum failure"));
        }

        if &hdr[4..20] != MDA_MAGIC {
            return Err(Error::new(
                Other, format!("'{}' doesn't match MDA_MAGIC",
                               String::from_utf8_lossy(&hdr[4..20]))));
        }

        let ver = LittleEndian::read_u32(&hdr[20..24]);
        if ver != 1 {
            return Err(Error::new(Other, "Bad version, expected 1"));
        }

        // TODO: validate these somehow
        //println!("mdah start {}", LittleEndian::read_u64(&buf[24..32]));
        //println!("mdah size {}", LittleEndian::read_u64(&buf[32..40]));

        Ok(hdr)
    }

    /// Recompute the header checksum and write the MDA header back to disk.
    fn write_mda_header(area: &PvArea, hdr: &mut [u8; MDA_HEADER_SIZE],
                        file: &mut File) -> io::Result<()> {
        let csum = crc32_calc(&hdr[4..]);
        LittleEndian::write_u32(&mut hdr[..4], csum);

        try!(file.seek(SeekFrom::Start(area.offset)));
        try!(file.write_all(hdr));

        Ok(())
    }
}

/// Scan a list of directories for block devices containing LVM PV labels.
pub fn scan_for_pvs(dirs: &[&Path]) -> Result<Vec<PathBuf>> { let mut ret_vec = Vec::new(); for dir in dirs { ret_vec.extend(try!(read_dir(dir)) .into_iter() .filter_map(|dir_e| if dir_e.is_ok() { Some(dir_e.unwrap().path()) } else {None} ) .filter(|path| { (stat::stat(path).unwrap().st_mode & 0x6000) == 0x6000 }) // S_IFBLK .filter(|path| { PvHeader::find_in_dev(&path).is_ok() }) .collect::<Vec<_>>()); } Ok(ret_vec) }
/*! Operations on raw finite state transducers. This sub-module exposes the guts of a finite state transducer. Many parts of it, such as construction and traversal, are mirrored in the `set` and `map` sub-modules. Other parts of it, such as direct access to nodes and transitions in the transducer, do not have any analog. # Overview of types `Fst` is a read only interface to pre-constructed finite state transducers. `Node` is a read only interface to a single node in a transducer. `Builder` is used to create new finite state transducers. (Once a transducer is created, it can never be modified.) `Stream` is a stream of all inputs and outputs in a transducer. `StreamBuilder` builds range queries. `OpBuilder` collects streams and executes set operations like `union` or `intersection` on them with the option of specifying a merge strategy for output values. Most of the rest of the types are streams from set operations. */ use std::borrow::Cow; use std::cmp; use std::fmt; use std::ops::Deref; #[cfg(feature = "mmap")] use std::path::Path; use std::sync::Arc; use byteorder::{ReadBytesExt, LittleEndian}; use automaton::{Automaton, AlwaysMatch}; use error::Result; use stream::{IntoStreamer, Streamer}; pub use self::build::Builder; pub use self::error::Error; pub use self::node::{Node, Transitions}; #[cfg(feature = "mmap")] pub use self::mmap::MmapReadOnly; use self::node::node_new; pub use self::ops::{ IndexedValue, OpBuilder, Intersection, Union, Difference, SymmetricDifference, }; mod build; mod common_inputs; mod counting_writer; mod error; #[cfg(feature = "mmap")] mod mmap; mod node; mod ops; mod pack; mod registry; mod registry_minimal; #[cfg(test)] mod tests; /// The API version of this crate. /// /// This version number is written to every finite state transducer created by /// this crate. When a finite state transducer is read, its version number is /// checked against this value. /// /// Currently, any version mismatch results in an error. 
/// Fixing this requires
/// regenerating the finite state transducer or switching to a version of this
/// crate that is compatible with the serialized transducer. This particular
/// behavior may be relaxed in future versions.
pub const VERSION: u64 = 2;

/// A sentinel value used to indicate an empty final state.
const EMPTY_ADDRESS: CompiledAddr = 0;

/// A sentinel value used to indicate an invalid state.
///
/// This is never the address of a node in a serialized transducer.
const NONE_ADDRESS: CompiledAddr = 1;

/// FstType is a convention used to indicate the type of the underlying
/// transducer.
///
/// This crate reserves the range 0-255 (inclusive) but currently leaves the
/// meaning of 0-255 unspecified.
pub type FstType = u64;

/// CompiledAddr is the type used to address nodes in a finite state
/// transducer.
///
/// It is most useful as a pointer to nodes. It can be used in the `Fst::node`
/// method to resolve the pointer.
pub type CompiledAddr = usize;

/// An acyclic deterministic finite state transducer.
///
/// # How does it work?
///
/// The short answer: it's just like a prefix trie, which compresses keys
/// based only on their prefixes, except that an automaton/transducer also
/// compresses suffixes.
///
/// The longer answer is that keys in an automaton are stored only in the
/// transitions from one state to another. A key can be acquired by tracing
/// a path from the root of the automaton to any match state. The inputs along
/// each transition are concatenated. Once a match state is reached, the
/// concatenation of inputs up until that point corresponds to a single key.
///
/// But why is it called a transducer instead of an automaton? A finite state
/// transducer is just like a finite state automaton, except that it has output
/// transitions in addition to input transitions.
Namely, the value associated /// with any particular key is determined by summing the outputs along every /// input transition that leads to the key's corresponding match state. /// /// This is best demonstrated with a couple images. First, let's ignore the /// "transducer" aspect and focus on a plain automaton. /// /// Consider that your keys are abbreviations of some of the months in the /// Gregorian calendar: /// /// ```ignore /// jan /// feb /// mar /// apr /// may /// jun /// jul /// ``` /// /// The corresponding automaton that stores all of these as keys looks like /// this: /// /// ![finite state automaton](http://burntsushi.net/stuff/months-set.png) /// /// Notice here how the prefix and suffix of `jan` and `jun` are shared. /// Similarly, the prefixes of `jun` and `jul` are shared and the prefixes /// of `mar` and `may` are shared. /// /// All of the keys from this automaton can be enumerated in lexicographic /// order by following every transition from each node in lexicographic /// order. Since it is acyclic, the procedure will terminate. /// /// A key can be found by tracing it through the transitions in the automaton. /// For example, the key `aug` is known not to be in the automaton by only /// visiting the root state (because there is no `a` transition). For another /// example, the key `jax` is known not to be in the set only after moving /// through the transitions for `j` and `a`. Namely, after those transitions /// are followed, there are no transitions for `x`. /// /// Notice here that looking up a key is proportional the length of the key /// itself. Namely, lookup time is not affected by the number of keys in the /// automaton! /// /// Additionally, notice that the automaton exploits the fact that many keys /// share common prefixes and suffixes. For example, `jun` and `jul` are /// represented with no more states than would be required to represent either /// one on its own. Instead, the only change is a single extra transition. 
This /// is a form of compression and is key to how the automatons produced by this /// crate are so small. /// /// Let's move on to finite state transducers. Consider the same set of keys /// as above, but let's assign their numeric month values: /// /// ```ignore /// jan,1 /// feb,2 /// mar,3 /// apr,4 /// may,5 /// jun,6 /// jul,7 /// ``` /// /// The corresponding transducer looks very similar to the automaton above, /// except outputs have been added to some of the transitions: /// /// ![finite state transducer](http://burntsushi.net/stuff/months-map.png) /// /// All of the operations with a transducer are the same as described above /// for automatons. Additionally, the same compression techniques are used: /// common prefixes and suffixes in keys are exploited. /// /// The key difference is that some transitions have been given an output. /// As one follows input transitions, one must sum the outputs as they /// are seen. (A transition with no output represents the additive identity, /// or `0` in this case.) For example, when looking up `feb`, the transition /// `f` has output `2`, the transition `e` has output `0`, and the transition /// `b` also has output `0`. The sum of these is `2`, which is exactly the /// value we associated with `feb`. /// /// For another more interesting example, consider `jul`. The `j` transition /// has output `1`, the `u` transition has output `5` and the `l` transition /// has output `1`. Summing these together gets us `7`, which is again the /// correct value associated with `jul`. Notice that if we instead looked up /// the `jun` key, then the `n` transition would be followed instead of the /// `l` transition, which has no output. Therefore, the `jun` key equals /// `1+5+0=6`. /// /// The trick to transducers is that there exists a unique path through the /// transducer for every key, and its outputs are stored appropriately along /// this path such that the correct value is returned when they are all summed /// together. 
/// This process also enables the data that makes up each value to be
/// shared across many values in the transducer in exactly the same way that
/// keys are shared. This is yet another form of compression!
///
/// # Bonus: a billion strings
///
/// The amount of compression one can get from automata can be absolutely
/// ridiculous. Consider the particular case of storing all billion strings
/// in the range `0000000001-1000000000`, e.g.,
///
/// ```ignore
/// 0000000001
/// 0000000002
/// ...
/// 0000000100
/// 0000000101
/// ...
/// 0999999999
/// 1000000000
/// ```
///
/// The corresponding automaton looks like this:
///
/// ![finite state automaton - one billion strings]
/// (http://burntsushi.net/stuff/one-billion.png)
///
/// Indeed, the on disk size of this automaton is a mere **251 bytes**.
///
/// Of course, this is a bit of a pathological best case, but it does serve
/// to show how good compression can be in the optimal case.
///
/// Also, check out the
/// [corresponding transducer](http://burntsushi.net/stuff/one-billion-map.svg)
/// that maps each string to its integer value. It's a bit bigger, but still
/// only takes up **896 bytes** of space on disk. This demonstrates that
/// output values are also compressible.
///
/// # Does this crate produce minimal transducers?
///
/// For any non-trivial sized set of keys, it is unlikely that this crate will
/// produce a minimal transducer. As far as this author knows, guaranteeing a
/// minimal transducer requires working memory proportional to the number of
/// states. This can be quite costly and is anathema to the main design goal of
/// this crate: provide the ability to work with gigantic sets of strings with
/// constant memory overhead.
///
/// Instead, construction of a finite state transducer uses a cache of
/// states. More frequently used states are cached and reused, which provides
/// reasonably good compression ratios. (No comprehensive benchmarks exist to
/// back up this claim.)
/// /// It is possible that this crate may expose a way to guarantee minimal /// construction of transducers at the expense of exorbitant memory /// requirements. /// /// # Bibliography /// /// I initially got the idea to use finite state tranducers to represent /// ordered sets/maps from /// [Michael /// McCandless'](http://blog.mikemccandless.com/2010/12/using-finite-state-transducers-in.html) /// work on incorporating transducers in Lucene. /// /// However, my work would also not have been possible without the hard work /// of many academics, especially /// [Jan Daciuk](http://galaxy.eti.pg.gda.pl/katedry/kiw/pracownicy/Jan.Daciuk/personal/). /// /// * [Incremental construction of minimal acyclic finite-state automata](http://www.mitpressjournals.org/doi/pdfplus/10.1162/089120100561601) /// (Section 3 provides a decent overview of the algorithm used to construct /// transducers in this crate, assuming all outputs are `0`.) /// * [Direct Construction of Minimal Acyclic Subsequential Transducers](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.24.3698&rep=rep1&type=pdf) /// (The whole thing. The proof is dense but illuminating. The algorithm at /// the end is the money shot, namely, it incorporates output values.) 
/// * [Experiments with Automata Compression](http://www.researchgate.net/profile/Jii_Dvorsky/publication/221568039_Word_Random_Access_Compression/links/0c96052c095630d5b3000000.pdf#page=116), [Smaller Representation of Finite State Automata](http://www.cs.put.poznan.pl/dweiss/site/publications/download/fsacomp.pdf)
///   (various compression techniques for representing states/transitions)
/// * [Jan Daciuk's dissertation](http://www.pg.gda.pl/~jandac/thesis.ps.gz)
///   (excellent for in depth overview)
/// * [Comparison of Construction Algorithms for Minimal, Acyclic, Deterministic, Finite-State Automata from Sets of Strings](http://www.cs.mun.ca/~harold/Courses/Old/CS4750/Diary/q3p2qx4lv71m5vew.pdf)
///   (excellent for surface level overview)
pub struct Fst {
    version: u64,           // serialization format version read from the data
    data: FstData,          // the raw bytes backing the transducer
    root_addr: CompiledAddr, // address of the root node within `data`
    ty: FstType,            // user-supplied transducer type tag
    len: usize,             // number of keys in the fst
}

impl Fst {
    /// Opens a transducer stored at the given file path via a memory map.
    ///
    /// The fst must have been written with a compatible finite state
    /// transducer builder (`Builder` qualifies). If the format is invalid or
    /// if there is a mismatch between the API version of this library and the
    /// fst, then an error is returned.
    ///
    /// This is unsafe because Rust programs cannot guarantee that memory
    /// backed by a memory mapped file won't be mutably aliased. It is up to
    /// the caller to enforce that the memory map is not modified while it is
    /// opened.
    #[cfg(feature = "mmap")]
    pub unsafe fn from_path<P: AsRef<Path>>(path: P) -> Result<Self> {
        Fst::from_mmap(MmapReadOnly::open_path(path)?)
    }

    /// Opens a transducer from a `MmapReadOnly`.
    ///
    /// This is useful if a transducer is serialized to only a part of a file.
    /// A `MmapReadOnly` lets one control which region of the file is used for
    /// the transducer.
    #[cfg(feature = "mmap")]
    #[inline]
    pub fn from_mmap(mmap: MmapReadOnly) -> Result<Self> {
        Fst::new(FstData::Mmap(mmap))
    }

    /// Creates a transducer from its representation as a raw byte sequence.
    ///
    /// Note that this operation is very cheap (no allocations and no copies).
    ///
    /// The fst must have been written with a compatible finite state
    /// transducer builder (`Builder` qualifies). If the format is invalid or
    /// if there is a mismatch between the API version of this library and the
    /// fst, then an error is returned.
    #[inline]
    pub fn from_bytes(bytes: Vec<u8>) -> Result<Self> {
        Fst::new(FstData::Cow(Cow::Owned(bytes)))
    }

    /// Creates a transducer from its representation as a raw byte sequence.
    ///
    /// This accepts a static byte slice, which may be useful if the Fst
    /// is embedded into source code.
    #[inline]
    pub fn from_static_slice(bytes: &'static [u8]) -> Result<Self> {
        Fst::new(FstData::Cow(Cow::Borrowed(bytes)))
    }

    /// Creates a transducer from a shared vector at the given offset and
    /// length.
    ///
    /// This permits creating multiple transducers from a single region of
    /// owned memory.
    #[inline]
    pub fn from_shared_bytes(
        bytes: Arc<Vec<u8>>,
        offset: usize,
        len: usize,
    ) -> Result<Self> {
        Fst::new(FstData::Shared { vec: bytes, offset: offset, len: len })
    }

    // Validates the serialized container and extracts the fixed fields:
    // version (bytes 0..8), type (bytes 8..16), number of keys (last 16..8
    // bytes) and root node address (last 8 bytes).
    fn new(data: FstData) -> Result<Self> {
        if data.len() < 32 {
            return Err(Error::Format.into());
        }
        // The read_u64 unwraps below are OK because they can never fail.
        // They can only fail when there is an IO error or if there is an
        // unexpected EOF. However, we are reading from a byte slice (no
        // IO errors possible) and we've confirmed the byte slice is at least
        // N bytes (no unexpected EOF).
        let version = (&*data).read_u64::<LittleEndian>().unwrap();
        if version == 0 || version > VERSION {
            return Err(Error::Version {
                expected: VERSION,
                got: version,
            }.into());
        }
        let ty = (&data[8..]).read_u64::<LittleEndian>().unwrap();
        let root_addr = {
            let mut last = &data[data.len() - 8..];
            u64_to_usize(last.read_u64::<LittleEndian>().unwrap())
        };
        let len = {
            let mut last2 = &data[data.len() - 16..];
            u64_to_usize(last2.read_u64::<LittleEndian>().unwrap())
        };
        // The root node is always the last node written, so its address should
        // be near the end. After the root node is written, we still have to
        // write the root *address* and the number of keys in the FST.
        // That's 16 bytes. The extra byte comes from the fact that the root
        // address points to the last byte in the root node, rather than the
        // byte immediately following the root node.
        //
        // If this check passes, it is still possible that the FST is invalid
        // but probably unlikely. If this check reports a false positive, then
        // the program will probably panic. In the worst case, the FST will
        // operate but be subtly wrong. (This would require the bytes to be in
        // a format expected by an FST, which is incredibly unlikely.)
        //
        // The special check for EMPTY_ADDRESS is needed since an empty FST
        // has a root node that is empty and final, which means it has the
        // special address `0`. In that case, the FST is the smallest it can
        // be: the version, type, root address and number of nodes. That's
        // 32 bytes (8 byte u64 each).
        //
        // This is essentially our own little checksum.
        if (root_addr == EMPTY_ADDRESS && data.len() != 32)
            && root_addr + 17 != data.len() {
            return Err(Error::Format.into());
        }
        Ok(Fst {
            version: version,
            data: data,
            root_addr: root_addr,
            ty: ty,
            len: len,
        })
    }

    /// Retrieves the value associated with a key.
    ///
    /// If the key does not exist, then `None` is returned.
    #[inline(never)]
    pub fn get<B: AsRef<[u8]>>(&self, key: B) -> Option<Output> {
        // Walk the key byte-by-byte, summing transition outputs as we go.
        let mut node = self.root();
        let mut out = Output::zero();
        for &b in key.as_ref() {
            node = match node.find_input(b) {
                None => return None,
                Some(i) => {
                    let t = node.transition(i);
                    out = out.cat(t.out);
                    self.node(t.addr)
                }
            }
        }
        if !node.is_final() {
            None
        } else {
            // Include the final state's output in the total.
            Some(out.cat(node.final_output()))
        }
    }

    /// Returns true if and only if the given key is in this FST.
    pub fn contains_key<B: AsRef<[u8]>>(&self, key: B) -> bool {
        let mut node = self.root();
        for &b in key.as_ref() {
            node = match node.find_input(b) {
                None => return false,
                Some(i) => self.node(node.transition_addr(i)),
            }
        }
        node.is_final()
    }

    /// Return a lexicographically ordered stream of all key-value pairs in
    /// this fst.
    #[inline]
    pub fn stream(&self) -> Stream {
        StreamBuilder::new(self, AlwaysMatch).into_stream()
    }

    /// Return a builder for range queries.
    ///
    /// A range query returns a subset of key-value pairs in this fst in a
    /// range given in lexicographic order.
    #[inline]
    pub fn range(&self) -> StreamBuilder {
        StreamBuilder::new(self, AlwaysMatch)
    }

    /// Executes an automaton on the keys of this map.
    pub fn search<A: Automaton>(&self, aut: A) -> StreamBuilder<A> {
        StreamBuilder::new(self, aut)
    }

    /// Returns the number of keys in this fst.
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

    /// Returns true if and only if this fst has no keys.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Returns the number of bytes used by this fst.
    #[inline]
    pub fn size(&self) -> usize {
        self.data.len()
    }

    /// Creates a new fst operation with this fst added to it.
    ///
    /// The `OpBuilder` type can be used to add additional fst streams
    /// and perform set operations like union, intersection, difference and
    /// symmetric difference on the keys of the fst. These set operations also
    /// allow one to specify how conflicting values are merged in the stream.
    #[inline]
    pub fn op(&self) -> OpBuilder {
        OpBuilder::new().add(self)
    }

    /// Returns true if and only if the `self` fst is disjoint with the fst
    /// `stream`.
    ///
    /// `stream` must be a lexicographically ordered sequence of byte strings
    /// with associated values.
    pub fn is_disjoint<'f, I, S>(&self, stream: I) -> bool
            where I: for<'a> IntoStreamer<'a, Into=S, Item=(&'a [u8], Output)>,
                  S: 'f + for<'a> Streamer<'a, Item=(&'a [u8], Output)> {
        // Disjoint iff the intersection is empty.
        self.op().add(stream).intersection().next().is_none()
    }

    /// Returns true if and only if the `self` fst is a subset of the fst
    /// `stream`.
    ///
    /// `stream` must be a lexicographically ordered sequence of byte strings
    /// with associated values.
    pub fn is_subset<'f, I, S>(&self, stream: I) -> bool
            where I: for<'a> IntoStreamer<'a, Into=S, Item=(&'a [u8], Output)>,
                  S: 'f + for<'a> Streamer<'a, Item=(&'a [u8], Output)> {
        // Subset iff the intersection has as many keys as self.
        let mut op = self.op().add(stream).intersection();
        let mut count = 0;
        while let Some(_) = op.next() {
            count += 1;
        }
        count == self.len()
    }

    /// Returns true if and only if the `self` fst is a superset of the fst
    /// `stream`.
    ///
    /// `stream` must be a lexicographically ordered sequence of byte strings
    /// with associated values.
    pub fn is_superset<'f, I, S>(&self, stream: I) -> bool
            where I: for<'a> IntoStreamer<'a, Into=S, Item=(&'a [u8], Output)>,
                  S: 'f + for<'a> Streamer<'a, Item=(&'a [u8], Output)> {
        // Superset iff the union has no keys beyond those in self.
        let mut op = self.op().add(stream).union();
        let mut count = 0;
        while let Some(_) = op.next() {
            count += 1;
        }
        count == self.len()
    }

    /// Returns the underlying type of this fst.
    ///
    /// FstType is a convention used to indicate the type of the underlying
    /// transducer.
    ///
    /// This crate reserves the range 0-255 (inclusive) but currently leaves
    /// the meaning of 0-255 unspecified.
    #[inline]
    pub fn fst_type(&self) -> FstType {
        self.ty
    }

    /// Returns the root node of this fst.
    #[inline(always)]
    pub fn root(&self) -> Node {
        self.node(self.root_addr)
    }

    /// Returns the node at the given address.
    ///
    /// Node addresses can be obtained by reading transitions on `Node` values.
    #[inline]
    pub fn node(&self, addr: CompiledAddr) -> Node {
        node_new(self.version, addr, &self.data)
    }

    /// Returns a copy of the binary contents of this FST.
    #[inline]
    pub fn to_vec(&self) -> Vec<u8> {
        self.data.to_vec()
    }

    // Returns the output of the empty key, if present (i.e., if the root
    // node itself is final).
    fn empty_final_output(&self) -> Option<Output> {
        let root = self.root();
        if root.is_final() {
            Some(root.final_output())
        } else {
            None
        }
    }
}

impl<'a, 'f> IntoStreamer<'a> for &'f Fst {
    type Item = (&'a [u8], Output);
    type Into = Stream<'f>;

    #[inline]
    fn into_stream(self) -> Self::Into {
        StreamBuilder::new(self, AlwaysMatch).into_stream()
    }
}

/// A builder for constructing range queries on streams.
///
/// Once all bounds are set, one should call `into_stream` to get a
/// `Stream`.
///
/// Bounds are not additive. That is, if `ge` is called twice on the same
/// builder, then the second setting wins.
///
/// The `A` type parameter corresponds to an optional automaton to filter
/// the stream. By default, no filtering is done.
///
/// The `'f` lifetime parameter refers to the lifetime of the underlying fst.
pub struct StreamBuilder<'f, A=AlwaysMatch> {
    fst: &'f Fst,
    aut: A,
    min: Bound,
    max: Bound,
}

impl<'f, A: Automaton> StreamBuilder<'f, A> {
    // Starts with both endpoints unbounded.
    fn new(fst: &'f Fst, aut: A) -> Self {
        StreamBuilder {
            fst: fst,
            aut: aut,
            min: Bound::Unbounded,
            max: Bound::Unbounded,
        }
    }

    /// Specify a greater-than-or-equal-to bound.
    pub fn ge<T: AsRef<[u8]>>(mut self, bound: T) -> Self {
        self.min = Bound::Included(bound.as_ref().to_owned());
        self
    }

    /// Specify a greater-than bound.
    pub fn gt<T: AsRef<[u8]>>(mut self, bound: T) -> Self {
        self.min = Bound::Excluded(bound.as_ref().to_owned());
        self
    }

    /// Specify a less-than-or-equal-to bound.
    pub fn le<T: AsRef<[u8]>>(mut self, bound: T) -> Self {
        self.max = Bound::Included(bound.as_ref().to_owned());
        self
    }

    /// Specify a less-than bound.
    pub fn lt<T: AsRef<[u8]>>(mut self, bound: T) -> Self {
        self.max = Bound::Excluded(bound.as_ref().to_owned());
        self
    }
}

impl<'a, 'f, A: Automaton> IntoStreamer<'a> for StreamBuilder<'f, A> {
    type Item = (&'a [u8], Output);
    type Into = Stream<'f, A>;

    fn into_stream(self) -> Stream<'f, A> {
        Stream::new(self.fst, self.aut, self.min, self.max)
    }
}

// One endpoint of a range query over keys (byte strings).
#[derive(Debug)]
enum Bound {
    Included(Vec<u8>),
    Excluded(Vec<u8>),
    Unbounded,
}

impl Bound {
    // True if `inp` lies strictly past this bound when used as an upper
    // endpoint.
    fn exceeded_by(&self, inp: &[u8]) -> bool {
        match *self {
            Bound::Included(ref v) => inp > v,
            Bound::Excluded(ref v) => inp >= v,
            Bound::Unbounded => false,
        }
    }

    // True if the bound's byte string is empty (Unbounded counts as empty).
    fn is_empty(&self) -> bool {
        match *self {
            Bound::Included(ref v) => v.is_empty(),
            Bound::Excluded(ref v) => v.is_empty(),
            Bound::Unbounded => true,
        }
    }

    // True unless the bound explicitly excludes its endpoint.
    fn is_inclusive(&self) -> bool {
        match *self {
            Bound::Excluded(_) => false,
            _ => true,
        }
    }
}

/// A lexicographically ordered stream of key-value pairs from an fst.
///
/// The `A` type parameter corresponds to an optional automaton to filter
/// the stream. By default, no filtering is done.
///
/// The `'f` lifetime parameter refers to the lifetime of the underlying fst.
pub struct Stream<'f, A=AlwaysMatch> where A: Automaton {
    fst: &'f Fst,
    aut: A,
    inp: Vec<u8>,                              // bytes of the current key
    empty_output: Option<Output>,              // pending output for the empty key
    stack: Vec<StreamState<'f, A::State>>,     // depth-first traversal stack
    end_at: Bound,                             // upper bound, checked lazily
}

// One frame of the depth-first traversal: a node, the index of the next
// transition to follow, the output accumulated so far and the automaton
// state reached at this node.
#[derive(Clone, Debug)]
struct StreamState<'f, S> {
    node: Node<'f>,
    trans: usize,
    out: Output,
    aut_state: S,
}

impl<'f, A: Automaton> Stream<'f, A> {
    // The min bound is applied eagerly via seek_min; the max bound is kept
    // in `end_at` and enforced as the stream is read.
    fn new(fst: &'f Fst, aut: A, min: Bound, max: Bound) -> Self {
        let mut rdr = Stream {
            fst: fst,
            aut: aut,
            inp: Vec::with_capacity(16),
            empty_output: None,
            stack: vec![],
            end_at: max,
        };
        rdr.seek_min(min);
        rdr
    }

    /// Seeks the underlying stream such that the next key to be read is the
    /// smallest key in the underlying fst that satisfies the given minimum
    /// bound.
    ///
    /// This theoretically should be straight-forward, but we need to make
    /// sure our stack is correct, which includes accounting for automaton
    /// states.
fn seek_min(&mut self, min: Bound) {
    if min.is_empty() {
        // An empty/unbounded minimum means starting from the very
        // beginning, which may include yielding the empty key first.
        if min.is_inclusive() {
            self.empty_output = self.fst.empty_final_output();
        }
        self.stack = vec![StreamState {
            node: self.fst.root(),
            trans: 0,
            out: Output::zero(),
            aut_state: self.aut.start(),
        }];
        return;
    }
    let (key, inclusive) = match min {
        Bound::Excluded(ref min) => {
            (min, false)
        }
        Bound::Included(ref min) => {
            (min, true)
        }
        Bound::Unbounded => unreachable!(),
    };
    // At this point, we need to find the starting location of `min` in
    // the FST. However, as we search, we need to maintain a stack of
    // reader states so that the reader can pick up where we left off.
    // N.B. We do not necessarily need to stop in a final state, unlike
    // the one-off `find` method. For example, the given bound might
    // not actually exist in the FST.
    let mut node = self.fst.root();
    let mut out = Output::zero();
    let mut aut_state = self.aut.start();
    for &b in key {
        match node.find_input(b) {
            Some(i) => {
                let t = node.transition(i);
                let prev_state = aut_state;
                aut_state = self.aut.accept(&prev_state, b);
                self.inp.push(b);
                // The saved frame records `i+1` so that resuming the
                // stream continues with the transitions *after* the one
                // we are descending through here.
                self.stack.push(StreamState {
                    node: node,
                    trans: i+1,
                    out: out,
                    aut_state: prev_state,
                });
                out = out.cat(t.out);
                node = self.fst.node(t.addr);
            }
            None => {
                // This is a little tricky. We're in this case if the
                // given bound is not a prefix of any key in the FST.
                // Since this is a minimum bound, we need to find the
                // first transition in this node that follows the current
                // input byte (i.e. the first `t.inp > b`).
                self.stack.push(StreamState {
                    node: node,
                    trans: node.transitions()
                        .position(|t| t.inp > b)
                        .unwrap_or(node.len()),
                    out: out,
                    aut_state: aut_state,
                });
                return;
            }
        }
    }
    if !self.stack.is_empty() {
        let last = self.stack.len() - 1;
        if inclusive {
            // Step back one transition so that the bound key itself is
            // the first key yielded (if it exists in the FST).
            self.stack[last].trans -= 1;
            self.inp.pop();
        } else {
            // Exclusive bound: descend into the bound key's node so that
            // iteration resumes with the key immediately after it.
            let node = self.stack[last].node;
            let trans = self.stack[last].trans;
            self.stack.push(StreamState {
                node: self.fst.node(node.transition(trans - 1).addr),
                trans: 0,
                out: out,
                aut_state: aut_state,
            });
        }
    }
}

/// Convert this stream into a vector of byte strings and outputs.
///
/// Note that this creates a new allocation for every key in the stream.
pub fn into_byte_vec(mut self) -> Vec<(Vec<u8>, u64)> {
    let mut vs = vec![];
    while let Some((k, v)) = self.next() {
        vs.push((k.to_vec(), v.value()));
    }
    vs
}

/// Convert this stream into a vector of Unicode strings and outputs.
///
/// If any key is not valid UTF-8, then iteration on the stream is stopped
/// and a UTF-8 decoding error is returned.
///
/// Note that this creates a new allocation for every key in the stream.
pub fn into_str_vec(mut self) -> Result<Vec<(String, u64)>> {
    let mut vs = vec![];
    while let Some((k, v)) = self.next() {
        let k = String::from_utf8(k.to_vec()).map_err(Error::from)?;
        vs.push((k, v.value()));
    }
    Ok(vs)
}

/// Convert this stream into a vector of byte strings.
///
/// Note that this creates a new allocation for every key in the stream.
pub fn into_byte_keys(mut self) -> Vec<Vec<u8>> {
    let mut vs = vec![];
    while let Some((k, _)) = self.next() {
        vs.push(k.to_vec());
    }
    vs
}

/// Convert this stream into a vector of Unicode strings.
///
/// If any key is not valid UTF-8, then iteration on the stream is stopped
/// and a UTF-8 decoding error is returned.
///
/// Note that this creates a new allocation for every key in the stream.
pub fn into_str_keys(mut self) -> Result<Vec<String>> {
    let mut vs = vec![];
    while let Some((k, _)) = self.next() {
        let k = String::from_utf8(k.to_vec()).map_err(Error::from)?;
        vs.push(k);
    }
    Ok(vs)
}

/// Convert this stream into a vector of outputs.
pub fn into_values(mut self) -> Vec<u64> {
    let mut vs = vec![];
    while let Some((_, v)) = self.next() {
        vs.push(v.value());
    }
    vs
}
}

impl<'f, 'a, A: Automaton> Streamer<'a> for Stream<'f, A> {
    type Item = (&'a [u8], Output);

    fn next(&'a mut self) -> Option<Self::Item> {
        if let Some(out) = self.empty_output.take() {
            // The empty key sorts before all others, so it is yielded
            // before any stack traversal begins.
            if self.end_at.exceeded_by(&[]) {
                self.stack.clear();
                return None;
            }
            if self.aut.is_match(&self.aut.start()) {
                return Some((&[], out));
            }
        }
        while let Some(state) = self.stack.pop() {
            if state.trans >= state.node.len()
                || !self.aut.can_match(&state.aut_state) {
                // This node is exhausted (or the automaton can never
                // match below it): drop its input byte and backtrack.
                // The root node contributed no input byte, hence the
                // address check.
                if state.node.addr() != self.fst.root_addr {
                    self.inp.pop().unwrap();
                }
                continue;
            }
            let trans = state.node.transition(state.trans);
            let out = state.out.cat(trans.out);
            let next_state = self.aut.accept(&state.aut_state, trans.inp);
            let is_match = self.aut.is_match(&next_state);
            let next_node = self.fst.node(trans.addr);
            self.inp.push(trans.inp);
            // Re-push the current frame (advanced by one transition),
            // then push the child frame so it is visited next.
            self.stack.push(StreamState {
                trans: state.trans + 1, .. state
            });
            self.stack.push(StreamState {
                node: next_node,
                trans: 0,
                out: out,
                aut_state: next_state,
            });
            if self.end_at.exceeded_by(&self.inp) {
                // We are done, forever.
                self.stack.clear();
                return None;
            }
            if next_node.is_final() && is_match {
                return Some((&self.inp, out.cat(next_node.final_output())));
            }
        }
        None
    }
}

/// An output is a value that is associated with a key in a finite state
/// transducer.
///
/// Note that outputs must satisfy an algebra. Namely, it must have an additive
/// identity and the following binary operations defined: `prefix`,
/// `concatenation` and `subtraction`. `prefix` and `concatenation` are
/// commutative while `subtraction` is not.
`subtraction` is only defined on /// pairs of operands where the first operand is greater than or equal to the /// second operand. /// /// Currently, output values must be `u64`. However, in theory, an output value /// can be anything that satisfies the above algebra. Future versions of this /// crate may make outputs generic on this algebra. #[derive(Copy, Clone, Debug, Hash, Eq, Ord, PartialEq, PartialOrd)] pub struct Output(u64); impl Output { /// Create a new output from a `u64`. #[inline] pub fn new(v: u64) -> Output { Output(v) } /// Create a zero output. #[inline] pub fn zero() -> Output { Output(0) } /// Retrieve the value inside this output. #[inline] pub fn value(self) -> u64 { self.0 } /// Returns true if this is a zero output. #[inline] pub fn is_zero(self) -> bool { self.0 == 0 } /// Returns the prefix of this output and `o`. #[inline] pub fn prefix(self, o: Output) -> Output { Output(cmp::min(self.0, o.0)) } /// Returns the concatenation of this output and `o`. #[inline] pub fn cat(self, o: Output) -> Output { Output(self.0 + o.0) } /// Returns the subtraction of `o` from this output. /// /// This function panics if `self > o`. #[inline] pub fn sub(self, o: Output) -> Output { Output(self.0.checked_sub(o.0) .expect("BUG: underflow subtraction not allowed")) } } enum FstData { Cow(Cow<'static, [u8]>), Shared { vec: Arc<Vec<u8>>, offset: usize, len: usize, }, #[cfg(feature = "mmap")] Mmap(MmapReadOnly), } impl Deref for FstData { type Target = [u8]; fn deref(&self) -> &[u8] { match *self { FstData::Cow(ref v) => &**v, FstData::Shared { ref vec, offset, len } => { &vec[offset..offset + len] } #[cfg(feature = "mmap")] FstData::Mmap(ref v) => v.as_slice(), } } } /// A transition from one note to another. #[derive(Copy, Clone, Hash, Eq, PartialEq)] pub struct Transition { /// The byte input associated with this transition. pub inp: u8, /// The output associated with this transition. 
pub out: Output, /// The address of the node that this transition points to. pub addr: CompiledAddr, } impl Default for Transition { #[inline] fn default() -> Self { Transition { inp: 0, out: Output::zero(), addr: NONE_ADDRESS, } } } impl fmt::Debug for Transition { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if self.out.is_zero() { write!(f, "{} -> {}", self.inp as char, self.addr) } else { write!(f, "({}, {}) -> {}", self.inp as char, self.out.value(), self.addr) } } } #[inline] #[cfg(target_pointer_width = "64")] fn u64_to_usize(n: u64) -> usize { n as usize } #[inline] #[cfg(not(target_pointer_width = "64"))] fn u64_to_usize(n: u64) -> usize { if n > ::std::usize::MAX as u64 { panic!("\ Cannot convert node address {} to a pointer sized variable. If this FST is very large and was generated on a system with a larger pointer size than this system, then it is not possible to read this FST on this system.", n); } n as usize } doc: make the image of billion strings appear Just fixing mardown. (The image didn't show on docs.rs, because of the newline.) /*! Operations on raw finite state transducers. This sub-module exposes the guts of a finite state transducer. Many parts of it, such as construction and traversal, are mirrored in the `set` and `map` sub-modules. Other parts of it, such as direct access to nodes and transitions in the transducer, do not have any analog. # Overview of types `Fst` is a read only interface to pre-constructed finite state transducers. `Node` is a read only interface to a single node in a transducer. `Builder` is used to create new finite state transducers. (Once a transducer is created, it can never be modified.) `Stream` is a stream of all inputs and outputs in a transducer. `StreamBuilder` builds range queries. `OpBuilder` collects streams and executes set operations like `union` or `intersection` on them with the option of specifying a merge strategy for output values. 
Most of the rest of the types are streams from set operations. */ use std::borrow::Cow; use std::cmp; use std::fmt; use std::ops::Deref; #[cfg(feature = "mmap")] use std::path::Path; use std::sync::Arc; use byteorder::{ReadBytesExt, LittleEndian}; use automaton::{Automaton, AlwaysMatch}; use error::Result; use stream::{IntoStreamer, Streamer}; pub use self::build::Builder; pub use self::error::Error; pub use self::node::{Node, Transitions}; #[cfg(feature = "mmap")] pub use self::mmap::MmapReadOnly; use self::node::node_new; pub use self::ops::{ IndexedValue, OpBuilder, Intersection, Union, Difference, SymmetricDifference, }; mod build; mod common_inputs; mod counting_writer; mod error; #[cfg(feature = "mmap")] mod mmap; mod node; mod ops; mod pack; mod registry; mod registry_minimal; #[cfg(test)] mod tests; /// The API version of this crate. /// /// This version number is written to every finite state transducer created by /// this crate. When a finite state transducer is read, its version number is /// checked against this value. /// /// Currently, any version mismatch results in an error. Fixing this requires /// regenerating the finite state transducer or switching to a version of this /// crate that is compatible with the serialized transducer. This particular /// behavior may be relaxed in future versions. pub const VERSION: u64 = 2; /// A sentinel value used to indicate an empty final state. const EMPTY_ADDRESS: CompiledAddr = 0; /// A sentinel value used to indicate an invalid state. /// /// This is never the address of a node in a serialized transducer. const NONE_ADDRESS: CompiledAddr = 1; /// FstType is a convention used to indicate the type of the underlying /// transducer. /// /// This crate reserves the range 0-255 (inclusive) but currently leaves the /// meaning of 0-255 unspecified. pub type FstType = u64; /// CompiledAddr is the type used to address nodes in a finite state /// transducer. /// /// It is most useful as a pointer to nodes. 
/// It can be used in the `Fst::node`
/// method to resolve the pointer.
pub type CompiledAddr = usize;

/// An acyclic deterministic finite state transducer.
///
/// # How does it work?
///
/// The short answer: it's just like a prefix trie, which compresses keys
/// based only on their prefixes, except that a automaton/transducer also
/// compresses suffixes.
///
/// The longer answer is that keys in an automaton are stored only in the
/// transitions from one state to another. A key can be acquired by tracing
/// a path from the root of the automaton to any match state. The inputs along
/// each transition are concatenated. Once a match state is reached, the
/// concatenation of inputs up until that point corresponds to a single key.
///
/// But why is it called a transducer instead of an automaton? A finite state
/// transducer is just like a finite state automaton, except that it has output
/// transitions in addition to input transitions. Namely, the value associated
/// with any particular key is determined by summing the outputs along every
/// input transition that leads to the key's corresponding match state.
///
/// This is best demonstrated with a couple images. First, let's ignore the
/// "transducer" aspect and focus on a plain automaton.
///
/// Consider that your keys are abbreviations of some of the months in the
/// Gregorian calendar:
///
/// ```ignore
/// jan
/// feb
/// mar
/// apr
/// may
/// jun
/// jul
/// ```
///
/// The corresponding automaton that stores all of these as keys looks like
/// this:
///
/// ![finite state automaton](http://burntsushi.net/stuff/months-set.png)
///
/// Notice here how the prefix and suffix of `jan` and `jun` are shared.
/// Similarly, the prefixes of `jun` and `jul` are shared and the prefixes
/// of `mar` and `may` are shared.
///
/// All of the keys from this automaton can be enumerated in lexicographic
/// order by following every transition from each node in lexicographic
/// order. Since it is acyclic, the procedure will terminate.
///
/// A key can be found by tracing it through the transitions in the automaton.
/// For example, the key `aug` is known not to be in the automaton by only
/// visiting the root state (because there is no `a` transition). For another
/// example, the key `jax` is known not to be in the set only after moving
/// through the transitions for `j` and `a`. Namely, after those transitions
/// are followed, there are no transitions for `x`.
///
/// Notice here that looking up a key is proportional the length of the key
/// itself. Namely, lookup time is not affected by the number of keys in the
/// automaton!
///
/// Additionally, notice that the automaton exploits the fact that many keys
/// share common prefixes and suffixes. For example, `jun` and `jul` are
/// represented with no more states than would be required to represent either
/// one on its own. Instead, the only change is a single extra transition. This
/// is a form of compression and is key to how the automatons produced by this
/// crate are so small.
///
/// Let's move on to finite state transducers. Consider the same set of keys
/// as above, but let's assign their numeric month values:
///
/// ```ignore
/// jan,1
/// feb,2
/// mar,3
/// apr,4
/// may,5
/// jun,6
/// jul,7
/// ```
///
/// The corresponding transducer looks very similar to the automaton above,
/// except outputs have been added to some of the transitions:
///
/// ![finite state transducer](http://burntsushi.net/stuff/months-map.png)
///
/// All of the operations with a transducer are the same as described above
/// for automatons. Additionally, the same compression techniques are used:
/// common prefixes and suffixes in keys are exploited.
///
/// The key difference is that some transitions have been given an output.
/// As one follows input transitions, one must sum the outputs as they
/// are seen. (A transition with no output represents the additive identity,
/// or `0` in this case.) For example, when looking up `feb`, the transition
/// `f` has output `2`, the transition `e` has output `0`, and the transition
/// `b` also has output `0`. The sum of these is `2`, which is exactly the
/// value we associated with `feb`.
///
/// For another more interesting example, consider `jul`. The `j` transition
/// has output `1`, the `u` transition has output `5` and the `l` transition
/// has output `1`. Summing these together gets us `7`, which is again the
/// correct value associated with `jul`. Notice that if we instead looked up
/// the `jun` key, then the `n` transition would be followed instead of the
/// `l` transition, which has no output. Therefore, the `jun` key equals
/// `1+5+0=6`.
///
/// The trick to transducers is that there exists a unique path through the
/// transducer for every key, and its outputs are stored appropriately along
/// this path such that the correct value is returned when they are all summed
/// together. This process also enables the data that makes up each value to be
/// shared across many values in the transducer in exactly the same way that
/// keys are shared. This is yet another form of compression!
///
/// # Bonus: a billion strings
///
/// The amount of compression one can get from automata can be absolutely
/// ridiculous. Consider the particular case of storing all billion strings
/// in the range `0000000001-1000000000`, e.g.,
///
/// ```ignore
/// 0000000001
/// 0000000002
/// ...
/// 0000000100
/// 0000000101
/// ...
/// 0999999999
/// 1000000000
/// ```
///
/// The corresponding automaton looks like this:
///
/// ![finite state automaton - one billion strings](http://burntsushi.net/stuff/one-billion.png)
///
/// Indeed, the on disk size of this automaton is a mere **251 bytes**.
///
/// Of course, this is a bit of a pathological best case, but it does serve
/// to show how good compression can be in the optimal case.
///
/// Also, check out the
/// [corresponding transducer](http://burntsushi.net/stuff/one-billion-map.svg)
/// that maps each string to its integer value. It's a bit bigger, but still
/// only takes up **896 bytes** of space on disk. This demonstrates that
/// output values are also compressible.
///
/// # Does this crate produce minimal transducers?
///
/// For any non-trivial sized set of keys, it is unlikely that this crate will
/// produce a minimal transducer. As far as this author knows, guaranteeing a
/// minimal transducer requires working memory proportional to the number of
/// states. This can be quite costly and is anathema to the main design goal of
/// this crate: provide the ability to work with gigantic sets of strings with
/// constant memory overhead.
///
/// Instead, construction of a finite state transducer uses a cache of
/// states. More frequently used states are cached and reused, which provides
/// reasonably good compression ratios. (No comprehensive benchmarks exist to
/// back up this claim.)
///
/// It is possible that this crate may expose a way to guarantee minimal
/// construction of transducers at the expense of exorbitant memory
/// requirements.
///
/// # Bibliography
///
/// I initially got the idea to use finite state tranducers to represent
/// ordered sets/maps from
/// [Michael
/// McCandless'](http://blog.mikemccandless.com/2010/12/using-finite-state-transducers-in.html)
/// work on incorporating transducers in Lucene.
///
/// However, my work would also not have been possible without the hard work
/// of many academics, especially
/// [Jan Daciuk](http://galaxy.eti.pg.gda.pl/katedry/kiw/pracownicy/Jan.Daciuk/personal/).
///
/// * [Incremental construction of minimal acyclic finite-state automata](http://www.mitpressjournals.org/doi/pdfplus/10.1162/089120100561601)
///   (Section 3 provides a decent overview of the algorithm used to construct
///   transducers in this crate, assuming all outputs are `0`.)
/// * [Direct Construction of Minimal Acyclic Subsequential Transducers](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.24.3698&rep=rep1&type=pdf)
///   (The whole thing. The proof is dense but illuminating. The algorithm at
///   the end is the money shot, namely, it incorporates output values.)
/// * [Experiments with Automata Compression](http://www.researchgate.net/profile/Jii_Dvorsky/publication/221568039_Word_Random_Access_Compression/links/0c96052c095630d5b3000000.pdf#page=116), [Smaller Representation of Finite State Automata](http://www.cs.put.poznan.pl/dweiss/site/publications/download/fsacomp.pdf)
///   (various compression techniques for representing states/transitions)
/// * [Jan Daciuk's dissertation](http://www.pg.gda.pl/~jandac/thesis.ps.gz)
///   (excellent for in depth overview)
/// * [Comparison of Construction Algorithms for Minimal, Acyclic, Deterministic, Finite-State Automata from Sets of Strings](http://www.cs.mun.ca/~harold/Courses/Old/CS4750/Diary/q3p2qx4lv71m5vew.pdf)
///   (excellent for surface level overview)
pub struct Fst {
    version: u64,
    data: FstData,
    root_addr: CompiledAddr,
    ty: FstType,
    len: usize,
}

impl Fst {
    /// Opens a transducer stored at the given file path via a memory map.
    ///
    /// The fst must have been written with a compatible finite state
    /// transducer builder (`Builder` qualifies). If the format is invalid or
    /// if there is a mismatch between the API version of this library and the
    /// fst, then an error is returned.
    ///
    /// This is unsafe because Rust programs cannot guarantee that memory
    /// backed by a memory mapped file won't be mutably aliased. It is up to
    /// the caller to enforce that the memory map is not modified while it is
    /// opened.
    #[cfg(feature = "mmap")]
    pub unsafe fn from_path<P: AsRef<Path>>(path: P) -> Result<Self> {
        Fst::from_mmap(MmapReadOnly::open_path(path)?)
    }

    /// Opens a transducer from a `MmapReadOnly`.
    ///
    /// This is useful if a transducer is serialized to only a part of a file.
    /// A `MmapReadOnly` lets one control which region of the file is used for
    /// the transducer.
    #[cfg(feature = "mmap")]
    #[inline]
    pub fn from_mmap(mmap: MmapReadOnly) -> Result<Self> {
        Fst::new(FstData::Mmap(mmap))
    }

    /// Creates a transducer from its representation as a raw byte sequence.
    ///
    /// Note that this operation is very cheap (no allocations and no copies).
    ///
    /// The fst must have been written with a compatible finite state
    /// transducer builder (`Builder` qualifies). If the format is invalid or
    /// if there is a mismatch between the API version of this library and the
    /// fst, then an error is returned.
    #[inline]
    pub fn from_bytes(bytes: Vec<u8>) -> Result<Self> {
        Fst::new(FstData::Cow(Cow::Owned(bytes)))
    }

    /// Creates a transducer from its representation as a raw byte sequence.
    ///
    /// This accepts a static byte slice, which may be useful if the Fst
    /// is embedded into source code.
    #[inline]
    pub fn from_static_slice(bytes: &'static [u8]) -> Result<Self> {
        Fst::new(FstData::Cow(Cow::Borrowed(bytes)))
    }

    /// Creates a transducer from a shared vector at the given offset and
    /// length.
    ///
    /// This permits creating multiple transducers from a single region of
    /// owned memory.
    #[inline]
    pub fn from_shared_bytes(
        bytes: Arc<Vec<u8>>,
        offset: usize,
        len: usize,
    ) -> Result<Self> {
        Fst::new(FstData::Shared { vec: bytes, offset: offset, len: len })
    }

    // Validates the serialized header/footer and builds the `Fst` handle.
    fn new(data: FstData) -> Result<Self> {
        if data.len() < 32 {
            // Minimum size: version, type, root address, number of keys
            // (8-byte u64 each).
            return Err(Error::Format.into());
        }
        // The read_u64 unwraps below are OK because they can never fail.
        // They can only fail when there is an IO error or if there is an
        // unexpected EOF. However, we are reading from a byte slice (no
        // IO errors possible) and we've confirmed the byte slice is at least
        // N bytes (no unexpected EOF).
        let version = (&*data).read_u64::<LittleEndian>().unwrap();
        if version == 0 || version > VERSION {
            return Err(Error::Version {
                expected: VERSION,
                got: version,
            }.into());
        }
        let ty = (&data[8..]).read_u64::<LittleEndian>().unwrap();
        let root_addr = {
            let mut last = &data[data.len() - 8..];
            u64_to_usize(last.read_u64::<LittleEndian>().unwrap())
        };
        let len = {
            let mut last2 = &data[data.len() - 16..];
            u64_to_usize(last2.read_u64::<LittleEndian>().unwrap())
        };
        // The root node is always the last node written, so its address should
        // be near the end. After the root node is written, we still have to
        // write the root *address* and the number of keys in the FST.
        // That's 16 bytes. The extra byte comes from the fact that the root
        // address points to the last byte in the root node, rather than the
        // byte immediately following the root node.
        //
        // If this check passes, it is still possible that the FST is invalid
        // but probably unlikely. If this check reports a false positive, then
        // the program will probably panic. In the worst case, the FST will
        // operate but be subtly wrong. (This would require the bytes to be in
        // a format expected by an FST, which is incredibly unlikely.)
        //
        // The special check for EMPTY_ADDRESS is needed since an empty FST
        // has a root node that is empty and final, which means it has the
        // special address `0`. In that case, the FST is the smallest it can
        // be: the version, type, root address and number of nodes. That's
        // 32 bytes (8 byte u64 each).
        //
        // This is essentially our own little checksum.
        if (root_addr == EMPTY_ADDRESS && data.len() != 32)
            && root_addr + 17 != data.len() {
            return Err(Error::Format.into());
        }
        Ok(Fst {
            version: version,
            data: data,
            root_addr: root_addr,
            ty: ty,
            len: len,
        })
    }

    /// Retrieves the value associated with a key.
    ///
    /// If the key does not exist, then `None` is returned.
#[inline(never)] pub fn get<B: AsRef<[u8]>>(&self, key: B) -> Option<Output> { let mut node = self.root(); let mut out = Output::zero(); for &b in key.as_ref() { node = match node.find_input(b) { None => return None, Some(i) => { let t = node.transition(i); out = out.cat(t.out); self.node(t.addr) } } } if !node.is_final() { None } else { Some(out.cat(node.final_output())) } } /// Returns true if and only if the given key is in this FST. pub fn contains_key<B: AsRef<[u8]>>(&self, key: B) -> bool { let mut node = self.root(); for &b in key.as_ref() { node = match node.find_input(b) { None => return false, Some(i) => self.node(node.transition_addr(i)), } } node.is_final() } /// Return a lexicographically ordered stream of all key-value pairs in /// this fst. #[inline] pub fn stream(&self) -> Stream { StreamBuilder::new(self, AlwaysMatch).into_stream() } /// Return a builder for range queries. /// /// A range query returns a subset of key-value pairs in this fst in a /// range given in lexicographic order. #[inline] pub fn range(&self) -> StreamBuilder { StreamBuilder::new(self, AlwaysMatch) } /// Executes an automaton on the keys of this map. pub fn search<A: Automaton>(&self, aut: A) -> StreamBuilder<A> { StreamBuilder::new(self, aut) } /// Returns the number of keys in this fst. #[inline] pub fn len(&self) -> usize { self.len } /// Returns true if and only if this fst has no keys. #[inline] pub fn is_empty(&self) -> bool { self.len == 0 } /// Returns the number of bytes used by this fst. #[inline] pub fn size(&self) -> usize { self.data.len() } /// Creates a new fst operation with this fst added to it. /// /// The `OpBuilder` type can be used to add additional fst streams /// and perform set operations like union, intersection, difference and /// symmetric difference on the keys of the fst. These set operations also /// allow one to specify how conflicting values are merged in the stream. 
#[inline] pub fn op(&self) -> OpBuilder { OpBuilder::new().add(self) } /// Returns true if and only if the `self` fst is disjoint with the fst /// `stream`. /// /// `stream` must be a lexicographically ordered sequence of byte strings /// with associated values. pub fn is_disjoint<'f, I, S>(&self, stream: I) -> bool where I: for<'a> IntoStreamer<'a, Into=S, Item=(&'a [u8], Output)>, S: 'f + for<'a> Streamer<'a, Item=(&'a [u8], Output)> { self.op().add(stream).intersection().next().is_none() } /// Returns true if and only if the `self` fst is a subset of the fst /// `stream`. /// /// `stream` must be a lexicographically ordered sequence of byte strings /// with associated values. pub fn is_subset<'f, I, S>(&self, stream: I) -> bool where I: for<'a> IntoStreamer<'a, Into=S, Item=(&'a [u8], Output)>, S: 'f + for<'a> Streamer<'a, Item=(&'a [u8], Output)> { let mut op = self.op().add(stream).intersection(); let mut count = 0; while let Some(_) = op.next() { count += 1; } count == self.len() } /// Returns true if and only if the `self` fst is a superset of the fst /// `stream`. /// /// `stream` must be a lexicographically ordered sequence of byte strings /// with associated values. pub fn is_superset<'f, I, S>(&self, stream: I) -> bool where I: for<'a> IntoStreamer<'a, Into=S, Item=(&'a [u8], Output)>, S: 'f + for<'a> Streamer<'a, Item=(&'a [u8], Output)> { let mut op = self.op().add(stream).union(); let mut count = 0; while let Some(_) = op.next() { count += 1; } count == self.len() } /// Returns the underlying type of this fst. /// /// FstType is a convention used to indicate the type of the underlying /// transducer. /// /// This crate reserves the range 0-255 (inclusive) but currently leaves /// the meaning of 0-255 unspecified. #[inline] pub fn fst_type(&self) -> FstType { self.ty } /// Returns the root node of this fst. #[inline(always)] pub fn root(&self) -> Node { self.node(self.root_addr) } /// Returns the node at the given address. 
///
/// Node addresses can be obtained by reading transitions on `Node` values.
#[inline]
pub fn node(&self, addr: CompiledAddr) -> Node {
    node_new(self.version, addr, &self.data)
}

/// Returns a copy of the binary contents of this FST.
#[inline]
pub fn to_vec(&self) -> Vec<u8> {
    self.data.to_vec()
}

/// Returns the output associated with the empty key, if the empty key is
/// present (i.e., if the root node itself is final).
fn empty_final_output(&self) -> Option<Output> {
    let root = self.root();
    if root.is_final() {
        Some(root.final_output())
    } else {
        None
    }
}
}

impl<'a, 'f> IntoStreamer<'a> for &'f Fst {
    type Item = (&'a [u8], Output);
    type Into = Stream<'f>;

    #[inline]
    fn into_stream(self) -> Self::Into {
        StreamBuilder::new(self, AlwaysMatch).into_stream()
    }
}

/// A builder for constructing range queries on streams.
///
/// Once all bounds are set, one should call `into_stream` to get a
/// `Stream`.
///
/// Bounds are not additive. That is, if `ge` is called twice on the same
/// builder, then the second setting wins.
///
/// The `A` type parameter corresponds to an optional automaton to filter
/// the stream. By default, no filtering is done.
///
/// The `'f` lifetime parameter refers to the lifetime of the underlying fst.
pub struct StreamBuilder<'f, A=AlwaysMatch> {
    fst: &'f Fst,
    aut: A,
    min: Bound,
    max: Bound,
}

impl<'f, A: Automaton> StreamBuilder<'f, A> {
    // Starts out unbounded on both ends; the builder methods below narrow
    // the range.
    fn new(fst: &'f Fst, aut: A) -> Self {
        StreamBuilder {
            fst: fst,
            aut: aut,
            min: Bound::Unbounded,
            max: Bound::Unbounded,
        }
    }

    /// Specify a greater-than-or-equal-to bound.
    pub fn ge<T: AsRef<[u8]>>(mut self, bound: T) -> Self {
        self.min = Bound::Included(bound.as_ref().to_owned());
        self
    }

    /// Specify a greater-than bound.
    pub fn gt<T: AsRef<[u8]>>(mut self, bound: T) -> Self {
        self.min = Bound::Excluded(bound.as_ref().to_owned());
        self
    }

    /// Specify a less-than-or-equal-to bound.
    pub fn le<T: AsRef<[u8]>>(mut self, bound: T) -> Self {
        self.max = Bound::Included(bound.as_ref().to_owned());
        self
    }

    /// Specify a less-than bound.
    pub fn lt<T: AsRef<[u8]>>(mut self, bound: T) -> Self {
        self.max = Bound::Excluded(bound.as_ref().to_owned());
        self
    }
}

impl<'a, 'f, A: Automaton> IntoStreamer<'a> for StreamBuilder<'f, A> {
    type Item = (&'a [u8], Output);
    type Into = Stream<'f, A>;

    fn into_stream(self) -> Stream<'f, A> {
        Stream::new(self.fst, self.aut, self.min, self.max)
    }
}

/// An endpoint of a range query over keys (raw byte strings).
#[derive(Debug)]
enum Bound {
    Included(Vec<u8>),
    Excluded(Vec<u8>),
    Unbounded,
}

impl Bound {
    /// Returns true if `inp` lies strictly beyond this bound when the bound
    /// is interpreted as an upper limit.
    fn exceeded_by(&self, inp: &[u8]) -> bool {
        match *self {
            Bound::Included(ref v) => inp > v,
            Bound::Excluded(ref v) => inp >= v,
            Bound::Unbounded => false,
        }
    }

    /// Returns true if this bound imposes no constraint (unbounded, or
    /// bounded by the empty byte string).
    fn is_empty(&self) -> bool {
        match *self {
            Bound::Included(ref v) => v.is_empty(),
            Bound::Excluded(ref v) => v.is_empty(),
            Bound::Unbounded => true,
        }
    }

    /// Returns true unless the bound explicitly excludes its endpoint.
    fn is_inclusive(&self) -> bool {
        match *self {
            Bound::Excluded(_) => false,
            _ => true,
        }
    }
}

/// A lexicographically ordered stream of key-value pairs from an fst.
///
/// The `A` type parameter corresponds to an optional automaton to filter
/// the stream. By default, no filtering is done.
///
/// The `'f` lifetime parameter refers to the lifetime of the underlying fst.
pub struct Stream<'f, A=AlwaysMatch> where A: Automaton {
    fst: &'f Fst,
    aut: A,
    // Bytes of the key currently being produced; grows/shrinks as the
    // depth-first traversal descends/backtracks.
    inp: Vec<u8>,
    // Output for the empty key, emitted (at most once) before traversal.
    empty_output: Option<Output>,
    // Explicit DFS stack; each frame remembers how far through its node's
    // transitions iteration has progressed.
    stack: Vec<StreamState<'f, A::State>>,
    // Upper bound at which iteration stops.
    end_at: Bound,
}

/// One frame of the stream's depth-first traversal stack.
#[derive(Clone, Debug)]
struct StreamState<'f, S> {
    node: Node<'f>,
    // Index of the next transition of `node` to follow.
    trans: usize,
    // Accumulated output along the path leading to `node`.
    out: Output,
    // Automaton state reached by the path leading to `node`.
    aut_state: S,
}

impl<'f, A: Automaton> Stream<'f, A> {
    fn new(fst: &'f Fst, aut: A, min: Bound, max: Bound) -> Self {
        let mut rdr = Stream {
            fst: fst,
            aut: aut,
            inp: Vec::with_capacity(16),
            empty_output: None,
            stack: vec![],
            end_at: max,
        };
        rdr.seek_min(min);
        rdr
    }

    /// Seeks the underlying stream such that the next key to be read is the
    /// smallest key in the underlying fst that satisfies the given minimum
    /// bound.
    ///
    /// This theoretically should be straight-forward, but we need to make
    /// sure our stack is correct, which includes accounting for automaton
    /// states.
    fn seek_min(&mut self, min: Bound) {
        if min.is_empty() {
            // No effective lower bound: start at the root. The empty key is
            // only reported when the bound includes it.
            if min.is_inclusive() {
                self.empty_output = self.fst.empty_final_output();
            }
            self.stack = vec![StreamState {
                node: self.fst.root(),
                trans: 0,
                out: Output::zero(),
                aut_state: self.aut.start(),
            }];
            return;
        }
        let (key, inclusive) = match min {
            Bound::Excluded(ref min) => {
                (min, false)
            }
            Bound::Included(ref min) => {
                (min, true)
            }
            Bound::Unbounded => unreachable!(),
        };
        // At this point, we need to find the starting location of `min` in
        // the FST. However, as we search, we need to maintain a stack of
        // reader states so that the reader can pick up where we left off.
        // N.B. We do not necessarily need to stop in a final state, unlike
        // the one-off `find` method. For example, the given bound might
        // not actually exist in the FST.
        let mut node = self.fst.root();
        let mut out = Output::zero();
        let mut aut_state = self.aut.start();
        for &b in key {
            match node.find_input(b) {
                Some(i) => {
                    let t = node.transition(i);
                    // Record the automaton state reached *before* consuming
                    // `b`: a stack frame describes the path up to (but not
                    // including) that frame's next transition.
                    let prev_state = aut_state;
                    aut_state = self.aut.accept(&prev_state, b);
                    self.inp.push(b);
                    self.stack.push(StreamState {
                        node: node,
                        trans: i+1,
                        out: out,
                        aut_state: prev_state,
                    });
                    out = out.cat(t.out);
                    node = self.fst.node(t.addr);
                }
                None => {
                    // This is a little tricky. We're in this case if the
                    // given bound is not a prefix of any key in the FST.
                    // Since this is a minimum bound, we need to find the
                    // first transition in this node that follows the current
                    // input byte.
                    self.stack.push(StreamState {
                        node: node,
                        // Resume at the first transition whose input byte is
                        // strictly greater than `b`; if there is none, the
                        // frame is already exhausted.
                        trans: node.transitions()
                                   .position(|t| t.inp > b)
                                   .unwrap_or(node.len()),
                        out: out,
                        aut_state: aut_state,
                    });
                    return;
                }
            }
        }
        if !self.stack.is_empty() {
            let last = self.stack.len() - 1;
            if inclusive {
                // Step the top frame back one transition so the bound key
                // itself is the first item produced.
                self.stack[last].trans -= 1;
                self.inp.pop();
            } else {
                // Exclusive bound: descend into the node the bound key ends
                // at, so iteration resumes with the keys strictly after it.
                let node = self.stack[last].node;
                let trans = self.stack[last].trans;
                self.stack.push(StreamState {
                    node: self.fst.node(node.transition(trans - 1).addr),
                    trans: 0,
                    out: out,
                    aut_state: aut_state,
                });
            }
        }
    }

    /// Convert this stream into a vector of byte strings and outputs.
    ///
    /// Note that this creates a new allocation for every key in the stream.
    pub fn into_byte_vec(mut self) -> Vec<(Vec<u8>, u64)> {
        let mut vs = vec![];
        while let Some((k, v)) = self.next() {
            vs.push((k.to_vec(), v.value()));
        }
        vs
    }

    /// Convert this stream into a vector of Unicode strings and outputs.
    ///
    /// If any key is not valid UTF-8, then iteration on the stream is stopped
    /// and a UTF-8 decoding error is returned.
    ///
    /// Note that this creates a new allocation for every key in the stream.
    pub fn into_str_vec(mut self) -> Result<Vec<(String, u64)>> {
        let mut vs = vec![];
        while let Some((k, v)) = self.next() {
            let k = String::from_utf8(k.to_vec()).map_err(Error::from)?;
            vs.push((k, v.value()));
        }
        Ok(vs)
    }

    /// Convert this stream into a vector of byte strings.
    ///
    /// Note that this creates a new allocation for every key in the stream.
    pub fn into_byte_keys(mut self) -> Vec<Vec<u8>> {
        let mut vs = vec![];
        while let Some((k, _)) = self.next() {
            vs.push(k.to_vec());
        }
        vs
    }

    /// Convert this stream into a vector of Unicode strings.
    ///
    /// If any key is not valid UTF-8, then iteration on the stream is stopped
    /// and a UTF-8 decoding error is returned.
    ///
    /// Note that this creates a new allocation for every key in the stream.
    pub fn into_str_keys(mut self) -> Result<Vec<String>> {
        let mut vs = vec![];
        while let Some((k, _)) = self.next() {
            let k = String::from_utf8(k.to_vec()).map_err(Error::from)?;
            vs.push(k);
        }
        Ok(vs)
    }

    /// Convert this stream into a vector of outputs.
    pub fn into_values(mut self) -> Vec<u64> {
        let mut vs = vec![];
        while let Some((_, v)) = self.next() {
            vs.push(v.value());
        }
        vs
    }
}

impl<'f, 'a, A: Automaton> Streamer<'a> for Stream<'f, A> {
    type Item = (&'a [u8], Output);

    fn next(&'a mut self) -> Option<Self::Item> {
        // The empty key, if present and within bounds, is emitted first;
        // it never appears on the traversal stack.
        if let Some(out) = self.empty_output.take() {
            if self.end_at.exceeded_by(&[]) {
                self.stack.clear();
                return None;
            }
            if self.aut.is_match(&self.aut.start()) {
                return Some((&[], out));
            }
        }
        while let Some(state) = self.stack.pop() {
            // Backtrack when this node's transitions are exhausted or the
            // automaton can no longer reach a match from this state.
            if state.trans >= state.node.len()
                || !self.aut.can_match(&state.aut_state) {
                if state.node.addr() != self.fst.root_addr {
                    self.inp.pop().unwrap();
                }
                continue;
            }
            let trans = state.node.transition(state.trans);
            let out = state.out.cat(trans.out);
            let next_state = self.aut.accept(&state.aut_state, trans.inp);
            let is_match = self.aut.is_match(&next_state);
            let next_node = self.fst.node(trans.addr);
            self.inp.push(trans.inp);
            // Re-push the current frame (advanced by one transition), then
            // push the child frame, giving depth-first order.
            self.stack.push(StreamState { trans: state.trans + 1, .. state });
            self.stack.push(StreamState {
                node: next_node,
                trans: 0,
                out: out,
                aut_state: next_state,
            });
            if self.end_at.exceeded_by(&self.inp) {
                // We are done, forever.
                self.stack.clear();
                return None;
            }
            if next_node.is_final() && is_match {
                return Some((&self.inp, out.cat(next_node.final_output())));
            }
        }
        None
    }
}

/// An output is a value that is associated with a key in a finite state
/// transducer.
///
/// Note that outputs must satisfy an algebra. Namely, it must have an additive
/// identity and the following binary operations defined: `prefix`,
/// `concatenation` and `subtraction`. `prefix` and `concatenation` are
/// commutative while `subtraction` is not.
/// `subtraction` is only defined on
/// pairs of operands where the first operand is greater than or equal to the
/// second operand.
///
/// Currently, output values must be `u64`. However, in theory, an output value
/// can be anything that satisfies the above algebra. Future versions of this
/// crate may make outputs generic on this algebra.
#[derive(Copy, Clone, Debug, Hash, Eq, Ord, PartialEq, PartialOrd)]
pub struct Output(u64);

impl Output {
    /// Create a new output from a `u64`.
    #[inline]
    pub fn new(v: u64) -> Output {
        Output(v)
    }

    /// Create a zero output.
    #[inline]
    pub fn zero() -> Output {
        Output(0)
    }

    /// Retrieve the value inside this output.
    #[inline]
    pub fn value(self) -> u64 {
        self.0
    }

    /// Returns true if this is a zero output.
    #[inline]
    pub fn is_zero(self) -> bool {
        self.0 == 0
    }

    /// Returns the prefix of this output and `o`.
    #[inline]
    pub fn prefix(self, o: Output) -> Output {
        Output(cmp::min(self.0, o.0))
    }

    /// Returns the concatenation of this output and `o`.
    #[inline]
    pub fn cat(self, o: Output) -> Output {
        Output(self.0 + o.0)
    }

    /// Returns the subtraction of `o` from this output.
    ///
    /// This function panics if `o > self` (the subtraction would underflow).
    #[inline]
    pub fn sub(self, o: Output) -> Output {
        Output(self.0.checked_sub(o.0)
                     .expect("BUG: underflow subtraction not allowed"))
    }
}

/// Backing storage for an FST's raw bytes.
enum FstData {
    // Owned or borrowed bytes.
    Cow(Cow<'static, [u8]>),
    // A window (`offset..offset + len`) into bytes shared with other owners.
    Shared {
        vec: Arc<Vec<u8>>,
        offset: usize,
        len: usize,
    },
    #[cfg(feature = "mmap")]
    Mmap(MmapReadOnly),
}

impl Deref for FstData {
    type Target = [u8];

    fn deref(&self) -> &[u8] {
        match *self {
            FstData::Cow(ref v) => &**v,
            FstData::Shared { ref vec, offset, len } => {
                &vec[offset..offset + len]
            }
            #[cfg(feature = "mmap")]
            FstData::Mmap(ref v) => v.as_slice(),
        }
    }
}

/// A transition from one node to another.
#[derive(Copy, Clone, Hash, Eq, PartialEq)]
pub struct Transition {
    /// The byte input associated with this transition.
    pub inp: u8,
    /// The output associated with this transition.
    pub out: Output,
    /// The address of the node that this transition points to.
    pub addr: CompiledAddr,
}

impl Default for Transition {
    #[inline]
    fn default() -> Self {
        Transition {
            inp: 0,
            out: Output::zero(),
            addr: NONE_ADDRESS,
        }
    }
}

impl fmt::Debug for Transition {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        if self.out.is_zero() {
            // Omit the (zero) output for brevity.
            write!(f, "{} -> {}", self.inp as char, self.addr)
        } else {
            write!(f, "({}, {}) -> {}",
                   self.inp as char, self.out.value(), self.addr)
        }
    }
}

// On 64-bit targets a `u64` node address always fits in `usize`, so the
// conversion is a plain cast.
#[inline]
#[cfg(target_pointer_width = "64")]
fn u64_to_usize(n: u64) -> usize {
    n as usize
}

// On narrower targets the conversion can overflow — e.g. when reading an FST
// that was built on a 64-bit system — so check explicitly before casting.
#[inline]
#[cfg(not(target_pointer_width = "64"))]
fn u64_to_usize(n: u64) -> usize {
    if n > ::std::usize::MAX as u64 {
        panic!("\
Cannot convert node address {} to a pointer sized variable. If this FST is very large and was generated on a system with a larger pointer size than this system, then it is not possible to read this FST on this system.", n);
    }
    n as usize
}
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

use api::{BuiltDisplayList, ColorF, DynamicProperties, Epoch, LayerSize, LayoutSize};
use api::{FilterOp, LayoutTransform, PipelineId, PropertyBinding, PropertyBindingId};
use api::{ItemRange, MixBlendMode, StackingContext};
use internal_types::FastHashMap;
use std::sync::Arc;

/// Stores a map of the animated property bindings for the current display list. These
/// can be used to animate the transform and/or opacity of a display list without
/// re-submitting the display list itself.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(Clone)]
pub struct SceneProperties {
    transform_properties: FastHashMap<PropertyBindingId, LayoutTransform>,
    float_properties: FastHashMap<PropertyBindingId, f32>,
}

impl SceneProperties {
    /// Creates an empty property map.
    pub fn new() -> Self {
        SceneProperties {
            transform_properties: FastHashMap::default(),
            float_properties: FastHashMap::default(),
        }
    }

    /// Set the current property list for this display list.
    ///
    /// Replaces (rather than merges with) any previously set properties.
    pub fn set_properties(&mut self, properties: DynamicProperties) {
        self.transform_properties.clear();
        self.float_properties.clear();

        for property in properties.transforms {
            self.transform_properties
                .insert(property.key.id, property.value);
        }

        for property in properties.floats {
            self.float_properties
                .insert(property.key.id, property.value);
        }
    }

    /// Get the current value for a transform property.
    pub fn resolve_layout_transform(
        &self,
        property: &PropertyBinding<LayoutTransform>,
    ) -> LayoutTransform {
        match *property {
            PropertyBinding::Value(value) => value,
            PropertyBinding::Binding(ref key) => {
                // Fall back to the identity transform when the binding has no
                // registered value; warn so broken bindings are noticed.
                self.transform_properties
                    .get(&key.id)
                    .cloned()
                    .unwrap_or_else(|| {
                        warn!("Property binding has an invalid value.");
                        debug!("key={:?}", key);
                        LayoutTransform::identity()
                    })
            }
        }
    }

    /// Get the current value for a float property.
    pub fn resolve_float(
        &self,
        property: &PropertyBinding<f32>,
        default_value: f32
    ) -> f32 {
        match *property {
            PropertyBinding::Value(value) => value,
            PropertyBinding::Binding(ref key) => {
                // Fall back to the caller-supplied default when the binding
                // has no registered value.
                self.float_properties
                    .get(&key.id)
                    .cloned()
                    .unwrap_or_else(|| {
                        warn!("Property binding has an invalid value.");
                        debug!("key={:?}", key);
                        default_value
                    })
            }
        }
    }
}

/// A representation of the layout within the display port for a given document or iframe.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(Clone)]
pub struct ScenePipeline {
    pub pipeline_id: PipelineId,
    pub viewport_size: LayerSize,
    pub content_size: LayoutSize,
    pub background_color: Option<ColorF>,
    pub display_list: BuiltDisplayList,
}

/// A complete representation of the layout bundling visible pipelines together.
#[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] #[derive(Clone)] pub struct Scene { pub root_pipeline_id: Option<PipelineId>, pub pipelines: FastHashMap<PipelineId, Arc<ScenePipeline>>, pub pipeline_epochs: FastHashMap<PipelineId, Epoch>, } impl Scene { pub fn new() -> Self { Scene { root_pipeline_id: None, pipelines: FastHashMap::default(), pipeline_epochs: FastHashMap::default(), } } pub fn set_root_pipeline_id(&mut self, pipeline_id: PipelineId) { self.root_pipeline_id = Some(pipeline_id); } pub fn set_display_list( &mut self, pipeline_id: PipelineId, epoch: Epoch, display_list: BuiltDisplayList, background_color: Option<ColorF>, viewport_size: LayerSize, content_size: LayoutSize, ) { let new_pipeline = ScenePipeline { pipeline_id, viewport_size, content_size, background_color, display_list, }; self.pipelines.insert(pipeline_id, Arc::new(new_pipeline)); self.pipeline_epochs.insert(pipeline_id, epoch); } pub fn remove_pipeline(&mut self, pipeline_id: PipelineId) { if self.root_pipeline_id == Some(pipeline_id) { self.root_pipeline_id = None; } self.pipelines.remove(&pipeline_id); } pub fn update_epoch(&mut self, pipeline_id: PipelineId, epoch: Epoch) { self.pipeline_epochs.insert(pipeline_id, epoch); } } /// An arbitrary number which we assume opacity is invisible below. pub const OPACITY_EPSILON: f32 = 0.001; pub trait FilterOpHelpers { fn is_visible(&self) -> bool; fn is_noop(&self) -> bool; } impl FilterOpHelpers for FilterOp { fn is_visible(&self) -> bool { match *self { FilterOp::Blur(..) | FilterOp::Brightness(..) | FilterOp::Contrast(..) | FilterOp::Grayscale(..) | FilterOp::HueRotate(..) | FilterOp::Invert(..) | FilterOp::Saturate(..) | FilterOp::Sepia(..) | FilterOp::DropShadow(..) | FilterOp::ColorMatrix(..) 
=> true, FilterOp::Opacity(_, amount) => { amount > OPACITY_EPSILON } } } fn is_noop(&self) -> bool { match *self { FilterOp::Blur(length) => length == 0.0, FilterOp::Brightness(amount) => amount == 1.0, FilterOp::Contrast(amount) => amount == 1.0, FilterOp::Grayscale(amount) => amount == 0.0, FilterOp::HueRotate(amount) => amount == 0.0, FilterOp::Invert(amount) => amount == 0.0, FilterOp::Opacity(_, amount) => amount >= 1.0, FilterOp::Saturate(amount) => amount == 1.0, FilterOp::Sepia(amount) => amount == 0.0, FilterOp::DropShadow(offset, blur, _) => { offset.x == 0.0 && offset.y == 0.0 && blur == 0.0 }, FilterOp::ColorMatrix(matrix) => { matrix == [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0] } } } } pub trait StackingContextHelpers { fn mix_blend_mode_for_compositing(&self) -> Option<MixBlendMode>; fn filter_ops_for_compositing( &self, display_list: &BuiltDisplayList, input_filters: ItemRange<FilterOp>, ) -> Vec<FilterOp>; } impl StackingContextHelpers for StackingContext { fn mix_blend_mode_for_compositing(&self) -> Option<MixBlendMode> { match self.mix_blend_mode { MixBlendMode::Normal => None, _ => Some(self.mix_blend_mode), } } fn filter_ops_for_compositing( &self, display_list: &BuiltDisplayList, input_filters: ItemRange<FilterOp>, ) -> Vec<FilterOp> { // TODO(gw): Now that we resolve these later on, // we could probably make it a bit // more efficient than cloning these here. let mut filters = vec![]; for filter in display_list.get(input_filters) { filters.push(filter); } filters } } Remove the pipeline's epoch from the scene when removing the pipeline itself. /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ use api::{BuiltDisplayList, ColorF, DynamicProperties, Epoch, LayerSize, LayoutSize}; use api::{FilterOp, LayoutTransform, PipelineId, PropertyBinding, PropertyBindingId}; use api::{ItemRange, MixBlendMode, StackingContext}; use internal_types::FastHashMap; use std::sync::Arc; /// Stores a map of the animated property bindings for the current display list. These /// can be used to animate the transform and/or opacity of a display list without /// re-submitting the display list itself. #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] #[derive(Clone)] pub struct SceneProperties { transform_properties: FastHashMap<PropertyBindingId, LayoutTransform>, float_properties: FastHashMap<PropertyBindingId, f32>, } impl SceneProperties { pub fn new() -> Self { SceneProperties { transform_properties: FastHashMap::default(), float_properties: FastHashMap::default(), } } /// Set the current property list for this display list. pub fn set_properties(&mut self, properties: DynamicProperties) { self.transform_properties.clear(); self.float_properties.clear(); for property in properties.transforms { self.transform_properties .insert(property.key.id, property.value); } for property in properties.floats { self.float_properties .insert(property.key.id, property.value); } } /// Get the current value for a transform property. pub fn resolve_layout_transform( &self, property: &PropertyBinding<LayoutTransform>, ) -> LayoutTransform { match *property { PropertyBinding::Value(value) => value, PropertyBinding::Binding(ref key) => { self.transform_properties .get(&key.id) .cloned() .unwrap_or_else(|| { warn!("Property binding has an invalid value."); debug!("key={:?}", key); LayoutTransform::identity() }) } } } /// Get the current value for a float property. 
pub fn resolve_float( &self, property: &PropertyBinding<f32>, default_value: f32 ) -> f32 { match *property { PropertyBinding::Value(value) => value, PropertyBinding::Binding(ref key) => { self.float_properties .get(&key.id) .cloned() .unwrap_or_else(|| { warn!("Property binding has an invalid value."); debug!("key={:?}", key); default_value }) } } } } /// A representation of the layout within the display port for a given document or iframe. #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] #[derive(Clone)] pub struct ScenePipeline { pub pipeline_id: PipelineId, pub viewport_size: LayerSize, pub content_size: LayoutSize, pub background_color: Option<ColorF>, pub display_list: BuiltDisplayList, } /// A complete representation of the layout bundling visible pipelines together. #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] #[derive(Clone)] pub struct Scene { pub root_pipeline_id: Option<PipelineId>, pub pipelines: FastHashMap<PipelineId, Arc<ScenePipeline>>, pub pipeline_epochs: FastHashMap<PipelineId, Epoch>, } impl Scene { pub fn new() -> Self { Scene { root_pipeline_id: None, pipelines: FastHashMap::default(), pipeline_epochs: FastHashMap::default(), } } pub fn set_root_pipeline_id(&mut self, pipeline_id: PipelineId) { self.root_pipeline_id = Some(pipeline_id); } pub fn set_display_list( &mut self, pipeline_id: PipelineId, epoch: Epoch, display_list: BuiltDisplayList, background_color: Option<ColorF>, viewport_size: LayerSize, content_size: LayoutSize, ) { let new_pipeline = ScenePipeline { pipeline_id, viewport_size, content_size, background_color, display_list, }; self.pipelines.insert(pipeline_id, Arc::new(new_pipeline)); self.pipeline_epochs.insert(pipeline_id, epoch); } pub fn remove_pipeline(&mut self, pipeline_id: PipelineId) { if self.root_pipeline_id == Some(pipeline_id) { self.root_pipeline_id = None; } self.pipelines.remove(&pipeline_id); 
self.pipeline_epochs.remove(&pipeline_id); } pub fn update_epoch(&mut self, pipeline_id: PipelineId, epoch: Epoch) { self.pipeline_epochs.insert(pipeline_id, epoch); } } /// An arbitrary number which we assume opacity is invisible below. pub const OPACITY_EPSILON: f32 = 0.001; pub trait FilterOpHelpers { fn is_visible(&self) -> bool; fn is_noop(&self) -> bool; } impl FilterOpHelpers for FilterOp { fn is_visible(&self) -> bool { match *self { FilterOp::Blur(..) | FilterOp::Brightness(..) | FilterOp::Contrast(..) | FilterOp::Grayscale(..) | FilterOp::HueRotate(..) | FilterOp::Invert(..) | FilterOp::Saturate(..) | FilterOp::Sepia(..) | FilterOp::DropShadow(..) | FilterOp::ColorMatrix(..) => true, FilterOp::Opacity(_, amount) => { amount > OPACITY_EPSILON } } } fn is_noop(&self) -> bool { match *self { FilterOp::Blur(length) => length == 0.0, FilterOp::Brightness(amount) => amount == 1.0, FilterOp::Contrast(amount) => amount == 1.0, FilterOp::Grayscale(amount) => amount == 0.0, FilterOp::HueRotate(amount) => amount == 0.0, FilterOp::Invert(amount) => amount == 0.0, FilterOp::Opacity(_, amount) => amount >= 1.0, FilterOp::Saturate(amount) => amount == 1.0, FilterOp::Sepia(amount) => amount == 0.0, FilterOp::DropShadow(offset, blur, _) => { offset.x == 0.0 && offset.y == 0.0 && blur == 0.0 }, FilterOp::ColorMatrix(matrix) => { matrix == [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0] } } } } pub trait StackingContextHelpers { fn mix_blend_mode_for_compositing(&self) -> Option<MixBlendMode>; fn filter_ops_for_compositing( &self, display_list: &BuiltDisplayList, input_filters: ItemRange<FilterOp>, ) -> Vec<FilterOp>; } impl StackingContextHelpers for StackingContext { fn mix_blend_mode_for_compositing(&self) -> Option<MixBlendMode> { match self.mix_blend_mode { MixBlendMode::Normal => None, _ => Some(self.mix_blend_mode), } } fn filter_ops_for_compositing( &self, display_list: &BuiltDisplayList, input_filters: 
ItemRange<FilterOp>, ) -> Vec<FilterOp> { // TODO(gw): Now that we resolve these later on, // we could probably make it a bit // more efficient than cloning these here. let mut filters = vec![]; for filter in display_list.get(input_filters) { filters.push(filter); } filters } }
//! Handling of everything related to the calling convention. Also fills `fx.local_map`. #[cfg(debug_assertions)] mod comments; mod pass_mode; mod returning; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; use rustc_target::spec::abi::Abi; use cranelift_codegen::ir::{AbiParam, ArgumentPurpose}; use self::pass_mode::*; use crate::prelude::*; pub(crate) use self::returning::{can_return_to_ssa_var, codegen_return}; // Copied from https://github.com/rust-lang/rust/blob/f52c72948aa1dd718cc1f168d21c91c584c0a662/src/librustc_middle/ty/layout.rs#L2301 #[rustfmt::skip] pub(crate) fn fn_sig_for_fn_abi<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> ty::PolyFnSig<'tcx> { use rustc_middle::ty::subst::Subst; // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function. let ty = instance.ty(tcx, ty::ParamEnv::reveal_all()); match *ty.kind() { ty::FnDef(..) => { // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering // parameters unused if they show up in the signature, but not in the `mir::Body` // (i.e. due to being inside a projection that got normalized, see // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping // track of a polymorphization `ParamEnv` to allow normalizing later. let mut sig = match *ty.kind() { ty::FnDef(def_id, substs) => tcx .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id)) .subst(tcx, substs), _ => unreachable!(), }; if let ty::InstanceDef::VtableShim(..) = instance.def { // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`. 
                sig = sig.map_bound(|mut sig| {
                    let mut inputs_and_output = sig.inputs_and_output.to_vec();
                    inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
                    sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
                    sig
                });
            }
            sig
        }
        ty::Closure(def_id, substs) => {
            let sig = substs.as_closure().sig();

            // Prepend the closure environment as the first argument.
            let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
            sig.map_bound(|sig| {
                tcx.mk_fn_sig(
                    std::iter::once(env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
                    sig.output(),
                    sig.c_variadic,
                    sig.unsafety,
                    sig.abi,
                )
            })
        }
        ty::Generator(_, substs, _) => {
            let sig = substs.as_generator().poly_sig();

            // The resume function receives the generator pinned behind a
            // mutable reference: `Pin<&mut [generator]>`.
            let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
            let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);

            let pin_did = tcx.require_lang_item(rustc_hir::LangItem::Pin, None);
            let pin_adt_ref = tcx.adt_def(pin_did);
            let pin_substs = tcx.intern_substs(&[env_ty.into()]);
            let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);

            sig.map_bound(|sig| {
                // The return type is `GeneratorState<yield_ty, return_ty>`.
                let state_did = tcx.require_lang_item(rustc_hir::LangItem::GeneratorState, None);
                let state_adt_ref = tcx.adt_def(state_did);
                let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
                let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);

                tcx.mk_fn_sig(
                    [env_ty, sig.resume_ty].iter(),
                    &ret_ty,
                    false,
                    rustc_hir::Unsafety::Normal,
                    rustc_target::spec::abi::Abi::Rust,
                )
            })
        }
        _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
    }
}

/// Lowers a monomorphic Rust `FnSig` to a Cranelift `Signature`, mapping
/// each argument and the return value through its `PassMode`.
fn clif_sig_from_fn_sig<'tcx>(
    tcx: TyCtxt<'tcx>,
    triple: &target_lexicon::Triple,
    sig: FnSig<'tcx>,
    span: Span,
    is_vtable_fn: bool,
    requires_caller_location: bool,
) -> Signature {
    // The platform-dependent "system" ABI is lowered like "C" here.
    let abi = match sig.abi {
        Abi::System => Abi::C,
        abi => abi,
    };
    let (call_conv, inputs, output): (CallConv, Vec<Ty<'tcx>>, Ty<'tcx>) = match abi {
        Abi::Rust => (
            CallConv::triple_default(triple),
            sig.inputs().to_vec(),
            sig.output(),
        ),
        Abi::C | Abi::Unadjusted => (
            CallConv::triple_default(triple),
            sig.inputs().to_vec(),
            sig.output(),
        ),
        Abi::SysV64 => (CallConv::SystemV,
            sig.inputs().to_vec(),
            sig.output()),
        Abi::RustCall => {
            // "rust-call": the second argument is a tuple that is untupled
            // into individual ABI-level arguments.
            assert_eq!(sig.inputs().len(), 2);
            let extra_args = match sig.inputs().last().unwrap().kind() {
                ty::Tuple(ref tupled_arguments) => tupled_arguments,
                _ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
            };
            let mut inputs: Vec<Ty<'tcx>> = vec![sig.inputs()[0]];
            inputs.extend(extra_args.types());
            (CallConv::triple_default(triple), inputs, sig.output())
        }
        // Already rewritten to `Abi::C` above.
        Abi::System => unreachable!(),
        Abi::RustIntrinsic => (
            CallConv::triple_default(triple),
            sig.inputs().to_vec(),
            sig.output(),
        ),
        _ => unimplemented!("unsupported abi {:?}", sig.abi),
    };

    let inputs = inputs
        .into_iter()
        .enumerate()
        .map(|(i, ty)| {
            let mut layout = tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap();
            if i == 0 && is_vtable_fn {
                // Virtual calls turn their self param into a thin pointer.
                // See https://github.com/rust-lang/rust/blob/37b6a5e5e82497caf5353d9d856e4eb5d14cbe06/src/librustc/ty/layout.rs#L2519-L2572 for more info
                layout = tcx
                    .layout_of(ParamEnv::reveal_all().and(tcx.mk_mut_ptr(tcx.mk_unit())))
                    .unwrap();
            }
            let pass_mode = get_pass_mode(tcx, layout);
            // Non-Rust ABIs only support a subset of pass modes; warn on the
            // rest rather than miscompiling silently.
            if abi != Abi::Rust && abi != Abi::RustCall && abi != Abi::RustIntrinsic {
                match pass_mode {
                    PassMode::NoPass | PassMode::ByVal(_) => {}
                    PassMode::ByRef { size: Some(size) } => {
                        let purpose = ArgumentPurpose::StructArgument(u32::try_from(size.bytes()).expect("struct too big to pass on stack"));
                        return EmptySinglePair::Single(AbiParam::special(pointer_ty(tcx), purpose)).into_iter();
                    }
                    PassMode::ByValPair(_, _) | PassMode::ByRef { size: None } => {
                        tcx.sess.span_warn(
                            span,
                            &format!(
                                "Argument of type `{:?}` with pass mode `{:?}` is not yet supported \
                                for non-rust abi `{}`. Calling this function may result in a crash.",
                                layout.ty,
                                pass_mode,
                                abi,
                            ),
                        );
                    }
                }
            }
            pass_mode.get_param_ty(tcx).map(AbiParam::new).into_iter()
        })
        .flatten();

    let (mut params, returns): (Vec<_>, Vec<_>) = match get_pass_mode(
        tcx,
        tcx.layout_of(ParamEnv::reveal_all().and(output)).unwrap(),
    ) {
        PassMode::NoPass => (inputs.collect(), vec![]),
        PassMode::ByVal(ret_ty) => (inputs.collect(), vec![AbiParam::new(ret_ty)]),
        PassMode::ByValPair(ret_ty_a, ret_ty_b) => (
            inputs.collect(),
            vec![AbiParam::new(ret_ty_a), AbiParam::new(ret_ty_b)],
        ),
        PassMode::ByRef { size: Some(_) } => {
            (
                Some(pointer_ty(tcx)) // First param is place to put return val
                    .into_iter()
                    .map(|ty| AbiParam::special(ty, ArgumentPurpose::StructReturn))
                    .chain(inputs)
                    .collect(),
                vec![],
            )
        }
        PassMode::ByRef { size: None } => todo!(),
    };

    if requires_caller_location {
        // Implicit trailing caller-location pointer for `#[track_caller]`.
        params.push(AbiParam::new(pointer_ty(tcx)));
    }

    Signature {
        params,
        returns,
        call_conv,
    }
}

/// Returns the symbol name and Cranelift signature for `inst`.
///
/// Aborts compilation for C-variadic definitions unless `support_vararg`.
pub(crate) fn get_function_name_and_sig<'tcx>(
    tcx: TyCtxt<'tcx>,
    triple: &target_lexicon::Triple,
    inst: Instance<'tcx>,
    support_vararg: bool,
) -> (String, Signature) {
    assert!(!inst.substs.needs_infer());
    let fn_sig = tcx
        .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_sig_for_fn_abi(tcx, inst));
    if fn_sig.c_variadic && !support_vararg {
        tcx.sess.span_fatal(
            tcx.def_span(inst.def_id()),
            "Variadic function definitions are not yet supported",
        );
    }
    let sig = clif_sig_from_fn_sig(
        tcx,
        triple,
        fn_sig,
        tcx.def_span(inst.def_id()),
        false,
        inst.def.requires_caller_location(tcx),
    );
    (tcx.symbol_name(inst).name.to_string(), sig)
}

/// Instance must be monomorphized
pub(crate) fn import_function<'tcx>(
    tcx: TyCtxt<'tcx>,
    module: &mut impl Module,
    inst: Instance<'tcx>,
) -> FuncId {
    let (name, sig) = get_function_name_and_sig(tcx, module.isa().triple(), inst, true);
    module
        .declare_function(&name, Linkage::Import, &sig)
        .unwrap()
}

impl<'tcx, M: Module> FunctionCx<'_, 'tcx, M> {
    /// Instance must be monomorphized
    pub(crate) fn get_function_ref(&mut self, inst: Instance<'tcx>) -> FuncRef {
        let func_id = import_function(self.tcx, &mut self.cx.module, inst);
        let func_ref = self
            .cx
            .module
            .declare_func_in_func(func_id, &mut self.bcx.func);

        #[cfg(debug_assertions)]
        self.add_comment(func_ref, format!("{:?}", inst));

        func_ref
    }

    /// Declares and calls a runtime library function given raw Cranelift
    /// types, returning the call's (at most two) result values.
    pub(crate) fn lib_call(
        &mut self,
        name: &str,
        input_tys: Vec<types::Type>,
        output_tys: Vec<types::Type>,
        args: &[Value],
    ) -> &[Value] {
        let sig = Signature {
            params: input_tys.iter().cloned().map(AbiParam::new).collect(),
            returns: output_tys.iter().cloned().map(AbiParam::new).collect(),
            call_conv: CallConv::triple_default(self.triple()),
        };
        let func_id = self
            .cx
            .module
            .declare_function(&name, Linkage::Import, &sig)
            .unwrap();
        let func_ref = self
            .cx
            .module
            .declare_func_in_func(func_id, &mut self.bcx.func);
        let call_inst = self.bcx.ins().call(func_ref, args);
        #[cfg(debug_assertions)]
        {
            self.add_comment(call_inst, format!("easy_call {}", name));
        }
        let results = self.bcx.inst_results(call_inst);
        assert!(results.len() <= 2, "{}", results.len());
        results
    }

    /// Like `lib_call`, but takes and returns `CValue`s, deriving the
    /// Cranelift types from the Rust-level argument and return types.
    pub(crate) fn easy_call(
        &mut self,
        name: &str,
        args: &[CValue<'tcx>],
        return_ty: Ty<'tcx>,
    ) -> CValue<'tcx> {
        let (input_tys, args): (Vec<_>, Vec<_>) = args
            .iter()
            .map(|arg| {
                (
                    self.clif_type(arg.layout().ty).unwrap(),
                    arg.load_scalar(self),
                )
            })
            .unzip();
        let return_layout = self.layout_of(return_ty);
        // A tuple return is produced as one scalar result per element.
        let return_tys = if let ty::Tuple(tup) = return_ty.kind() {
            tup.types().map(|ty| self.clif_type(ty).unwrap()).collect()
        } else {
            vec![self.clif_type(return_ty).unwrap()]
        };
        let ret_vals = self.lib_call(name, input_tys, return_tys, &args);
        match *ret_vals {
            [] => CValue::by_ref(
                // No result values: use a dummy non-null address as the
                // backing place — presumably only valid for zero-sized
                // reads; TODO confirm against `CValue::by_ref`'s contract.
                Pointer::const_addr(self, i64::from(self.pointer_type.bytes())),
                return_layout,
            ),
            [val] => CValue::by_val(val, return_layout),
            [val, extra] => CValue::by_val_pair(val, extra, return_layout),
            _ => unreachable!(),
        }
    }
}

/// Make a [`CPlace`] capable of holding value of the specified type.
fn make_local_place<'tcx>(
    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
    local: Local,
    layout: TyAndLayout<'tcx>,
    is_ssa: bool,
) -> CPlace<'tcx> {
    // SSA-eligible locals live in Cranelift variables (a pair of variables
    // for ScalarPair layouts); everything else gets a stack slot.
    let place = if is_ssa {
        if let rustc_target::abi::Abi::ScalarPair(_, _) = layout.abi {
            CPlace::new_var_pair(fx, local, layout)
        } else {
            CPlace::new_var(fx, local, layout)
        }
    } else {
        CPlace::new_stack_slot(fx, layout)
    };

    #[cfg(debug_assertions)]
    self::comments::add_local_place_comments(fx, place, local);

    place
}

/// Sets up the function prologue: binds the return place, materializes all
/// argument locals (including re-assembling "rust-call" spread tuples),
/// allocates places for vars/temps, and jumps to the MIR start block.
pub(crate) fn codegen_fn_prelude<'tcx>(
    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
    start_block: Block,
) {
    let ssa_analyzed = crate::analyze::analyze(fx);

    #[cfg(debug_assertions)]
    self::comments::add_args_header_comment(fx);

    let ret_place = self::returning::codegen_return_param(fx, &ssa_analyzed, start_block);
    assert_eq!(fx.local_map.push(ret_place), RETURN_PLACE);

    // None means pass_mode == NoPass
    enum ArgKind<'tcx> {
        Normal(Option<CValue<'tcx>>),
        Spread(Vec<Option<CValue<'tcx>>>),
    }

    let func_params = fx
        .mir
        .args_iter()
        .map(|local| {
            let arg_ty = fx.monomorphize(fx.mir.local_decls[local].ty);

            // Adapted from https://github.com/rust-lang/rust/blob/145155dc96757002c7b2e9de8489416e2fdbbd57/src/librustc_codegen_llvm/mir/mod.rs#L442-L482
            if Some(local) == fx.mir.spread_arg {
                // This argument (e.g. the last argument in the "rust-call" ABI)
                // is a tuple that was spread at the ABI level and now we have
                // to reconstruct it into a tuple local variable, from multiple
                // individual function arguments.

                let tupled_arg_tys = match arg_ty.kind() {
                    ty::Tuple(ref tys) => tys,
                    _ => bug!("spread argument isn't a tuple?! but {:?}", arg_ty),
                };

                let mut params = Vec::new();
                for (i, arg_ty) in tupled_arg_tys.types().enumerate() {
                    let param = cvalue_for_param(fx, start_block, Some(local), Some(i), arg_ty);
                    params.push(param);
                }

                (local, ArgKind::Spread(params), arg_ty)
            } else {
                let param = cvalue_for_param(fx, start_block, Some(local), None, arg_ty);
                (local, ArgKind::Normal(param), arg_ty)
            }
        })
        .collect::<Vec<(Local, ArgKind<'tcx>, Ty<'tcx>)>>();

    assert!(fx.caller_location.is_none());
    if fx.instance.def.requires_caller_location(fx.tcx) {
        // Store caller location for `#[track_caller]`.
        fx.caller_location = Some(
            cvalue_for_param(fx, start_block, None, None, fx.tcx.caller_location_ty()).unwrap(),
        );
    }

    fx.bcx.switch_to_block(start_block);
    fx.bcx.ins().nop();

    #[cfg(debug_assertions)]
    self::comments::add_locals_header_comment(fx);

    for (local, arg_kind, ty) in func_params {
        let layout = fx.layout_of(ty);

        let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;

        // While this is normally an optimization to prevent an unnecessary copy when an argument is
        // not mutated by the current function, this is necessary to support unsized arguments.
        if let ArgKind::Normal(Some(val)) = arg_kind {
            if let Some((addr, meta)) = val.try_to_ptr() {
                let local_decl = &fx.mir.local_decls[local];
                //                       v this ! is important
                let internally_mutable = !val.layout().ty.is_freeze(
                    fx.tcx.at(local_decl.source_info.span),
                    ParamEnv::reveal_all(),
                );
                if local_decl.mutability == mir::Mutability::Not && !internally_mutable {
                    // We wont mutate this argument, so it is fine to borrow the backing storage
                    // of this argument, to prevent a copy.

                    let place = if let Some(meta) = meta {
                        CPlace::for_ptr_with_extra(addr, meta, val.layout())
                    } else {
                        CPlace::for_ptr(addr, val.layout())
                    };

                    #[cfg(debug_assertions)]
                    self::comments::add_local_place_comments(fx, place, local);

                    assert_eq!(fx.local_map.push(place), local);
                    continue;
                }
            }
        }

        let place = make_local_place(fx, local, layout, is_ssa);
        assert_eq!(fx.local_map.push(place), local);

        match arg_kind {
            ArgKind::Normal(param) => {
                if let Some(param) = param {
                    place.write_cvalue(fx, param);
                }
            }
            ArgKind::Spread(params) => {
                // Re-assemble the spread tuple field by field.
                for (i, param) in params.into_iter().enumerate() {
                    if let Some(param) = param {
                        place
                            .place_field(fx, mir::Field::new(i))
                            .write_cvalue(fx, param);
                    }
                }
            }
        }
    }

    for local in fx.mir.vars_and_temps_iter() {
        let ty = fx.monomorphize(fx.mir.local_decls[local].ty);
        let layout = fx.layout_of(ty);

        let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;

        let place = make_local_place(fx, local, layout, is_ssa);
        assert_eq!(fx.local_map.push(place), local);
    }

    fx.bcx
        .ins()
        .jump(*fx.block_map.get(START_BLOCK).unwrap(), &[]);
}

/// Lowers a MIR `Call` terminator: resolves the callee, special-cases
/// intrinsics/empty drop glue/virtual calls, adjusts arguments for the ABI,
/// and emits the call plus the jump to the destination block (or a trap for
/// diverging calls).
pub(crate) fn codegen_terminator_call<'tcx>(
    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
    span: Span,
    current_block: Block,
    func: &Operand<'tcx>,
    args: &[Operand<'tcx>],
    destination: Option<(Place<'tcx>, BasicBlock)>,
) {
    let fn_ty = fx.monomorphize(func.ty(fx.mir, fx.tcx));
    let fn_sig = fx
        .tcx
        .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx));

    let destination = destination.map(|(place, bb)| (codegen_place(fx, place), bb));

    // Handle special calls like intrinsics and empty drop glue.
    let instance = if let ty::FnDef(def_id, substs) = *fn_ty.kind() {
        let instance = ty::Instance::resolve(fx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
            .unwrap()
            .unwrap()
            .polymorphize(fx.tcx);

        // LLVM-style intrinsics are dispatched by symbol name.
        if fx.tcx.symbol_name(instance).name.starts_with("llvm.") {
            crate::intrinsics::codegen_llvm_intrinsic_call(
                fx,
                &fx.tcx.symbol_name(instance).name,
                substs,
                args,
                destination,
            );
            return;
        }

        match instance.def {
            InstanceDef::Intrinsic(_) => {
                crate::intrinsics::codegen_intrinsic_call(fx, instance, args, destination, span);
                return;
            }
            InstanceDef::DropGlue(_, None) => {
                // empty drop glue - a nop.
                let (_, dest) = destination.expect("Non terminating drop_in_place_real???");
                let ret_block = fx.get_block(dest);
                fx.bcx.ins().jump(ret_block, &[]);
                return;
            }
            _ => Some(instance),
        }
    } else {
        None
    };

    // Calls to `#[cold]` functions mark the current block as cold.
    let is_cold = instance
        .map(|inst| {
            fx.tcx
                .codegen_fn_attrs(inst.def_id())
                .flags
                .contains(CodegenFnAttrFlags::COLD)
        })
        .unwrap_or(false);
    if is_cold {
        fx.cold_blocks.insert(current_block);
    }

    // Unpack arguments tuple for closures
    let args = if fn_sig.abi == Abi::RustCall {
        assert_eq!(args.len(), 2, "rust-call abi requires two arguments");
        let self_arg = codegen_operand(fx, &args[0]);
        let pack_arg = codegen_operand(fx, &args[1]);

        let tupled_arguments = match pack_arg.layout().ty.kind() {
            ty::Tuple(ref tupled_arguments) => tupled_arguments,
            _ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
        };

        let mut args = Vec::with_capacity(1 + tupled_arguments.len());
        args.push(self_arg);
        for i in 0..tupled_arguments.len() {
            args.push(pack_arg.value_field(fx, mir::Field::new(i)));
        }
        args
    } else {
        args.iter()
            .map(|arg| codegen_operand(fx, arg))
            .collect::<Vec<_>>()
    };

    //   | indirect call target
    //   |          | the first argument to be passed
    //   v          v          v virtual calls are special cased below
    let (func_ref, first_arg, is_virtual_call) = match instance {
        // Trait object call
        Some(Instance {
            def: InstanceDef::Virtual(_, idx),
            ..
        }) => {
            #[cfg(debug_assertions)]
            {
                let nop_inst = fx.bcx.ins().nop();
                fx.add_comment(
                    nop_inst,
                    format!(
                        "virtual call; self arg pass mode: {:?}",
                        get_pass_mode(fx.tcx, args[0].layout())
                    ),
                );
            }
            // Load the data pointer and the method pointer out of the vtable.
            let (ptr, method) = crate::vtable::get_ptr_and_method_ref(fx, args[0], idx);
            (Some(method), Single(ptr), true)
        }

        // Normal call
        Some(_) => (
            None,
            args.get(0)
                .map(|arg| adjust_arg_for_abi(fx, *arg))
                .unwrap_or(Empty),
            false,
        ),

        // Indirect call
        None => {
            #[cfg(debug_assertions)]
            {
                let nop_inst = fx.bcx.ins().nop();
                fx.add_comment(nop_inst, "indirect call");
            }
            let func = codegen_operand(fx, func).load_scalar(fx);
            (
                Some(func),
                args.get(0)
                    .map(|arg| adjust_arg_for_abi(fx, *arg))
                    .unwrap_or(Empty),
                false,
            )
        }
    };

    let ret_place = destination.map(|(place, _)| place);
    let (call_inst, call_args) =
        self::returning::codegen_with_call_return_arg(fx, fn_sig, ret_place, |fx, return_ptr| {
            // Argument order: optional sret pointer, then self/first arg,
            // then the remaining ABI-adjusted arguments.
            let mut call_args: Vec<Value> = return_ptr
                .into_iter()
                .chain(first_arg.into_iter())
                .chain(
                    args.into_iter()
                        .skip(1)
                        .map(|arg| adjust_arg_for_abi(fx, arg).into_iter())
                        .flatten(),
                )
                .collect::<Vec<_>>();

            if instance
                .map(|inst| inst.def.requires_caller_location(fx.tcx))
                .unwrap_or(false)
            {
                // Pass the caller location for `#[track_caller]`.
                let caller_location = fx.get_caller_location(span);
                call_args.extend(adjust_arg_for_abi(fx, caller_location).into_iter());
            }

            let call_inst = if let Some(func_ref) = func_ref {
                let sig = clif_sig_from_fn_sig(
                    fx.tcx,
                    fx.triple(),
                    fn_sig,
                    span,
                    is_virtual_call,
                    false, // calls through function pointers never pass the caller location
                );
                let sig = fx.bcx.import_signature(sig);
                fx.bcx.ins().call_indirect(sig, func_ref, &call_args)
            } else {
                let func_ref =
                    fx.get_function_ref(instance.expect("non-indirect call on non-FnDef type"));
                fx.bcx.ins().call(func_ref, &call_args)
            };

            (call_inst, call_args)
        });

    // FIXME find a cleaner way to support varargs
    if fn_sig.c_variadic {
        if fn_sig.abi != Abi::C {
            fx.tcx.sess.span_fatal(
                span,
                &format!("Variadic call for non-C abi {:?}", fn_sig.abi),
            );
        }
        // Patch the imported signature so its params match the actual
        // argument types of this particular variadic call site.
        let sig_ref = fx.bcx.func.dfg.call_signature(call_inst).unwrap();
        let abi_params = call_args
            .into_iter()
            .map(|arg| {
                let ty = fx.bcx.func.dfg.value_type(arg);
                if !ty.is_int() {
                    // FIXME set %al to upperbound on float args once floats are supported
                    fx.tcx
                        .sess
                        .span_fatal(span, &format!("Non int ty {:?} for variadic call", ty));
                }
                AbiParam::new(ty)
            })
            .collect::<Vec<AbiParam>>();
        fx.bcx.func.dfg.signatures[sig_ref].params = abi_params;
    }

    if let Some((_, dest)) = destination {
        let ret_block = fx.get_block(dest);
        fx.bcx.ins().jump(ret_block, &[]);
    } else {
        trap_unreachable(fx, "[corruption] Diverging function returned");
    }
}

/// Emits code to drop the value in `drop_place`: a no-op for empty drop
/// glue, an indirect call through the vtable's drop entry for trait
/// objects, and a direct call to the drop-in-place instance otherwise.
pub(crate) fn codegen_drop<'tcx>(
    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
    span: Span,
    drop_place: CPlace<'tcx>,
) {
    let ty = drop_place.layout().ty;
    let drop_fn = Instance::resolve_drop_in_place(fx.tcx, ty).polymorphize(fx.tcx);

    if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
        // we don't actually need to drop anything
    } else {
        let drop_fn_ty = drop_fn.ty(fx.tcx, ParamEnv::reveal_all());
        let fn_sig = fx.tcx.normalize_erasing_late_bound_regions(
            ParamEnv::reveal_all(),
            drop_fn_ty.fn_sig(fx.tcx),
        );
        assert_eq!(fn_sig.output(), fx.tcx.mk_unit());

        match
ty.kind() { ty::Dynamic(..) => { let (ptr, vtable) = drop_place.to_ptr_maybe_unsized(); let ptr = ptr.get_addr(fx); let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable.unwrap()); let sig = clif_sig_from_fn_sig( fx.tcx, fx.triple(), fn_sig, span, true, false, // `drop_in_place` is never `#[track_caller]` ); let sig = fx.bcx.import_signature(sig); fx.bcx.ins().call_indirect(sig, drop_fn, &[ptr]); } _ => { assert!(!matches!(drop_fn.def, InstanceDef::Virtual(_, _))); let arg_value = drop_place.place_ref( fx, fx.layout_of(fx.tcx.mk_ref( &ty::RegionKind::ReErased, TypeAndMut { ty, mutbl: crate::rustc_hir::Mutability::Mut, }, )), ); let arg_value = adjust_arg_for_abi(fx, arg_value); let mut call_args: Vec<Value> = arg_value.into_iter().collect::<Vec<_>>(); if drop_fn.def.requires_caller_location(fx.tcx) { // Pass the caller location for `#[track_caller]`. let caller_location = fx.get_caller_location(span); call_args.extend(adjust_arg_for_abi(fx, caller_location).into_iter()); } let func_ref = fx.get_function_ref(drop_fn); fx.bcx.ins().call(func_ref, &call_args); } } } } Sync from rust 28d73a3ee3e2c32f2768d1cbc9b42cb63472e9e9 //! Handling of everything related to the calling convention. Also fills `fx.local_map`. #[cfg(debug_assertions)] mod comments; mod pass_mode; mod returning; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; use rustc_target::spec::abi::Abi; use cranelift_codegen::ir::{AbiParam, ArgumentPurpose}; use self::pass_mode::*; use crate::prelude::*; pub(crate) use self::returning::{can_return_to_ssa_var, codegen_return}; // Copied from https://github.com/rust-lang/rust/blob/f52c72948aa1dd718cc1f168d21c91c584c0a662/src/librustc_middle/ty/layout.rs#L2301 #[rustfmt::skip] pub(crate) fn fn_sig_for_fn_abi<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> ty::PolyFnSig<'tcx> { use rustc_middle::ty::subst::Subst; // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function. 
    let ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
    match *ty.kind() {
        ty::FnDef(..) => {
            // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
            // parameters unused if they show up in the signature, but not in the `mir::Body`
            // (i.e. due to being inside a projection that got normalized, see
            // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
            // track of a polymorphization `ParamEnv` to allow normalizing later.
            let mut sig = match *ty.kind() {
                ty::FnDef(def_id, substs) => tcx
                    .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
                    .subst(tcx, substs),
                _ => unreachable!(),
            };

            if let ty::InstanceDef::VtableShim(..) = instance.def {
                // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
                sig = sig.map_bound(|mut sig| {
                    let mut inputs_and_output = sig.inputs_and_output.to_vec();
                    inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
                    sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
                    sig
                });
            }
            sig
        }
        ty::Closure(def_id, substs) => {
            let sig = substs.as_closure().sig();

            // Prepend the closure environment as the first parameter.
            let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
            sig.map_bound(|sig| {
                tcx.mk_fn_sig(
                    std::iter::once(env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
                    sig.output(),
                    sig.c_variadic,
                    sig.unsafety,
                    sig.abi,
                )
            })
        }
        ty::Generator(_, substs, _) => {
            let sig = substs.as_generator().poly_sig();

            // Generators are resumed through `Pin<&mut Self>` and return
            // `GeneratorState<Yield, Return>`.
            let env_region = ty::ReLateBound(ty::INNERMOST, ty::BoundRegion { kind: ty::BrEnv });
            let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);

            let pin_did = tcx.require_lang_item(rustc_hir::LangItem::Pin, None);
            let pin_adt_ref = tcx.adt_def(pin_did);
            let pin_substs = tcx.intern_substs(&[env_ty.into()]);
            let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);

            sig.map_bound(|sig| {
                let state_did = tcx.require_lang_item(rustc_hir::LangItem::GeneratorState, None);
                let state_adt_ref = tcx.adt_def(state_did);
                let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
                let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);

                tcx.mk_fn_sig(
                    [env_ty, sig.resume_ty].iter(),
                    &ret_ty,
                    false,
                    rustc_hir::Unsafety::Normal,
                    rustc_target::spec::abi::Abi::Rust,
                )
            })
        }
        _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
    }
}

/// Translates a Rust `FnSig` into a Cranelift `Signature`, choosing the
/// calling convention from the ABI and lowering each input through its
/// `PassMode`.
fn clif_sig_from_fn_sig<'tcx>(
    tcx: TyCtxt<'tcx>,
    triple: &target_lexicon::Triple,
    sig: FnSig<'tcx>,
    span: Span,
    is_vtable_fn: bool,
    requires_caller_location: bool,
) -> Signature {
    let abi = match sig.abi {
        Abi::System => Abi::C,
        abi => abi,
    };
    let (call_conv, inputs, output): (CallConv, Vec<Ty<'tcx>>, Ty<'tcx>) = match abi {
        Abi::Rust => (
            CallConv::triple_default(triple),
            sig.inputs().to_vec(),
            sig.output(),
        ),
        Abi::C | Abi::Unadjusted => (
            CallConv::triple_default(triple),
            sig.inputs().to_vec(),
            sig.output(),
        ),
        Abi::SysV64 => (CallConv::SystemV, sig.inputs().to_vec(), sig.output()),
        Abi::RustCall => {
            // "rust-call": flatten the trailing tuple argument into
            // individual parameters.
            assert_eq!(sig.inputs().len(), 2);
            let extra_args = match sig.inputs().last().unwrap().kind() {
                ty::Tuple(ref tupled_arguments) => tupled_arguments,
                _ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
            };
            let mut inputs: Vec<Ty<'tcx>> = vec![sig.inputs()[0]];
            inputs.extend(extra_args.types());
            (CallConv::triple_default(triple), inputs, sig.output())
        }
        Abi::System => unreachable!(),
        Abi::RustIntrinsic => (
            CallConv::triple_default(triple),
            sig.inputs().to_vec(),
            sig.output(),
        ),
        _ => unimplemented!("unsupported abi {:?}", sig.abi),
    };

    let inputs = inputs
        .into_iter()
        .enumerate()
        .map(|(i, ty)| {
            let mut layout = tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap();
            if i == 0 && is_vtable_fn {
                // Virtual calls turn their self param into a thin pointer.
                // See https://github.com/rust-lang/rust/blob/37b6a5e5e82497caf5353d9d856e4eb5d14cbe06/src/librustc/ty/layout.rs#L2519-L2572 for more info
                layout = tcx
                    .layout_of(ParamEnv::reveal_all().and(tcx.mk_mut_ptr(tcx.mk_unit())))
                    .unwrap();
            }
            let pass_mode = get_pass_mode(tcx, layout);
            if abi != Abi::Rust && abi != Abi::RustCall && abi != Abi::RustIntrinsic {
                match pass_mode {
                    PassMode::NoPass | PassMode::ByVal(_) => {}
                    PassMode::ByRef { size: Some(size) } => {
                        // Sized by-ref args for foreign ABIs are passed as a
                        // StructArgument of the given size.
                        let purpose = ArgumentPurpose::StructArgument(u32::try_from(size.bytes()).expect("struct too big to pass on stack"));
                        return EmptySinglePair::Single(AbiParam::special(pointer_ty(tcx), purpose)).into_iter();
                    }
                    PassMode::ByValPair(_, _) | PassMode::ByRef { size: None } => {
                        tcx.sess.span_warn(
                            span,
                            &format!(
                                "Argument of type `{:?}` with pass mode `{:?}` is not yet supported \
                                for non-rust abi `{}`. Calling this function may result in a crash.",
                                layout.ty, pass_mode, abi,
                            ),
                        );
                    }
                }
            }

            pass_mode.get_param_ty(tcx).map(AbiParam::new).into_iter()
        })
        .flatten();

    // Return-value lowering mirrors the input lowering: by value, as a
    // pair, or through a prepended StructReturn pointer.
    let (mut params, returns): (Vec<_>, Vec<_>) = match get_pass_mode(
        tcx,
        tcx.layout_of(ParamEnv::reveal_all().and(output)).unwrap(),
    ) {
        PassMode::NoPass => (inputs.collect(), vec![]),
        PassMode::ByVal(ret_ty) => (inputs.collect(), vec![AbiParam::new(ret_ty)]),
        PassMode::ByValPair(ret_ty_a, ret_ty_b) => (
            inputs.collect(),
            vec![AbiParam::new(ret_ty_a), AbiParam::new(ret_ty_b)],
        ),
        PassMode::ByRef { size: Some(_) } => {
            (
                Some(pointer_ty(tcx)) // First param is place to put return val
                    .into_iter()
                    .map(|ty| AbiParam::special(ty, ArgumentPurpose::StructReturn))
                    .chain(inputs)
                    .collect(),
                vec![],
            )
        }
        PassMode::ByRef { size: None } => todo!(),
    };

    if requires_caller_location {
        // `#[track_caller]` adds a hidden trailing caller-location pointer.
        params.push(AbiParam::new(pointer_ty(tcx)));
    }

    Signature {
        params,
        returns,
        call_conv,
    }
}

/// Computes the symbol name and Cranelift signature for a monomorphic
/// instance; fatally errors on C-variadic definitions unless allowed.
pub(crate) fn get_function_name_and_sig<'tcx>(
    tcx: TyCtxt<'tcx>,
    triple: &target_lexicon::Triple,
    inst: Instance<'tcx>,
    support_vararg: bool,
) -> (String, Signature) {
    assert!(!inst.substs.needs_infer());
    let fn_sig = tcx
        .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_sig_for_fn_abi(tcx, inst));
    if fn_sig.c_variadic && !support_vararg {
        tcx.sess.span_fatal(
            tcx.def_span(inst.def_id()),
            "Variadic function definitions are not yet supported",
        );
    }
    let sig = clif_sig_from_fn_sig(
        tcx,
        triple,
        fn_sig,
        tcx.def_span(inst.def_id()),
        false,
        inst.def.requires_caller_location(tcx),
    );
    (tcx.symbol_name(inst).name.to_string(), sig)
}

/// Instance must be monomorphized
pub(crate) fn import_function<'tcx>(
    tcx: TyCtxt<'tcx>,
    module: &mut impl Module,
    inst: Instance<'tcx>,
) -> FuncId {
    let (name, sig) = get_function_name_and_sig(tcx, module.isa().triple(), inst, true);
    module
        .declare_function(&name, Linkage::Import, &sig)
        .unwrap()
}

impl<'tcx, M: Module> FunctionCx<'_, 'tcx, M> {
    /// Instance must be monomorphized
    pub(crate) fn get_function_ref(&mut self, inst: Instance<'tcx>) -> FuncRef {
        let func_id = import_function(self.tcx, &mut self.cx.module, inst);
        let func_ref = self
            .cx
            .module
            .declare_func_in_func(func_id, &mut self.bcx.func);

        #[cfg(debug_assertions)]
        self.add_comment(func_ref, format!("{:?}", inst));

        func_ref
    }

    /// Declares and calls an external library function with raw Cranelift
    /// value types; returns at most two result values.
    pub(crate) fn lib_call(
        &mut self,
        name: &str,
        input_tys: Vec<types::Type>,
        output_tys: Vec<types::Type>,
        args: &[Value],
    ) -> &[Value] {
        let sig = Signature {
            params: input_tys.iter().cloned().map(AbiParam::new).collect(),
            returns: output_tys.iter().cloned().map(AbiParam::new).collect(),
            call_conv: CallConv::triple_default(self.triple()),
        };
        let func_id = self
            .cx
            .module
            .declare_function(&name, Linkage::Import, &sig)
            .unwrap();
        let func_ref = self
            .cx
            .module
            .declare_func_in_func(func_id, &mut self.bcx.func);
        let call_inst = self.bcx.ins().call(func_ref, args);
        #[cfg(debug_assertions)]
        {
            self.add_comment(call_inst, format!("easy_call {}", name));
        }
        let results = self.bcx.inst_results(call_inst);
        assert!(results.len() <= 2, "{}", results.len());
        results
    }

    /// Typed wrapper around `lib_call` operating on `CValue`s.
    pub(crate) fn easy_call(
        &mut self,
        name: &str,
        args: &[CValue<'tcx>],
        return_ty:
Ty<'tcx>,
    ) -> CValue<'tcx> {
        let (input_tys, args): (Vec<_>, Vec<_>) = args
            .iter()
            .map(|arg| {
                (
                    self.clif_type(arg.layout().ty).unwrap(),
                    arg.load_scalar(self),
                )
            })
            .unzip();
        let return_layout = self.layout_of(return_ty);
        // A tuple return is modelled as multiple Cranelift return values.
        let return_tys = if let ty::Tuple(tup) = return_ty.kind() {
            tup.types().map(|ty| self.clif_type(ty).unwrap()).collect()
        } else {
            vec![self.clif_type(return_ty).unwrap()]
        };
        let ret_vals = self.lib_call(name, input_tys, return_tys, &args);
        match *ret_vals {
            // Zero-sized result: a dangling non-null address is sufficient.
            [] => CValue::by_ref(
                Pointer::const_addr(self, i64::from(self.pointer_type.bytes())),
                return_layout,
            ),
            [val] => CValue::by_val(val, return_layout),
            [val, extra] => CValue::by_val_pair(val, extra, return_layout),
            _ => unreachable!(),
        }
    }
}

/// Make a [`CPlace`] capable of holding value of the specified type.
fn make_local_place<'tcx>(
    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
    local: Local,
    layout: TyAndLayout<'tcx>,
    is_ssa: bool,
) -> CPlace<'tcx> {
    // SSA locals become Cranelift variables (a pair for ScalarPair layouts);
    // non-SSA locals are spilled to a stack slot.
    let place = if is_ssa {
        if let rustc_target::abi::Abi::ScalarPair(_, _) = layout.abi {
            CPlace::new_var_pair(fx, local, layout)
        } else {
            CPlace::new_var(fx, local, layout)
        }
    } else {
        CPlace::new_stack_slot(fx, layout)
    };

    #[cfg(debug_assertions)]
    self::comments::add_local_place_comments(fx, place, local);

    place
}

/// Sets up the function prologue: return place, argument locals (including
/// "rust-call" spread-tuple reconstruction), var/temp places, and the jump
/// into the MIR start block.
pub(crate) fn codegen_fn_prelude<'tcx>(
    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
    start_block: Block,
) {
    let ssa_analyzed = crate::analyze::analyze(fx);

    #[cfg(debug_assertions)]
    self::comments::add_args_header_comment(fx);

    let ret_place = self::returning::codegen_return_param(fx, &ssa_analyzed, start_block);
    assert_eq!(fx.local_map.push(ret_place), RETURN_PLACE);

    // None means pass_mode == NoPass
    enum ArgKind<'tcx> {
        Normal(Option<CValue<'tcx>>),
        Spread(Vec<Option<CValue<'tcx>>>),
    }

    let func_params = fx
        .mir
        .args_iter()
        .map(|local| {
            let arg_ty = fx.monomorphize(fx.mir.local_decls[local].ty);

            // Adapted from https://github.com/rust-lang/rust/blob/145155dc96757002c7b2e9de8489416e2fdbbd57/src/librustc_codegen_llvm/mir/mod.rs#L442-L482
            if Some(local) == fx.mir.spread_arg {
                // This argument (e.g. the last argument in the "rust-call" ABI)
                // is a tuple that was spread at the ABI level and now we have
                // to reconstruct it into a tuple local variable, from multiple
                // individual function arguments.

                let tupled_arg_tys = match arg_ty.kind() {
                    ty::Tuple(ref tys) => tys,
                    _ => bug!("spread argument isn't a tuple?! but {:?}", arg_ty),
                };

                let mut params = Vec::new();
                for (i, arg_ty) in tupled_arg_tys.types().enumerate() {
                    let param = cvalue_for_param(fx, start_block, Some(local), Some(i), arg_ty);
                    params.push(param);
                }

                (local, ArgKind::Spread(params), arg_ty)
            } else {
                let param = cvalue_for_param(fx, start_block, Some(local), None, arg_ty);
                (local, ArgKind::Normal(param), arg_ty)
            }
        })
        .collect::<Vec<(Local, ArgKind<'tcx>, Ty<'tcx>)>>();

    assert!(fx.caller_location.is_none());
    if fx.instance.def.requires_caller_location(fx.tcx) {
        // Store caller location for `#[track_caller]`.
        fx.caller_location = Some(
            cvalue_for_param(fx, start_block, None, None, fx.tcx.caller_location_ty()).unwrap(),
        );
    }

    fx.bcx.switch_to_block(start_block);
    fx.bcx.ins().nop();

    #[cfg(debug_assertions)]
    self::comments::add_locals_header_comment(fx);

    for (local, arg_kind, ty) in func_params {
        let layout = fx.layout_of(ty);

        let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;

        // While this is normally an optimization to prevent an unnecessary copy when an argument is
        // not mutated by the current function, this is necessary to support unsized arguments.
        if let ArgKind::Normal(Some(val)) = arg_kind {
            if let Some((addr, meta)) = val.try_to_ptr() {
                let local_decl = &fx.mir.local_decls[local];
                //                       v this ! is important
                let internally_mutable = !val.layout().ty.is_freeze(
                    fx.tcx.at(local_decl.source_info.span),
                    ParamEnv::reveal_all(),
                );
                if local_decl.mutability == mir::Mutability::Not && !internally_mutable {
                    // We wont mutate this argument, so it is fine to borrow the backing storage
                    // of this argument, to prevent a copy.

                    let place = if let Some(meta) = meta {
                        CPlace::for_ptr_with_extra(addr, meta, val.layout())
                    } else {
                        CPlace::for_ptr(addr, val.layout())
                    };

                    #[cfg(debug_assertions)]
                    self::comments::add_local_place_comments(fx, place, local);

                    assert_eq!(fx.local_map.push(place), local);
                    continue;
                }
            }
        }

        let place = make_local_place(fx, local, layout, is_ssa);
        assert_eq!(fx.local_map.push(place), local);

        match arg_kind {
            ArgKind::Normal(param) => {
                if let Some(param) = param {
                    place.write_cvalue(fx, param);
                }
            }
            ArgKind::Spread(params) => {
                // Write each spread component into its tuple field.
                for (i, param) in params.into_iter().enumerate() {
                    if let Some(param) = param {
                        place
                            .place_field(fx, mir::Field::new(i))
                            .write_cvalue(fx, param);
                    }
                }
            }
        }
    }

    for local in fx.mir.vars_and_temps_iter() {
        let ty = fx.monomorphize(fx.mir.local_decls[local].ty);
        let layout = fx.layout_of(ty);

        let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;

        let place = make_local_place(fx, local, layout, is_ssa);
        assert_eq!(fx.local_map.push(place), local);
    }

    fx.bcx
        .ins()
        .jump(*fx.block_map.get(START_BLOCK).unwrap(), &[]);
}

/// Lowers a MIR `Call` terminator: resolves the callee, special-cases
/// intrinsics, empty drop glue and virtual calls, ABI-adjusts the arguments,
/// then emits the call and the jump to the destination (or a trap when the
/// call diverges).
pub(crate) fn codegen_terminator_call<'tcx>(
    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
    span: Span,
    current_block: Block,
    func: &Operand<'tcx>,
    args: &[Operand<'tcx>],
    destination: Option<(Place<'tcx>, BasicBlock)>,
) {
    let fn_ty = fx.monomorphize(func.ty(fx.mir, fx.tcx));
    let fn_sig = fx
        .tcx
        .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx));

    let destination = destination.map(|(place, bb)| (codegen_place(fx, place), bb));

    // Handle special calls like intrinsics and empty drop glue.
    let instance = if let ty::FnDef(def_id, substs) = *fn_ty.kind() {
        let instance = ty::Instance::resolve(fx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
            .unwrap()
            .unwrap()
            .polymorphize(fx.tcx);

        // LLVM-style intrinsics are dispatched by their symbol name.
        if fx.tcx.symbol_name(instance).name.starts_with("llvm.") {
            crate::intrinsics::codegen_llvm_intrinsic_call(
                fx,
                &fx.tcx.symbol_name(instance).name,
                substs,
                args,
                destination,
            );
            return;
        }

        match instance.def {
            InstanceDef::Intrinsic(_) => {
                crate::intrinsics::codegen_intrinsic_call(fx, instance, args, destination, span);
                return;
            }
            InstanceDef::DropGlue(_, None) => {
                // empty drop glue - a nop.
                let (_, dest) = destination.expect("Non terminating drop_in_place_real???");
                let ret_block = fx.get_block(dest);
                fx.bcx.ins().jump(ret_block, &[]);
                return;
            }
            _ => Some(instance),
        }
    } else {
        None
    };

    // A call to a `#[cold]` function marks the calling block as cold.
    let is_cold = instance
        .map(|inst| {
            fx.tcx
                .codegen_fn_attrs(inst.def_id())
                .flags
                .contains(CodegenFnAttrFlags::COLD)
        })
        .unwrap_or(false);
    if is_cold {
        fx.cold_blocks.insert(current_block);
    }

    // Unpack arguments tuple for closures
    let args = if fn_sig.abi == Abi::RustCall {
        assert_eq!(args.len(), 2, "rust-call abi requires two arguments");
        let self_arg = codegen_operand(fx, &args[0]);
        let pack_arg = codegen_operand(fx, &args[1]);

        let tupled_arguments = match pack_arg.layout().ty.kind() {
            ty::Tuple(ref tupled_arguments) => tupled_arguments,
            _ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
        };

        let mut args = Vec::with_capacity(1 + tupled_arguments.len());
        args.push(self_arg);
        for i in 0..tupled_arguments.len() {
            args.push(pack_arg.value_field(fx, mir::Field::new(i)));
        }
        args
    } else {
        args.iter()
            .map(|arg| codegen_operand(fx, arg))
            .collect::<Vec<_>>()
    };

    //   | indirect call target
    //   |          | the first argument to be passed
    //   v          v          v virtual calls are special cased below
    let (func_ref, first_arg, is_virtual_call) = match instance {
        // Trait object call
        Some(Instance {
            def: InstanceDef::Virtual(_, idx),
            ..
        }) => {
            #[cfg(debug_assertions)]
            {
                let nop_inst = fx.bcx.ins().nop();
                fx.add_comment(
                    nop_inst,
                    format!(
                        "virtual call; self arg pass mode: {:?}",
                        get_pass_mode(fx.tcx, args[0].layout())
                    ),
                );
            }
            // Extract the data pointer and method pointer from the vtable.
            let (ptr, method) = crate::vtable::get_ptr_and_method_ref(fx, args[0], idx);
            (Some(method), Single(ptr), true)
        }

        // Normal call
        Some(_) => (
            None,
            args.get(0)
                .map(|arg| adjust_arg_for_abi(fx, *arg))
                .unwrap_or(Empty),
            false,
        ),

        // Indirect call
        None => {
            #[cfg(debug_assertions)]
            {
                let nop_inst = fx.bcx.ins().nop();
                fx.add_comment(nop_inst, "indirect call");
            }
            let func = codegen_operand(fx, func).load_scalar(fx);
            (
                Some(func),
                args.get(0)
                    .map(|arg| adjust_arg_for_abi(fx, *arg))
                    .unwrap_or(Empty),
                false,
            )
        }
    };

    let ret_place = destination.map(|(place, _)| place);
    let (call_inst, call_args) =
        self::returning::codegen_with_call_return_arg(fx, fn_sig, ret_place, |fx, return_ptr| {
            // Optional sret pointer first, then self/first arg, then the rest.
            let mut call_args: Vec<Value> = return_ptr
                .into_iter()
                .chain(first_arg.into_iter())
                .chain(
                    args.into_iter()
                        .skip(1)
                        .map(|arg| adjust_arg_for_abi(fx, arg).into_iter())
                        .flatten(),
                )
                .collect::<Vec<_>>();

            if instance
                .map(|inst| inst.def.requires_caller_location(fx.tcx))
                .unwrap_or(false)
            {
                // Pass the caller location for `#[track_caller]`.
                let caller_location = fx.get_caller_location(span);
                call_args.extend(adjust_arg_for_abi(fx, caller_location).into_iter());
            }

            let call_inst = if let Some(func_ref) = func_ref {
                let sig = clif_sig_from_fn_sig(
                    fx.tcx,
                    fx.triple(),
                    fn_sig,
                    span,
                    is_virtual_call,
                    false, // calls through function pointers never pass the caller location
                );
                let sig = fx.bcx.import_signature(sig);
                fx.bcx.ins().call_indirect(sig, func_ref, &call_args)
            } else {
                let func_ref =
                    fx.get_function_ref(instance.expect("non-indirect call on non-FnDef type"));
                fx.bcx.ins().call(func_ref, &call_args)
            };

            (call_inst, call_args)
        });

    // FIXME find a cleaner way to support varargs
    if fn_sig.c_variadic {
        if fn_sig.abi != Abi::C {
            fx.tcx.sess.span_fatal(
                span,
                &format!("Variadic call for non-C abi {:?}", fn_sig.abi),
            );
        }
        // Patch the call's signature so its params match the argument types
        // actually passed at this variadic call site.
        let sig_ref = fx.bcx.func.dfg.call_signature(call_inst).unwrap();
        let abi_params = call_args
            .into_iter()
            .map(|arg| {
                let ty = fx.bcx.func.dfg.value_type(arg);
                if !ty.is_int() {
                    // FIXME set %al to upperbound on float args once floats are supported
                    fx.tcx
                        .sess
                        .span_fatal(span, &format!("Non int ty {:?} for variadic call", ty));
                }
                AbiParam::new(ty)
            })
            .collect::<Vec<AbiParam>>();
        fx.bcx.func.dfg.signatures[sig_ref].params = abi_params;
    }

    if let Some((_, dest)) = destination {
        let ret_block = fx.get_block(dest);
        fx.bcx.ins().jump(ret_block, &[]);
    } else {
        trap_unreachable(fx, "[corruption] Diverging function returned");
    }
}

/// Emits code to drop the value in `drop_place`: no-op for empty drop glue,
/// an indirect call through the vtable for trait objects, and a direct call
/// to the drop-in-place instance otherwise.
pub(crate) fn codegen_drop<'tcx>(
    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
    span: Span,
    drop_place: CPlace<'tcx>,
) {
    let ty = drop_place.layout().ty;
    let drop_fn = Instance::resolve_drop_in_place(fx.tcx, ty).polymorphize(fx.tcx);

    if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
        // we don't actually need to drop anything
    } else {
        let drop_fn_ty = drop_fn.ty(fx.tcx, ParamEnv::reveal_all());
        let fn_sig = fx.tcx.normalize_erasing_late_bound_regions(
            ParamEnv::reveal_all(),
            drop_fn_ty.fn_sig(fx.tcx),
        );
        assert_eq!(fn_sig.output(), fx.tcx.mk_unit());

        match ty.kind() {
            ty::Dynamic(..) => {
                // Trait object: fetch the drop fn pointer from the vtable and
                // call it indirectly on the data pointer.
                let (ptr, vtable) = drop_place.to_ptr_maybe_unsized();
                let ptr = ptr.get_addr(fx);
                let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable.unwrap());

                let sig = clif_sig_from_fn_sig(
                    fx.tcx,
                    fx.triple(),
                    fn_sig,
                    span,
                    true,
                    false, // `drop_in_place` is never `#[track_caller]`
                );
                let sig = fx.bcx.import_signature(sig);
                fx.bcx.ins().call_indirect(sig, drop_fn, &[ptr]);
            }
            _ => {
                assert!(!matches!(drop_fn.def, InstanceDef::Virtual(_, _)));

                // `drop_in_place` takes `&mut T`; pass a reference to the place.
                let arg_value = drop_place.place_ref(
                    fx,
                    fx.layout_of(fx.tcx.mk_ref(
                        &ty::RegionKind::ReErased,
                        TypeAndMut {
                            ty,
                            mutbl: crate::rustc_hir::Mutability::Mut,
                        },
                    )),
                );
                let arg_value = adjust_arg_for_abi(fx, arg_value);

                let mut call_args: Vec<Value> = arg_value.into_iter().collect::<Vec<_>>();

                if drop_fn.def.requires_caller_location(fx.tcx) {
                    // Pass the caller location for `#[track_caller]`.
                    let caller_location = fx.get_caller_location(span);
                    call_args.extend(adjust_arg_for_abi(fx, caller_location).into_iter());
                }

                let func_ref = fx.get_function_ref(drop_fn);
                fx.bcx.ins().call(func_ref, &call_args);
            }
        }
    }
}
use ndarray::*; use super::traits::*; pub struct TimeSeries<'a, TEO, S, D> where S: DataMut, D: Dimension, TEO: TimeEvolutionBase<S, D> + 'a { state: ArrayBase<S, D>, teo: &'a TEO, } pub fn time_series<'a, TEO, S, D>(x0: ArrayBase<S, D>, teo: &'a TEO) -> TimeSeries<'a, TEO, S, D> where S: DataMut, D: Dimension, TEO: TimeEvolutionBase<S, D> { TimeSeries { state: x0, teo: teo, } } impl<'a, TEO, S, D> TimeSeries<'a, TEO, S, D> where S: DataMut + DataClone, D: Dimension, TEO: TimeEvolutionBase<S, D> { pub fn iterate(&mut self) { self.teo.iterate(&mut self.state); } } impl<'a, TEO, S, D> Iterator for TimeSeries<'a, TEO, S, D> where S: DataMut + DataClone, D: Dimension, TEO: TimeEvolutionBase<S, D> { type Item = ArrayBase<S, D>; fn next(&mut self) -> Option<Self::Item> { self.iterate(); Some(self.state.clone()) } } NStep adaptor use ndarray::*; use ndarray_linalg::*; use super::traits::*; pub struct TimeSeries<'a, TEO, S, D> where S: DataMut, D: Dimension, TEO: TimeEvolutionBase<S, D> + 'a { state: ArrayBase<S, D>, teo: &'a TEO, } pub fn time_series<'a, TEO, S, D>(x0: ArrayBase<S, D>, teo: &'a TEO) -> TimeSeries<'a, TEO, S, D> where S: DataMut, D: Dimension, TEO: TimeEvolutionBase<S, D> { TimeSeries { state: x0, teo: teo, } } impl<'a, TEO, S, D> TimeSeries<'a, TEO, S, D> where S: DataMut + DataClone, D: Dimension, TEO: TimeEvolutionBase<S, D> { pub fn iterate(&mut self) { self.teo.iterate(&mut self.state); } } impl<'a, TEO, S, D> Iterator for TimeSeries<'a, TEO, S, D> where S: DataMut + DataClone, D: Dimension, TEO: TimeEvolutionBase<S, D> { type Item = ArrayBase<S, D>; fn next(&mut self) -> Option<Self::Item> { self.iterate(); Some(self.state.clone()) } } #[derive(new)] pub struct NStep<TEO> { teo: TEO, n: usize, } impl<TEO, D> ModelSize<D> for NStep<TEO> where TEO: ModelSize<D>, D: Dimension { fn model_size(&self) -> D::Pattern { self.teo.model_size() } } impl<TEO> TimeStep for NStep<TEO> where TEO: TimeStep { type Time = TEO::Time; fn get_dt(&self) -> Self::Time { 
self.teo.get_dt() * into_scalar(self.n as f64) } fn set_dt(&mut self, dt: Self::Time) { self.teo.set_dt(dt / into_scalar(self.n as f64)); } } impl<TEO, S, D> TimeEvolutionBase<S, D> for NStep<TEO> where TEO: TimeEvolutionBase<S, D>, S: DataMut, D: Dimension { type Scalar = TEO::Scalar; type Time = TEO::Time; fn iterate<'a>(&self, x: &'a mut ArrayBase<S, D>) -> &'a mut ArrayBase<S, D> { for _ in 0..self.n { self.teo.iterate(x); } x } }
#[cfg(test)] mod test; use std::fmt; #[derive(Clone, Debug, PartialEq, Eq)] pub enum Type { Array, Bool, Int, Str, Void, } impl Type { pub fn as_string_with_article(&self) -> &str { // Returns the name of the type with the correct English indefinite article prepended. match *self { Type::Array => "an array", Type::Bool => "a boolean", Type::Int => "an int", Type::Str => "a string", Type::Void => "nothing", } } } impl fmt::Display for Type { fn fmt(&self, mut fmt: &mut fmt::Formatter) -> fmt::Result { match *self { Type::Array => write!(fmt, "array"), Type::Bool => write!(fmt, "boolean"), Type::Int => write!(fmt, "int"), Type::Str => write!(fmt, "string"), Type::Void => write!(fmt, "void"), } } } #[derive(Clone, Debug, Eq, PartialEq)] pub enum Value { Array(Vec<Value>), Bool(bool), Int(i64), Str(String), } impl Value { // Returns the value's type with the correct English indefinite article prepended. pub fn type_string_with_article(&self) -> &str { match *self { Value::Array(_) => "an array", Value::Bool(_) => "a boolean", Value::Int(_) => "an int", Value::Str(_) => "a string", } } // Checks whether the value is of a certain type. 
pub fn is_a(&self, t: &Type) -> bool { match (self, t) { (&Value::Array(_), &Type::Array) | (&Value::Bool(_), &Type::Bool) | (&Value::Int(_), &Type::Int) | (&Value::Str(_), &Type::Str) => true, _ => false } } } impl fmt::Display for Value { fn fmt(&self, mut fmt: &mut fmt::Formatter) -> fmt::Result { match *self { Value::Array(ref vec) => { try!(write!(fmt, "[")); for (i, val) in vec.iter().enumerate() { if i != 0 { try!(write!(fmt, ", ")); } try!(write!(fmt, "{}", val)); } write!(fmt, "]") } Value::Bool(b) => write!(fmt, "{}", b), Value::Int(i) => write!(fmt, "{}", i), Value::Str(ref s) => write!(fmt, "{}", s), } } } #[derive(Clone, Debug)] pub enum BinOp { // Boolean And, Or, // Comparisons Equal, NotEqual, GreaterOrEqual, GreaterThan, LessOrEqual, LessThan, // Arithmetic Plus, Minus, Times, Divide, Modulus, } impl BinOp { // Gets the precedence of an operator. fn precedence(&self) -> Precedence { match *self { BinOp::And => Precedence::And, BinOp::Or => Precedence::Or, BinOp::Equal | BinOp::NotEqual => Precedence::Equality, BinOp::GreaterOrEqual | BinOp::GreaterThan | BinOp::LessOrEqual | BinOp::LessThan => Precedence::Inequality, BinOp::Plus | BinOp::Minus => Precedence::Addition, BinOp::Times | BinOp::Divide | BinOp::Modulus => Precedence::Multiplication, } } } #[derive(PartialEq, Eq, PartialOrd, Ord)] enum Precedence { // Ordered correctly for derivation to be sound; each variant has higher precedence than the previous Or, And, Equality, Inequality, Addition, Multiplication, Constant, } impl fmt::Display for BinOp { fn fmt(&self, mut fmt: &mut fmt::Formatter) -> fmt::Result { match *self { BinOp::And => write!(fmt, "&&"), BinOp::Or => write!(fmt, "||"), BinOp::Equal => write!(fmt, "=="), BinOp::NotEqual => write!(fmt, "!="), BinOp::GreaterOrEqual => write!(fmt, ">="), BinOp::GreaterThan => write!(fmt, ">"), BinOp::LessOrEqual => write!(fmt, "<="), BinOp::LessThan => write!(fmt, "<"), BinOp::Plus => write!(fmt, "+"), BinOp::Minus => write!(fmt, "-"), 
BinOp::Times => write!(fmt, "*"), BinOp::Divide => write!(fmt, "/"), BinOp::Modulus => write!(fmt, "%"), } } } #[derive(Clone, Debug)] pub enum Expr { Array(Vec<Expr>), ArrayElement(String, Box<Expr>, Vec<Expr>), BinExp(Box<Expr>, BinOp, Box<Expr>), Call(String, Vec<Expr>), Not(Box<Expr>), Value(Value), Var(String), } impl Expr { // Returns the precedence level of the expression. fn precedence(&self) -> Precedence { match *self { Expr::BinExp(_, ref o, _) => o.precedence(), _ => Precedence::Constant, } } } impl fmt::Display for Expr { fn fmt(&self, mut fmt: &mut fmt::Formatter) -> fmt::Result { match *self { Expr::Array(ref vec) => { try!(write!(fmt, "[")); for (i, val) in vec.iter().enumerate() { if i != 0 { try!(write!(fmt, ", ")); } try!(write!(fmt, "{}", val)); } write!(fmt, "]") } Expr::ArrayElement(ref var, ref index, ref indexes) => { try!(write!(fmt, "{}[{}]", var, index)); for i in indexes.iter() { try!(write!(fmt, "[{}]", i)); } Ok(()) } Expr::BinExp(ref exp1, ref op, ref exp2) => { let op_precendence = op.precedence(); // Wrap the left-hand side in parentheses if its precedence is lower than the operator if exp1.precedence() < op_precendence { try!(write!(fmt, "({})", exp1)); } else { try!(write!(fmt, "{}", exp1)); } try!(write!(fmt, " {} ", op)); // Wrap the left-hand side in parentheses if its precedence is not greater than the operator if exp2.precedence() <= op_precendence { write!(fmt, "({})", exp2) } else { write!(fmt, "{}", exp2) } } Expr::Call(ref func, ref args) => { try!(write!(fmt, "{}(", func)); // Write the arguments, separated by commas for (i, arg) in args.iter().enumerate() { if i != 0 { try!(write!(fmt, ", ")); } try!(write!(fmt, "{}", arg)); } write!(fmt, ")") } Expr::Not(ref exp) => write!(fmt, "!{}", exp), Expr::Value(ref val) => write!(fmt, "{}", val), Expr::Var(ref var) => write!(fmt, "{}", var), } } } #[derive(Clone, Debug)] pub enum Statement { ArrayElemAssign(String, Expr, Vec<Expr>, Expr), VarAssign(String, Expr), Defun(Type, 
String, Vec<String>, Vec<Statement>), If(Expr, Vec<Statement>, Vec<Statement>), Let(String, Expr), Print(Expr), Return(Expr), VoidCall(String, Vec<Expr>), While(Expr, Vec<Statement>), } impl Statement { // Formats the expression with indentation before it. fn fmt_with_indent(&self, mut fmt: &mut fmt::Formatter, indent_level: u32) -> fmt::Result { // Creates a new string that is as many spaces as `indent level * 4`. let indentation : String = (0..indent_level * 4).map(|_| " ").collect(); match *self { Statement::ArrayElemAssign(ref var, ref index, ref indexes, ref exp) => { try!(write!(fmt, "{}{}[{}]", indentation, var, index)); for i in indexes.iter() { try!(write!(fmt, "[{}]", i)); } write!(fmt, " = {};", exp) } Statement::Defun(ref return_type, ref name, ref params, ref body) => { try!(write!(fmt, "{}{} {}(", indentation, return_type, name)); // Write the parameters, separated by commas for (i, param) in params.iter().enumerate() { if i != 0 { try!(write!(fmt, ", ")); } try!(write!(fmt, "{}", param)); } try!(writeln!(fmt, ") {{")); // Write the function body statements with one more level of indentation for stmt in body.iter() { try!(stmt.fmt_with_indent(fmt, indent_level + 1)); } writeln!(fmt, "{}}}", indentation) } Statement::If(ref clause, ref true_block, ref false_block) => { try!(writeln!(fmt, "{}if ({}) {{", indentation, clause)); // Write the block statements with one more level of indentation for stmt in true_block.iter() { try!(stmt.fmt_with_indent(fmt, indent_level + 1)); } // Don't write the "else" clause/block unless there is something in the block. 
if false_block.is_empty() { return Ok(()); } try!(writeln!(fmt, "{}}} else {{", indentation)); // Write the block statements with one more level of indentation for stmt in false_block.iter() { try!(stmt.fmt_with_indent(fmt, indent_level + 1)); } writeln!(fmt, "{}}}", indentation) } Statement::Let(ref var, ref exp) => writeln!(fmt, "{}let {} = {};", indentation, var, exp), Statement::Print(ref exp) => writeln!(fmt, "{}print {};", indentation, exp), Statement::Return(ref exp) => writeln!(fmt, "{}return {};", indentation, exp), Statement::VarAssign(ref var, ref exp) => writeln!(fmt, "{}{} = {};", indentation, var, exp), Statement::VoidCall(ref name, ref args) => { try!(write!(fmt, "{}{}(", indentation, name)); // Write the arguments, separated by commas for (i, arg) in args.iter().enumerate() { if i != 0 { try!(write!(fmt, ", ")); } try!(write!(fmt, "{}", arg)); } write!(fmt, ");") } Statement::While(ref clause, ref block) => { try!(writeln!(fmt, "{}while ({}) {{", indentation, clause)); // Write the block statements with one more level of indentation for stmt in block.iter() { try!(stmt.fmt_with_indent(fmt, indent_level + 1)); } writeln!(fmt, "{}}}", indentation) } } } } impl fmt::Display for Statement { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { self.fmt_with_indent(fmt, 0) } } fix mistake #[cfg(test)] mod test; use std::fmt; #[derive(Clone, Debug, PartialEq, Eq)] pub enum Type { Array, Bool, Int, Str, Void, } impl Type { pub fn as_string_with_article(&self) -> &str { // Returns the name of the type with the correct English indefinite article prepended. 
match *self { Type::Array => "an array", Type::Bool => "a boolean", Type::Int => "an int", Type::Str => "a string", Type::Void => "nothing", } } } impl fmt::Display for Type { fn fmt(&self, mut fmt: &mut fmt::Formatter) -> fmt::Result { match *self { Type::Array => write!(fmt, "array"), Type::Bool => write!(fmt, "boolean"), Type::Int => write!(fmt, "int"), Type::Str => write!(fmt, "string"), Type::Void => write!(fmt, "void"), } } } #[derive(Clone, Debug, Eq, PartialEq)] pub enum Value { Array(Vec<Value>), Bool(bool), Int(i64), Str(String), } impl Value { // Returns the value's type with the correct English indefinite article prepended. pub fn type_string_with_article(&self) -> &str { match *self { Value::Array(_) => "an array", Value::Bool(_) => "a boolean", Value::Int(_) => "an int", Value::Str(_) => "a string", } } // Checks whether the value is of a certain type. pub fn is_a(&self, t: &Type) -> bool { match (self, t) { (&Value::Array(_), &Type::Array) | (&Value::Bool(_), &Type::Bool) | (&Value::Int(_), &Type::Int) | (&Value::Str(_), &Type::Str) => true, _ => false } } } impl fmt::Display for Value { fn fmt(&self, mut fmt: &mut fmt::Formatter) -> fmt::Result { match *self { Value::Array(ref vec) => { try!(write!(fmt, "[")); for (i, val) in vec.iter().enumerate() { if i != 0 { try!(write!(fmt, ", ")); } try!(write!(fmt, "{}", val)); } write!(fmt, "]") } Value::Bool(b) => write!(fmt, "{}", b), Value::Int(i) => write!(fmt, "{}", i), Value::Str(ref s) => write!(fmt, "{}", s), } } } #[derive(Clone, Debug)] pub enum BinOp { // Boolean And, Or, // Comparisons Equal, NotEqual, GreaterOrEqual, GreaterThan, LessOrEqual, LessThan, // Arithmetic Plus, Minus, Times, Divide, Modulus, } impl BinOp { // Gets the precedence of an operator. 
fn precedence(&self) -> Precedence { match *self { BinOp::And => Precedence::And, BinOp::Or => Precedence::Or, BinOp::Equal | BinOp::NotEqual => Precedence::Equality, BinOp::GreaterOrEqual | BinOp::GreaterThan | BinOp::LessOrEqual | BinOp::LessThan => Precedence::Inequality, BinOp::Plus | BinOp::Minus => Precedence::Addition, BinOp::Times | BinOp::Divide | BinOp::Modulus => Precedence::Multiplication, } } } #[derive(PartialEq, Eq, PartialOrd, Ord)] enum Precedence { // Ordered correctly for derivation to be sound; each variant has higher precedence than the previous Or, And, Equality, Inequality, Addition, Multiplication, Constant, } impl fmt::Display for BinOp { fn fmt(&self, mut fmt: &mut fmt::Formatter) -> fmt::Result { match *self { BinOp::And => write!(fmt, "&&"), BinOp::Or => write!(fmt, "||"), BinOp::Equal => write!(fmt, "=="), BinOp::NotEqual => write!(fmt, "!="), BinOp::GreaterOrEqual => write!(fmt, ">="), BinOp::GreaterThan => write!(fmt, ">"), BinOp::LessOrEqual => write!(fmt, "<="), BinOp::LessThan => write!(fmt, "<"), BinOp::Plus => write!(fmt, "+"), BinOp::Minus => write!(fmt, "-"), BinOp::Times => write!(fmt, "*"), BinOp::Divide => write!(fmt, "/"), BinOp::Modulus => write!(fmt, "%"), } } } #[derive(Clone, Debug)] pub enum Expr { Array(Vec<Expr>), ArrayElement(String, Box<Expr>, Vec<Expr>), BinExp(Box<Expr>, BinOp, Box<Expr>), Call(String, Vec<Expr>), Not(Box<Expr>), Value(Value), Var(String), } impl Expr { // Returns the precedence level of the expression. 
fn precedence(&self) -> Precedence { match *self { Expr::BinExp(_, ref o, _) => o.precedence(), _ => Precedence::Constant, } } } impl fmt::Display for Expr { fn fmt(&self, mut fmt: &mut fmt::Formatter) -> fmt::Result { match *self { Expr::Array(ref vec) => { try!(write!(fmt, "[")); for (i, val) in vec.iter().enumerate() { if i != 0 { try!(write!(fmt, ", ")); } try!(write!(fmt, "{}", val)); } write!(fmt, "]") } Expr::ArrayElement(ref var, ref index, ref indexes) => { try!(write!(fmt, "{}[{}]", var, index)); for i in indexes.iter() { try!(write!(fmt, "[{}]", i)); } Ok(()) } Expr::BinExp(ref exp1, ref op, ref exp2) => { let op_precendence = op.precedence(); // Wrap the left-hand side in parentheses if its precedence is lower than the operator if exp1.precedence() < op_precendence { try!(write!(fmt, "({})", exp1)); } else { try!(write!(fmt, "{}", exp1)); } try!(write!(fmt, " {} ", op)); // Wrap the left-hand side in parentheses if its precedence is not greater than the operator if exp2.precedence() <= op_precendence { write!(fmt, "({})", exp2) } else { write!(fmt, "{}", exp2) } } Expr::Call(ref func, ref args) => { try!(write!(fmt, "{}(", func)); // Write the arguments, separated by commas for (i, arg) in args.iter().enumerate() { if i != 0 { try!(write!(fmt, ", ")); } try!(write!(fmt, "{}", arg)); } write!(fmt, ")") } Expr::Not(ref exp) => write!(fmt, "!{}", exp), Expr::Value(ref val) => write!(fmt, "{}", val), Expr::Var(ref var) => write!(fmt, "{}", var), } } } #[derive(Clone, Debug)] pub enum Statement { ArrayElemAssign(String, Expr, Vec<Expr>, Expr), VarAssign(String, Expr), Defun(Type, String, Vec<String>, Vec<Statement>), If(Expr, Vec<Statement>, Vec<Statement>), Let(String, Expr), Print(Expr), Return(Expr), VoidCall(String, Vec<Expr>), While(Expr, Vec<Statement>), } impl Statement { // Formats the expression with indentation before it. 
fn fmt_with_indent(&self, mut fmt: &mut fmt::Formatter, indent_level: u32) -> fmt::Result { // Creates a new string that is as many spaces as `indent level * 4`. let indentation : String = (0..indent_level * 4).map(|_| " ").collect(); match *self { Statement::ArrayElemAssign(ref var, ref index, ref indexes, ref exp) => { try!(write!(fmt, "{}{}[{}]", indentation, var, index)); for i in indexes.iter() { try!(write!(fmt, "[{}]", i)); } writeln!(fmt, " = {};", exp) } Statement::Defun(ref return_type, ref name, ref params, ref body) => { try!(write!(fmt, "{}{} {}(", indentation, return_type, name)); // Write the parameters, separated by commas for (i, param) in params.iter().enumerate() { if i != 0 { try!(write!(fmt, ", ")); } try!(write!(fmt, "{}", param)); } try!(writeln!(fmt, ") {{")); // Write the function body statements with one more level of indentation for stmt in body.iter() { try!(stmt.fmt_with_indent(fmt, indent_level + 1)); } writeln!(fmt, "{}}}", indentation) } Statement::If(ref clause, ref true_block, ref false_block) => { try!(writeln!(fmt, "{}if ({}) {{", indentation, clause)); // Write the block statements with one more level of indentation for stmt in true_block.iter() { try!(stmt.fmt_with_indent(fmt, indent_level + 1)); } // Don't write the "else" clause/block unless there is something in the block. 
if false_block.is_empty() { return Ok(()); } try!(writeln!(fmt, "{}}} else {{", indentation)); // Write the block statements with one more level of indentation for stmt in false_block.iter() { try!(stmt.fmt_with_indent(fmt, indent_level + 1)); } writeln!(fmt, "{}}}", indentation) } Statement::Let(ref var, ref exp) => writeln!(fmt, "{}let {} = {};", indentation, var, exp), Statement::Print(ref exp) => writeln!(fmt, "{}print {};", indentation, exp), Statement::Return(ref exp) => writeln!(fmt, "{}return {};", indentation, exp), Statement::VarAssign(ref var, ref exp) => writeln!(fmt, "{}{} = {};", indentation, var, exp), Statement::VoidCall(ref name, ref args) => { try!(write!(fmt, "{}{}(", indentation, name)); // Write the arguments, separated by commas for (i, arg) in args.iter().enumerate() { if i != 0 { try!(write!(fmt, ", ")); } try!(write!(fmt, "{}", arg)); } write!(fmt, ");") } Statement::While(ref clause, ref block) => { try!(writeln!(fmt, "{}while ({}) {{", indentation, clause)); // Write the block statements with one more level of indentation for stmt in block.iter() { try!(stmt.fmt_with_indent(fmt, indent_level + 1)); } writeln!(fmt, "{}}}", indentation) } } } } impl fmt::Display for Statement { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { self.fmt_with_indent(fmt, 0) } }
// Copyright 2014 Arjan Topolovec
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// NOTE(review): this crate targets a pre-1.0 Rust toolchain (`uint`,
// `#[deriving]`, `|…| -> …` closure types, `macro_rules!(…)` invocation
// form); it will not build on modern rustc and is documented as-is.

#![crate_name = "bencode"]
#![license = "MIT/ASL2"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![feature(macro_rules)]

/*!
Bencode parsing and serialization

# Encoding

## Using `Encodable`

```rust
extern crate serialize;
extern crate bencode;

use serialize::Encodable;

use bencode::Encoder;

#[deriving(Encodable)]
struct MyStruct {
    string: String,
    id: uint,
}

fn main() {
    let s = MyStruct { string: "Hello bencode".to_string(), id: 1 };
    let result: Vec<u8> = Encoder::buffer_encode(&s).unwrap();
}
```

## Using `ToBencode`

```rust
extern crate collections;
extern crate bencode;

use collections::TreeMap;

use bencode::{Bencode, ToBencode};
use bencode::util::ByteString;

struct MyStruct {
    a: int,
    b: String,
    c: Vec<u8>,
}

impl ToBencode for MyStruct {
    fn to_bencode(&self) -> bencode::Bencode {
        let mut m = TreeMap::new();
        m.insert(ByteString::from_str("a"), self.a.to_bencode());
        m.insert(ByteString::from_str("b"), self.b.to_bencode());
        m.insert(ByteString::from_str("c"), Bencode::ByteString(self.c.as_slice().to_vec()));
        Bencode::Dict(m)
    }
}

fn main() {
    let s = MyStruct{ a: 5, b: "foo".to_string(), c: vec![1, 2, 3, 4] };
    let bencode: bencode::Bencode = s.to_bencode();
    let result: Vec<u8> = bencode.to_bytes().unwrap();
}
```

# Decoding

## Using `Decodable`

```rust
extern crate serialize;
extern crate bencode;

use serialize::{Encodable, Decodable};

use bencode::{Encoder, Decoder};

#[deriving(Encodable, Decodable, PartialEq)]
struct MyStruct {
    a: int,
    b: String,
    c: Vec<u8>,
}

fn main() {
    let s = MyStruct{ a: 5, b: "foo".to_string(), c: vec![1, 2, 3, 4] };
    let enc: Vec<u8> = Encoder::buffer_encode(&s).unwrap();

    let bencode: bencode::Bencode = bencode::from_vec(enc).unwrap();
    let mut decoder = Decoder::new(&bencode);
    let result: MyStruct = Decodable::decode(&mut decoder).unwrap();
    assert!(s == result)
}
```

## Using `FromBencode`

```rust
extern crate collections;
extern crate bencode;

use collections::TreeMap;

use bencode::{FromBencode, ToBencode, Bencode};
use bencode::util::ByteString;

#[deriving(PartialEq)]
struct MyStruct {
    a: int
}

impl ToBencode for MyStruct {
    fn to_bencode(&self) -> bencode::Bencode {
        let mut m = TreeMap::new();
        m.insert(ByteString::from_str("a"), self.a.to_bencode());
        Bencode::Dict(m)
    }
}

impl FromBencode for MyStruct {
    fn from_bencode(bencode: &bencode::Bencode) -> Option<MyStruct> {
        match bencode {
            &Bencode::Dict(ref m) => {
                match m.get(&ByteString::from_str("a")) {
                    Some(a) => FromBencode::from_bencode(a).map(|a| {
                        MyStruct{ a: a }
                    }),
                    _ => None
                }
            }
            _ => None
        }
    }
}

fn main() {
    let s = MyStruct{ a: 5 };
    let enc: Vec<u8> = s.to_bencode().to_bytes().unwrap();

    let bencode: bencode::Bencode = bencode::from_vec(enc).unwrap();
    let result: MyStruct = FromBencode::from_bencode(&bencode).unwrap();
    assert!(s == result)
}
```

## Using Streaming Parser

```rust
extern crate serialize;
extern crate bencode;

use bencode::streaming;
use bencode::streaming::StreamingParser;
use serialize::Encodable;

use bencode::Encoder;

#[deriving(Encodable, Decodable, PartialEq)]
struct MyStruct {
    a: int,
    b: String,
    c: Vec<u8>,
}

fn main() {
    let s = MyStruct{ a: 5, b: "foo".to_string(), c: vec![2, 2, 3, 4] };
    let enc: Vec<u8> = Encoder::buffer_encode(&s).unwrap();

    let mut streaming = StreamingParser::new(enc.into_iter());
    for event in streaming {
        match event {
            streaming::DictStart => println!("dict start"),
            streaming::DictEnd => println!("dict end"),
            streaming::NumberValue(n) => println!("number = {}", n),
            // ...
            _ => println!("Unhandled event: {}", event)
        }
    }
}
```
*/

extern crate serialize;

use std::io;
use std::io::{IoResult, IoError};
use std::fmt;
use std::str;
use std::str::raw;
use std::vec::Vec;
use std::num::FromStrRadix;

use serialize::{Encodable};

use std::collections::TreeMap;
use std::collections::HashMap;

use streaming::{StreamingParser, Error};
use streaming::{BencodeEvent, NumberValue, ByteStringValue, ListStart, ListEnd,
                DictStart, DictKey, DictEnd, ParseError};

pub mod streaming;
pub mod util;

/// A parsed bencode value.
#[deriving(PartialEq, Clone)]
pub enum Bencode {
    Empty,
    Number(i64),
    ByteString(Vec<u8>),
    List(ListVec),
    Dict(DictMap),
}

impl fmt::Show for Bencode {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        match self {
            &Bencode::Empty => { Ok(()) }
            &Bencode::Number(v) => write!(fmt, "{}", v),
            &Bencode::ByteString(ref v) => write!(fmt, "s{}", v),
            &Bencode::List(ref v) => write!(fmt, "{}", v),
            &Bencode::Dict(ref v) => {
                try!(write!(fmt, "{{"));
                let mut first = true;
                for (key, value) in v.iter() {
                    if first {
                        first = false;
                    } else {
                        try!(write!(fmt, ", "));
                    }
                    try!(write!(fmt, "{}: {}", *key, *value));
                }
                write!(fmt, "}}")
            }
        }
    }
}

pub type ListVec = Vec<Bencode>;
pub type DictMap = TreeMap<util::ByteString, Bencode>;

impl Bencode {
    /// Serializes this value in bencode format into `writer`.
    pub fn to_writer(&self, writer: &mut io::Writer) -> io::IoResult<()> {
        let mut encoder = Encoder::new(writer);
        self.encode(&mut encoder)
    }

    /// Serializes this value in bencode format into a fresh byte vector.
    pub fn to_bytes(&self) -> io::IoResult<Vec<u8>> {
        let mut writer = io::MemWriter::new();
        match self.to_writer(&mut writer) {
            Ok(_) => Ok(writer.unwrap()),
            Err(err) => Err(err)
        }
    }
}

impl<E, S: serialize::Encoder<E>> Encodable<S, E> for Bencode {
    fn encode(&self, e: &mut S) -> Result<(), E> {
        match self {
            &Bencode::Empty => Ok(()),
            &Bencode::Number(v) => e.emit_i64(v),
            // NOTE(review): emits raw bytes as a str without UTF-8 validation
            // (unsafe `raw::from_utf8`) — relies on the encoder treating the
            // result as opaque bytes.
            &Bencode::ByteString(ref v) => e.emit_str(unsafe { raw::from_utf8(v.as_slice()) }),
            &Bencode::List(ref v) => v.encode(e),
            &Bencode::Dict(ref v) => v.encode(e)
        }
    }
}

/// Conversion into a `Bencode` value.
pub trait ToBencode {
    fn to_bencode(&self) -> Bencode;
}

/// Fallible conversion out of a `Bencode` value.
pub trait FromBencode {
    fn from_bencode(&Bencode) -> Option<Self>;
}

impl ToBencode for () {
    fn to_bencode(&self) -> Bencode {
        Bencode::ByteString(Vec::new())
    }
}

impl FromBencode for () {
    fn from_bencode(bencode: &Bencode) -> Option<()> {
        match bencode {
            &Bencode::ByteString(ref v) => {
                if v.len() == 0 {
                    Some(())
                } else {
                    None
                }
            }
            _ => None
        }
    }
}

impl<T: ToBencode> ToBencode for Option<T> {
    // `None` is encoded as the literal byte string "nil".
    fn to_bencode(&self) -> Bencode {
        match self {
            &Some(ref v) => v.to_bencode(),
            &None => Bencode::ByteString(b"nil".to_vec())
        }
    }
}

impl<T: FromBencode> FromBencode for Option<T> {
    fn from_bencode(bencode: &Bencode) -> Option<Option<T>> {
        match bencode {
            &Bencode::ByteString(ref v) => {
                if v.as_slice() == b"nil" {
                    return Some(None)
                }
            }
            _ => ()
        }
        FromBencode::from_bencode(bencode).map(|v| Some(v))
    }
}

// Generate ToBencode/FromBencode impls for the integer primitives; all
// integers round-trip through i64.
macro_rules! derive_num_to_bencode(($t:ty) => (
    impl ToBencode for $t {
        fn to_bencode(&self) -> Bencode {
            Bencode::Number(*self as i64)
        }
    }
))

macro_rules! derive_num_from_bencode(($t:ty) => (
    impl FromBencode for $t {
        fn from_bencode(bencode: &Bencode) -> Option<$t> {
            match bencode {
                &Bencode::Number(v) => Some(v as $t),
                _ => None
            }
        }
    }
))

derive_num_to_bencode!(int)
derive_num_from_bencode!(int)

derive_num_to_bencode!(i8)
derive_num_from_bencode!(i8)

derive_num_to_bencode!(i16)
derive_num_from_bencode!(i16)

derive_num_to_bencode!(i32)
derive_num_from_bencode!(i32)

derive_num_to_bencode!(i64)
derive_num_from_bencode!(i64)

derive_num_to_bencode!(uint)
derive_num_from_bencode!(uint)

derive_num_to_bencode!(u8)
derive_num_from_bencode!(u8)

derive_num_to_bencode!(u16)
derive_num_from_bencode!(u16)

derive_num_to_bencode!(u32)
derive_num_from_bencode!(u32)

derive_num_to_bencode!(u64)
derive_num_from_bencode!(u64)

// Floats are encoded as hexadecimal float strings so the round-trip is exact.
impl ToBencode for f32 {
    fn to_bencode(&self) -> Bencode {
        Bencode::ByteString(std::f32::to_str_hex(*self).as_bytes().to_vec())
    }
}

impl FromBencode for f32 {
    fn from_bencode(bencode: &Bencode) -> Option<f32> {
        match bencode {
            &Bencode::ByteString(ref v) => {
                match str::from_utf8(v.as_slice()) {
                    Some(s) => FromStrRadix::from_str_radix(s, 16),
                    None => None
                }
            }
            _ => None
        }
    }
}

impl ToBencode for f64 {
    fn to_bencode(&self) -> Bencode {
        Bencode::ByteString(std::f64::to_str_hex(*self).as_bytes().to_vec())
    }
}

impl FromBencode for f64 {
    fn from_bencode(bencode: &Bencode) -> Option<f64> {
        match bencode {
            &Bencode::ByteString(ref v) => {
                match str::from_utf8(v.as_slice()) {
                    Some(s) => FromStrRadix::from_str_radix(s, 16),
                    None => None
                }
            }
            _ => None
        }
    }
}

// Booleans are encoded as the byte strings "true" / "false".
impl ToBencode for bool {
    fn to_bencode(&self) -> Bencode {
        if *self {
            Bencode::ByteString(b"true".to_vec())
        } else {
            Bencode::ByteString(b"false".to_vec())
        }
    }
}

impl FromBencode for bool {
    fn from_bencode(bencode: &Bencode) -> Option<bool> {
        match bencode {
            &Bencode::ByteString(ref v) => {
                if v.as_slice() == b"true" {
                    Some(true)
                } else if v.as_slice() == b"false" {
                    Some(false)
                } else {
                    None
                }
            }
            _ => None
        }
    }
}

impl ToBencode for char {
    fn to_bencode(&self) -> Bencode {
        Bencode::ByteString(self.to_string().as_bytes().to_vec())
    }
}

impl FromBencode for char {
    fn from_bencode(bencode: &Bencode) -> Option<char> {
        let s: Option<String> = FromBencode::from_bencode(bencode);
        s.and_then(|s| {
            if s.as_slice().char_len() == 1 {
                Some(s.as_slice().char_at(0))
            } else {
                None
            }
        })
    }
}

impl ToBencode for String {
    fn to_bencode(&self) -> Bencode {
        Bencode::ByteString(self.as_bytes().to_vec())
    }
}

impl FromBencode for String {
    fn from_bencode(bencode: &Bencode) -> Option<String> {
        match bencode {
            &Bencode::ByteString(ref v) => std::str::from_utf8(v.as_slice()).map(|s| s.to_string()),
            _ => None
        }
    }
}

impl<T: ToBencode> ToBencode for Vec<T> {
    fn to_bencode(&self) -> Bencode {
        Bencode::List(self.iter().map(|e| e.to_bencode()).collect())
    }
}

impl<T: FromBencode> FromBencode for Vec<T> {
    fn from_bencode(bencode: &Bencode) -> Option<Vec<T>> {
        match bencode {
            &Bencode::List(ref es) => {
                let mut list = Vec::new();
                for e in es.iter() {
                    match FromBencode::from_bencode(e) {
                        Some(v) => list.push(v),
                        None => return None
                    }
                }
                Some(list)
            }
            _ => None
        }
    }
}

// Shared bodies for the TreeMap/HashMap string-keyed impls below.
macro_rules! map_to_bencode {
    ($m:expr) => {{
        let mut m = TreeMap::new();
        for (key, value) in $m.iter() {
            m.insert(util::ByteString::from_vec(key.as_bytes().to_vec()),
                     value.to_bencode());
        }
        Bencode::Dict(m)
    }}
}

macro_rules! map_from_bencode {
    ($mty:ident, $bencode:expr) => {{
        let res = match $bencode {
            &Bencode::Dict(ref map) => {
                let mut m = $mty::new();
                for (key, value) in map.iter() {
                    match str::from_utf8(key.as_slice()) {
                        Some(k) => {
                            let val: Option<T> = FromBencode::from_bencode(value);
                            match val {
                                Some(v) => m.insert(k.to_string(), v),
                                None => return None
                            }
                        }
                        None => return None
                    };
                }
                Some(m)
            }
            _ => None
        };
        res
    }}
}

impl<T: ToBencode> ToBencode for TreeMap<String, T> {
    fn to_bencode(&self) -> Bencode {
        map_to_bencode!(self)
    }
}

impl<T: FromBencode> FromBencode for TreeMap<String, T> {
    fn from_bencode(bencode: &Bencode) -> Option<TreeMap<String, T>> {
        map_from_bencode!(TreeMap, bencode)
    }
}

impl<T: ToBencode> ToBencode for HashMap<String, T> {
    fn to_bencode(&self) -> Bencode {
        map_to_bencode!(self)
    }
}

impl<T: FromBencode> FromBencode for HashMap<String, T> {
    fn from_bencode(bencode: &Bencode) -> Option<HashMap<String, T>> {
        map_from_bencode!(HashMap, bencode)
    }
}

/// Parses a bencode value from a borrowed byte slice.
pub fn from_buffer(buf: &[u8]) -> Result<Bencode, Error> {
    from_vec(buf.to_vec())
}

/// Parses a bencode value from an owned byte vector.
pub fn from_vec(buf: Vec<u8>) -> Result<Bencode, Error> {
    from_iter(buf.into_iter())
}

/// Parses a bencode value from a byte iterator via the streaming parser.
pub fn from_iter<T: Iterator<u8>>(iter: T) -> Result<Bencode, Error> {
    let streaming_parser = StreamingParser::new(iter);
    let mut parser = Parser::new(streaming_parser);
    parser.parse()
}

// Records the error into `self.error` and bails out of the current method.
macro_rules! tryenc(($e:expr) => (
    match $e {
        Ok(e) => e,
        Err(e) => {
            self.error = Err(e);
            return
        }
    }
))

pub type EncoderResult<T> = IoResult<T>;

/// Serializer that writes values in bencode format.
pub struct Encoder<'a> {
    // Final output sink.
    writer: &'a mut io::Writer + 'a,
    // Stack of in-memory buffers used while struct fields are assembled.
    writers: Vec<io::MemWriter>,
    // True while the next emitted value must be a dict key.
    expect_key: bool,
    keys: Vec<util::ByteString>,
    error: io::IoResult<()>,
    // True right after an Option::None was emitted (field is then omitted).
    is_none: bool,
    // Pending dicts: sorted field-name -> encoded-bytes maps.
    stack: Vec<TreeMap<util::ByteString, Vec<u8>>>,
}

impl<'a> Encoder<'a> {
    pub fn new(writer: &'a mut io::Writer) -> Encoder<'a> {
        Encoder {
            writer: writer,
            writers: Vec::new(),
            expect_key: false,
            keys: Vec::new(),
            error: Ok(()),
            is_none: false,
            stack: Vec::new()
        }
    }

    /// Encodes `val` into a fresh byte vector.
    pub fn buffer_encode<T: Encodable<Encoder<'a>, IoError>>(val: &T) -> EncoderResult<Vec<u8>> {
        use std::mem::transmute;
        let mut writer = io::MemWriter::new();
        // FIXME: same as json rust-lang/rust#14302
        unsafe {
            let mut encoder = Encoder::new(&mut writer);
            try!(val.encode(transmute(&mut encoder)));
            if encoder.error.is_err() {
                return Err(encoder.error.unwrap_err())
            }
        }
        Ok(writer.unwrap())
    }

    // Returns the active sink: the innermost field buffer, or the final writer.
    fn get_writer<'a>(&'a mut self) -> &'a mut io::Writer {
        if self.writers.len() == 0 {
            &mut self.writer as &'a mut io::Writer
        } else {
            self.writers.last_mut().unwrap() as &'a mut io::Writer
        }
    }

    // Writes a dict: "d" + each pre-encoded key/value (TreeMap keeps the keys
    // in the sorted order bencode requires) + "e".
    fn encode_dict(&mut self, dict: &TreeMap<util::ByteString, Vec<u8>>) -> EncoderResult<()> {
        try!(write!(self.get_writer(), "d"));
        for (key, value) in dict.iter() {
            try!(key.encode(self));
            try!(self.get_writer().write(value.as_slice()));
        }
        write!(self.get_writer(), "e")
    }

    fn error(&mut self, msg: &'static str) -> EncoderResult<()> {
        Err(IoError {
            kind: io::InvalidInput,
            desc: msg,
            detail: None
        })
    }
}

// Rejects any non-string value in dict-key position.
macro_rules! expect_value(($slf:expr) => {
    if $slf.expect_key {
        return $slf.error("Only 'string' map keys allowed");
    }
})

impl<'a> serialize::Encoder<IoError> for Encoder<'a> {
    fn emit_nil(&mut self) -> EncoderResult<()> {
        expect_value!(self);
        write!(self.get_writer(), "0:")
    }

    fn emit_uint(&mut self, v: uint) -> EncoderResult<()> { self.emit_i64(v as i64) }

    fn emit_u8(&mut self, v: u8) -> EncoderResult<()> { self.emit_i64(v as i64) }

    fn emit_u16(&mut self, v: u16) -> EncoderResult<()> { self.emit_i64(v as i64) }

    fn emit_u32(&mut self, v: u32) -> EncoderResult<()> { self.emit_i64(v as i64) }

    fn emit_u64(&mut self, v: u64) -> EncoderResult<()> { self.emit_i64(v as i64) }

    fn emit_int(&mut self, v: int) -> EncoderResult<()> { self.emit_i64(v as i64) }

    fn emit_i8(&mut self, v: i8) -> EncoderResult<()> { self.emit_i64(v as i64) }

    fn emit_i16(&mut self, v: i16) -> EncoderResult<()> { self.emit_i64(v as i64) }

    fn emit_i32(&mut self, v: i32) -> EncoderResult<()> { self.emit_i64(v as i64) }

    fn emit_i64(&mut self, v: i64) -> EncoderResult<()> {
        expect_value!(self);
        write!(self.get_writer(), "i{}e", v)
    }

    fn emit_bool(&mut self, v: bool) -> EncoderResult<()> {
        expect_value!(self);
        if v {
            self.emit_str("true")
        } else {
            self.emit_str("false")
        }
    }

    fn emit_f32(&mut self, v: f32) -> EncoderResult<()> {
        expect_value!(self);
        self.emit_str(std::f32::to_str_hex(v).as_slice())
    }

    fn emit_f64(&mut self, v: f64) -> EncoderResult<()> {
        expect_value!(self);
        self.emit_str(std::f64::to_str_hex(v).as_slice())
    }

    fn emit_char(&mut self, v: char) -> EncoderResult<()> {
        expect_value!(self);
        self.emit_str(v.to_string().as_slice())
    }

    fn emit_str(&mut self, v: &str) -> EncoderResult<()> {
        // In key position the string is only recorded; the dict writer emits
        // it later in sorted order.
        if self.expect_key {
            self.keys.push(util::ByteString::from_slice(v.as_bytes()));
            Ok(())
        } else {
            try!(write!(self.get_writer(), "{}:", v.len()));
            self.get_writer().write(v.as_bytes())
        }
    }

    fn emit_enum(&mut self, _name: &str, _f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> {
        self.error("emit_enum not implemented")
    }

    fn emit_enum_variant(&mut self, _v_name: &str, _v_id: uint, _len: uint, _f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> {
        self.error("emit_enum_variant not implemented")
    }

    fn emit_enum_variant_arg(&mut self, _a_idx: uint, _f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> {
        self.error("emit_enum_variant_arg not implemented")
    }

    fn emit_enum_struct_variant(&mut self, _v_name: &str, _v_id: uint, _len: uint, _f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> {
        self.error("emit_enum_struct_variant not implemented")
    }

    fn emit_enum_struct_variant_field(&mut self, _f_name: &str, _f_idx: uint, _f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> {
        self.error("emit_enum_struct_variant_field not implemented")
    }

    fn emit_struct(&mut self, _name: &str, _len: uint, f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> {
        expect_value!(self);
        // Collect fields into a sorted map first, then write them as a dict.
        self.stack.push(TreeMap::new());
        try!(f(self));
        let dict = self.stack.pop().unwrap();
        try!(self.encode_dict(&dict));
        self.is_none = false;
        Ok(())
    }

    fn emit_struct_field(&mut self, f_name: &str, _f_idx: uint, f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> {
        expect_value!(self);
        // Encode the field value into a scratch buffer; skip it entirely when
        // the value was Option::None.
        self.writers.push(io::MemWriter::new());
        try!(f(self));
        let data = self.writers.pop().unwrap();
        let dict = self.stack.last_mut().unwrap();
        if !self.is_none {
            dict.insert(util::ByteString::from_slice(f_name.as_bytes()), data.unwrap());
        }
        self.is_none = false;
        Ok(())
    }

    fn emit_tuple(&mut self, _len: uint, _f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> {
        self.error("emit_tuple not implemented")
    }

    fn emit_tuple_arg(&mut self, _idx: uint, _f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> {
        self.error("emit_tuple_arg not implemented")
    }

    fn emit_tuple_struct(&mut self, _name: &str, _len: uint, _f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> {
        self.error("emit_tuple_struct not implemented")
    }

    // NOTE(review): SOURCE is truncated here, mid-signature.
    fn emit_tuple_struct_arg(&mut self, _f_idx:
uint, _f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { self.error("emit_tuple_struct_arg not implemented") } fn emit_option(&mut self, f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { expect_value!(self); f(self) } fn emit_option_none(&mut self) -> EncoderResult<()> { expect_value!(self); self.is_none = true; write!(self.get_writer(), "3:nil") } fn emit_option_some(&mut self, f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { expect_value!(self); f(self) } fn emit_seq(&mut self, _len: uint, f: |this: &mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { expect_value!(self); try!(write!(self.get_writer(), "l")); try!(f(self)); self.is_none = false; write!(self.get_writer(), "e") } fn emit_seq_elt(&mut self, _idx: uint, f: |this: &mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { expect_value!(self); try!(f(self)); self.is_none = false; Ok(()) } fn emit_map(&mut self, _len: uint, f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { expect_value!(self); self.stack.push(TreeMap::new()); try!(f(self)); let dict = self.stack.pop().unwrap(); try!(self.encode_dict(&dict)); self.is_none = false; Ok(()) } fn emit_map_elt_key(&mut self, _idx: uint, f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { expect_value!(self); self.writers.push(io::MemWriter::new()); self.expect_key = true; try!(f(self)); self.expect_key = false; self.is_none = false; Ok(()) } fn emit_map_elt_val(&mut self, _idx: uint, f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { expect_value!(self); try!(f(self)); let key = self.keys.pop(); let data = self.writers.pop().unwrap(); let dict = self.stack.last_mut().unwrap(); dict.insert(key.unwrap(), data.unwrap()); self.is_none = false; Ok(()) } } pub struct Parser<T> { reader: T, depth: u32, } impl<T: Iterator<BencodeEvent>> Parser<T> { pub fn new(reader: T) -> Parser<T> { Parser { reader: reader, depth: 0 } } pub fn parse(&mut self) -> 
Result<Bencode, Error> { let next = self.reader.next(); self.parse_elem(next) } fn parse_elem(&mut self, current: Option<BencodeEvent>) -> Result<Bencode, Error> { let res = match current { Some(NumberValue(v)) => Ok(Bencode::Number(v)), Some(ByteStringValue(v)) => Ok(Bencode::ByteString(v)), Some(ListStart) => self.parse_list(current), Some(DictStart) => self.parse_dict(current), Some(ParseError(err)) => Err(err), None => Ok(Empty), x => panic!("[root] Unreachable but got {}", x) }; if self.depth == 0 { let next = self.reader.next(); match res { Err(_) => res, _ => { match next { Some(ParseError(err)) => Err(err), None => res, x => panic!("Unreachable but got {}", x) } } } } else { res } } fn parse_list(&mut self, mut current: Option<BencodeEvent>) -> Result<Bencode, Error> { self.depth += 1; let mut list = Vec::new(); loop { current = self.reader.next(); match current { Some(ListEnd) => break, Some(ParseError(err)) => return Err(err), Some(_) => { match self.parse_elem(current) { Ok(v) => list.push(v), err@Err(_) => return err } } x => panic!("[list] Unreachable but got {}", x) } } self.depth -= 1; Ok(Bencode::List(list)) } fn parse_dict(&mut self, mut current: Option<BencodeEvent>) -> Result<Bencode, Error> { self.depth += 1; let mut map = TreeMap::new(); loop { current = self.reader.next(); let key = match current { Some(DictEnd) => break, Some(DictKey(v)) => util::ByteString::from_vec(v), Some(ParseError(err)) => return Err(err), x => panic!("[dict] Unreachable but got {}", x) }; current = self.reader.next(); let value = try!(self.parse_elem(current)); map.insert(key, value); } self.depth -= 1; Ok(Bencode::Dict(map)) } } macro_rules! 
// Decoder-side counterpart of `expect_value!` (its `macro_rules!` keyword is
// the dangling token at the end of the previous span): reject any attempt to
// read a non-string value while a dict key is expected.
dec_expect_value(($slf:expr) => { if $slf.expect_key { return Err(Message("Only 'string' map keys allowed".to_string())) } })

// Shared placeholder pushed for struct fields missing from the input dict, so
// optional fields decode as `None` (see read_struct_field / read_option).
static EMPTY: Bencode = Empty;

// Errors produced while decoding a `Bencode` tree into a Rust value.
#[deriving(Eq, PartialEq, Clone, Show)]
pub enum DecoderError {
    Message(String),                  // generic decode failure with a description
    StringEncoding(Vec<u8>),          // byte string was not valid UTF-8
    Expecting(&'static str, String),  // expected kind vs. what was actually found
    Unimplemented(&'static str),      // trait hook with no bencode representation
}

pub type DecoderResult<T> = Result<T, DecoderError>;

// Walks a borrowed `Bencode` tree, driven by the `serialize::Decoder` hooks.
// `stack` holds the values still to be consumed; `keys`/`expect_key` carry
// dict keys between read_map_elt_key and read_str.
pub struct Decoder<'a> {
    keys: Vec<util::ByteString>,
    expect_key: bool,
    stack: Vec<&'a Bencode>,
}

impl<'a> Decoder<'a> {
    pub fn new(bencode: &'a Bencode) -> Decoder<'a> {
        Decoder {
            keys: Vec::new(),
            expect_key: false,
            stack: vec![bencode],
        }
    }

    // Pop the next value and convert it via `FromBencode`; `ty` only feeds
    // the error message.
    fn try_read<T: FromBencode>(&mut self, ty: &'static str) -> DecoderResult<T> {
        let val = self.stack.pop();
        match val.and_then(|b| FromBencode::from_bencode(b)) {
            Some(v) => Ok(v),
            None => Err(Message(format!("Error decoding value as '{}': {}", ty, val)))
        }
    }

    fn unimplemented<T>(&self, m: &'static str) -> DecoderResult<T> { Err(Unimplemented(m)) }
}

impl<'a> serialize::Decoder<DecoderError> for Decoder<'a> {
    fn error(&mut self, err: &str) -> DecoderError { Message(err.to_string()) }

    // All primitive reads delegate to `try_read`, i.e. to the type's
    // `FromBencode` impl; the string argument is purely diagnostic.
    fn read_nil(&mut self) -> DecoderResult<()> { dec_expect_value!(self); self.try_read("nil") }
    fn read_uint(&mut self) -> DecoderResult<uint> { dec_expect_value!(self); self.try_read("uint") }
    fn read_u8(&mut self) -> DecoderResult<u8> { dec_expect_value!(self); self.try_read("u8") }
    fn read_u16(&mut self) -> DecoderResult<u16> { dec_expect_value!(self); self.try_read("u16") }
    fn read_u32(&mut self) -> DecoderResult<u32> { dec_expect_value!(self); self.try_read("u32") }
    fn read_u64(&mut self) -> DecoderResult<u64> { dec_expect_value!(self); self.try_read("u64") }
    fn read_int(&mut self) -> DecoderResult<int> { dec_expect_value!(self); self.try_read("int") }
    fn read_i8(&mut self) -> DecoderResult<i8> { dec_expect_value!(self); self.try_read("i8") }
    fn read_i16(&mut self) -> DecoderResult<i16> { dec_expect_value!(self); self.try_read("i16") }
    fn read_i32(&mut self) -> DecoderResult<i32> { dec_expect_value!(self); self.try_read("i32") }
    fn read_i64(&mut self) -> DecoderResult<i64> { dec_expect_value!(self); self.try_read("i64") }
    fn read_bool(&mut self) -> DecoderResult<bool> { dec_expect_value!(self); self.try_read("bool") }
    fn read_f32(&mut self) -> DecoderResult<f32> { dec_expect_value!(self); self.try_read("f32") }
    fn read_f64(&mut self) -> DecoderResult<f64> { dec_expect_value!(self); self.try_read("f64") }
    fn read_char(&mut self) -> DecoderResult<char> { dec_expect_value!(self); self.try_read("char") }

    // In key position the pending key bytes (queued by read_map) are used;
    // otherwise the next stack value must be a ByteString. Either way the
    // bytes must be valid UTF-8.
    fn read_str(&mut self) -> DecoderResult<String> {
        if self.expect_key {
            let b = self.keys.pop().unwrap().unwrap();
            match String::from_utf8(b) {
                Ok(s) => Ok(s),
                Err(v) => Err(StringEncoding(v))
            }
        } else {
            let bencode = self.stack.pop();
            match bencode {
                Some(&Bencode::ByteString(ref v)) => {
                    String::from_utf8(v.clone()).map_err(|b| StringEncoding(b))
                }
                _ => Err(self.error(format!("Error decoding value as str: {}", bencode).as_slice()))
            }
        }
    }

    // Enums are not representable in bencode (mirrors the encoder side).
    fn read_enum<T>(&mut self, _name: &str, _f: |&mut Decoder<'a>| -> DecoderResult<T>) -> DecoderResult<T> {
        self.unimplemented("read_enum")
    }
    fn read_enum_variant<T>(&mut self, _names: &[&str], _f: |&mut Decoder<'a>, uint| -> DecoderResult<T>) -> DecoderResult<T> {
        self.unimplemented("read_enum_variant")
    }
    fn read_enum_variant_arg<T>(&mut self, _a_idx: uint, _f: |&mut Decoder<'a>| -> DecoderResult<T>) -> DecoderResult<T> {
        self.unimplemented("read_enum_variant_arg")
    }
    fn read_enum_struct_variant<T>(&mut self, _names: &[&str], _f: |&mut Decoder<'a>, uint| -> DecoderResult<T>) -> DecoderResult<T> {
        self.unimplemented("read_enum_struct_variant")
    }
    fn read_enum_struct_variant_field<T>(&mut self, _f_name: &str, _f_idx: uint, _f: |&mut Decoder<'a>| -> DecoderResult<T>) -> DecoderResult<T> {
        self.unimplemented("read_enum_struct_variant_field")
    }

    // The dict stays on the stack while fields are read (read_struct_field
    // peeks at it); it is popped once all fields are done.
    fn read_struct<T>(&mut self, _s_name: &str, _len: uint, f: |&mut Decoder<'a>| -> DecoderResult<T>) -> DecoderResult<T> {
        dec_expect_value!(self);
        let res = try!(f(self));
        self.stack.pop();
        Ok(res)
    }
    // Look the field up by name in the dict on top of the stack; a missing
    // key pushes the shared EMPTY sentinel so `Option` fields decode as None.
    fn read_struct_field<T>(&mut self, f_name: &str, _f_idx: uint, f: |&mut Decoder<'a>| -> DecoderResult<T>) -> DecoderResult<T> {
        dec_expect_value!(self);
        let val = match self.stack.last() {
            Some(v) => {
                match *v {
                    &Bencode::Dict(ref m) => {
                        match m.get(&util::ByteString::from_slice(f_name.as_bytes())) {
                            Some(v) => v,
                            None => &EMPTY
                        }
                    }
                    _ => return Err(Expecting("Dict", v.to_string()))
                }
            }
            None => return Err(Expecting("Dict", "None".to_string()))
        };
        self.stack.push(val);
        f(self)
    }

    fn read_tuple<T>(&mut self, _: uint, _f: |&mut Decoder<'a>| -> DecoderResult<T>) -> DecoderResult<T> {
        self.unimplemented("read_tuple")
    }
    fn read_tuple_arg<T>(&mut self, _a_idx: uint, _f: |&mut Decoder<'a>| -> DecoderResult<T>) -> DecoderResult<T> {
        self.unimplemented("read_tuple_arg")
    }
    fn read_tuple_struct<T>(&mut self, _s_name: &str, _: uint, _f: |&mut Decoder<'a>| -> DecoderResult<T>) -> DecoderResult<T> {
        self.unimplemented("read_tuple_struct")
    }
    fn read_tuple_struct_arg<T>(&mut self, _a_idx: uint, _f: |&mut Decoder<'a>| -> DecoderResult<T>) -> DecoderResult<T> {
        self.unimplemented("read_tuple_struct_arg")
    }

    // `None` is either the EMPTY sentinel (missing struct field) or the
    // literal byte string "nil" (the encoder's spelling of None); anything
    // else is pushed back and decoded as `Some`.
    fn read_option<T>(&mut self, f: |&mut Decoder<'a>, bool| -> DecoderResult<T>) -> DecoderResult<T> {
        let value = self.stack.pop();
        match value {
            Some(&Bencode::Empty) => f(self, false),
            Some(&Bencode::ByteString(ref v)) => {
                if v.as_slice() == b"nil" {
                    f(self, false)
                } else {
                    self.stack.push(value.unwrap());
                    f(self, true)
                }
            },
            Some(v) => {
                self.stack.push(v);
                f(self, true)
            }
            None => return Err(Expecting("Bencode", "None".to_string()))
        }
    }

    // Push the list elements in reverse so successive pops yield them in
    // original order.
    fn read_seq<T>(&mut self, f: |&mut Decoder<'a>, uint| -> DecoderResult<T>) -> DecoderResult<T> {
        dec_expect_value!(self);
        let len = match self.stack.pop() {
            Some(&Bencode::List(ref list)) => {
                for v in list.as_slice().iter().rev() {
                    self.stack.push(v);
                }
                list.len()
            }
            val => return Err(Expecting("List", val.to_string()))
        };
        f(self, len)
    }
    fn read_seq_elt<T>(&mut self, _idx: uint, f: |&mut Decoder<'a>| -> DecoderResult<T>) -> DecoderResult<T> {
        dec_expect_value!(self);
        f(self)
    }

    // Queue all keys on `self.keys` and all values on the stack; read_str in
    // key mode and read_map_elt_val then drain them pairwise.
    fn read_map<T>(&mut self, f: |&mut Decoder<'a>, uint| -> DecoderResult<T>) -> DecoderResult<T> {
        dec_expect_value!(self);
        let len = match self.stack.pop() {
            Some(&Bencode::Dict(ref m)) => {
                for (key, value) in m.iter() {
                    self.keys.push(key.clone());
                    self.stack.push(value);
                }
                m.len()
            }
            val => return Err(Expecting("Dict", val.to_string()))
        };
        f(self, len)
    }
    // `expect_key` gates read_str into key mode for the duration of the
    // key-reading closure.
    fn read_map_elt_key<T>(&mut self, _idx: uint, f: |&mut Decoder<'a>| -> DecoderResult<T>) -> DecoderResult<T> {
        dec_expect_value!(self);
        self.expect_key = true;
        let res = try!(f(self));
        self.expect_key = false;
        Ok(res)
    }
    fn read_map_elt_val<T>(&mut self, _idx: uint, f: |&mut Decoder<'a>| -> DecoderResult<T>) -> DecoderResult<T> {
        dec_expect_value!(self);
        f(self)
    }
}

#[cfg(test)]
mod tests {
    use serialize::{Encodable, Decodable};
    use std::collections::TreeMap;
    use std::collections::HashMap;
    use streaming::Error;
    use streaming::{BencodeEvent, NumberValue, ByteStringValue, ListStart, ListEnd, DictStart, DictKey, DictEnd, ParseError};
    use super::{Bencode, ToBencode};
    use super::{Parser, Encoder, Decoder, DecoderResult};
    use super::util;

    // Encode `$value` via the Encoder and compare against expected bytes.
    macro_rules! assert_encoding(($value:expr, $expected:expr) => ({
        let value = $value;
        let encoded = match Encoder::buffer_encode(&value) {
            Ok(e) => e,
            Err(err) => panic!("Unexpected failure: {}", err)
        };
        assert_eq!($expected.as_slice(), encoded.as_slice());
    }))

    // Parse `$enc` and decode it, expecting exactly `$value`.
    macro_rules! assert_decoding(($enc:expr, $value:expr) => ({
        let bencode = super::from_vec($enc).unwrap();
        let mut decoder = Decoder::new(&bencode);
        let result = Decodable::decode(&mut decoder);
        assert_eq!(Ok($value), result);
    }))

    // Generate a #[test] asserting value -> encoding for each pair.
    macro_rules! gen_encode_test(($name:ident, $($val:expr -> $enc:expr),+) => {
        #[test]
        fn $name() {
            $(assert_encoding!($val, $enc);)+
        }
    })

    // Same, but going through the ToBencode trait first.
    macro_rules! gen_tobencode_test(($name:ident, $($val:expr -> $enc:expr),+) => {
        #[test]
        fn $name() {
            $({
                let value = $val.to_bencode();
                assert_encoding!(value, $enc)
            };)+
        }
    })

    macro_rules!
    // Round-trip check: encode `$value`, parse the bytes back, decode, and
    // require equality with the original (the `macro_rules!` keyword is the
    // dangling token at the end of the previous span).
    assert_identity(($value:expr) => ({
        let value = $value;
        let encoded = match Encoder::buffer_encode(&value) {
            Ok(e) => e,
            Err(err) => panic!("Unexpected failure: {}", err)
        };
        let bencode = super::from_vec(encoded).unwrap();
        let mut decoder = Decoder::new(&bencode);
        let result = Decodable::decode(&mut decoder);
        assert_eq!(Ok(value), result);
    }))

    // Generate a #[test] doing an encode/decode round trip per value.
    macro_rules! gen_identity_test(($name:ident, $($val:expr),+) => {
        #[test]
        fn $name() {
            $(assert_identity!($val);)+
        }
    })

    // Encoding test + identity test (no ToBencode leg, e.g. for structs).
    macro_rules! gen_encode_identity_test(($name_enc:ident, $name_ident:ident, $($val:expr -> $enc:expr),+) => {
        gen_encode_test!($name_enc, $($val -> $enc),+)
        gen_identity_test!($name_ident, $($val),+)
    })

    // All three legs: Encodable, ToBencode, and round-trip identity.
    macro_rules! gen_complete_test(($name_enc:ident, $name_benc:ident, $name_ident:ident, $($val:expr -> $enc:expr),+) => {
        gen_encode_test!($name_enc, $($val -> $enc),+)
        gen_tobencode_test!($name_benc, $($val -> $enc),+)
        gen_identity_test!($name_ident, $($val),+)
    })

    // Convenience: expected encodings are written as &str, compared as bytes.
    fn bytes(s: &str) -> Vec<u8> { s.as_bytes().to_vec() }

    gen_complete_test!(encodes_unit,
                       tobencode_unit,
                       identity_unit,
                       () -> bytes("0:"))

    // None encodes as the byte string "nil".
    gen_complete_test!(encodes_option_none,
                       tobencode_option_none,
                       identity_option_none,
                       { let none: Option<int> = None; none } -> bytes("3:nil"))

    gen_complete_test!(encodes_option_some,
                       tobencode_option_some,
                       identity_option_some,
                       Some(1i) -> bytes("i1e"),
                       Some("rust".to_string()) -> bytes("4:rust"),
                       Some(vec![(), ()]) -> bytes("l0:0:e"))

    // Nested Some layers collapse to the inner value's encoding.
    gen_complete_test!(encodes_nested_option,
                       tobencode_nested_option,
                       identity_nested_option,
                       Some(Some(1i)) -> bytes("i1e"),
                       Some(Some("rust".to_string())) -> bytes("4:rust"))

    // Some(None) and None are indistinguishable on the wire, so decoding
    // yields the outermost None.
    #[test]
    fn option_is_none_if_any_nested_option_is_none() {
        let value: Option<Option<int>> = Some(None);
        let encoded = match Encoder::buffer_encode(&value) {
            Ok(e) => e,
            Err(err) => panic!("Unexpected failure: {}", err)
        };
        let none: Option<Option<int>> = None;
        assert_decoding!(encoded, none);
    }

    // Integer coverage: zero / positive / negative at every width, including
    // the extremes of each type.
    gen_complete_test!(encodes_zero_int, tobencode_zero_int, identity_zero_int,
                       0i -> bytes("i0e"))
    gen_complete_test!(encodes_positive_int, tobencode_positive_int, identity_positive_int,
                       5i -> bytes("i5e"),
                       99i -> bytes("i99e"),
                       ::std::int::MAX -> bytes(format!("i{}e", ::std::int::MAX).as_slice()))
    gen_complete_test!(encodes_negative_int, tobencode_negative_int, identity_negative_int,
                       -5i -> bytes("i-5e"),
                       -99i -> bytes("i-99e"),
                       ::std::int::MIN -> bytes(format!("i{}e", ::std::int::MIN).as_slice()))
    gen_complete_test!(encodes_zero_i8, tobencode_zero_i8, identity_zero_i8,
                       0i8 -> bytes("i0e"))
    gen_complete_test!(encodes_positive_i8, tobencode_positive_i8, identity_positive_i8,
                       5i8 -> bytes("i5e"),
                       99i8 -> bytes("i99e"),
                       ::std::i8::MAX -> bytes(format!("i{}e", ::std::i8::MAX).as_slice()))
    gen_complete_test!(encodes_negative_i8, tobencode_negative_i8, identity_negative_i8,
                       -5i8 -> bytes("i-5e"),
                       -99i8 -> bytes("i-99e"),
                       ::std::i8::MIN -> bytes(format!("i{}e", ::std::i8::MIN).as_slice()))
    gen_complete_test!(encodes_zero_i16, tobencode_zero_i16, identity_zero_i16,
                       0i16 -> bytes("i0e"))
    gen_complete_test!(encodes_positive_i16, tobencode_positive_i16, identity_positive_i16,
                       5i16 -> bytes("i5e"),
                       99i16 -> bytes("i99e"),
                       ::std::i16::MAX -> bytes(format!("i{}e", ::std::i16::MAX).as_slice()))
    gen_complete_test!(encodes_negative_i16, tobencode_negative_i16, identity_negative_i16,
                       -5i16 -> bytes("i-5e"),
                       -99i16 -> bytes("i-99e"),
                       ::std::i16::MIN -> bytes(format!("i{}e", ::std::i16::MIN).as_slice()))
    gen_complete_test!(encodes_zero_i32, tobencode_zero_i32, identity_zero_i32,
                       0i32 -> bytes("i0e"))
    gen_complete_test!(encodes_positive_i32, tobencode_positive_i32, identity_positive_i32,
                       5i32 -> bytes("i5e"),
                       99i32 -> bytes("i99e"),
                       ::std::i32::MAX -> bytes(format!("i{}e", ::std::i32::MAX).as_slice()))
    gen_complete_test!(encodes_negative_i32, tobencode_negative_i32, identity_negative_i32,
                       -5i32 -> bytes("i-5e"),
                       -99i32 -> bytes("i-99e"),
                       ::std::i32::MIN -> bytes(format!("i{}e", ::std::i32::MIN).as_slice()))
    gen_complete_test!(encodes_zero_i64, tobencode_zero_i64, identity_zero_i64,
                       0i64 -> bytes("i0e"))
    gen_complete_test!(encodes_positive_i64, tobencode_positive_i64, identity_positive_i64,
                       5i64 -> bytes("i5e"),
                       99i64 -> bytes("i99e"),
                       ::std::i64::MAX -> bytes(format!("i{}e", ::std::i64::MAX).as_slice()))
    gen_complete_test!(encodes_negative_i64, tobencode_negative_i64, identity_negative_i64,
                       -5i64 -> bytes("i-5e"),
                       -99i64 -> bytes("i-99e"),
                       ::std::i64::MIN -> bytes(format!("i{}e", ::std::i64::MIN).as_slice()))
    // Note: uint/u64 tests stop at MAX / 2 — values are funneled through i64
    // by the encoder, so the full unsigned range does not round-trip.
    gen_complete_test!(encodes_zero_uint, tobencode_zero_uint, identity_zero_uint,
                       0u -> bytes("i0e"))
    gen_complete_test!(encodes_positive_uint, tobencode_positive_uint, identity_positive_uint,
                       5u -> bytes("i5e"),
                       99u -> bytes("i99e"),
                       ::std::uint::MAX / 2 -> bytes(format!("i{}e", ::std::uint::MAX / 2).as_slice()))
    gen_complete_test!(encodes_zero_u8, tobencode_zero_u8, identity_zero_u8,
                       0u8 -> bytes("i0e"))
    gen_complete_test!(encodes_positive_u8, tobencode_positive_u8, identity_positive_u8,
                       5u8 -> bytes("i5e"),
                       99u8 -> bytes("i99e"),
                       ::std::u8::MAX -> bytes(format!("i{}e", ::std::u8::MAX).as_slice()))
    gen_complete_test!(encodes_zero_u16, tobencode_zero_u16, identity_zero_u16,
                       0u16 -> bytes("i0e"))
    gen_complete_test!(encodes_positive_u16, tobencode_positive_u16, identity_positive_u16,
                       5u16 -> bytes("i5e"),
                       99u16 -> bytes("i99e"),
                       ::std::u16::MAX -> bytes(format!("i{}e", ::std::u16::MAX).as_slice()))
    gen_complete_test!(encodes_zero_u32, tobencode_zero_u32, identity_zero_u32,
                       0u32 -> bytes("i0e"))
    gen_complete_test!(encodes_positive_u32, tobencode_positive_u32, identity_positive_u32,
                       5u32 -> bytes("i5e"),
                       99u32 -> bytes("i99e"),
                       ::std::u32::MAX -> bytes(format!("i{}e", ::std::u32::MAX).as_slice()))
    gen_complete_test!(encodes_zero_u64, tobencode_zero_u64, identity_zero_u64,
                       0u64 -> bytes("i0e"))
    gen_complete_test!(encodes_positive_u64, tobencode_positive_u64, identity_positive_u64,
                       5u64 -> bytes("i5e"),
                       99u64 -> bytes("i99e"),
                       ::std::u64::MAX / 2 -> bytes(format!("i{}e", ::std::u64::MAX / 2).as_slice()))

    // Booleans encode as the strings "true"/"false".
    gen_complete_test!(encodes_bool, tobencode_bool, identity_bool,
                       true -> bytes("4:true"),
                       false -> bytes("5:false"))

    // Floats encode as hexadecimal string representations (e.g. 99.0 -> "63").
    gen_complete_test!(encodes_zero_f32, tobencode_zero_f32, identity_zero_f32,
                       0.0f32 -> bytes("1:0"))
    gen_complete_test!(encodes_positive_f32, tobencode_positive_f32, identity_positive_f32,
                       99.0f32 -> bytes("2:63"),
                       101.12345f32 -> bytes("8:65.1f9a8"))
    gen_complete_test!(encodes_negative_f32, tobencode_negative_f32, identity_negative_f32,
                       -99.0f32 -> bytes("3:-63"),
                       -101.12345f32 -> bytes("9:-65.1f9a8"))
    gen_complete_test!(encodes_zero_f64, tobencode_zero_f64, identity_zero_f64,
                       0.0f64 -> bytes("1:0"))
    gen_complete_test!(encodes_positive_f64, tobencode_positive_f64, identity_positive_f64,
                       99.0f64 -> bytes("2:63"),
                       101.12345f64 -> bytes("15:65.1f9a6b50b0f4"))
    gen_complete_test!(encodes_negative_f64, tobencode_negative_f64, identity_negative_f64,
                       -99.0f64 -> bytes("3:-63"),
                       -101.12345f64 -> bytes("16:-65.1f9a6b50b0f4"))

    // Chars encode as one-character strings; multi-byte UTF-8 chars use their
    // byte length as the string length.
    gen_complete_test!(encodes_lower_letter_char, tobencode_lower_letter_char, identity_lower_letter_char,
                       'a' -> bytes("1:a"),
                       'c' -> bytes("1:c"),
                       'z' -> bytes("1:z"))
    gen_complete_test!(encodes_upper_letter_char, tobencode_upper_letter_char, identity_upper_letter_char,
                       'A' -> bytes("1:A"),
                       'C' -> bytes("1:C"),
                       'Z' -> bytes("1:Z"))
    gen_complete_test!(encodes_multibyte_char, tobencode_multibyte_char, identity_multibyte_char,
                       'ệ' -> bytes("3:ệ"),
                       '虎' -> bytes("3:虎"))
    gen_complete_test!(encodes_control_char, tobencode_control_char, identity_control_char,
                       '\n' -> bytes("1:\n"),
                       '\r' -> bytes("1:\r"),
                       '\0' -> bytes("1:\0"))

    gen_complete_test!(encode_empty_str, tobencode_empty_str, identity_empty_str,
                       "".to_string() -> bytes("0:"))
    gen_complete_test!(encode_str, tobencode_str, identity_str,
                       "a".to_string() -> bytes("1:a"),
                       "foo".to_string() -> bytes("3:foo"),
                       "This is nice!?#$%".to_string() -> bytes("17:This is nice!?#$%"))
    // String lengths count bytes, not characters.
    gen_complete_test!(encode_str_with_multibyte_chars, tobencode_str_with_multibyte_chars, identity_str_with_multibyte_chars,
                       "Löwe 老虎 Léopard".to_string() -> bytes("21:Löwe 老虎 Léopard"),
                       "いろはにほへとちりぬるを".to_string() -> bytes("36:いろはにほへとちりぬるを"))

    gen_complete_test!(encodes_empty_vec, tobencode_empty_vec, identity_empty_vec,
                       { let empty: Vec<u8> = Vec::new(); empty } -> bytes("le"))
    gen_complete_test!(encodes_nonmpty_vec, tobencode_nonmpty_vec, identity_nonmpty_vec,
                       vec![0i, 1i, 3i, 4i] -> bytes("li0ei1ei3ei4ee"),
                       vec!["foo".to_string(), "b".to_string()] -> bytes("l3:foo1:be"))
    gen_complete_test!(encodes_nested_vec, tobencode_nested_vec, identity_nested_vec,
                       vec![vec![1i], vec![2i, 3i], vec![]] -> bytes("lli1eeli2ei3eelee"))

    // Fixture structs for the struct-encoding tests below.
    #[deriving(Eq, PartialEq, Show, Encodable, Decodable)]
    struct SimpleStruct {
        a: uint,
        b: Vec<String>,
    }

    #[deriving(Eq, PartialEq, Show, Encodable, Decodable)]
    struct InnerStruct {
        field_one: (),
        list: Vec<uint>,
        abc: String
    }

    #[deriving(Eq, PartialEq, Show, Encodable, Decodable)]
    struct OuterStruct {
        inner: Vec<InnerStruct>,
        is_true: bool
    }

    // Struct fields come out as a dict with keys in sorted order, regardless
    // of declaration order.
    gen_encode_identity_test!(encodes_struct, identity_struct,
                              SimpleStruct { b: vec!["foo".to_string(), "baar".to_string()], a: 123 } -> bytes("d1:ai123e1:bl3:foo4:baaree"),
                              SimpleStruct { a: 1234567890, b: vec![] } -> bytes("d1:ai1234567890e1:blee"))
    gen_encode_identity_test!(encodes_nested_struct, identity_nested_struct,
                              OuterStruct {
                                  is_true: true,
                                  inner: vec![InnerStruct { field_one: (), list: vec![99u, 5u], abc: "rust".to_string() },
                                              InnerStruct { field_one: (), list: vec![], abc: "".to_string() }]
                              } -> bytes("d\
                                          5:inner\
                                          l\
                                          d\
                                          3:abc4:rust\
                                          9:field_one0:\
                                          4:list\
                                          l\
                                          i99e\
                                          i5e\
                                          e\
                                          e\
                                          d\
                                          3:abc0:\
                                          9:field_one0:\
                                          4:listle\
                                          e\
                                          e\
                                          7:is_true4:true\
                                          e"))

    macro_rules!
    // Build a map of type `$m` (HashMap or TreeMap) from key/value pairs
    // (the `macro_rules!` keyword is the dangling token ending the previous
    // span).
    map(($m:ident, $(($key:expr, $val:expr)),*) => {{
        let mut _m = $m::new();
        $(_m.insert($key, $val);)*
        _m
    }})

    // HashMaps encode as dicts with keys sorted on the wire even though the
    // in-memory iteration order is arbitrary.
    gen_complete_test!(encodes_hashmap, bencode_hashmap, identity_hashmap,
                       map!(HashMap, ("a".to_string(), 1i)) -> bytes("d1:ai1ee"),
                       map!(HashMap, ("foo".to_string(), "a".to_string()), ("bar".to_string(), "bb".to_string())) -> bytes("d3:bar2:bb3:foo1:ae"))
    gen_complete_test!(encodes_nested_hashmap, bencode_nested_hashmap, identity_nested_hashmap,
                       map!(HashMap, ("a".to_string(), map!(HashMap, ("foo".to_string(), 101i), ("bar".to_string(), 102i)))) -> bytes("d1:ad3:bari102e3:fooi101eee"))

    // Decoding a dict into a map with non-string keys must fail.
    #[test]
    fn decode_error_on_wrong_map_key_type() {
        let benc = Bencode::Dict(map!(TreeMap, (util::ByteString::from_vec(bytes("foo")), Bencode::ByteString(bytes("bar")))));
        let mut decoder = Decoder::new(&benc);
        let res: DecoderResult<TreeMap<int, String>> = Decodable::decode(&mut decoder);
        assert!(res.is_err());
    }

    // Encoding a map with non-string keys must fail too.
    #[test]
    fn encode_error_on_wrong_map_key_type() {
        let m = map!(HashMap, (1i, "foo"));
        let encoded = Encoder::buffer_encode(&m);
        assert!(encoded.is_err())
    }

    // Field order in the source struct must not leak into the output: keys
    // are sorted bytewise.
    #[test]
    fn encodes_struct_fields_in_sorted_order() {
        #[deriving(Encodable)]
        struct OrderedStruct {
            z: int,
            a: int,
            ab: int,
            aa: int,
        }
        let s = OrderedStruct { z: 4, a: 1, ab: 3, aa: 2 };
        assert_eq!(Encoder::buffer_encode(&s), Ok(bytes("d1:ai1e2:aai2e2:abi3e1:zi4ee")));
    }

    // Fixtures for Option-field behavior.
    #[deriving(Encodable, Decodable, Eq, PartialEq, Show, Clone)]
    struct OptionalStruct {
        a: Option<int>,
        b: int,
        c: Option<Vec<Option<bool>>>,
    }

    #[deriving(Encodable, Decodable, Eq, PartialEq, Show)]
    struct OptionalStructOuter {
        a: Option<OptionalStruct>,
        b: Option<int>,
    }

    static OPT_STRUCT: OptionalStruct = OptionalStruct { a: None, b: 10, c: None };

    // `None` struct fields are omitted from the dict entirely...
    #[test]
    fn struct_option_none_fields_are_not_encoded() {
        assert_encoding!(OPT_STRUCT.clone(), bytes("d1:bi10ee"));
    }

    // ...and missing dict keys decode back to `None`.
    #[test]
    fn struct_options_not_present_default_to_none() {
        assert_decoding!(bytes("d1:bi10ee"), OPT_STRUCT.clone());
    }

    // Inside a list, None has to be spelled out as "nil" (it cannot be
    // omitted positionally like a struct field).
    gen_encode_identity_test!(encodes_nested_struct_fields, identity_nested_struct_field,
                              { OptionalStructOuter { a: Some(OPT_STRUCT.clone()), b: None } } -> bytes("d1:ad1:bi10eee"),
                              { let a = OptionalStruct { a: None, b: 10, c: Some(vec![Some(true), None]) };
                                OptionalStructOuter { a: Some(a), b: Some(99) } } -> bytes("d1:ad1:bi10e1:cl4:true3:nilee1:bi99ee"))

    // Serialize a Bencode tree directly (no Encodable), panicking on failure.
    fn try_bencode(bencode: Bencode) -> Vec<u8> {
        match bencode.to_bytes() {
            Ok(v) => v,
            Err(err) => panic!("Unexpected error: {}", err)
        }
    }

    // Direct Bencode-tree serialization: byte strings, lists, dicts.
    #[test]
    fn encodes_empty_bytestring() {
        assert_eq!(try_bencode(Bencode::ByteString(Vec::new())), bytes("0:"));
    }
    #[test]
    fn encodes_nonempty_bytestring() {
        assert_eq!(try_bencode(Bencode::ByteString(b"abc".to_vec())), bytes("3:abc"));
        // Arbitrary (non-UTF-8) bytes are legal bencode string content.
        assert_eq!(try_bencode(Bencode::ByteString(vec![0, 1, 2, 3])), bytes("4:\x00\x01\x02\x03"));
    }
    #[test]
    fn encodes_empty_list() {
        assert_eq!(try_bencode(Bencode::List(Vec::new())), bytes("le"));
    }
    #[test]
    fn encodes_nonempty_list() {
        assert_eq!(try_bencode(Bencode::List(vec![Bencode::Number(1)])), bytes("li1ee"));
        assert_eq!(try_bencode(Bencode::List(vec![Bencode::ByteString("foobar".as_bytes().to_vec()),
                                                  Bencode::Number(-1)])), bytes("l6:foobari-1ee"));
    }
    #[test]
    fn encodes_nested_list() {
        assert_eq!(try_bencode(Bencode::List(vec![Bencode::List(vec![])])), bytes("llee"));
        let list = Bencode::List(vec![Bencode::Number(1988), Bencode::List(vec![Bencode::Number(2014)])]);
        assert_eq!(try_bencode(list), bytes("li1988eli2014eee"));
    }
    #[test]
    fn encodes_empty_dict() {
        assert_eq!(try_bencode(Bencode::Dict(TreeMap::new())), bytes("de"));
    }
    #[test]
    fn encodes_dict_with_items() {
        let mut m = TreeMap::new();
        m.insert(util::ByteString::from_str("k1"), Bencode::Number(1));
        assert_eq!(try_bencode(Bencode::Dict(m.clone())), bytes("d2:k1i1ee"));
        m.insert(util::ByteString::from_str("k2"), Bencode::ByteString(vec![0, 0]));
        assert_eq!(try_bencode(Bencode::Dict(m.clone())), bytes("d2:k1i1e2:k22:\0\0e"));
    }
    #[test]
    fn encodes_nested_dict() {
        let mut outer = TreeMap::new();
        let mut inner = TreeMap::new();
        inner.insert(util::ByteString::from_str("val"), Bencode::ByteString(vec![68, 0, 90]));
        outer.insert(util::ByteString::from_str("inner"), Bencode::Dict(inner));
        assert_eq!(try_bencode(Bencode::Dict(outer)), bytes("d5:innerd3:val3:D\0Zee"));
    }
    #[test]
    fn encodes_dict_fields_in_sorted_order() {
        let mut m = TreeMap::new();
        m.insert(util::ByteString::from_str("z"), Bencode::Number(1));
        m.insert(util::ByteString::from_str("abd"), Bencode::Number(3));
        m.insert(util::ByteString::from_str("abc"), Bencode::Number(2));
        assert_eq!(try_bencode(Bencode::Dict(m)), bytes("d3:abci2e3:abdi3e1:zi1ee"));
    }

    // Feed a literal event slice into the event-stream Parser and compare.
    fn assert_decoded_eq(events: &[BencodeEvent], expected: Result<Bencode, Error>) {
        let mut parser = Parser::new(events.to_vec().into_iter());
        let result = parser.parse();
        assert_eq!(expected, result);
    }

    #[test]
    fn decodes_empty_input() {
        assert_decoded_eq([], Ok(Bencode::Empty));
    }
    #[test]
    fn decodes_number() {
        assert_decoded_eq([NumberValue(25)], Ok(Bencode::Number(25)));
    }
    #[test]
    fn decodes_bytestring() {
        assert_decoded_eq([ByteStringValue(bytes("foo"))], Ok(Bencode::ByteString(bytes("foo"))));
    }
    #[test]
    fn decodes_empty_list() {
        assert_decoded_eq([ListStart, ListEnd], Ok(Bencode::List(vec![])));
    }
    #[test]
    fn decodes_list_with_elements() {
        assert_decoded_eq([ListStart, NumberValue(1), ListEnd],
                          Ok(Bencode::List(vec![Bencode::Number(1)])));
        assert_decoded_eq([ListStart, ByteStringValue(bytes("str")), NumberValue(11), ListEnd],
                          Ok(Bencode::List(vec![Bencode::ByteString(bytes("str")), Bencode::Number(11)])));
    }
    #[test]
    fn decodes_nested_list() {
        assert_decoded_eq([ListStart, ListStart, NumberValue(13), ListEnd, ByteStringValue(bytes("rust")), ListEnd],
                          Ok(Bencode::List(vec![Bencode::List(vec![Bencode::Number(13)]),
                                                Bencode::ByteString(bytes("rust"))])));
    }
    #[test]
    fn decodes_empty_dict() {
        assert_decoded_eq([DictStart, DictEnd], Ok(Bencode::Dict(TreeMap::new())));
    }
    #[test]
    fn decodes_dict_with_value() {
        let mut map = TreeMap::new();
        map.insert(util::ByteString::from_str("foo"), Bencode::ByteString(bytes("rust")));
        assert_decoded_eq([DictStart, DictKey(bytes("foo")), ByteStringValue(bytes("rust")), DictEnd],
                          Ok(Bencode::Dict(map)));
    }
    #[test]
    fn decodes_dict_with_values() {
        let mut map = TreeMap::new();
        map.insert(util::ByteString::from_str("num"), Bencode::Number(9));
        map.insert(util::ByteString::from_str("str"), Bencode::ByteString(bytes("abc")));
        map.insert(util::ByteString::from_str("list"), Bencode::List(vec![Bencode::Number(99)]));
        assert_decoded_eq([DictStart, DictKey(bytes("num")), NumberValue(9),
                           DictKey(bytes("str")), ByteStringValue(bytes("abc")),
                           DictKey(bytes("list")), ListStart, NumberValue(99), ListEnd, DictEnd],
                          Ok(Bencode::Dict(map)));
    }
    #[test]
    fn decodes_nested_dict() {
        let mut inner = TreeMap::new();
        inner.insert(util::ByteString::from_str("inner"), Bencode::Number(2));
        let mut outer = TreeMap::new();
        outer.insert(util::ByteString::from_str("dict"), Bencode::Dict(inner));
        outer.insert(util::ByteString::from_str("outer"), Bencode::Number(1));
        assert_decoded_eq([DictStart, DictKey(bytes("outer")), NumberValue(1),
                           DictKey(bytes("dict")), DictStart, DictKey(bytes("inner")), NumberValue(2), DictEnd, DictEnd],
                          Ok(Bencode::Dict(outer)));
    }

    // A ParseError event anywhere in the stream — including after a complete
    // root value — must be propagated as Err (see parse_elem's depth-0 path).
    #[test]
    fn decode_error_on_parse_error() {
        let err = Error{ pos: 1, msg: "error msg".to_string() };
        let perr = ParseError(err.clone());
        assert_decoded_eq([perr.clone()], Err(err.clone()));
        assert_decoded_eq([NumberValue(1), perr.clone()], Err(err.clone()));
        assert_decoded_eq([ListStart, perr.clone()], Err(err.clone()));
        assert_decoded_eq([ListStart, ByteStringValue(bytes("foo")), perr.clone()], Err(err.clone()));
        assert_decoded_eq([DictStart, perr.clone()], Err(err.clone()));
        assert_decoded_eq([DictStart, DictKey(bytes("foo")), perr.clone()], Err(err.clone()));
    }
}

// Microbenchmarks for the encode and full parse+decode paths.
#[cfg(test)]
mod bench {
    extern crate test;
    use self::test::Bencher;
    use std::io;
    use serialize::{Encodable, Decodable};
    use streaming::StreamingParser;
    use super::{Encoder, Decoder, Parser, DecoderResult};
    #[bench]
    fn
    // Encode a 100-element Vec<uint> per iteration (the `#[bench] fn` tokens
    // are the dangling tail of the previous span).
    encode_large_vec_of_uint(bh: &mut Bencher) {
        let v = Vec::from_fn(100, |n| n);
        bh.iter(|| {
            let mut w = io::MemWriter::with_capacity(v.len() * 10);
            {
                let mut enc = Encoder::new(&mut w);
                let _ = v.encode(&mut enc);
            }
            w.unwrap()
        });
        bh.bytes = v.len() as u64 * 4;
    }

    // Full pipeline per iteration: streaming parse -> Bencode tree -> decode.
    #[bench]
    fn decode_large_vec_of_uint(bh: &mut Bencher) {
        let v = Vec::from_fn(100, |n| n);
        let b = Encoder::buffer_encode(&v).unwrap();
        bh.iter(|| {
            let streaming_parser = StreamingParser::new(b.clone().into_iter());
            let mut parser = Parser::new(streaming_parser);
            let bencode = parser.parse().unwrap();
            let mut decoder = Decoder::new(&bencode);
            let result: DecoderResult<Vec<uint>> = Decodable::decode(&mut decoder);
            result
        });
        bh.bytes = b.len() as u64 * 4;
    }
}
// NOTE(review): the following bare line appears to be a stray commit message
// left over from concatenating file revisions — presumably everything after
// it is the head of the next revision of this file; confirm against the
// original repository before building.
Show utf8 ByteStrings as strings
// Copyright 2014 Arjan Topolovec
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_name = "bencode"]
#![license = "MIT/ASL2"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![feature(macro_rules)]
/*!
Bencode parsing and serialization # Encoding ## Using `Encodable` ```rust extern crate serialize; extern crate bencode; use serialize::Encodable; use bencode::Encoder; #[deriving(Encodable)] struct MyStruct { string: String, id: uint, } fn main() { let s = MyStruct { string: "Hello bencode".to_string(), id: 1 }; let result: Vec<u8> = Encoder::buffer_encode(&s).unwrap(); } ``` ## Using `ToBencode` ```rust extern crate collections; extern crate bencode; use collections::TreeMap; use bencode::{Bencode, ToBencode}; use bencode::util::ByteString; struct MyStruct { a: int, b: String, c: Vec<u8>, } impl ToBencode for MyStruct { fn to_bencode(&self) -> bencode::Bencode { let mut m = TreeMap::new(); m.insert(ByteString::from_str("a"), self.a.to_bencode()); m.insert(ByteString::from_str("b"), self.b.to_bencode()); m.insert(ByteString::from_str("c"), Bencode::ByteString(self.c.as_slice().to_vec())); Bencode::Dict(m) } } fn main() { let s = MyStruct{ a: 5, b: "foo".to_string(), c: vec![1, 2, 3, 4] }; let bencode: bencode::Bencode = s.to_bencode(); let result: Vec<u8> = bencode.to_bytes().unwrap(); } ``` # Decoding ## Using `Decodable` ```rust extern crate serialize; extern crate bencode; use serialize::{Encodable, Decodable}; use bencode::{Encoder, Decoder}; #[deriving(Encodable, Decodable, PartialEq)] struct MyStruct { a: int, b: String, c: Vec<u8>, } fn main() { let s = MyStruct{ a: 5, b: "foo".to_string(), c: vec![1, 2, 3, 4] }; let enc: Vec<u8> = Encoder::buffer_encode(&s).unwrap(); let bencode: bencode::Bencode = bencode::from_vec(enc).unwrap(); let mut decoder = Decoder::new(&bencode); let result: MyStruct = Decodable::decode(&mut decoder).unwrap(); assert!(s == result) } ``` ## Using `FromBencode` ```rust extern crate collections; extern crate bencode; use collections::TreeMap; use bencode::{FromBencode, ToBencode, Bencode}; use bencode::util::ByteString; #[deriving(PartialEq)] struct MyStruct { a: int } impl ToBencode for MyStruct { fn to_bencode(&self) -> 
bencode::Bencode { let mut m = TreeMap::new(); m.insert(ByteString::from_str("a"), self.a.to_bencode()); Bencode::Dict(m) } } impl FromBencode for MyStruct { fn from_bencode(bencode: &bencode::Bencode) -> Option<MyStruct> { match bencode { &Bencode::Dict(ref m) => { match m.get(&ByteString::from_str("a")) { Some(a) => FromBencode::from_bencode(a).map(|a| { MyStruct{ a: a } }), _ => None } } _ => None } } } fn main() { let s = MyStruct{ a: 5 }; let enc: Vec<u8> = s.to_bencode().to_bytes().unwrap(); let bencode: bencode::Bencode = bencode::from_vec(enc).unwrap(); let result: MyStruct = FromBencode::from_bencode(&bencode).unwrap(); assert!(s == result) } ``` ## Using Streaming Parser ```rust extern crate serialize; extern crate bencode; use bencode::streaming; use bencode::streaming::StreamingParser; use serialize::Encodable; use bencode::Encoder; #[deriving(Encodable, Decodable, PartialEq)] struct MyStruct { a: int, b: String, c: Vec<u8>, } fn main() { let s = MyStruct{ a: 5, b: "foo".to_string(), c: vec![2, 2, 3, 4] }; let enc: Vec<u8> = Encoder::buffer_encode(&s).unwrap(); let mut streaming = StreamingParser::new(enc.into_iter()); for event in streaming { match event { streaming::DictStart => println!("dict start"), streaming::DictEnd => println!("dict end"), streaming::NumberValue(n) => println!("number = {}", n), // ... 
_ => println!("Unhandled event: {}", event) } } } ``` */ extern crate serialize; use std::io; use std::io::{IoResult, IoError}; use std::fmt; use std::str; use std::str::raw; use std::vec::Vec; use std::num::FromStrRadix; use serialize::{Encodable}; use std::collections::TreeMap; use std::collections::HashMap; use streaming::{StreamingParser, Error}; use streaming::{BencodeEvent, NumberValue, ByteStringValue, ListStart, ListEnd, DictStart, DictKey, DictEnd, ParseError}; pub mod streaming; pub mod util; #[deriving(PartialEq, Clone)] pub enum Bencode { Empty, Number(i64), ByteString(Vec<u8>), List(ListVec), Dict(DictMap), } impl fmt::Show for Bencode { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { match self { &Bencode::Empty => { Ok(()) } &Bencode::Number(v) => write!(fmt, "{}", v), &Bencode::ByteString(ref v) => match str::from_utf8(v.as_slice()) { Some(s) => write!(fmt, "s\"{}\"", s), None => write!(fmt, "s{}", v), }, &Bencode::List(ref v) => write!(fmt, "{}", v), &Bencode::Dict(ref v) => { try!(write!(fmt, "{{")); let mut first = true; for (key, value) in v.iter() { if first { first = false; } else { try!(write!(fmt, ", ")); } try!(write!(fmt, "{}: {}", *key, *value)); } write!(fmt, "}}") } } } } pub type ListVec = Vec<Bencode>; pub type DictMap = TreeMap<util::ByteString, Bencode>; impl Bencode { pub fn to_writer(&self, writer: &mut io::Writer) -> io::IoResult<()> { let mut encoder = Encoder::new(writer); self.encode(&mut encoder) } pub fn to_bytes(&self) -> io::IoResult<Vec<u8>> { let mut writer = io::MemWriter::new(); match self.to_writer(&mut writer) { Ok(_) => Ok(writer.unwrap()), Err(err) => Err(err) } } } impl<E, S: serialize::Encoder<E>> Encodable<S, E> for Bencode { fn encode(&self, e: &mut S) -> Result<(), E> { match self { &Bencode::Empty => Ok(()), &Bencode::Number(v) => e.emit_i64(v), &Bencode::ByteString(ref v) => e.emit_str(unsafe { raw::from_utf8(v.as_slice()) }), &Bencode::List(ref v) => v.encode(e), &Bencode::Dict(ref v) => 
v.encode(e) } } }

/// Conversion into a `Bencode` value.
pub trait ToBencode {
    fn to_bencode(&self) -> Bencode;
}

/// Conversion from a `Bencode` value; returns `None` when the value's
/// shape does not match the target type.
pub trait FromBencode {
    fn from_bencode(&Bencode) -> Option<Self>;
}

// Unit is represented as the empty byte string.
impl ToBencode for () {
    fn to_bencode(&self) -> Bencode {
        Bencode::ByteString(Vec::new())
    }
}

impl FromBencode for () {
    fn from_bencode(bencode: &Bencode) -> Option<()> {
        match bencode {
            &Bencode::ByteString(ref v) => {
                // Only the empty byte string decodes back to unit.
                if v.len() == 0 {
                    Some(())
                } else {
                    None
                }
            }
            _ => None
        }
    }
}

// `None` is encoded as the literal byte string "nil"; `Some(v)` encodes as
// `v` itself.
// NOTE(review): a genuine byte string "nil" is indistinguishable from an
// encoded `None` on the wire.
impl<T: ToBencode> ToBencode for Option<T> {
    fn to_bencode(&self) -> Bencode {
        match self {
            &Some(ref v) => v.to_bencode(),
            &None => Bencode::ByteString(b"nil".to_vec())
        }
    }
}

impl<T: FromBencode> FromBencode for Option<T> {
    fn from_bencode(bencode: &Bencode) -> Option<Option<T>> {
        match bencode {
            &Bencode::ByteString(ref v) => {
                // The "nil" sentinel means an encoded `None`.
                if v.as_slice() == b"nil" {
                    return Some(None)
                }
            }
            _ => ()
        }
        // Anything else decodes as the inner type, wrapped in `Some`.
        FromBencode::from_bencode(bencode).map(|v| Some(v))
    }
}

// Generates a `ToBencode` impl that widens a numeric type to `i64`.
macro_rules! derive_num_to_bencode(($t:ty) => (
    impl ToBencode for $t {
        fn to_bencode(&self) -> Bencode {
            Bencode::Number(*self as i64)
        }
    }
))

// Generates a `FromBencode` impl that narrows an `i64` number back to `$t`.
macro_rules!
derive_num_from_bencode(($t:ty) => (
    impl FromBencode for $t {
        fn from_bencode(bencode: &Bencode) -> Option<$t> {
            match bencode {
                // NOTE(review): `as $t` silently truncates/wraps values
                // outside the target range instead of returning None.
                &Bencode::Number(v) => Some(v as $t),
                _ => None
            }
        }
    }
))

// Every primitive integer type round-trips through `Bencode::Number(i64)`.
derive_num_to_bencode!(int)
derive_num_from_bencode!(int)
derive_num_to_bencode!(i8)
derive_num_from_bencode!(i8)
derive_num_to_bencode!(i16)
derive_num_from_bencode!(i16)
derive_num_to_bencode!(i32)
derive_num_from_bencode!(i32)
derive_num_to_bencode!(i64)
derive_num_from_bencode!(i64)
derive_num_to_bencode!(uint)
derive_num_from_bencode!(uint)
derive_num_to_bencode!(u8)
derive_num_from_bencode!(u8)
derive_num_to_bencode!(u16)
derive_num_from_bencode!(u16)
derive_num_to_bencode!(u32)
derive_num_from_bencode!(u32)
derive_num_to_bencode!(u64)
derive_num_from_bencode!(u64)

// Floats are serialized as hexadecimal strings (bencode has no float type);
// the hex form lets them round-trip without precision loss.
impl ToBencode for f32 {
    fn to_bencode(&self) -> Bencode {
        Bencode::ByteString(std::f32::to_str_hex(*self).as_bytes().to_vec())
    }
}

impl FromBencode for f32 {
    fn from_bencode(bencode: &Bencode) -> Option<f32> {
        match bencode {
            &Bencode::ByteString(ref v) => {
                // The byte string must be valid UTF-8 and a valid base-16
                // float literal.
                match str::from_utf8(v.as_slice()) {
                    Some(s) => FromStrRadix::from_str_radix(s, 16),
                    None => None
                }
            }
            _ => None
        }
    }
}

impl ToBencode for f64 {
    fn to_bencode(&self) -> Bencode {
        Bencode::ByteString(std::f64::to_str_hex(*self).as_bytes().to_vec())
    }
}

impl FromBencode for f64 {
    fn from_bencode(bencode: &Bencode) -> Option<f64> {
        match bencode {
            &Bencode::ByteString(ref v) => {
                match str::from_utf8(v.as_slice()) {
                    Some(s) => FromStrRadix::from_str_radix(s, 16),
                    None => None
                }
            }
            _ => None
        }
    }
}

// Booleans become the byte strings "true" / "false".
impl ToBencode for bool {
    fn to_bencode(&self) -> Bencode {
        if *self {
            Bencode::ByteString(b"true".to_vec())
        } else {
            Bencode::ByteString(b"false".to_vec())
        }
    }
}

impl FromBencode for bool {
    fn from_bencode(bencode: &Bencode) -> Option<bool> {
        match bencode {
            &Bencode::ByteString(ref v) => {
                if v.as_slice() == b"true" {
                    Some(true)
                } else if v.as_slice() == b"false" {
                    Some(false)
                } else {
                    None
                }
            }
            _ => None
        }
    }
}

// A char is encoded as its UTF-8 byte string (1-4 bytes).
impl ToBencode for char {
    fn to_bencode(&self) ->
Bencode::ByteString(self.to_string().as_bytes().to_vec()) } }

impl FromBencode for char {
    fn from_bencode(bencode: &Bencode) -> Option<char> {
        // Decode as a String first, then accept it only when it holds
        // exactly one character.
        let s: Option<String> = FromBencode::from_bencode(bencode);
        s.and_then(|s| {
            if s.as_slice().char_len() == 1 {
                Some(s.as_slice().char_at(0))
            } else {
                None
            }
        })
    }
}

// Strings map directly onto bencode byte strings as UTF-8 bytes.
impl ToBencode for String {
    fn to_bencode(&self) -> Bencode {
        Bencode::ByteString(self.as_bytes().to_vec())
    }
}

impl FromBencode for String {
    fn from_bencode(bencode: &Bencode) -> Option<String> {
        match bencode {
            // Fails (None) when the bytes are not valid UTF-8.
            &Bencode::ByteString(ref v) => std::str::from_utf8(v.as_slice()).map(|s| s.to_string()),
            _ => None
        }
    }
}

// Vectors map onto bencode lists, element-wise.
impl<T: ToBencode> ToBencode for Vec<T> {
    fn to_bencode(&self) -> Bencode {
        Bencode::List(self.iter().map(|e| e.to_bencode()).collect())
    }
}

impl<T: FromBencode> FromBencode for Vec<T> {
    fn from_bencode(bencode: &Bencode) -> Option<Vec<T>> {
        match bencode {
            &Bencode::List(ref es) => {
                let mut list = Vec::new();
                for e in es.iter() {
                    match FromBencode::from_bencode(e) {
                        Some(v) => list.push(v),
                        // One undecodable element rejects the whole list.
                        None => return None
                    }
                }
                Some(list)
            }
            _ => None
        }
    }
}

// Shared body for encoding a String-keyed map as a bencode Dict.
macro_rules! map_to_bencode {
    ($m:expr) => {{
        let mut m = TreeMap::new();
        for (key, value) in $m.iter() {
            m.insert(util::ByteString::from_vec(key.as_bytes().to_vec()), value.to_bencode());
        }
        Bencode::Dict(m)
    }}
}

// Shared body for decoding a bencode Dict into a String-keyed map type.
macro_rules!
map_from_bencode { ($mty:ident, $bencode:expr) => {{
    let res = match $bencode {
        &Bencode::Dict(ref map) => {
            let mut m = $mty::new();
            for (key, value) in map.iter() {
                // Keys must be valid UTF-8 to become String map keys.
                match str::from_utf8(key.as_slice()) {
                    Some(k) => {
                        let val: Option<T> = FromBencode::from_bencode(value);
                        match val {
                            Some(v) => m.insert(k.to_string(), v),
                            // Any undecodable value rejects the whole map.
                            None => return None
                        }
                    }
                    None => return None
                };
            }
            Some(m)
        }
        _ => None
    };
    res
}} }

impl<T: ToBencode> ToBencode for TreeMap<String, T> {
    fn to_bencode(&self) -> Bencode {
        map_to_bencode!(self)
    }
}

impl<T: FromBencode> FromBencode for TreeMap<String, T> {
    fn from_bencode(bencode: &Bencode) -> Option<TreeMap<String, T>> {
        map_from_bencode!(TreeMap, bencode)
    }
}

impl<T: ToBencode> ToBencode for HashMap<String, T> {
    fn to_bencode(&self) -> Bencode {
        map_to_bencode!(self)
    }
}

impl<T: FromBencode> FromBencode for HashMap<String, T> {
    fn from_bencode(bencode: &Bencode) -> Option<HashMap<String, T>> {
        map_from_bencode!(HashMap, bencode)
    }
}

/// Decodes a single bencode value from a byte slice.
pub fn from_buffer(buf: &[u8]) -> Result<Bencode, Error> {
    // Iterate the slice directly instead of copying it into a fresh Vec
    // first (the parser only needs an `Iterator<u8>`).
    from_iter(buf.iter().map(|&b| b))
}

/// Decodes a single bencode value from an owned byte vector.
pub fn from_vec(buf: Vec<u8>) -> Result<Bencode, Error> {
    from_iter(buf.into_iter())
}

/// Decodes a single bencode value from a byte iterator by running the
/// streaming tokenizer and folding its events into a `Bencode` tree.
pub fn from_iter<T: Iterator<u8>>(iter: T) -> Result<Bencode, Error> {
    let streaming_parser = StreamingParser::new(iter);
    let mut parser = Parser::new(streaming_parser);
    parser.parse()
}

macro_rules!
tryenc(($e:expr) => ( match $e { Ok(e) => e, Err(e) => { self.error = Err(e); return } } )) pub type EncoderResult<T> = IoResult<T>; pub struct Encoder<'a> { writer: &'a mut io::Writer + 'a, writers: Vec<io::MemWriter>, expect_key: bool, keys: Vec<util::ByteString>, error: io::IoResult<()>, is_none: bool, stack: Vec<TreeMap<util::ByteString, Vec<u8>>>, } impl<'a> Encoder<'a> { pub fn new(writer: &'a mut io::Writer) -> Encoder<'a> { Encoder { writer: writer, writers: Vec::new(), expect_key: false, keys: Vec::new(), error: Ok(()), is_none: false, stack: Vec::new() } } pub fn buffer_encode<T: Encodable<Encoder<'a>, IoError>>(val: &T) -> EncoderResult<Vec<u8>> { use std::mem::transmute; let mut writer = io::MemWriter::new(); // FIXME: same as json rust-lang/rust#14302 unsafe { let mut encoder = Encoder::new(&mut writer); try!(val.encode(transmute(&mut encoder))); if encoder.error.is_err() { return Err(encoder.error.unwrap_err()) } } Ok(writer.unwrap()) } fn get_writer<'a>(&'a mut self) -> &'a mut io::Writer { if self.writers.len() == 0 { &mut self.writer as &'a mut io::Writer } else { self.writers.last_mut().unwrap() as &'a mut io::Writer } } fn encode_dict(&mut self, dict: &TreeMap<util::ByteString, Vec<u8>>) -> EncoderResult<()> { try!(write!(self.get_writer(), "d")); for (key, value) in dict.iter() { try!(key.encode(self)); try!(self.get_writer().write(value.as_slice())); } write!(self.get_writer(), "e") } fn error(&mut self, msg: &'static str) -> EncoderResult<()> { Err(IoError { kind: io::InvalidInput, desc: msg, detail: None }) } } macro_rules! 
expect_value(($slf:expr) => { if $slf.expect_key { return $slf.error("Only 'string' map keys allowed"); } }) impl<'a> serialize::Encoder<IoError> for Encoder<'a> { fn emit_nil(&mut self) -> EncoderResult<()> { expect_value!(self); write!(self.get_writer(), "0:") } fn emit_uint(&mut self, v: uint) -> EncoderResult<()> { self.emit_i64(v as i64) } fn emit_u8(&mut self, v: u8) -> EncoderResult<()> { self.emit_i64(v as i64) } fn emit_u16(&mut self, v: u16) -> EncoderResult<()> { self.emit_i64(v as i64) } fn emit_u32(&mut self, v: u32) -> EncoderResult<()> { self.emit_i64(v as i64) } fn emit_u64(&mut self, v: u64) -> EncoderResult<()> { self.emit_i64(v as i64) } fn emit_int(&mut self, v: int) -> EncoderResult<()> { self.emit_i64(v as i64) } fn emit_i8(&mut self, v: i8) -> EncoderResult<()> { self.emit_i64(v as i64) } fn emit_i16(&mut self, v: i16) -> EncoderResult<()> { self.emit_i64(v as i64) } fn emit_i32(&mut self, v: i32) -> EncoderResult<()> { self.emit_i64(v as i64) } fn emit_i64(&mut self, v: i64) -> EncoderResult<()> { expect_value!(self); write!(self.get_writer(), "i{}e", v) } fn emit_bool(&mut self, v: bool) -> EncoderResult<()> { expect_value!(self); if v { self.emit_str("true") } else { self.emit_str("false") } } fn emit_f32(&mut self, v: f32) -> EncoderResult<()> { expect_value!(self); self.emit_str(std::f32::to_str_hex(v).as_slice()) } fn emit_f64(&mut self, v: f64) -> EncoderResult<()> { expect_value!(self); self.emit_str(std::f64::to_str_hex(v).as_slice()) } fn emit_char(&mut self, v: char) -> EncoderResult<()> { expect_value!(self); self.emit_str(v.to_string().as_slice()) } fn emit_str(&mut self, v: &str) -> EncoderResult<()> { if self.expect_key { self.keys.push(util::ByteString::from_slice(v.as_bytes())); Ok(()) } else { try!(write!(self.get_writer(), "{}:", v.len())); self.get_writer().write(v.as_bytes()) } } fn emit_enum(&mut self, _name: &str, _f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { self.error("emit_enum not implemented") 
} fn emit_enum_variant(&mut self, _v_name: &str, _v_id: uint, _len: uint, _f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { self.error("emit_enum_variant not implemented") } fn emit_enum_variant_arg(&mut self, _a_idx: uint, _f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { self.error("emit_enum_variant_arg not implemented") } fn emit_enum_struct_variant(&mut self, _v_name: &str, _v_id: uint, _len: uint, _f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { self.error("emit_enum_struct_variant not implemented") } fn emit_enum_struct_variant_field(&mut self, _f_name: &str, _f_idx: uint, _f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { self.error("emit_enum_struct_variant_field not implemented") } fn emit_struct(&mut self, _name: &str, _len: uint, f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { expect_value!(self); self.stack.push(TreeMap::new()); try!(f(self)); let dict = self.stack.pop().unwrap(); try!(self.encode_dict(&dict)); self.is_none = false; Ok(()) } fn emit_struct_field(&mut self, f_name: &str, _f_idx: uint, f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { expect_value!(self); self.writers.push(io::MemWriter::new()); try!(f(self)); let data = self.writers.pop().unwrap(); let dict = self.stack.last_mut().unwrap(); if !self.is_none { dict.insert(util::ByteString::from_slice(f_name.as_bytes()), data.unwrap()); } self.is_none = false; Ok(()) } fn emit_tuple(&mut self, _len: uint, _f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { self.error("emit_tuple not implemented") } fn emit_tuple_arg(&mut self, _idx: uint, _f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { self.error("emit_tuple_arg not implemented") } fn emit_tuple_struct(&mut self, _name: &str, _len: uint, _f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { self.error("emit_tuple_struct not implemented") } fn emit_tuple_struct_arg(&mut self, _f_idx: 
uint, _f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { self.error("emit_tuple_struct_arg not implemented") } fn emit_option(&mut self, f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { expect_value!(self); f(self) } fn emit_option_none(&mut self) -> EncoderResult<()> { expect_value!(self); self.is_none = true; write!(self.get_writer(), "3:nil") } fn emit_option_some(&mut self, f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { expect_value!(self); f(self) } fn emit_seq(&mut self, _len: uint, f: |this: &mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { expect_value!(self); try!(write!(self.get_writer(), "l")); try!(f(self)); self.is_none = false; write!(self.get_writer(), "e") } fn emit_seq_elt(&mut self, _idx: uint, f: |this: &mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { expect_value!(self); try!(f(self)); self.is_none = false; Ok(()) } fn emit_map(&mut self, _len: uint, f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { expect_value!(self); self.stack.push(TreeMap::new()); try!(f(self)); let dict = self.stack.pop().unwrap(); try!(self.encode_dict(&dict)); self.is_none = false; Ok(()) } fn emit_map_elt_key(&mut self, _idx: uint, f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { expect_value!(self); self.writers.push(io::MemWriter::new()); self.expect_key = true; try!(f(self)); self.expect_key = false; self.is_none = false; Ok(()) } fn emit_map_elt_val(&mut self, _idx: uint, f: |&mut Encoder<'a>| -> EncoderResult<()>) -> EncoderResult<()> { expect_value!(self); try!(f(self)); let key = self.keys.pop(); let data = self.writers.pop().unwrap(); let dict = self.stack.last_mut().unwrap(); dict.insert(key.unwrap(), data.unwrap()); self.is_none = false; Ok(()) } } pub struct Parser<T> { reader: T, depth: u32, } impl<T: Iterator<BencodeEvent>> Parser<T> { pub fn new(reader: T) -> Parser<T> { Parser { reader: reader, depth: 0 } } pub fn parse(&mut self) -> 
Result<Bencode, Error> { let next = self.reader.next(); self.parse_elem(next) } fn parse_elem(&mut self, current: Option<BencodeEvent>) -> Result<Bencode, Error> { let res = match current { Some(NumberValue(v)) => Ok(Bencode::Number(v)), Some(ByteStringValue(v)) => Ok(Bencode::ByteString(v)), Some(ListStart) => self.parse_list(current), Some(DictStart) => self.parse_dict(current), Some(ParseError(err)) => Err(err), None => Ok(Empty), x => panic!("[root] Unreachable but got {}", x) }; if self.depth == 0 { let next = self.reader.next(); match res { Err(_) => res, _ => { match next { Some(ParseError(err)) => Err(err), None => res, x => panic!("Unreachable but got {}", x) } } } } else { res } } fn parse_list(&mut self, mut current: Option<BencodeEvent>) -> Result<Bencode, Error> { self.depth += 1; let mut list = Vec::new(); loop { current = self.reader.next(); match current { Some(ListEnd) => break, Some(ParseError(err)) => return Err(err), Some(_) => { match self.parse_elem(current) { Ok(v) => list.push(v), err@Err(_) => return err } } x => panic!("[list] Unreachable but got {}", x) } } self.depth -= 1; Ok(Bencode::List(list)) } fn parse_dict(&mut self, mut current: Option<BencodeEvent>) -> Result<Bencode, Error> { self.depth += 1; let mut map = TreeMap::new(); loop { current = self.reader.next(); let key = match current { Some(DictEnd) => break, Some(DictKey(v)) => util::ByteString::from_vec(v), Some(ParseError(err)) => return Err(err), x => panic!("[dict] Unreachable but got {}", x) }; current = self.reader.next(); let value = try!(self.parse_elem(current)); map.insert(key, value); } self.depth -= 1; Ok(Bencode::Dict(map)) } } macro_rules! 
dec_expect_value(($slf:expr) => {
    // In map-key position only plain strings may appear.
    if $slf.expect_key {
        return Err(Message("Only 'string' map keys allowed".to_string()))
    }
})

// Placeholder pushed for struct fields that are absent from a Dict
// (later decoded as `None` by `read_option`). Qualified as
// `Bencode::Empty` to match the variant style used throughout this module.
static EMPTY: Bencode = Bencode::Empty;

#[deriving(Eq, PartialEq, Clone, Show)]
pub enum DecoderError {
    /// Generic decode failure with a human-readable description.
    Message(String),
    /// A byte string that was expected to be valid UTF-8 but was not.
    StringEncoding(Vec<u8>),
    /// Expected the first kind of value, found the second (rendered).
    Expecting(&'static str, String),
    /// The corresponding `serialize::Decoder` method is not supported.
    Unimplemented(&'static str),
}

pub type DecoderResult<T> = Result<T, DecoderError>;

/// Decodes Rust values from an already-parsed `Bencode` tree.
pub struct Decoder<'a> {
    keys: Vec<util::ByteString>,   // pending map keys pushed by read_map
    expect_key: bool,              // true while decoding a map key
    stack: Vec<&'a Bencode>,       // values still to be consumed
}

impl<'a> Decoder<'a> {
    pub fn new(bencode: &'a Bencode) -> Decoder<'a> {
        Decoder {
            keys: Vec::new(),
            expect_key: false,
            stack: vec![bencode],
        }
    }

    // Pops the next value and decodes it via `FromBencode`; `ty` is only
    // used to build the error message.
    fn try_read<T: FromBencode>(&mut self, ty: &'static str) -> DecoderResult<T> {
        let val = self.stack.pop();
        match val.and_then(|b| FromBencode::from_bencode(b)) {
            Some(v) => Ok(v),
            None => Err(Message(format!("Error decoding value as '{}': {}", ty, val)))
        }
    }

    fn unimplemented<T>(&self, m: &'static str) -> DecoderResult<T> {
        Err(Unimplemented(m))
    }
}

impl<'a> serialize::Decoder<DecoderError> for Decoder<'a> {
    fn error(&mut self, err: &str) -> DecoderError {
        Message(err.to_string())
    }
    // All primitive reads delegate to `try_read`, i.e. to `FromBencode`.
    fn read_nil(&mut self) -> DecoderResult<()> { dec_expect_value!(self); self.try_read("nil") }
    fn read_uint(&mut self) -> DecoderResult<uint> { dec_expect_value!(self); self.try_read("uint") }
    fn read_u8(&mut self) -> DecoderResult<u8> { dec_expect_value!(self); self.try_read("u8") }
    fn read_u16(&mut self) -> DecoderResult<u16> { dec_expect_value!(self); self.try_read("u16") }
    fn read_u32(&mut self) -> DecoderResult<u32> { dec_expect_value!(self); self.try_read("u32") }
    fn read_u64(&mut self) -> DecoderResult<u64> { dec_expect_value!(self); self.try_read("u64") }
    fn read_int(&mut self) -> DecoderResult<int> { dec_expect_value!(self); self.try_read("int") }
    fn read_i8(&mut self) -> DecoderResult<i8> { dec_expect_value!(self); self.try_read("i8") }
    fn read_i16(&mut self) -> DecoderResult<i16> { dec_expect_value!(self); self.try_read("i16") }
    fn read_i32(&mut self) ->
DecoderResult<i32> { dec_expect_value!(self); self.try_read("i32") } fn read_i64(&mut self) -> DecoderResult<i64> { dec_expect_value!(self); self.try_read("i64") } fn read_bool(&mut self) -> DecoderResult<bool> { dec_expect_value!(self); self.try_read("bool") } fn read_f32(&mut self) -> DecoderResult<f32> { dec_expect_value!(self); self.try_read("f32") } fn read_f64(&mut self) -> DecoderResult<f64> { dec_expect_value!(self); self.try_read("f64") } fn read_char(&mut self) -> DecoderResult<char> { dec_expect_value!(self); self.try_read("char") } fn read_str(&mut self) -> DecoderResult<String> { if self.expect_key { let b = self.keys.pop().unwrap().unwrap(); match String::from_utf8(b) { Ok(s) => Ok(s), Err(v) => Err(StringEncoding(v)) } } else { let bencode = self.stack.pop(); match bencode { Some(&Bencode::ByteString(ref v)) => { String::from_utf8(v.clone()).map_err(|b| StringEncoding(b)) } _ => Err(self.error(format!("Error decoding value as str: {}", bencode).as_slice())) } } } fn read_enum<T>(&mut self, _name: &str, _f: |&mut Decoder<'a>| -> DecoderResult<T>) -> DecoderResult<T> { self.unimplemented("read_enum") } fn read_enum_variant<T>(&mut self, _names: &[&str], _f: |&mut Decoder<'a>, uint| -> DecoderResult<T>) -> DecoderResult<T> { self.unimplemented("read_enum_variant") } fn read_enum_variant_arg<T>(&mut self, _a_idx: uint, _f: |&mut Decoder<'a>| -> DecoderResult<T>) -> DecoderResult<T> { self.unimplemented("read_enum_variant_arg") } fn read_enum_struct_variant<T>(&mut self, _names: &[&str], _f: |&mut Decoder<'a>, uint| -> DecoderResult<T>) -> DecoderResult<T> { self.unimplemented("read_enum_struct_variant") } fn read_enum_struct_variant_field<T>(&mut self, _f_name: &str, _f_idx: uint, _f: |&mut Decoder<'a>| -> DecoderResult<T>) -> DecoderResult<T> { self.unimplemented("read_enum_struct_variant_field") } fn read_struct<T>(&mut self, _s_name: &str, _len: uint, f: |&mut Decoder<'a>| -> DecoderResult<T>) -> DecoderResult<T> { dec_expect_value!(self); let res = 
try!(f(self)); self.stack.pop(); Ok(res) } fn read_struct_field<T>(&mut self, f_name: &str, _f_idx: uint, f: |&mut Decoder<'a>| -> DecoderResult<T>) -> DecoderResult<T> { dec_expect_value!(self); let val = match self.stack.last() { Some(v) => { match *v { &Bencode::Dict(ref m) => { match m.get(&util::ByteString::from_slice(f_name.as_bytes())) { Some(v) => v, None => &EMPTY } } _ => return Err(Expecting("Dict", v.to_string())) } } None => return Err(Expecting("Dict", "None".to_string())) }; self.stack.push(val); f(self) } fn read_tuple<T>(&mut self, _: uint, _f: |&mut Decoder<'a>| -> DecoderResult<T>) -> DecoderResult<T> { self.unimplemented("read_tuple") } fn read_tuple_arg<T>(&mut self, _a_idx: uint, _f: |&mut Decoder<'a>| -> DecoderResult<T>) -> DecoderResult<T> { self.unimplemented("read_tuple_arg") } fn read_tuple_struct<T>(&mut self, _s_name: &str, _: uint, _f: |&mut Decoder<'a>| -> DecoderResult<T>) -> DecoderResult<T> { self.unimplemented("read_tuple_struct") } fn read_tuple_struct_arg<T>(&mut self, _a_idx: uint, _f: |&mut Decoder<'a>| -> DecoderResult<T>) -> DecoderResult<T> { self.unimplemented("read_tuple_struct_arg") } fn read_option<T>(&mut self, f: |&mut Decoder<'a>, bool| -> DecoderResult<T>) -> DecoderResult<T> { let value = self.stack.pop(); match value { Some(&Bencode::Empty) => f(self, false), Some(&Bencode::ByteString(ref v)) => { if v.as_slice() == b"nil" { f(self, false) } else { self.stack.push(value.unwrap()); f(self, true) } }, Some(v) => { self.stack.push(v); f(self, true) } None => return Err(Expecting("Bencode", "None".to_string())) } } fn read_seq<T>(&mut self, f: |&mut Decoder<'a>, uint| -> DecoderResult<T>) -> DecoderResult<T> { dec_expect_value!(self); let len = match self.stack.pop() { Some(&Bencode::List(ref list)) => { for v in list.as_slice().iter().rev() { self.stack.push(v); } list.len() } val => return Err(Expecting("List", val.to_string())) }; f(self, len) } fn read_seq_elt<T>(&mut self, _idx: uint, f: |&mut Decoder<'a>| -> 
DecoderResult<T>) -> DecoderResult<T> { dec_expect_value!(self); f(self) } fn read_map<T>(&mut self, f: |&mut Decoder<'a>, uint| -> DecoderResult<T>) -> DecoderResult<T> { dec_expect_value!(self); let len = match self.stack.pop() { Some(&Bencode::Dict(ref m)) => { for (key, value) in m.iter() { self.keys.push(key.clone()); self.stack.push(value); } m.len() } val => return Err(Expecting("Dict", val.to_string())) }; f(self, len) } fn read_map_elt_key<T>(&mut self, _idx: uint, f: |&mut Decoder<'a>| -> DecoderResult<T>) -> DecoderResult<T> { dec_expect_value!(self); self.expect_key = true; let res = try!(f(self)); self.expect_key = false; Ok(res) } fn read_map_elt_val<T>(&mut self, _idx: uint, f: |&mut Decoder<'a>| -> DecoderResult<T>) -> DecoderResult<T> { dec_expect_value!(self); f(self) } } #[cfg(test)] mod tests { use serialize::{Encodable, Decodable}; use std::collections::TreeMap; use std::collections::HashMap; use streaming::Error; use streaming::{BencodeEvent, NumberValue, ByteStringValue, ListStart, ListEnd, DictStart, DictKey, DictEnd, ParseError}; use super::{Bencode, ToBencode}; use super::{Parser, Encoder, Decoder, DecoderResult}; use super::util; macro_rules! assert_encoding(($value:expr, $expected:expr) => ({ let value = $value; let encoded = match Encoder::buffer_encode(&value) { Ok(e) => e, Err(err) => panic!("Unexpected failure: {}", err) }; assert_eq!($expected.as_slice(), encoded.as_slice()); })) macro_rules! assert_decoding(($enc:expr, $value:expr) => ({ let bencode = super::from_vec($enc).unwrap(); let mut decoder = Decoder::new(&bencode); let result = Decodable::decode(&mut decoder); assert_eq!(Ok($value), result); })) macro_rules! gen_encode_test(($name:ident, $($val:expr -> $enc:expr),+) => { #[test] fn $name() { $(assert_encoding!($val, $enc);)+ } }) macro_rules! gen_tobencode_test(($name:ident, $($val:expr -> $enc:expr),+) => { #[test] fn $name() { $({ let value = $val.to_bencode(); assert_encoding!(value, $enc) };)+ } }) macro_rules! 
assert_identity(($value:expr) => ({ let value = $value; let encoded = match Encoder::buffer_encode(&value) { Ok(e) => e, Err(err) => panic!("Unexpected failure: {}", err) }; let bencode = super::from_vec(encoded).unwrap(); let mut decoder = Decoder::new(&bencode); let result = Decodable::decode(&mut decoder); assert_eq!(Ok(value), result); })) macro_rules! gen_identity_test(($name:ident, $($val:expr),+) => { #[test] fn $name() { $(assert_identity!($val);)+ } }) macro_rules! gen_encode_identity_test(($name_enc:ident, $name_ident:ident, $($val:expr -> $enc:expr),+) => { gen_encode_test!($name_enc, $($val -> $enc),+) gen_identity_test!($name_ident, $($val),+) }) macro_rules! gen_complete_test(($name_enc:ident, $name_benc:ident, $name_ident:ident, $($val:expr -> $enc:expr),+) => { gen_encode_test!($name_enc, $($val -> $enc),+) gen_tobencode_test!($name_benc, $($val -> $enc),+) gen_identity_test!($name_ident, $($val),+) }) fn bytes(s: &str) -> Vec<u8> { s.as_bytes().to_vec() } gen_complete_test!(encodes_unit, tobencode_unit, identity_unit, () -> bytes("0:")) gen_complete_test!(encodes_option_none, tobencode_option_none, identity_option_none, { let none: Option<int> = None; none } -> bytes("3:nil")) gen_complete_test!(encodes_option_some, tobencode_option_some, identity_option_some, Some(1i) -> bytes("i1e"), Some("rust".to_string()) -> bytes("4:rust"), Some(vec![(), ()]) -> bytes("l0:0:e")) gen_complete_test!(encodes_nested_option, tobencode_nested_option, identity_nested_option, Some(Some(1i)) -> bytes("i1e"), Some(Some("rust".to_string())) -> bytes("4:rust")) #[test] fn option_is_none_if_any_nested_option_is_none() { let value: Option<Option<int>> = Some(None); let encoded = match Encoder::buffer_encode(&value) { Ok(e) => e, Err(err) => panic!("Unexpected failure: {}", err) }; let none: Option<Option<int>> = None; assert_decoding!(encoded, none); } gen_complete_test!(encodes_zero_int, tobencode_zero_int, identity_zero_int, 0i -> bytes("i0e")) 
gen_complete_test!(encodes_positive_int, tobencode_positive_int, identity_positive_int, 5i -> bytes("i5e"), 99i -> bytes("i99e"), ::std::int::MAX -> bytes(format!("i{}e", ::std::int::MAX).as_slice())) gen_complete_test!(encodes_negative_int, tobencode_negative_int, identity_negative_int, -5i -> bytes("i-5e"), -99i -> bytes("i-99e"), ::std::int::MIN -> bytes(format!("i{}e", ::std::int::MIN).as_slice())) gen_complete_test!(encodes_zero_i8, tobencode_zero_i8, identity_zero_i8, 0i8 -> bytes("i0e")) gen_complete_test!(encodes_positive_i8, tobencode_positive_i8, identity_positive_i8, 5i8 -> bytes("i5e"), 99i8 -> bytes("i99e"), ::std::i8::MAX -> bytes(format!("i{}e", ::std::i8::MAX).as_slice())) gen_complete_test!(encodes_negative_i8, tobencode_negative_i8, identity_negative_i8, -5i8 -> bytes("i-5e"), -99i8 -> bytes("i-99e"), ::std::i8::MIN -> bytes(format!("i{}e", ::std::i8::MIN).as_slice())) gen_complete_test!(encodes_zero_i16, tobencode_zero_i16, identity_zero_i16, 0i16 -> bytes("i0e")) gen_complete_test!(encodes_positive_i16, tobencode_positive_i16, identity_positive_i16, 5i16 -> bytes("i5e"), 99i16 -> bytes("i99e"), ::std::i16::MAX -> bytes(format!("i{}e", ::std::i16::MAX).as_slice())) gen_complete_test!(encodes_negative_i16, tobencode_negative_i16, identity_negative_i16, -5i16 -> bytes("i-5e"), -99i16 -> bytes("i-99e"), ::std::i16::MIN -> bytes(format!("i{}e", ::std::i16::MIN).as_slice())) gen_complete_test!(encodes_zero_i32, tobencode_zero_i32, identity_zero_i32, 0i32 -> bytes("i0e")) gen_complete_test!(encodes_positive_i32, tobencode_positive_i32, identity_positive_i32, 5i32 -> bytes("i5e"), 99i32 -> bytes("i99e"), ::std::i32::MAX -> bytes(format!("i{}e", ::std::i32::MAX).as_slice())) gen_complete_test!(encodes_negative_i32, tobencode_negative_i32, identity_negative_i32, -5i32 -> bytes("i-5e"), -99i32 -> bytes("i-99e"), ::std::i32::MIN -> bytes(format!("i{}e", ::std::i32::MIN).as_slice())) gen_complete_test!(encodes_zero_i64, tobencode_zero_i64, identity_zero_i64, 
0i64 -> bytes("i0e")) gen_complete_test!(encodes_positive_i64, tobencode_positive_i64, identity_positive_i64, 5i64 -> bytes("i5e"), 99i64 -> bytes("i99e"), ::std::i64::MAX -> bytes(format!("i{}e", ::std::i64::MAX).as_slice())) gen_complete_test!(encodes_negative_i64, tobencode_negative_i64, identity_negative_i64, -5i64 -> bytes("i-5e"), -99i64 -> bytes("i-99e"), ::std::i64::MIN -> bytes(format!("i{}e", ::std::i64::MIN).as_slice())) gen_complete_test!(encodes_zero_uint, tobencode_zero_uint, identity_zero_uint, 0u -> bytes("i0e")) gen_complete_test!(encodes_positive_uint, tobencode_positive_uint, identity_positive_uint, 5u -> bytes("i5e"), 99u -> bytes("i99e"), ::std::uint::MAX / 2 -> bytes(format!("i{}e", ::std::uint::MAX / 2).as_slice())) gen_complete_test!(encodes_zero_u8, tobencode_zero_u8, identity_zero_u8, 0u8 -> bytes("i0e")) gen_complete_test!(encodes_positive_u8, tobencode_positive_u8, identity_positive_u8, 5u8 -> bytes("i5e"), 99u8 -> bytes("i99e"), ::std::u8::MAX -> bytes(format!("i{}e", ::std::u8::MAX).as_slice())) gen_complete_test!(encodes_zero_u16, tobencode_zero_u16, identity_zero_u16, 0u16 -> bytes("i0e")) gen_complete_test!(encodes_positive_u16, tobencode_positive_u16, identity_positive_u16, 5u16 -> bytes("i5e"), 99u16 -> bytes("i99e"), ::std::u16::MAX -> bytes(format!("i{}e", ::std::u16::MAX).as_slice())) gen_complete_test!(encodes_zero_u32, tobencode_zero_u32, identity_zero_u32, 0u32 -> bytes("i0e")) gen_complete_test!(encodes_positive_u32, tobencode_positive_u32, identity_positive_u32, 5u32 -> bytes("i5e"), 99u32 -> bytes("i99e"), ::std::u32::MAX -> bytes(format!("i{}e", ::std::u32::MAX).as_slice())) gen_complete_test!(encodes_zero_u64, tobencode_zero_u64, identity_zero_u64, 0u64 -> bytes("i0e")) gen_complete_test!(encodes_positive_u64, tobencode_positive_u64, identity_positive_u64, 5u64 -> bytes("i5e"), 99u64 -> bytes("i99e"), ::std::u64::MAX / 2 -> bytes(format!("i{}e", ::std::u64::MAX / 2).as_slice())) gen_complete_test!(encodes_bool, 
tobencode_bool, identity_bool, true -> bytes("4:true"), false -> bytes("5:false")) gen_complete_test!(encodes_zero_f32, tobencode_zero_f32, identity_zero_f32, 0.0f32 -> bytes("1:0")) gen_complete_test!(encodes_positive_f32, tobencode_positive_f32, identity_positive_f32, 99.0f32 -> bytes("2:63"), 101.12345f32 -> bytes("8:65.1f9a8")) gen_complete_test!(encodes_negative_f32, tobencode_negative_f32, identity_negative_f32, -99.0f32 -> bytes("3:-63"), -101.12345f32 -> bytes("9:-65.1f9a8")) gen_complete_test!(encodes_zero_f64, tobencode_zero_f64, identity_zero_f64, 0.0f64 -> bytes("1:0")) gen_complete_test!(encodes_positive_f64, tobencode_positive_f64, identity_positive_f64, 99.0f64 -> bytes("2:63"), 101.12345f64 -> bytes("15:65.1f9a6b50b0f4")) gen_complete_test!(encodes_negative_f64, tobencode_negative_f64, identity_negative_f64, -99.0f64 -> bytes("3:-63"), -101.12345f64 -> bytes("16:-65.1f9a6b50b0f4")) gen_complete_test!(encodes_lower_letter_char, tobencode_lower_letter_char, identity_lower_letter_char, 'a' -> bytes("1:a"), 'c' -> bytes("1:c"), 'z' -> bytes("1:z")) gen_complete_test!(encodes_upper_letter_char, tobencode_upper_letter_char, identity_upper_letter_char, 'A' -> bytes("1:A"), 'C' -> bytes("1:C"), 'Z' -> bytes("1:Z")) gen_complete_test!(encodes_multibyte_char, tobencode_multibyte_char, identity_multibyte_char, 'ệ' -> bytes("3:ệ"), '虎' -> bytes("3:虎")) gen_complete_test!(encodes_control_char, tobencode_control_char, identity_control_char, '\n' -> bytes("1:\n"), '\r' -> bytes("1:\r"), '\0' -> bytes("1:\0")) gen_complete_test!(encode_empty_str, tobencode_empty_str, identity_empty_str, "".to_string() -> bytes("0:")) gen_complete_test!(encode_str, tobencode_str, identity_str, "a".to_string() -> bytes("1:a"), "foo".to_string() -> bytes("3:foo"), "This is nice!?#$%".to_string() -> bytes("17:This is nice!?#$%")) gen_complete_test!(encode_str_with_multibyte_chars, tobencode_str_with_multibyte_chars, identity_str_with_multibyte_chars, "Löwe 老虎 Léopard".to_string() -> 
bytes("21:Löwe 老虎 Léopard"), "いろはにほへとちりぬるを".to_string() -> bytes("36:いろはにほへとちりぬるを")) gen_complete_test!(encodes_empty_vec, tobencode_empty_vec, identity_empty_vec, { let empty: Vec<u8> = Vec::new(); empty } -> bytes("le")) gen_complete_test!(encodes_nonmpty_vec, tobencode_nonmpty_vec, identity_nonmpty_vec, vec![0i, 1i, 3i, 4i] -> bytes("li0ei1ei3ei4ee"), vec!["foo".to_string(), "b".to_string()] -> bytes("l3:foo1:be")) gen_complete_test!(encodes_nested_vec, tobencode_nested_vec, identity_nested_vec, vec![vec![1i], vec![2i, 3i], vec![]] -> bytes("lli1eeli2ei3eelee")) #[deriving(Eq, PartialEq, Show, Encodable, Decodable)] struct SimpleStruct { a: uint, b: Vec<String>, } #[deriving(Eq, PartialEq, Show, Encodable, Decodable)] struct InnerStruct { field_one: (), list: Vec<uint>, abc: String } #[deriving(Eq, PartialEq, Show, Encodable, Decodable)] struct OuterStruct { inner: Vec<InnerStruct>, is_true: bool } gen_encode_identity_test!(encodes_struct, identity_struct, SimpleStruct { b: vec!["foo".to_string(), "baar".to_string()], a: 123 } -> bytes("d1:ai123e1:bl3:foo4:baaree"), SimpleStruct { a: 1234567890, b: vec![] } -> bytes("d1:ai1234567890e1:blee")) gen_encode_identity_test!(encodes_nested_struct, identity_nested_struct, OuterStruct { is_true: true, inner: vec![InnerStruct { field_one: (), list: vec![99u, 5u], abc: "rust".to_string() }, InnerStruct { field_one: (), list: vec![], abc: "".to_string() }] } -> bytes("d\ 5:inner\ l\ d\ 3:abc4:rust\ 9:field_one0:\ 4:list\ l\ i99e\ i5e\ e\ e\ d\ 3:abc0:\ 9:field_one0:\ 4:listle\ e\ e\ 7:is_true4:true\ e")) macro_rules! 
map(($m:ident, $(($key:expr, $val:expr)),*) => {{ let mut _m = $m::new(); $(_m.insert($key, $val);)* _m }}) gen_complete_test!(encodes_hashmap, bencode_hashmap, identity_hashmap, map!(HashMap, ("a".to_string(), 1i)) -> bytes("d1:ai1ee"), map!(HashMap, ("foo".to_string(), "a".to_string()), ("bar".to_string(), "bb".to_string())) -> bytes("d3:bar2:bb3:foo1:ae")) gen_complete_test!(encodes_nested_hashmap, bencode_nested_hashmap, identity_nested_hashmap, map!(HashMap, ("a".to_string(), map!(HashMap, ("foo".to_string(), 101i), ("bar".to_string(), 102i)))) -> bytes("d1:ad3:bari102e3:fooi101eee")) #[test] fn decode_error_on_wrong_map_key_type() { let benc = Bencode::Dict(map!(TreeMap, (util::ByteString::from_vec(bytes("foo")), Bencode::ByteString(bytes("bar"))))); let mut decoder = Decoder::new(&benc); let res: DecoderResult<TreeMap<int, String>> = Decodable::decode(&mut decoder); assert!(res.is_err()); } #[test] fn encode_error_on_wrong_map_key_type() { let m = map!(HashMap, (1i, "foo")); let encoded = Encoder::buffer_encode(&m); assert!(encoded.is_err()) } #[test] fn encodes_struct_fields_in_sorted_order() { #[deriving(Encodable)] struct OrderedStruct { z: int, a: int, ab: int, aa: int, } let s = OrderedStruct { z: 4, a: 1, ab: 3, aa: 2 }; assert_eq!(Encoder::buffer_encode(&s), Ok(bytes("d1:ai1e2:aai2e2:abi3e1:zi4ee"))); } #[deriving(Encodable, Decodable, Eq, PartialEq, Show, Clone)] struct OptionalStruct { a: Option<int>, b: int, c: Option<Vec<Option<bool>>>, } #[deriving(Encodable, Decodable, Eq, PartialEq, Show)] struct OptionalStructOuter { a: Option<OptionalStruct>, b: Option<int>, } static OPT_STRUCT: OptionalStruct = OptionalStruct { a: None, b: 10, c: None }; #[test] fn struct_option_none_fields_are_not_encoded() { assert_encoding!(OPT_STRUCT.clone(), bytes("d1:bi10ee")); } #[test] fn struct_options_not_present_default_to_none() { assert_decoding!(bytes("d1:bi10ee"), OPT_STRUCT.clone()); } gen_encode_identity_test!(encodes_nested_struct_fields, 
identity_nested_struct_field, { OptionalStructOuter { a: Some(OPT_STRUCT.clone()), b: None } } -> bytes("d1:ad1:bi10eee"), { let a = OptionalStruct { a: None, b: 10, c: Some(vec![Some(true), None]) }; OptionalStructOuter { a: Some(a), b: Some(99) } } -> bytes("d1:ad1:bi10e1:cl4:true3:nilee1:bi99ee")) fn try_bencode(bencode: Bencode) -> Vec<u8> { match bencode.to_bytes() { Ok(v) => v, Err(err) => panic!("Unexpected error: {}", err) } } #[test] fn encodes_empty_bytestring() { assert_eq!(try_bencode(Bencode::ByteString(Vec::new())), bytes("0:")); } #[test] fn encodes_nonempty_bytestring() { assert_eq!(try_bencode(Bencode::ByteString(b"abc".to_vec())), bytes("3:abc")); assert_eq!(try_bencode(Bencode::ByteString(vec![0, 1, 2, 3])), bytes("4:\x00\x01\x02\x03")); } #[test] fn encodes_empty_list() { assert_eq!(try_bencode(Bencode::List(Vec::new())), bytes("le")); } #[test] fn encodes_nonempty_list() { assert_eq!(try_bencode(Bencode::List(vec![Bencode::Number(1)])), bytes("li1ee")); assert_eq!(try_bencode(Bencode::List(vec![Bencode::ByteString("foobar".as_bytes().to_vec()), Bencode::Number(-1)])), bytes("l6:foobari-1ee")); } #[test] fn encodes_nested_list() { assert_eq!(try_bencode(Bencode::List(vec![Bencode::List(vec![])])), bytes("llee")); let list = Bencode::List(vec![Bencode::Number(1988), Bencode::List(vec![Bencode::Number(2014)])]); assert_eq!(try_bencode(list), bytes("li1988eli2014eee")); } #[test] fn encodes_empty_dict() { assert_eq!(try_bencode(Bencode::Dict(TreeMap::new())), bytes("de")); } #[test] fn encodes_dict_with_items() { let mut m = TreeMap::new(); m.insert(util::ByteString::from_str("k1"), Bencode::Number(1)); assert_eq!(try_bencode(Bencode::Dict(m.clone())), bytes("d2:k1i1ee")); m.insert(util::ByteString::from_str("k2"), Bencode::ByteString(vec![0, 0])); assert_eq!(try_bencode(Bencode::Dict(m.clone())), bytes("d2:k1i1e2:k22:\0\0e")); } #[test] fn encodes_nested_dict() { let mut outer = TreeMap::new(); let mut inner = TreeMap::new(); 
inner.insert(util::ByteString::from_str("val"), Bencode::ByteString(vec![68, 0, 90])); outer.insert(util::ByteString::from_str("inner"), Bencode::Dict(inner)); assert_eq!(try_bencode(Bencode::Dict(outer)), bytes("d5:innerd3:val3:D\0Zee")); } #[test] fn encodes_dict_fields_in_sorted_order() { let mut m = TreeMap::new(); m.insert(util::ByteString::from_str("z"), Bencode::Number(1)); m.insert(util::ByteString::from_str("abd"), Bencode::Number(3)); m.insert(util::ByteString::from_str("abc"), Bencode::Number(2)); assert_eq!(try_bencode(Bencode::Dict(m)), bytes("d3:abci2e3:abdi3e1:zi1ee")); } fn assert_decoded_eq(events: &[BencodeEvent], expected: Result<Bencode, Error>) { let mut parser = Parser::new(events.to_vec().into_iter()); let result = parser.parse(); assert_eq!(expected, result); } #[test] fn decodes_empty_input() { assert_decoded_eq([], Ok(Bencode::Empty)); } #[test] fn decodes_number() { assert_decoded_eq([NumberValue(25)], Ok(Bencode::Number(25))); } #[test] fn decodes_bytestring() { assert_decoded_eq([ByteStringValue(bytes("foo"))], Ok(Bencode::ByteString(bytes("foo")))); } #[test] fn decodes_empty_list() { assert_decoded_eq([ListStart, ListEnd], Ok(Bencode::List(vec![]))); } #[test] fn decodes_list_with_elements() { assert_decoded_eq([ListStart, NumberValue(1), ListEnd], Ok(Bencode::List(vec![Bencode::Number(1)]))); assert_decoded_eq([ListStart, ByteStringValue(bytes("str")), NumberValue(11), ListEnd], Ok(Bencode::List(vec![Bencode::ByteString(bytes("str")), Bencode::Number(11)]))); } #[test] fn decodes_nested_list() { assert_decoded_eq([ListStart, ListStart, NumberValue(13), ListEnd, ByteStringValue(bytes("rust")), ListEnd], Ok(Bencode::List(vec![Bencode::List(vec![Bencode::Number(13)]), Bencode::ByteString(bytes("rust"))]))); } #[test] fn decodes_empty_dict() { assert_decoded_eq([DictStart, DictEnd], Ok(Bencode::Dict(TreeMap::new()))); } #[test] fn decodes_dict_with_value() { let mut map = TreeMap::new(); map.insert(util::ByteString::from_str("foo"), 
Bencode::ByteString(bytes("rust"))); assert_decoded_eq([DictStart, DictKey(bytes("foo")), ByteStringValue(bytes("rust")), DictEnd], Ok(Bencode::Dict(map))); } #[test] fn decodes_dict_with_values() { let mut map = TreeMap::new(); map.insert(util::ByteString::from_str("num"), Bencode::Number(9)); map.insert(util::ByteString::from_str("str"), Bencode::ByteString(bytes("abc"))); map.insert(util::ByteString::from_str("list"), Bencode::List(vec![Bencode::Number(99)])); assert_decoded_eq([DictStart, DictKey(bytes("num")), NumberValue(9), DictKey(bytes("str")), ByteStringValue(bytes("abc")), DictKey(bytes("list")), ListStart, NumberValue(99), ListEnd, DictEnd], Ok(Bencode::Dict(map))); } #[test] fn decodes_nested_dict() { let mut inner = TreeMap::new(); inner.insert(util::ByteString::from_str("inner"), Bencode::Number(2)); let mut outer = TreeMap::new(); outer.insert(util::ByteString::from_str("dict"), Bencode::Dict(inner)); outer.insert(util::ByteString::from_str("outer"), Bencode::Number(1)); assert_decoded_eq([DictStart, DictKey(bytes("outer")), NumberValue(1), DictKey(bytes("dict")), DictStart, DictKey(bytes("inner")), NumberValue(2), DictEnd, DictEnd], Ok(Bencode::Dict(outer))); } #[test] fn decode_error_on_parse_error() { let err = Error{ pos: 1, msg: "error msg".to_string() }; let perr = ParseError(err.clone()); assert_decoded_eq([perr.clone()], Err(err.clone())); assert_decoded_eq([NumberValue(1), perr.clone()], Err(err.clone())); assert_decoded_eq([ListStart, perr.clone()], Err(err.clone())); assert_decoded_eq([ListStart, ByteStringValue(bytes("foo")), perr.clone()], Err(err.clone())); assert_decoded_eq([DictStart, perr.clone()], Err(err.clone())); assert_decoded_eq([DictStart, DictKey(bytes("foo")), perr.clone()], Err(err.clone())); } } #[cfg(test)] mod bench { extern crate test; use self::test::Bencher; use std::io; use serialize::{Encodable, Decodable}; use streaming::StreamingParser; use super::{Encoder, Decoder, Parser, DecoderResult}; #[bench] fn 
encode_large_vec_of_uint(bh: &mut Bencher) { let v = Vec::from_fn(100, |n| n); bh.iter(|| { let mut w = io::MemWriter::with_capacity(v.len() * 10); { let mut enc = Encoder::new(&mut w); let _ = v.encode(&mut enc); } w.unwrap() }); bh.bytes = v.len() as u64 * 4; } #[bench] fn decode_large_vec_of_uint(bh: &mut Bencher) { let v = Vec::from_fn(100, |n| n); let b = Encoder::buffer_encode(&v).unwrap(); bh.iter(|| { let streaming_parser = StreamingParser::new(b.clone().into_iter()); let mut parser = Parser::new(streaming_parser); let bencode = parser.parse().unwrap(); let mut decoder = Decoder::new(&bencode); let result: DecoderResult<Vec<uint>> = Decodable::decode(&mut decoder); result }); bh.bytes = b.len() as u64 * 4; } }
//! //! Raw communication channel to the FUSE kernel driver. //! use std::io; use std::ffi::{CString, CStr, OsStr, AsOsStr}; use std::os::unix::ffi::OsStrExt; use std::path::{PathBuf, Path}; use libc::{c_char, c_int, c_void, size_t}; use fuse::{fuse_args, fuse_mount_compat25}; // Libc provides iovec based I/O using readv and writev functions #[allow(dead_code, non_camel_case_types)] mod libc { use libc::{c_char, c_int, c_void, size_t, ssize_t}; /// Iovec data structure for readv and writev calls. #[repr(C)] pub struct iovec { pub iov_base: *const c_void, pub iov_len: size_t, } extern "system" { /// Read data from fd into multiple buffers pub fn readv (fd: c_int, iov: *mut iovec, iovcnt: c_int) -> ssize_t; /// Write data from multiple buffers to fd pub fn writev (fd: c_int, iov: *const iovec, iovcnt: c_int) -> ssize_t; pub fn realpath (file_name: *const c_char, resolved_name: *mut c_char) -> *const c_char; #[cfg(target_os = "macos")] pub fn unmount(dir: *const c_char, flags: c_int) -> c_int; #[cfg(not(target_os = "macos"))] pub fn umount(dir: *const c_char) -> c_int; } /// Max length for path names. 4096 should be reasonable safe (OS X uses 1024, Linux uses 4096) pub const PATH_MAX: usize = 4096; } /// Wrapper around libc's realpath. Returns the errno value if the real path cannot be obtained. 
/// FIXME: Use Rust's realpath method once available in std (see also https://github.com/mozilla/rust/issues/11857) fn real_path (path: &CStr) -> io::Result<CString> { let mut resolved: Vec<c_char> = Vec::with_capacity(libc::PATH_MAX); unsafe { if libc::realpath(path.as_ptr(), resolved.as_mut_ptr()).is_null() { Err(io::Error::last_os_error()) } else { // Using CStr::from_ptr gets the correct string length via strlen() let cresolved = CStr::from_ptr(resolved.as_ptr()); Ok(CString::new(cresolved.to_bytes()).unwrap()) } } } /// Helper function to provide options as a fuse_args struct /// (which contains an argc count and an argv pointer) fn with_fuse_args<T, F: FnOnce(&fuse_args) -> T> (options: &[&OsStr], f: F) -> T { let mut args: Vec<CString> = vec![CString::new("rust-fuse").unwrap()]; args.extend(options.iter().map(|s| s.to_cstring().unwrap() )); let argptrs: Vec<*const i8> = args.iter().map(|s| s.as_ptr()).collect(); f(&fuse_args { argc: argptrs.len() as i32, argv: argptrs.as_ptr(), allocated: 0 }) } /// A raw communication channel to the FUSE kernel driver pub struct Channel { mountpoint: PathBuf, fd: c_int, } impl Channel { /// Create a new communication channel to the kernel driver by mounting the /// given path. The kernel driver will delegate filesystem operations of /// the given path to the channel. If the channel is dropped, the path is /// unmounted. pub fn new (mountpoint: &Path, options: &[&OsStr]) -> io::Result<Channel> { let mnt = try!(mountpoint.as_os_str().to_cstring()); real_path(&mnt).and_then(|mnt| { with_fuse_args(options, |args| { let fd = unsafe { fuse_mount_compat25(mnt.as_ptr(), args) }; if fd < 0 { Err(io::Error::last_os_error()) } else { let mountpoint = PathBuf::from(<OsStr as OsStrExt>::from_bytes(mnt.as_bytes())); Ok(Channel { mountpoint: mountpoint, fd: fd }) } }) }) } /// Return path of the mounted filesystem pub fn mountpoint (&self) -> &Path { &self.mountpoint } /// Receives data up to the capacity of the given buffer (can block). 
pub fn receive (&self, buffer: &mut Vec<u8>) -> io::Result<()> { let rc = unsafe { ::libc::read(self.fd, buffer.as_ptr() as *mut c_void, buffer.capacity() as size_t) }; if rc < 0 { Err(io::Error::last_os_error()) } else { unsafe { buffer.set_len(rc as usize); } Ok(()) } } /// Returns a sender object for this channel. The sender object can be /// used to send to the channel. Multiple sender objects can be used /// and they can safely be sent to other threads. pub fn sender (&self) -> ChannelSender { // Since write/writev syscalls are threadsafe, we can simply create // a sender by using the same fd and use it in other threads. Only // the channel closes the fd when dropped. If any sender is used after // dropping the channel, it'll return an EBADF error. ChannelSender { fd: self.fd } } } impl Drop for Channel { fn drop (&mut self) { // TODO: send ioctl FUSEDEVIOCSETDAEMONDEAD on OS X before closing the fd // Close the communication channel to the kernel driver // (closing it before unnmount prevents sync unmount deadlock) unsafe { ::libc::close(self.fd); } // Unmount this channel's mount point let _ = unmount(&self.mountpoint); } } #[derive(Clone, Copy)] pub struct ChannelSender { fd: c_int, } impl ChannelSender { /// Send all data in the slice of slice of bytes in a single write (can block). pub fn send (&self, buffer: &[&[u8]]) -> io::Result<()> { let iovecs: Vec<libc::iovec> = buffer.iter().map(|d| { libc::iovec { iov_base: d.as_ptr() as *const c_void, iov_len: d.len() as size_t } }).collect(); let rc = unsafe { libc::writev(self.fd, iovecs.as_ptr(), iovecs.len() as c_int) }; if rc < 0 { Err(io::Error::last_os_error()) } else { Ok(()) } } } /// Unmount an arbitrary mount point pub fn unmount (mountpoint: &Path) -> io::Result<()> { // fuse_unmount_compat22 unfortunately doesn't return a status. Additionally, // it attempts to call realpath, which in turn calls into the filesystem. 
So // if the filesystem returns an error, the unmount does not take place, with // no indication of the error available to the caller. So we call unmount // directly, which is what osxfuse does anyway, since we already converted // to the real path when we first mounted. #[cfg(target_os = "macos")] #[inline] fn libc_umount (mnt: &CStr) -> c_int { unsafe { libc::unmount(mnt.as_ptr(), 0) } } #[cfg(not(target_os = "macos"))] #[inline] fn libc_umount (mnt: &CStr) -> c_int { unsafe { libc::umount(mnt.as_ptr()) } } let mnt = try!(mountpoint.as_os_str().to_cstring()); let rc = libc_umount(&mnt); if rc < 0 { Err(io::Error::last_os_error()) } else { Ok(()) } } #[cfg(test)] mod test { use super::with_fuse_args; use std::ffi::{CStr, OsStr}; #[test] fn fuse_args () { with_fuse_args(&[OsStr::from_str("foo"), OsStr::from_str("bar")], |args| { assert_eq!(args.argc, 3); assert_eq!(unsafe { CStr::from_ptr(*args.argv.offset(0)).to_bytes() }, b"rust-fuse"); assert_eq!(unsafe { CStr::from_ptr(*args.argv.offset(1)).to_bytes() }, b"foo"); assert_eq!(unsafe { CStr::from_ptr(*args.argv.offset(2)).to_bytes() }, b"bar"); }); } } convert OsStr::from_str() to OsStr::new() //! //! Raw communication channel to the FUSE kernel driver. //! use std::io; use std::ffi::{CString, CStr, OsStr, AsOsStr}; use std::os::unix::ffi::OsStrExt; use std::path::{PathBuf, Path}; use libc::{c_char, c_int, c_void, size_t}; use fuse::{fuse_args, fuse_mount_compat25}; // Libc provides iovec based I/O using readv and writev functions #[allow(dead_code, non_camel_case_types)] mod libc { use libc::{c_char, c_int, c_void, size_t, ssize_t}; /// Iovec data structure for readv and writev calls. 
#[repr(C)] pub struct iovec { pub iov_base: *const c_void, pub iov_len: size_t, } extern "system" { /// Read data from fd into multiple buffers pub fn readv (fd: c_int, iov: *mut iovec, iovcnt: c_int) -> ssize_t; /// Write data from multiple buffers to fd pub fn writev (fd: c_int, iov: *const iovec, iovcnt: c_int) -> ssize_t; pub fn realpath (file_name: *const c_char, resolved_name: *mut c_char) -> *const c_char; #[cfg(target_os = "macos")] pub fn unmount(dir: *const c_char, flags: c_int) -> c_int; #[cfg(not(target_os = "macos"))] pub fn umount(dir: *const c_char) -> c_int; } /// Max length for path names. 4096 should be reasonable safe (OS X uses 1024, Linux uses 4096) pub const PATH_MAX: usize = 4096; } /// Wrapper around libc's realpath. Returns the errno value if the real path cannot be obtained. /// FIXME: Use Rust's realpath method once available in std (see also https://github.com/mozilla/rust/issues/11857) fn real_path (path: &CStr) -> io::Result<CString> { let mut resolved: Vec<c_char> = Vec::with_capacity(libc::PATH_MAX); unsafe { if libc::realpath(path.as_ptr(), resolved.as_mut_ptr()).is_null() { Err(io::Error::last_os_error()) } else { // Using CStr::from_ptr gets the correct string length via strlen() let cresolved = CStr::from_ptr(resolved.as_ptr()); Ok(CString::new(cresolved.to_bytes()).unwrap()) } } } /// Helper function to provide options as a fuse_args struct /// (which contains an argc count and an argv pointer) fn with_fuse_args<T, F: FnOnce(&fuse_args) -> T> (options: &[&OsStr], f: F) -> T { let mut args: Vec<CString> = vec![CString::new("rust-fuse").unwrap()]; args.extend(options.iter().map(|s| s.to_cstring().unwrap() )); let argptrs: Vec<*const i8> = args.iter().map(|s| s.as_ptr()).collect(); f(&fuse_args { argc: argptrs.len() as i32, argv: argptrs.as_ptr(), allocated: 0 }) } /// A raw communication channel to the FUSE kernel driver pub struct Channel { mountpoint: PathBuf, fd: c_int, } impl Channel { /// Create a new communication channel to 
the kernel driver by mounting the /// given path. The kernel driver will delegate filesystem operations of /// the given path to the channel. If the channel is dropped, the path is /// unmounted. pub fn new (mountpoint: &Path, options: &[&OsStr]) -> io::Result<Channel> { let mnt = try!(mountpoint.as_os_str().to_cstring()); real_path(&mnt).and_then(|mnt| { with_fuse_args(options, |args| { let fd = unsafe { fuse_mount_compat25(mnt.as_ptr(), args) }; if fd < 0 { Err(io::Error::last_os_error()) } else { let mountpoint = PathBuf::from(<OsStr as OsStrExt>::from_bytes(mnt.as_bytes())); Ok(Channel { mountpoint: mountpoint, fd: fd }) } }) }) } /// Return path of the mounted filesystem pub fn mountpoint (&self) -> &Path { &self.mountpoint } /// Receives data up to the capacity of the given buffer (can block). pub fn receive (&self, buffer: &mut Vec<u8>) -> io::Result<()> { let rc = unsafe { ::libc::read(self.fd, buffer.as_ptr() as *mut c_void, buffer.capacity() as size_t) }; if rc < 0 { Err(io::Error::last_os_error()) } else { unsafe { buffer.set_len(rc as usize); } Ok(()) } } /// Returns a sender object for this channel. The sender object can be /// used to send to the channel. Multiple sender objects can be used /// and they can safely be sent to other threads. pub fn sender (&self) -> ChannelSender { // Since write/writev syscalls are threadsafe, we can simply create // a sender by using the same fd and use it in other threads. Only // the channel closes the fd when dropped. If any sender is used after // dropping the channel, it'll return an EBADF error. 
ChannelSender { fd: self.fd } } } impl Drop for Channel { fn drop (&mut self) { // TODO: send ioctl FUSEDEVIOCSETDAEMONDEAD on OS X before closing the fd // Close the communication channel to the kernel driver // (closing it before unnmount prevents sync unmount deadlock) unsafe { ::libc::close(self.fd); } // Unmount this channel's mount point let _ = unmount(&self.mountpoint); } } #[derive(Clone, Copy)] pub struct ChannelSender { fd: c_int, } impl ChannelSender { /// Send all data in the slice of slice of bytes in a single write (can block). pub fn send (&self, buffer: &[&[u8]]) -> io::Result<()> { let iovecs: Vec<libc::iovec> = buffer.iter().map(|d| { libc::iovec { iov_base: d.as_ptr() as *const c_void, iov_len: d.len() as size_t } }).collect(); let rc = unsafe { libc::writev(self.fd, iovecs.as_ptr(), iovecs.len() as c_int) }; if rc < 0 { Err(io::Error::last_os_error()) } else { Ok(()) } } } /// Unmount an arbitrary mount point pub fn unmount (mountpoint: &Path) -> io::Result<()> { // fuse_unmount_compat22 unfortunately doesn't return a status. Additionally, // it attempts to call realpath, which in turn calls into the filesystem. So // if the filesystem returns an error, the unmount does not take place, with // no indication of the error available to the caller. So we call unmount // directly, which is what osxfuse does anyway, since we already converted // to the real path when we first mounted. 
#[cfg(target_os = "macos")] #[inline] fn libc_umount (mnt: &CStr) -> c_int { unsafe { libc::unmount(mnt.as_ptr(), 0) } } #[cfg(not(target_os = "macos"))] #[inline] fn libc_umount (mnt: &CStr) -> c_int { unsafe { libc::umount(mnt.as_ptr()) } } let mnt = try!(mountpoint.as_os_str().to_cstring()); let rc = libc_umount(&mnt); if rc < 0 { Err(io::Error::last_os_error()) } else { Ok(()) } } #[cfg(test)] mod test { use super::with_fuse_args; use std::ffi::{CStr, OsStr}; #[test] fn fuse_args () { with_fuse_args(&[OsStr::new("foo"), OsStr::new("bar")], |args| { assert_eq!(args.argc, 3); assert_eq!(unsafe { CStr::from_ptr(*args.argv.offset(0)).to_bytes() }, b"rust-fuse"); assert_eq!(unsafe { CStr::from_ptr(*args.argv.offset(1)).to_bytes() }, b"foo"); assert_eq!(unsafe { CStr::from_ptr(*args.argv.offset(2)).to_bytes() }, b"bar"); }); } }
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Formatting and tools for comments. use std::{self, borrow::Cow, iter}; use itertools::{multipeek, MultiPeek}; use syntax::codemap::Span; use config::Config; use rewrite::RewriteContext; use shape::{Indent, Shape}; use string::{rewrite_string, StringFormat}; use utils::{count_newlines, first_line_width, last_line_width}; fn is_custom_comment(comment: &str) -> bool { if !comment.starts_with("//") { false } else if let Some(c) = comment.chars().nth(2) { !c.is_alphanumeric() && !c.is_whitespace() } else { false } } #[derive(Copy, Clone, PartialEq, Eq)] pub enum CommentStyle<'a> { DoubleSlash, TripleSlash, Doc, SingleBullet, DoubleBullet, Exclamation, Custom(&'a str), } fn custom_opener(s: &str) -> &str { s.lines().next().map_or("", |first_line| { first_line .find(' ') .map_or(first_line, |space_index| &first_line[0..space_index + 1]) }) } impl<'a> CommentStyle<'a> { pub fn is_doc_comment(&self) -> bool { match *self { CommentStyle::TripleSlash | CommentStyle::Doc => true, _ => false, } } pub fn opener(&self) -> &'a str { match *self { CommentStyle::DoubleSlash => "// ", CommentStyle::TripleSlash => "/// ", CommentStyle::Doc => "//! ", CommentStyle::SingleBullet => "/* ", CommentStyle::DoubleBullet => "/** ", CommentStyle::Exclamation => "/*! ", CommentStyle::Custom(opener) => opener, } } pub fn closer(&self) -> &'a str { match *self { CommentStyle::DoubleSlash | CommentStyle::TripleSlash | CommentStyle::Custom(..) 
| CommentStyle::Doc => "", CommentStyle::DoubleBullet => " **/", CommentStyle::SingleBullet | CommentStyle::Exclamation => " */", } } pub fn line_start(&self) -> &'a str { match *self { CommentStyle::DoubleSlash => "// ", CommentStyle::TripleSlash => "/// ", CommentStyle::Doc => "//! ", CommentStyle::SingleBullet | CommentStyle::Exclamation => " * ", CommentStyle::DoubleBullet => " ** ", CommentStyle::Custom(opener) => opener, } } pub fn to_str_tuplet(&self) -> (&'a str, &'a str, &'a str) { (self.opener(), self.closer(), self.line_start()) } pub fn line_with_same_comment_style(&self, line: &str, normalize_comments: bool) -> bool { match *self { CommentStyle::DoubleSlash | CommentStyle::TripleSlash | CommentStyle::Doc => { line.trim_left().starts_with(self.line_start().trim_left()) || comment_style(line, normalize_comments) == *self } CommentStyle::DoubleBullet | CommentStyle::SingleBullet | CommentStyle::Exclamation => { line.trim_left().starts_with(self.closer().trim_left()) || line.trim_left().starts_with(self.line_start().trim_left()) || comment_style(line, normalize_comments) == *self } CommentStyle::Custom(opener) => line.trim_left().starts_with(opener.trim_right()), } } } fn comment_style(orig: &str, normalize_comments: bool) -> CommentStyle { if !normalize_comments { if orig.starts_with("/**") && !orig.starts_with("/**/") { CommentStyle::DoubleBullet } else if orig.starts_with("/*!") { CommentStyle::Exclamation } else if orig.starts_with("/*") { CommentStyle::SingleBullet } else if orig.starts_with("///") && orig.chars().nth(3).map_or(true, |c| c != '/') { CommentStyle::TripleSlash } else if orig.starts_with("//!") { CommentStyle::Doc } else if is_custom_comment(orig) { CommentStyle::Custom(custom_opener(orig)) } else { CommentStyle::DoubleSlash } } else if (orig.starts_with("///") && orig.chars().nth(3).map_or(true, |c| c != '/')) || (orig.starts_with("/**") && !orig.starts_with("/**/")) { CommentStyle::TripleSlash } else if orig.starts_with("//!") || 
orig.starts_with("/*!") { CommentStyle::Doc } else if is_custom_comment(orig) { CommentStyle::Custom(custom_opener(orig)) } else { CommentStyle::DoubleSlash } } /// Combine `prev_str` and `next_str` into a single `String`. `span` may contain /// comments between two strings. If there are such comments, then that will be /// recovered. If `allow_extend` is true and there is no comment between the two /// strings, then they will be put on a single line as long as doing so does not /// exceed max width. pub fn combine_strs_with_missing_comments( context: &RewriteContext, prev_str: &str, next_str: &str, span: Span, shape: Shape, allow_extend: bool, ) -> Option<String> { let mut result = String::with_capacity(prev_str.len() + next_str.len() + shape.indent.width() + 128); result.push_str(prev_str); let mut allow_one_line = !prev_str.contains('\n') && !next_str.contains('\n'); let first_sep = if prev_str.is_empty() || next_str.is_empty() { "" } else { " " }; let mut one_line_width = last_line_width(prev_str) + first_line_width(next_str) + first_sep.len(); let config = context.config; let indent = shape.indent; let missing_comment = rewrite_missing_comment(span, shape, context)?; if missing_comment.is_empty() { if allow_extend && prev_str.len() + first_sep.len() + next_str.len() <= shape.width { result.push_str(first_sep); } else if !prev_str.is_empty() { result.push_str(&indent.to_string_with_newline(config)) } result.push_str(next_str); return Some(result); } // We have a missing comment between the first expression and the second expression. // Peek the the original source code and find out whether there is a newline between the first // expression and the second expression or the missing comment. We will preserve the original // layout whenever possible. 
let original_snippet = context.snippet(span); let prefer_same_line = if let Some(pos) = original_snippet.find('/') { !original_snippet[..pos].contains('\n') } else { !original_snippet.contains('\n') }; one_line_width -= first_sep.len(); let first_sep = if prev_str.is_empty() || missing_comment.is_empty() { Cow::from("") } else { let one_line_width = last_line_width(prev_str) + first_line_width(&missing_comment) + 1; if prefer_same_line && one_line_width <= shape.width { Cow::from(" ") } else { indent.to_string_with_newline(config) } }; result.push_str(&first_sep); result.push_str(&missing_comment); let second_sep = if missing_comment.is_empty() || next_str.is_empty() { Cow::from("") } else if missing_comment.starts_with("//") { indent.to_string_with_newline(config) } else { one_line_width += missing_comment.len() + first_sep.len() + 1; allow_one_line &= !missing_comment.starts_with("//") && !missing_comment.contains('\n'); if prefer_same_line && allow_one_line && one_line_width <= shape.width { Cow::from(" ") } else { indent.to_string_with_newline(config) } }; result.push_str(&second_sep); result.push_str(next_str); Some(result) } pub fn rewrite_doc_comment(orig: &str, shape: Shape, config: &Config) -> Option<String> { _rewrite_comment(orig, false, shape, config, true) } pub fn rewrite_comment( orig: &str, block_style: bool, shape: Shape, config: &Config, ) -> Option<String> { _rewrite_comment(orig, block_style, shape, config, false) } fn _rewrite_comment( orig: &str, block_style: bool, shape: Shape, config: &Config, is_doc_comment: bool, ) -> Option<String> { // If there are lines without a starting sigil, we won't format them correctly // so in that case we won't even re-align (if !config.normalize_comments()) and // we should stop now. 
let num_bare_lines = orig .lines() .map(|line| line.trim()) .filter(|l| !(l.starts_with('*') || l.starts_with("//") || l.starts_with("/*"))) .count(); if num_bare_lines > 0 && !config.normalize_comments() { return Some(orig.to_owned()); } if !config.normalize_comments() && !config.wrap_comments() { return light_rewrite_comment(orig, shape.indent, config, is_doc_comment); } identify_comment(orig, block_style, shape, config, is_doc_comment) } fn identify_comment( orig: &str, block_style: bool, shape: Shape, config: &Config, is_doc_comment: bool, ) -> Option<String> { let style = comment_style(orig, false); let first_group = orig .lines() .take_while(|l| style.line_with_same_comment_style(l, false)) .collect::<Vec<_>>() .join("\n"); let rest = orig .lines() .skip(first_group.lines().count()) .collect::<Vec<_>>() .join("\n"); let first_group_str = rewrite_comment_inner( &first_group, block_style, style, shape, config, is_doc_comment || style.is_doc_comment(), )?; if rest.is_empty() { Some(first_group_str) } else { identify_comment(&rest, block_style, shape, config, is_doc_comment).map(|rest_str| { format!( "{}\n{}{}", first_group_str, shape.indent.to_string(config), rest_str ) }) } } fn rewrite_comment_inner( orig: &str, block_style: bool, style: CommentStyle, shape: Shape, config: &Config, is_doc_comment: bool, ) -> Option<String> { let (opener, closer, line_start) = if block_style { CommentStyle::SingleBullet.to_str_tuplet() } else { comment_style(orig, config.normalize_comments()).to_str_tuplet() }; let max_chars = shape .width .checked_sub(closer.len() + opener.len()) .unwrap_or(1); let indent_str = shape.indent.to_string_with_newline(config); let fmt_indent = shape.indent + (opener.len() - line_start.len()); let mut fmt = StringFormat { opener: "", closer: "", line_start, line_end: "", shape: Shape::legacy(max_chars, fmt_indent), trim_end: true, config, }; let line_breaks = count_newlines(orig.trim_right()); let lines = orig .lines() .enumerate() .map(|(i, mut 
line)| { line = trim_right_unless_two_whitespaces(line.trim_left(), is_doc_comment); // Drop old closer. if i == line_breaks && line.ends_with("*/") && !line.starts_with("//") { line = line[..(line.len() - 2)].trim_right(); } line }) .map(|s| left_trim_comment_line(s, &style)) .map(|(line, has_leading_whitespace)| { if orig.starts_with("/*") && line_breaks == 0 { ( line.trim_left(), has_leading_whitespace || config.normalize_comments(), ) } else { (line, has_leading_whitespace || config.normalize_comments()) } }); let mut result = String::with_capacity(orig.len() * 2); result.push_str(opener); let mut code_block_buffer = String::with_capacity(128); let mut is_prev_line_multi_line = false; let mut inside_code_block = false; let comment_line_separator = format!("{}{}", indent_str, line_start); let join_code_block_with_comment_line_separator = |s: &str| { let mut result = String::with_capacity(s.len() + 128); let mut iter = s.lines().peekable(); while let Some(line) = iter.next() { result.push_str(line); result.push_str(match iter.peek() { Some(next_line) if next_line.is_empty() => comment_line_separator.trim_right(), Some(..) => &comment_line_separator, None => "", }); } result }; for (i, (line, has_leading_whitespace)) in lines.enumerate() { let is_last = i == count_newlines(orig); if inside_code_block { if line.starts_with("```") { inside_code_block = false; result.push_str(&comment_line_separator); let code_block = ::format_code_block(&code_block_buffer, config) .unwrap_or_else(|| code_block_buffer.to_owned()); result.push_str(&join_code_block_with_comment_line_separator(&code_block)); code_block_buffer.clear(); result.push_str(&comment_line_separator); result.push_str(line); } else { code_block_buffer.push_str(line); code_block_buffer.push('\n'); if is_last { // There is an code block that is not properly enclosed by backticks. // We will leave them untouched. 
result.push_str(&comment_line_separator); result.push_str(&join_code_block_with_comment_line_separator( &code_block_buffer, )); } } continue; } else { inside_code_block = line.starts_with("```"); if result == opener { let force_leading_whitespace = opener == "/* " && count_newlines(orig) == 0; if !has_leading_whitespace && !force_leading_whitespace && result.ends_with(' ') { result.pop(); } if line.is_empty() { continue; } } else if is_prev_line_multi_line && !line.is_empty() { result.push(' ') } else if is_last && !closer.is_empty() && line.is_empty() { result.push_str(&indent_str); } else { result.push_str(&comment_line_separator); if !has_leading_whitespace && result.ends_with(' ') { result.pop(); } } } if config.wrap_comments() && line.len() > fmt.shape.width && !has_url(line) { match rewrite_string(line, &fmt, Some(max_chars)) { Some(ref s) => { is_prev_line_multi_line = s.contains('\n'); result.push_str(s); } None if is_prev_line_multi_line => { // We failed to put the current `line` next to the previous `line`. // Remove the trailing space, then start rewrite on the next line. result.pop(); result.push_str(&comment_line_separator); fmt.shape = Shape::legacy(max_chars, fmt_indent); match rewrite_string(line, &fmt, Some(max_chars)) { Some(ref s) => { is_prev_line_multi_line = s.contains('\n'); result.push_str(s); } None => { is_prev_line_multi_line = false; result.push_str(line); } } } None => { is_prev_line_multi_line = false; result.push_str(line); } } fmt.shape = if is_prev_line_multi_line { // 1 = " " let offset = 1 + last_line_width(&result) - line_start.len(); Shape { width: max_chars.saturating_sub(offset), indent: fmt_indent, offset: fmt.shape.offset + offset, } } else { Shape::legacy(max_chars, fmt_indent) }; } else { if line.is_empty() && result.ends_with(' ') && !is_last { // Remove space if this is an empty comment or a doc comment. 
result.pop(); } result.push_str(line); fmt.shape = Shape::legacy(max_chars, fmt_indent); is_prev_line_multi_line = false; } } result.push_str(closer); if result.ends_with(opener) && opener.ends_with(' ') { // Trailing space. result.pop(); } Some(result) } /// Returns true if the given string MAY include URLs or alike. fn has_url(s: &str) -> bool { // This function may return false positive, but should get its job done in most cases. s.contains("https://") || s.contains("http://") || s.contains("ftp://") || s.contains("file://") } /// Given the span, rewrite the missing comment inside it if available. /// Note that the given span must only include comments (or leading/trailing whitespaces). pub fn rewrite_missing_comment( span: Span, shape: Shape, context: &RewriteContext, ) -> Option<String> { let missing_snippet = context.snippet(span); let trimmed_snippet = missing_snippet.trim(); if !trimmed_snippet.is_empty() { rewrite_comment(trimmed_snippet, false, shape, context.config) } else { Some(String::new()) } } /// Recover the missing comments in the specified span, if available. /// The layout of the comments will be preserved as long as it does not break the code /// and its total width does not exceed the max width. 
pub fn recover_missing_comment_in_span( span: Span, shape: Shape, context: &RewriteContext, used_width: usize, ) -> Option<String> { let missing_comment = rewrite_missing_comment(span, shape, context)?; if missing_comment.is_empty() { Some(String::new()) } else { let missing_snippet = context.snippet(span); let pos = missing_snippet.find('/').unwrap_or(0); // 1 = ` ` let total_width = missing_comment.len() + used_width + 1; let force_new_line_before_comment = missing_snippet[..pos].contains('\n') || total_width > context.config.max_width(); let sep = if force_new_line_before_comment { shape.indent.to_string_with_newline(context.config) } else { Cow::from(" ") }; Some(format!("{}{}", sep, missing_comment)) } } /// Trim trailing whitespaces unless they consist of two or more whitespaces. fn trim_right_unless_two_whitespaces(s: &str, is_doc_comment: bool) -> &str { if is_doc_comment && s.ends_with(" ") { s } else { s.trim_right() } } /// Trims whitespace and aligns to indent, but otherwise does not change comments. fn light_rewrite_comment( orig: &str, offset: Indent, config: &Config, is_doc_comment: bool, ) -> Option<String> { let lines: Vec<&str> = orig .lines() .map(|l| { // This is basically just l.trim(), but in the case that a line starts // with `*` we want to leave one space before it, so it aligns with the // `*` in `/*`. let first_non_whitespace = l.find(|c| !char::is_whitespace(c)); let left_trimmed = if let Some(fnw) = first_non_whitespace { if l.as_bytes()[fnw] == b'*' && fnw > 0 { &l[fnw - 1..] } else { &l[fnw..] } } else { "" }; // Preserve markdown's double-space line break syntax in doc comment. trim_right_unless_two_whitespaces(left_trimmed, is_doc_comment) }) .collect(); Some(lines.join(&format!("\n{}", offset.to_string(config)))) } /// Trims comment characters and possibly a single space from the left of a string. /// Does not trim all whitespace. If a single space is trimmed from the left of the string, /// this function returns true. 
fn left_trim_comment_line<'a>(line: &'a str, style: &CommentStyle) -> (&'a str, bool) {
    // NOTE: branch order matters — longer/more specific openers must be tested
    // before their prefixes (e.g. "//! " before "//!", "//!" before "//").
    if line.starts_with("//! ") || line.starts_with("/// ") || line.starts_with("/*! ")
        || line.starts_with("/** ")
    {
        // Four-byte opener followed by a space: both are consumed, space was trimmed.
        (&line[4..], true)
    } else if let CommentStyle::Custom(opener) = *style {
        // Custom opener (e.g. "//@ "): `opener` includes its trailing space, so
        // stripping the full opener means a space was trimmed; otherwise strip
        // only the non-whitespace part and report no trimmed space.
        if line.starts_with(opener) {
            (&line[opener.len()..], true)
        } else {
            (&line[opener.trim_right().len()..], false)
        }
    } else if line.starts_with("/* ") || line.starts_with("// ") || line.starts_with("//!")
        || line.starts_with("///") || line.starts_with("** ") || line.starts_with("/*!")
        || (line.starts_with("/**") && !line.starts_with("/**/"))
    {
        // Three bytes consumed; a space was trimmed iff the third byte was ' '.
        (&line[3..], line.chars().nth(2).unwrap() == ' ')
    } else if line.starts_with("/*") || line.starts_with("* ") || line.starts_with("//")
        || line.starts_with("**")
    {
        // Two bytes consumed; a space was trimmed iff the second byte was ' '.
        (&line[2..], line.chars().nth(1).unwrap() == ' ')
    } else if line.starts_with('*') {
        // Bare '*' continuation line with no following space.
        (&line[1..], false)
    } else {
        // No comment sigil at all: leave the line intact and report whether it
        // already begins with a space.
        (line, line.starts_with(' '))
    }
}

/// Search trait: find a needle while ignoring any occurrences inside comments.
pub trait FindUncommented {
    fn find_uncommented(&self, pat: &str) -> Option<usize>;
}

impl FindUncommented for str {
    /// Returns the byte position of the first match of `pat` that lies entirely
    /// in functional code or string literals (not in comments), or `None`.
    fn find_uncommented(&self, pat: &str) -> Option<usize> {
        let mut needle_iter = pat.chars();
        for (kind, (i, b)) in CharClasses::new(self.char_indices()) {
            match needle_iter.next() {
                None => {
                    // The whole needle has been matched just before position `i`.
                    return Some(i - pat.len());
                }
                Some(c) => match kind {
                    FullCodeCharKind::Normal | FullCodeCharKind::InString if b == c => {}
                    _ => {
                        // Mismatch or comment character: restart the needle.
                        needle_iter = pat.chars();
                    }
                },
            }
        }

        // Handle case where the pattern is a suffix of the search string
        match needle_iter.next() {
            Some(_) => None,
            None => Some(self.len() - pat.len()),
        }
    }
}

// Returns the first byte position after the first comment. The given string
// is expected to be prefixed by a comment, including delimiters.
// Good: "/* /* inner */ outer */ code();"
// Bad:  "code(); // hello\n world!"
/// Returns the byte position just past the leading comment of `s`, or `None`
/// if `s` does not start with a (terminated) comment.
pub fn find_comment_end(s: &str) -> Option<usize> {
    let mut iter = CharClasses::new(s.char_indices());
    for (kind, (i, _c)) in &mut iter {
        if kind == FullCodeCharKind::Normal || kind == FullCodeCharKind::InString {
            return Some(i);
        }
    }

    // Handle case where the comment ends at the end of s.
    if iter.status == CharClassesStatus::Normal {
        Some(s.len())
    } else {
        // Unterminated comment: no position after it exists.
        None
    }
}

/// Returns true if text contains any comment.
pub fn contains_comment(text: &str) -> bool {
    CharClasses::new(text.chars()).any(|(kind, _)| kind.is_comment())
}

/// Remove trailing spaces from the specified snippet. We do not remove spaces
/// inside strings or comments.
pub fn remove_trailing_white_spaces(text: &str) -> String {
    let mut buffer = String::with_capacity(text.len());
    // Whitespace is staged here and only flushed if a non-whitespace character
    // (or an in-string newline) follows; otherwise it is dropped at EOL.
    let mut space_buffer = String::with_capacity(128);
    for (char_kind, c) in CharClasses::new(text.chars()) {
        match c {
            '\n' => {
                if char_kind == FullCodeCharKind::InString {
                    // Trailing spaces inside a (multi-line) string are significant.
                    buffer.push_str(&space_buffer);
                }
                space_buffer.clear();
                buffer.push('\n');
            }
            _ if c.is_whitespace() => {
                space_buffer.push(c);
            }
            _ => {
                if !space_buffer.is_empty() {
                    // Interior whitespace: keep it.
                    buffer.push_str(&space_buffer);
                    space_buffer.clear();
                }
                buffer.push(c);
            }
        }
    }
    buffer
}

/// Character classifier: wraps a char (or `(index, char)`) iterator and tags
/// every item with a `FullCodeCharKind` via a small lexer state machine.
pub struct CharClasses<T>
where
    T: Iterator,
    T::Item: RichChar,
{
    base: MultiPeek<T>,
    status: CharClassesStatus,
}

/// Abstraction over iterator items that carry a `char` (plain chars or
/// `char_indices()` pairs), so `CharClasses` can work with both.
pub trait RichChar {
    fn get_char(&self) -> char;
}

impl RichChar for char {
    fn get_char(&self) -> char {
        *self
    }
}

impl RichChar for (usize, char) {
    fn get_char(&self) -> char {
        self.1
    }
}

/// Internal lexer state for `CharClasses`.
#[derive(PartialEq, Eq, Debug, Clone, Copy)]
enum CharClassesStatus {
    Normal,
    LitString,
    LitStringEscape,
    LitChar,
    LitCharEscape,
    // The u32 is the nesting deepness of the comment
    BlockComment(u32),
    // Status when the '/' has been consumed, but not yet the '*', deepness is
    // the new deepness (after the comment opening).
    BlockCommentOpening(u32),
    // Status when the '*' has been consumed, but not yet the '/', deepness is
    // the new deepness (after the comment closing).
    BlockCommentClosing(u32),
    LineComment,
}

/// Distinguish between functional part of code and comments
#[derive(PartialEq, Eq, Debug, Clone, Copy)]
pub enum CodeCharKind {
    Normal,
    Comment,
}

/// Distinguish between functional part of code and comments,
/// describing opening and closing of comments for ease when chunking
/// code from tagged characters
#[derive(PartialEq, Eq, Debug, Clone, Copy)]
pub enum FullCodeCharKind {
    Normal,
    /// The first character of a comment, there is only one for a comment (always '/')
    StartComment,
    /// Any character inside a comment including the second character of comment
    /// marks ("//", "/*")
    InComment,
    /// Last character of a comment, '\n' for a line comment, '/' for a block comment.
    EndComment,
    /// Inside a string.
    InString,
}

impl FullCodeCharKind {
    pub fn is_comment(&self) -> bool {
        match *self {
            FullCodeCharKind::StartComment
            | FullCodeCharKind::InComment
            | FullCodeCharKind::EndComment => true,
            _ => false,
        }
    }

    pub fn is_string(&self) -> bool {
        *self == FullCodeCharKind::InString
    }

    /// Collapse the fine-grained kind into the two-valued `CodeCharKind`.
    fn to_codecharkind(&self) -> CodeCharKind {
        if self.is_comment() {
            CodeCharKind::Comment
        } else {
            CodeCharKind::Normal
        }
    }
}

impl<T> CharClasses<T>
where
    T: Iterator,
    T::Item: RichChar,
{
    pub fn new(base: T) -> CharClasses<T> {
        CharClasses {
            base: multipeek(base),
            status: CharClassesStatus::Normal,
        }
    }
}

impl<T> Iterator for CharClasses<T>
where
    T: Iterator,
    T::Item: RichChar,
{
    type Item = (FullCodeCharKind, T::Item);

    /// Advances the underlying iterator one item and classifies it according
    /// to the current lexer state, updating the state for the next call.
    fn next(&mut self) -> Option<(FullCodeCharKind, T::Item)> {
        let item = self.base.next()?;
        let chr = item.get_char();
        let mut char_kind = FullCodeCharKind::Normal;
        self.status = match self.status {
            CharClassesStatus::LitString => match chr {
                '"' => CharClassesStatus::Normal,
                '\\' => {
                    char_kind = FullCodeCharKind::InString;
                    CharClassesStatus::LitStringEscape
                }
                _ => {
                    char_kind = FullCodeCharKind::InString;
                    CharClassesStatus::LitString
                }
            },
            CharClassesStatus::LitStringEscape => {
                // The escaped character itself: stay in the string.
                char_kind = FullCodeCharKind::InString;
                CharClassesStatus::LitString
            }
            CharClassesStatus::LitChar => match chr {
                '\\' => CharClassesStatus::LitCharEscape,
                '\'' => CharClassesStatus::Normal,
                _ => CharClassesStatus::LitChar,
            },
            CharClassesStatus::LitCharEscape => CharClassesStatus::LitChar,
            CharClassesStatus::Normal => match chr {
                '"' => {
                    char_kind = FullCodeCharKind::InString;
                    CharClassesStatus::LitString
                }
                '\'' => {
                    // HACK: Work around mut borrow.
                    match self.base.peek() {
                        Some(next) if next.get_char() == '\\' => {
                            // `'\...`: definitely a char literal, not a lifetime.
                            self.status = CharClassesStatus::LitChar;
                            return Some((char_kind, item));
                        }
                        _ => (),
                    }

                    // `'x'` is a char literal; a lone `'` (lifetime) stays Normal.
                    match self.base.peek() {
                        Some(next) if next.get_char() == '\'' => CharClassesStatus::LitChar,
                        _ => CharClassesStatus::Normal,
                    }
                }
                '/' => match self.base.peek() {
                    Some(next) if next.get_char() == '*' => {
                        self.status = CharClassesStatus::BlockCommentOpening(1);
                        return Some((FullCodeCharKind::StartComment, item));
                    }
                    Some(next) if next.get_char() == '/' => {
                        self.status = CharClassesStatus::LineComment;
                        return Some((FullCodeCharKind::StartComment, item));
                    }
                    _ => CharClassesStatus::Normal,
                },
                _ => CharClassesStatus::Normal,
            },
            CharClassesStatus::BlockComment(deepness) => {
                assert_ne!(deepness, 0);
                // Look one character ahead to detect nested `/*` and closing `*/`.
                self.status = match self.base.peek() {
                    Some(next) if next.get_char() == '/' && chr == '*' => {
                        CharClassesStatus::BlockCommentClosing(deepness - 1)
                    }
                    Some(next) if next.get_char() == '*' && chr == '/' => {
                        CharClassesStatus::BlockCommentOpening(deepness + 1)
                    }
                    _ => CharClassesStatus::BlockComment(deepness),
                };
                return Some((FullCodeCharKind::InComment, item));
            }
            CharClassesStatus::BlockCommentOpening(deepness) => {
                assert_eq!(chr, '*');
                self.status = CharClassesStatus::BlockComment(deepness);
                return Some((FullCodeCharKind::InComment, item));
            }
            CharClassesStatus::BlockCommentClosing(deepness) => {
                assert_eq!(chr, '/');
                if deepness == 0 {
                    // Outermost comment closed.
                    self.status = CharClassesStatus::Normal;
                    return Some((FullCodeCharKind::EndComment, item));
                } else {
                    // Only a nested comment closed; still inside the outer one.
                    self.status = CharClassesStatus::BlockComment(deepness);
                    return Some((FullCodeCharKind::InComment, item));
                }
            }
            CharClassesStatus::LineComment => match chr {
                '\n' => {
                    self.status = CharClassesStatus::Normal;
                    return Some((FullCodeCharKind::EndComment, item));
                }
                _ => {
                    self.status = CharClassesStatus::LineComment;
                    return Some((FullCodeCharKind::InComment, item));
                }
            },
        };
        Some((char_kind, item))
    }
}

/// An iterator over the lines of a string, paired with the char kind at the
/// end of the line.
pub struct LineClasses<'a> {
    base: iter::Peekable<CharClasses<std::str::Chars<'a>>>,
    // Kind of the most recently consumed character, reported with each line.
    kind: FullCodeCharKind,
}

impl<'a> LineClasses<'a> {
    pub fn new(s: &'a str) -> Self {
        LineClasses {
            base: CharClasses::new(s.chars()).peekable(),
            kind: FullCodeCharKind::Normal,
        }
    }
}

impl<'a> Iterator for LineClasses<'a> {
    type Item = (FullCodeCharKind, String);

    fn next(&mut self) -> Option<Self::Item> {
        if self.base.peek().is_none() {
            return None;
        }

        let mut line = String::new();

        // Accumulate characters up to (but excluding) the next '\n'.
        while let Some((kind, c)) = self.base.next() {
            self.kind = kind;
            if c == '\n' {
                break;
            } else {
                line.push(c);
            }
        }

        Some((self.kind, line))
    }
}

/// Iterator over functional and commented parts of a string. Any part of a string is either
/// functional code, either *one* block comment, either *one* line comment. Whitespace between
/// comments is functional code. Line comments contain their ending newlines.
struct UngroupedCommentCodeSlices<'a> {
    // The full string being sliced; yielded items borrow from it.
    slice: &'a str,
    iter: iter::Peekable<CharClasses<std::str::CharIndices<'a>>>,
}

impl<'a> UngroupedCommentCodeSlices<'a> {
    fn new(code: &'a str) -> UngroupedCommentCodeSlices<'a> {
        UngroupedCommentCodeSlices {
            slice: code,
            iter: CharClasses::new(code.char_indices()).peekable(),
        }
    }
}

impl<'a> Iterator for UngroupedCommentCodeSlices<'a> {
    /// (kind of the slice, byte offset of its start, the slice itself)
    type Item = (CodeCharKind, usize, &'a str);

    fn next(&mut self) -> Option<Self::Item> {
        let (kind, (start_idx, _)) = self.iter.next()?;
        match kind {
            FullCodeCharKind::Normal | FullCodeCharKind::InString => {
                // Consume all the Normal code
                while let Some(&(char_kind, _)) = self.iter.peek() {
                    if char_kind.is_comment() {
                        break;
                    }
                    let _ = self.iter.next();
                }
            }
            FullCodeCharKind::StartComment => {
                // Consume the whole comment
                // (the loop stops after consuming the EndComment character).
                while let Some((FullCodeCharKind::InComment, (_, _))) = self.iter.next() {}
            }
            // A slice can only begin on a Normal/InString/StartComment character;
            // anything else indicates a logic error in CharClasses.
            _ => panic!(),
        }
        // The slice ends where the next unconsumed character begins, or at EOF.
        let slice = match self.iter.peek() {
            Some(&(_, (end_idx, _))) => &self.slice[start_idx..end_idx],
            None => &self.slice[start_idx..],
        };
        Some((
            if kind.is_comment() {
                CodeCharKind::Comment
            } else {
                CodeCharKind::Normal
            },
            start_idx,
            slice,
        ))
    }
}

/// Iterator over an alternating sequence of functional and commented parts of
/// a string. The first item is always a, possibly zero length, subslice of
/// functional text. Line style comments contain their ending newlines.
pub struct CommentCodeSlices<'a> {
    slice: &'a str,
    // Kind of the previously yielded slice; the next slice has the other kind.
    last_slice_kind: CodeCharKind,
    // Byte offset where the previously yielded slice ended.
    last_slice_end: usize,
}

impl<'a> CommentCodeSlices<'a> {
    pub fn new(slice: &'a str) -> CommentCodeSlices<'a> {
        CommentCodeSlices {
            slice,
            // Starting with Comment makes the first yielded slice Normal
            // (possibly empty), as documented above.
            last_slice_kind: CodeCharKind::Comment,
            last_slice_end: 0,
        }
    }
}

impl<'a> Iterator for CommentCodeSlices<'a> {
    /// (kind of the slice, byte offset of its start, the slice itself)
    type Item = (CodeCharKind, usize, &'a str);

    fn next(&mut self) -> Option<Self::Item> {
        if self.last_slice_end == self.slice.len() {
            return None;
        }

        let mut sub_slice_end = self.last_slice_end;
        let mut first_whitespace = None;
        let subslice = &self.slice[self.last_slice_end..];
        let mut iter = CharClasses::new(subslice.char_indices());

        for (kind, (i, c)) in &mut iter {
            // Whitespace between consecutive `//` comments acts as a connector:
            // it is attached to the comment slice rather than the code slice.
            let is_comment_connector = self.last_slice_kind == CodeCharKind::Normal
                && &subslice[..2] == "//"
                && [' ', '\t'].contains(&c);

            if is_comment_connector && first_whitespace.is_none() {
                first_whitespace = Some(i);
            }

            if kind.to_codecharkind() == self.last_slice_kind && !is_comment_connector {
                // Kind flipped back to the previous one: the current slice ends
                // here (or at the start of the pending connector whitespace).
                let last_index = match first_whitespace {
                    Some(j) => j,
                    None => i,
                };
                sub_slice_end = self.last_slice_end + last_index;
                break;
            }

            if !is_comment_connector {
                first_whitespace = None;
            }
        }

        if let (None, true) = (iter.next(), sub_slice_end == self.last_slice_end) {
            // This was the last subslice.
            sub_slice_end = match first_whitespace {
                Some(i) => self.last_slice_end + i,
                None => self.slice.len(),
            };
        }

        // Each yielded slice alternates kind with its predecessor.
        let kind = match self.last_slice_kind {
            CodeCharKind::Comment => CodeCharKind::Normal,
            CodeCharKind::Normal => CodeCharKind::Comment,
        };
        let res = (
            kind,
            self.last_slice_end,
            &self.slice[self.last_slice_end..sub_slice_end],
        );
        self.last_slice_end = sub_slice_end;
        self.last_slice_kind = kind;
        Some(res)
    }
}

/// Checks if `new` didn't miss any comment from `span`; if it removed any, return the previous
/// text (if it fits in the width/offset, else return None), else return `new`
pub fn recover_comment_removed(
    new: String,
    span: Span,
    context: &RewriteContext,
) -> Option<String> {
    let snippet = context.snippet(span);
    if snippet != new && changed_comment_content(snippet, &new) {
        // We missed some comments. Keep the original text.
        Some(snippet.to_owned())
    } else {
        Some(new)
    }
}

/// Returns true if the two strings of code have a *different* payload of comments
/// (i.e. the rewrite changed or dropped comment content).
/// The payload of comments is everything in the string except:
/// - actual code (not comments)
/// - comment start/end marks
/// - whitespace
/// - '*' at the beginning of lines in block comments
fn changed_comment_content(orig: &str, new: &str) -> bool {
    // Cannot write this as a fn since we cannot return types containing closures
    let code_comment_content = |code| {
        let slices = UngroupedCommentCodeSlices::new(code);
        slices
            .filter(|&(ref kind, _, _)| *kind == CodeCharKind::Comment)
            .flat_map(|(_, _, s)| CommentReducer::new(s))
    };
    // `.ne` compares the two payload character streams lazily.
    let res = code_comment_content(orig).ne(code_comment_content(new));
    debug!(
        "comment::changed_comment_content: {}\norig: '{}'\nnew: '{}'\nraw_old: {}\nraw_new: {}",
        res,
        orig,
        new,
        code_comment_content(orig).collect::<String>(),
        code_comment_content(new).collect::<String>()
    );
    res
}

/// Iterator over the 'payload' characters of a comment.
/// It skips whitespace, comment start/end marks, and '*' at the beginning of lines.
/// The comment must be one comment, ie not more than one start mark (no multiple line comments, /// for example). struct CommentReducer<'a> { is_block: bool, at_start_line: bool, iter: std::str::Chars<'a>, } impl<'a> CommentReducer<'a> { fn new(comment: &'a str) -> CommentReducer<'a> { let is_block = comment.starts_with("/*"); let comment = remove_comment_header(comment); CommentReducer { is_block, at_start_line: false, // There are no supplementary '*' on the first line iter: comment.chars(), } } } impl<'a> Iterator for CommentReducer<'a> { type Item = char; fn next(&mut self) -> Option<Self::Item> { loop { let mut c = self.iter.next()?; if self.is_block && self.at_start_line { while c.is_whitespace() { c = self.iter.next()?; } // Ignore leading '*' if c == '*' { c = self.iter.next()?; } } else if c == '\n' { self.at_start_line = true; } if !c.is_whitespace() { return Some(c); } } } } fn remove_comment_header(comment: &str) -> &str { if comment.starts_with("///") || comment.starts_with("//!") { &comment[3..] } else if comment.starts_with("//") { &comment[2..] 
} else if (comment.starts_with("/**") && !comment.starts_with("/**/")) || comment.starts_with("/*!") { &comment[3..comment.len() - 2] } else { assert!( comment.starts_with("/*"), format!("string '{}' is not a comment", comment) ); &comment[2..comment.len() - 2] } } #[cfg(test)] mod test { use super::*; use shape::{Indent, Shape}; #[test] fn char_classes() { let mut iter = CharClasses::new("//\n\n".chars()); assert_eq!((FullCodeCharKind::StartComment, '/'), iter.next().unwrap()); assert_eq!((FullCodeCharKind::InComment, '/'), iter.next().unwrap()); assert_eq!((FullCodeCharKind::EndComment, '\n'), iter.next().unwrap()); assert_eq!((FullCodeCharKind::Normal, '\n'), iter.next().unwrap()); assert_eq!(None, iter.next()); } #[test] fn comment_code_slices() { let input = "code(); /* test */ 1 + 1"; let mut iter = CommentCodeSlices::new(input); assert_eq!((CodeCharKind::Normal, 0, "code(); "), iter.next().unwrap()); assert_eq!( (CodeCharKind::Comment, 8, "/* test */"), iter.next().unwrap() ); assert_eq!((CodeCharKind::Normal, 18, " 1 + 1"), iter.next().unwrap()); assert_eq!(None, iter.next()); } #[test] fn comment_code_slices_two() { let input = "// comment\n test();"; let mut iter = CommentCodeSlices::new(input); assert_eq!((CodeCharKind::Normal, 0, ""), iter.next().unwrap()); assert_eq!( (CodeCharKind::Comment, 0, "// comment\n"), iter.next().unwrap() ); assert_eq!( (CodeCharKind::Normal, 11, " test();"), iter.next().unwrap() ); assert_eq!(None, iter.next()); } #[test] fn comment_code_slices_three() { let input = "1 // comment\n // comment2\n\n"; let mut iter = CommentCodeSlices::new(input); assert_eq!((CodeCharKind::Normal, 0, "1 "), iter.next().unwrap()); assert_eq!( (CodeCharKind::Comment, 2, "// comment\n // comment2\n"), iter.next().unwrap() ); assert_eq!((CodeCharKind::Normal, 29, "\n"), iter.next().unwrap()); assert_eq!(None, iter.next()); } #[test] #[rustfmt::skip] fn format_comments() { let mut config: ::config::Config = Default::default(); 
config.set().wrap_comments(true); config.set().normalize_comments(true); let comment = rewrite_comment(" //test", true, Shape::legacy(100, Indent::new(0, 100)), &config).unwrap(); assert_eq!("/* test */", comment); let comment = rewrite_comment("// comment on a", false, Shape::legacy(10, Indent::empty()), &config).unwrap(); assert_eq!("// comment\n// on a", comment); let comment = rewrite_comment("// A multi line comment\n // between args.", false, Shape::legacy(60, Indent::new(0, 12)), &config).unwrap(); assert_eq!("// A multi line comment\n // between args.", comment); let input = "// comment"; let expected = "/* comment */"; let comment = rewrite_comment(input, true, Shape::legacy(9, Indent::new(0, 69)), &config).unwrap(); assert_eq!(expected, comment); let comment = rewrite_comment("/* trimmed */", true, Shape::legacy(100, Indent::new(0, 100)), &config).unwrap(); assert_eq!("/* trimmed */", comment); } // This is probably intended to be a non-test fn, but it is not used. I'm // keeping it around unless it helps us test stuff. fn uncommented(text: &str) -> String { CharClasses::new(text.chars()) .filter_map(|(s, c)| match s { FullCodeCharKind::Normal | FullCodeCharKind::InString => Some(c), _ => None, }) .collect() } #[test] fn test_uncommented() { assert_eq!(&uncommented("abc/*...*/"), "abc"); assert_eq!( &uncommented("// .... 
/* \n../* /* *** / */ */a/* // */c\n"), "..ac\n" ); assert_eq!(&uncommented("abc \" /* */\" qsdf"), "abc \" /* */\" qsdf"); } #[test] fn test_contains_comment() { assert_eq!(contains_comment("abc"), false); assert_eq!(contains_comment("abc // qsdf"), true); assert_eq!(contains_comment("abc /* kqsdf"), true); assert_eq!(contains_comment("abc \" /* */\" qsdf"), false); } #[test] fn test_find_uncommented() { fn check(haystack: &str, needle: &str, expected: Option<usize>) { assert_eq!(expected, haystack.find_uncommented(needle)); } check("/*/ */test", "test", Some(6)); check("//test\ntest", "test", Some(7)); check("/* comment only */", "whatever", None); check( "/* comment */ some text /* more commentary */ result", "result", Some(46), ); check("sup // sup", "p", Some(2)); check("sup", "x", None); check(r#"π? /**/ π is nice!"#, r#"π is nice"#, Some(9)); check("/*sup yo? \n sup*/ sup", "p", Some(20)); check("hel/*lohello*/lo", "hello", None); check("acb", "ab", None); check(",/*A*/ ", ",", Some(0)); check("abc", "abc", Some(0)); check("/* abc */", "abc", None); check("/**/abc/* */", "abc", Some(4)); check("\"/* abc */\"", "abc", Some(4)); check("\"/* abc", "abc", Some(4)); } #[test] fn test_remove_trailing_white_spaces() { let s = format!(" r#\"\n test\n \"#"); assert_eq!(remove_trailing_white_spaces(&s), s); } } Format code block with sharp prefix by hiding lines with a leading `#` behind a custom comment. // Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Formatting and tools for comments. 
use std::{self, borrow::Cow, iter}; use itertools::{multipeek, MultiPeek}; use syntax::codemap::Span; use config::Config; use rewrite::RewriteContext; use shape::{Indent, Shape}; use string::{rewrite_string, StringFormat}; use utils::{count_newlines, first_line_width, last_line_width}; fn is_custom_comment(comment: &str) -> bool { if !comment.starts_with("//") { false } else if let Some(c) = comment.chars().nth(2) { !c.is_alphanumeric() && !c.is_whitespace() } else { false } } #[derive(Copy, Clone, PartialEq, Eq)] pub enum CommentStyle<'a> { DoubleSlash, TripleSlash, Doc, SingleBullet, DoubleBullet, Exclamation, Custom(&'a str), } fn custom_opener(s: &str) -> &str { s.lines().next().map_or("", |first_line| { first_line .find(' ') .map_or(first_line, |space_index| &first_line[0..space_index + 1]) }) } impl<'a> CommentStyle<'a> { pub fn is_doc_comment(&self) -> bool { match *self { CommentStyle::TripleSlash | CommentStyle::Doc => true, _ => false, } } pub fn opener(&self) -> &'a str { match *self { CommentStyle::DoubleSlash => "// ", CommentStyle::TripleSlash => "/// ", CommentStyle::Doc => "//! ", CommentStyle::SingleBullet => "/* ", CommentStyle::DoubleBullet => "/** ", CommentStyle::Exclamation => "/*! ", CommentStyle::Custom(opener) => opener, } } pub fn closer(&self) -> &'a str { match *self { CommentStyle::DoubleSlash | CommentStyle::TripleSlash | CommentStyle::Custom(..) | CommentStyle::Doc => "", CommentStyle::DoubleBullet => " **/", CommentStyle::SingleBullet | CommentStyle::Exclamation => " */", } } pub fn line_start(&self) -> &'a str { match *self { CommentStyle::DoubleSlash => "// ", CommentStyle::TripleSlash => "/// ", CommentStyle::Doc => "//! 
", CommentStyle::SingleBullet | CommentStyle::Exclamation => " * ", CommentStyle::DoubleBullet => " ** ", CommentStyle::Custom(opener) => opener, } } pub fn to_str_tuplet(&self) -> (&'a str, &'a str, &'a str) { (self.opener(), self.closer(), self.line_start()) } pub fn line_with_same_comment_style(&self, line: &str, normalize_comments: bool) -> bool { match *self { CommentStyle::DoubleSlash | CommentStyle::TripleSlash | CommentStyle::Doc => { line.trim_left().starts_with(self.line_start().trim_left()) || comment_style(line, normalize_comments) == *self } CommentStyle::DoubleBullet | CommentStyle::SingleBullet | CommentStyle::Exclamation => { line.trim_left().starts_with(self.closer().trim_left()) || line.trim_left().starts_with(self.line_start().trim_left()) || comment_style(line, normalize_comments) == *self } CommentStyle::Custom(opener) => line.trim_left().starts_with(opener.trim_right()), } } } fn comment_style(orig: &str, normalize_comments: bool) -> CommentStyle { if !normalize_comments { if orig.starts_with("/**") && !orig.starts_with("/**/") { CommentStyle::DoubleBullet } else if orig.starts_with("/*!") { CommentStyle::Exclamation } else if orig.starts_with("/*") { CommentStyle::SingleBullet } else if orig.starts_with("///") && orig.chars().nth(3).map_or(true, |c| c != '/') { CommentStyle::TripleSlash } else if orig.starts_with("//!") { CommentStyle::Doc } else if is_custom_comment(orig) { CommentStyle::Custom(custom_opener(orig)) } else { CommentStyle::DoubleSlash } } else if (orig.starts_with("///") && orig.chars().nth(3).map_or(true, |c| c != '/')) || (orig.starts_with("/**") && !orig.starts_with("/**/")) { CommentStyle::TripleSlash } else if orig.starts_with("//!") || orig.starts_with("/*!") { CommentStyle::Doc } else if is_custom_comment(orig) { CommentStyle::Custom(custom_opener(orig)) } else { CommentStyle::DoubleSlash } } /// Combine `prev_str` and `next_str` into a single `String`. `span` may contain /// comments between two strings. 
If there are such comments, then that will be /// recovered. If `allow_extend` is true and there is no comment between the two /// strings, then they will be put on a single line as long as doing so does not /// exceed max width. pub fn combine_strs_with_missing_comments( context: &RewriteContext, prev_str: &str, next_str: &str, span: Span, shape: Shape, allow_extend: bool, ) -> Option<String> { let mut result = String::with_capacity(prev_str.len() + next_str.len() + shape.indent.width() + 128); result.push_str(prev_str); let mut allow_one_line = !prev_str.contains('\n') && !next_str.contains('\n'); let first_sep = if prev_str.is_empty() || next_str.is_empty() { "" } else { " " }; let mut one_line_width = last_line_width(prev_str) + first_line_width(next_str) + first_sep.len(); let config = context.config; let indent = shape.indent; let missing_comment = rewrite_missing_comment(span, shape, context)?; if missing_comment.is_empty() { if allow_extend && prev_str.len() + first_sep.len() + next_str.len() <= shape.width { result.push_str(first_sep); } else if !prev_str.is_empty() { result.push_str(&indent.to_string_with_newline(config)) } result.push_str(next_str); return Some(result); } // We have a missing comment between the first expression and the second expression. // Peek the the original source code and find out whether there is a newline between the first // expression and the second expression or the missing comment. We will preserve the original // layout whenever possible. 
let original_snippet = context.snippet(span); let prefer_same_line = if let Some(pos) = original_snippet.find('/') { !original_snippet[..pos].contains('\n') } else { !original_snippet.contains('\n') }; one_line_width -= first_sep.len(); let first_sep = if prev_str.is_empty() || missing_comment.is_empty() { Cow::from("") } else { let one_line_width = last_line_width(prev_str) + first_line_width(&missing_comment) + 1; if prefer_same_line && one_line_width <= shape.width { Cow::from(" ") } else { indent.to_string_with_newline(config) } }; result.push_str(&first_sep); result.push_str(&missing_comment); let second_sep = if missing_comment.is_empty() || next_str.is_empty() { Cow::from("") } else if missing_comment.starts_with("//") { indent.to_string_with_newline(config) } else { one_line_width += missing_comment.len() + first_sep.len() + 1; allow_one_line &= !missing_comment.starts_with("//") && !missing_comment.contains('\n'); if prefer_same_line && allow_one_line && one_line_width <= shape.width { Cow::from(" ") } else { indent.to_string_with_newline(config) } }; result.push_str(&second_sep); result.push_str(next_str); Some(result) } pub fn rewrite_doc_comment(orig: &str, shape: Shape, config: &Config) -> Option<String> { _rewrite_comment(orig, false, shape, config, true) } pub fn rewrite_comment( orig: &str, block_style: bool, shape: Shape, config: &Config, ) -> Option<String> { _rewrite_comment(orig, block_style, shape, config, false) } fn _rewrite_comment( orig: &str, block_style: bool, shape: Shape, config: &Config, is_doc_comment: bool, ) -> Option<String> { // If there are lines without a starting sigil, we won't format them correctly // so in that case we won't even re-align (if !config.normalize_comments()) and // we should stop now. 
let num_bare_lines = orig .lines() .map(|line| line.trim()) .filter(|l| !(l.starts_with('*') || l.starts_with("//") || l.starts_with("/*"))) .count(); if num_bare_lines > 0 && !config.normalize_comments() { return Some(orig.to_owned()); } if !config.normalize_comments() && !config.wrap_comments() { return light_rewrite_comment(orig, shape.indent, config, is_doc_comment); } identify_comment(orig, block_style, shape, config, is_doc_comment) } fn identify_comment( orig: &str, block_style: bool, shape: Shape, config: &Config, is_doc_comment: bool, ) -> Option<String> { let style = comment_style(orig, false); let first_group = orig .lines() .take_while(|l| style.line_with_same_comment_style(l, false)) .collect::<Vec<_>>() .join("\n"); let rest = orig .lines() .skip(first_group.lines().count()) .collect::<Vec<_>>() .join("\n"); let first_group_str = rewrite_comment_inner( &first_group, block_style, style, shape, config, is_doc_comment || style.is_doc_comment(), )?; if rest.is_empty() { Some(first_group_str) } else { identify_comment(&rest, block_style, shape, config, is_doc_comment).map(|rest_str| { format!( "{}\n{}{}", first_group_str, shape.indent.to_string(config), rest_str ) }) } } fn rewrite_comment_inner( orig: &str, block_style: bool, style: CommentStyle, shape: Shape, config: &Config, is_doc_comment: bool, ) -> Option<String> { let (opener, closer, line_start) = if block_style { CommentStyle::SingleBullet.to_str_tuplet() } else { comment_style(orig, config.normalize_comments()).to_str_tuplet() }; let max_chars = shape .width .checked_sub(closer.len() + opener.len()) .unwrap_or(1); let indent_str = shape.indent.to_string_with_newline(config); let fmt_indent = shape.indent + (opener.len() - line_start.len()); let mut fmt = StringFormat { opener: "", closer: "", line_start, line_end: "", shape: Shape::legacy(max_chars, fmt_indent), trim_end: true, config, }; let line_breaks = count_newlines(orig.trim_right()); let lines = orig .lines() .enumerate() .map(|(i, mut 
line)| { line = trim_right_unless_two_whitespaces(line.trim_left(), is_doc_comment); // Drop old closer. if i == line_breaks && line.ends_with("*/") && !line.starts_with("//") { line = line[..(line.len() - 2)].trim_right(); } line }) .map(|s| left_trim_comment_line(s, &style)) .map(|(line, has_leading_whitespace)| { if orig.starts_with("/*") && line_breaks == 0 { ( line.trim_left(), has_leading_whitespace || config.normalize_comments(), ) } else { (line, has_leading_whitespace || config.normalize_comments()) } }); let mut result = String::with_capacity(orig.len() * 2); result.push_str(opener); let mut code_block_buffer = String::with_capacity(128); let mut is_prev_line_multi_line = false; let mut inside_code_block = false; let comment_line_separator = format!("{}{}", indent_str, line_start); let join_code_block_with_comment_line_separator = |s: &str| { let mut result = String::with_capacity(s.len() + 128); let mut iter = s.lines().peekable(); while let Some(line) = iter.next() { result.push_str(line); result.push_str(match iter.peek() { Some(next_line) if next_line.is_empty() => comment_line_separator.trim_right(), Some(..) => &comment_line_separator, None => "", }); } result }; for (i, (line, has_leading_whitespace)) in lines.enumerate() { let is_last = i == count_newlines(orig); if inside_code_block { if line.starts_with("```") { inside_code_block = false; result.push_str(&comment_line_separator); let code_block = { let mut config = config.clone(); config.set().wrap_comments(false); ::format_code_block(&code_block_buffer, &config) .map_or_else(|| code_block_buffer.to_owned(), trim_custom_comment_prefix) }; result.push_str(&join_code_block_with_comment_line_separator(&code_block)); code_block_buffer.clear(); result.push_str(&comment_line_separator); result.push_str(line); } else { code_block_buffer.push_str(&hide_sharp_behind_comment(line)); code_block_buffer.push('\n'); if is_last { // There is an code block that is not properly enclosed by backticks. 
// We will leave them untouched. result.push_str(&comment_line_separator); result.push_str(&join_code_block_with_comment_line_separator( &code_block_buffer, )); } } continue; } else { inside_code_block = line.starts_with("```"); if result == opener { let force_leading_whitespace = opener == "/* " && count_newlines(orig) == 0; if !has_leading_whitespace && !force_leading_whitespace && result.ends_with(' ') { result.pop(); } if line.is_empty() { continue; } } else if is_prev_line_multi_line && !line.is_empty() { result.push(' ') } else if is_last && !closer.is_empty() && line.is_empty() { result.push_str(&indent_str); } else { result.push_str(&comment_line_separator); if !has_leading_whitespace && result.ends_with(' ') { result.pop(); } } } if config.wrap_comments() && line.len() > fmt.shape.width && !has_url(line) { match rewrite_string(line, &fmt, Some(max_chars)) { Some(ref s) => { is_prev_line_multi_line = s.contains('\n'); result.push_str(s); } None if is_prev_line_multi_line => { // We failed to put the current `line` next to the previous `line`. // Remove the trailing space, then start rewrite on the next line. result.pop(); result.push_str(&comment_line_separator); fmt.shape = Shape::legacy(max_chars, fmt_indent); match rewrite_string(line, &fmt, Some(max_chars)) { Some(ref s) => { is_prev_line_multi_line = s.contains('\n'); result.push_str(s); } None => { is_prev_line_multi_line = false; result.push_str(line); } } } None => { is_prev_line_multi_line = false; result.push_str(line); } } fmt.shape = if is_prev_line_multi_line { // 1 = " " let offset = 1 + last_line_width(&result) - line_start.len(); Shape { width: max_chars.saturating_sub(offset), indent: fmt_indent, offset: fmt.shape.offset + offset, } } else { Shape::legacy(max_chars, fmt_indent) }; } else { if line.is_empty() && result.ends_with(' ') && !is_last { // Remove space if this is an empty comment or a doc comment. 
result.pop(); } result.push_str(line); fmt.shape = Shape::legacy(max_chars, fmt_indent); is_prev_line_multi_line = false; } } result.push_str(closer); if result.ends_with(opener) && opener.ends_with(' ') { // Trailing space. result.pop(); } Some(result) } const RUSTFMT_CUSTOM_COMMENT_PREFIX: &str = "//#### "; fn hide_sharp_behind_comment<'a>(s: &'a str) -> Cow<'a, str> { if s.trim_left().starts_with('#') { Cow::from(format!("{}{}", RUSTFMT_CUSTOM_COMMENT_PREFIX, s)) } else { Cow::from(s) } } fn trim_custom_comment_prefix(s: String) -> String { s.lines() .map(|line| line.trim_left_matches(RUSTFMT_CUSTOM_COMMENT_PREFIX)) .collect::<Vec<_>>() .join("\n") } /// Returns true if the given string MAY include URLs or alike. fn has_url(s: &str) -> bool { // This function may return false positive, but should get its job done in most cases. s.contains("https://") || s.contains("http://") || s.contains("ftp://") || s.contains("file://") } /// Given the span, rewrite the missing comment inside it if available. /// Note that the given span must only include comments (or leading/trailing whitespaces). pub fn rewrite_missing_comment( span: Span, shape: Shape, context: &RewriteContext, ) -> Option<String> { let missing_snippet = context.snippet(span); let trimmed_snippet = missing_snippet.trim(); if !trimmed_snippet.is_empty() { rewrite_comment(trimmed_snippet, false, shape, context.config) } else { Some(String::new()) } } /// Recover the missing comments in the specified span, if available. /// The layout of the comments will be preserved as long as it does not break the code /// and its total width does not exceed the max width. 
pub fn recover_missing_comment_in_span(
    span: Span,
    shape: Shape,
    context: &RewriteContext,
    used_width: usize,
) -> Option<String> {
    let missing_comment = rewrite_missing_comment(span, shape, context)?;
    if missing_comment.is_empty() {
        Some(String::new())
    } else {
        let missing_snippet = context.snippet(span);
        // Byte position of the comment opener inside the original snippet
        // (0 when no '/' is found).
        let pos = missing_snippet.find('/').unwrap_or(0);
        // 1 = ` `
        let total_width = missing_comment.len() + used_width + 1;
        // Put the comment on its own line when the source already had a
        // newline before it, or when keeping it inline would overflow
        // the configured max width.
        let force_new_line_before_comment =
            missing_snippet[..pos].contains('\n') || total_width > context.config.max_width();
        let sep = if force_new_line_before_comment {
            shape.indent.to_string_with_newline(context.config)
        } else {
            Cow::from(" ")
        };
        Some(format!("{}{}", sep, missing_comment))
    }
}

/// Trim trailing whitespaces unless they consist of two or more whitespaces.
fn trim_right_unless_two_whitespaces(s: &str, is_doc_comment: bool) -> &str {
    // Trailing double-space is markdown's hard line break; keep it in doc
    // comments so rendering is preserved.
    if is_doc_comment && s.ends_with(" ") {
        s
    } else {
        s.trim_right()
    }
}

/// Trims whitespace and aligns to indent, but otherwise does not change comments.
fn light_rewrite_comment(
    orig: &str,
    offset: Indent,
    config: &Config,
    is_doc_comment: bool,
) -> Option<String> {
    let lines: Vec<&str> = orig
        .lines()
        .map(|l| {
            // This is basically just l.trim(), but in the case that a line starts
            // with `*` we want to leave one space before it, so it aligns with the
            // `*` in `/*`.
            let first_non_whitespace = l.find(|c| !char::is_whitespace(c));
            let left_trimmed = if let Some(fnw) = first_non_whitespace {
                if l.as_bytes()[fnw] == b'*' && fnw > 0 {
                    &l[fnw - 1..]
                } else {
                    &l[fnw..]
                }
            } else {
                ""
            };
            // Preserve markdown's double-space line break syntax in doc comment.
            trim_right_unless_two_whitespaces(left_trimmed, is_doc_comment)
        })
        .collect();
    // Re-join the trimmed lines at the requested indentation.
    Some(lines.join(&format!("\n{}", offset.to_string(config))))
}

/// Trims comment characters and possibly a single space from the left of a string.
/// Does not trim all whitespace. If a single space is trimmed from the left of the string,
/// this function returns true.
fn left_trim_comment_line<'a>(line: &'a str, style: &CommentStyle) -> (&'a str, bool) {
    // NOTE: the order of these checks matters — the four-character openers
    // must be tested before the three- and two-character prefixes they contain.
    if line.starts_with("//! ")
        || line.starts_with("/// ")
        || line.starts_with("/*! ")
        || line.starts_with("/** ")
    {
        (&line[4..], true)
    } else if let CommentStyle::Custom(opener) = *style {
        // Custom comment style: strip the custom opener; report whether its
        // trailing space was present on this line.
        if line.starts_with(opener) {
            (&line[opener.len()..], true)
        } else {
            (&line[opener.trim_right().len()..], false)
        }
    } else if line.starts_with("/* ")
        || line.starts_with("// ")
        || line.starts_with("//!")
        || line.starts_with("///")
        || line.starts_with("** ")
        || line.starts_with("/*!")
        || (line.starts_with("/**") && !line.starts_with("/**/"))
    {
        (&line[3..], line.chars().nth(2).unwrap() == ' ')
    } else if line.starts_with("/*")
        || line.starts_with("* ")
        || line.starts_with("//")
        || line.starts_with("**")
    {
        (&line[2..], line.chars().nth(1).unwrap() == ' ')
    } else if line.starts_with('*') {
        (&line[1..], false)
    } else {
        (line, line.starts_with(' '))
    }
}

/// Search for a needle in a haystack, matching only in code and string
/// literals — characters classified as comments never match.
pub trait FindUncommented {
    fn find_uncommented(&self, pat: &str) -> Option<usize>;
}

impl FindUncommented for str {
    fn find_uncommented(&self, pat: &str) -> Option<usize> {
        let mut needle_iter = pat.chars();
        for (kind, (i, b)) in CharClasses::new(self.char_indices()) {
            match needle_iter.next() {
                None => {
                    // The whole needle has been matched; `i` is one past its end.
                    return Some(i - pat.len());
                }
                Some(c) => match kind {
                    FullCodeCharKind::Normal | FullCodeCharKind::InString if b == c => {}
                    _ => {
                        // Mismatch, or we are inside a comment: restart the needle.
                        needle_iter = pat.chars();
                    }
                },
            }
        }

        // Handle case where the pattern is a suffix of the search string
        match needle_iter.next() {
            Some(_) => None,
            None => Some(self.len() - pat.len()),
        }
    }
}

// Returns the first byte position after the first comment. The given string
// is expected to be prefixed by a comment, including delimiters.
// Good: "/* /* inner */ outer */ code();"
// Bad: "code(); // hello\n world!"
pub fn find_comment_end(s: &str) -> Option<usize> {
    let mut iter = CharClasses::new(s.char_indices());
    for (kind, (i, _c)) in &mut iter {
        // The first non-comment character marks the end of the leading comment.
        if kind == FullCodeCharKind::Normal || kind == FullCodeCharKind::InString {
            return Some(i);
        }
    }

    // Handle case where the comment ends at the end of s.
    if iter.status == CharClassesStatus::Normal {
        Some(s.len())
    } else {
        // The comment is unterminated.
        None
    }
}

/// Returns true if text contains any comment.
pub fn contains_comment(text: &str) -> bool {
    CharClasses::new(text.chars()).any(|(kind, _)| kind.is_comment())
}

/// Remove trailing spaces from the specified snippet. We do not remove spaces
/// inside strings or comments.
pub fn remove_trailing_white_spaces(text: &str) -> String {
    let mut buffer = String::with_capacity(text.len());
    // Pending run of whitespace; only flushed when followed by a
    // non-whitespace character (or by '\n' while inside a string).
    let mut space_buffer = String::with_capacity(128);
    for (char_kind, c) in CharClasses::new(text.chars()) {
        match c {
            '\n' => {
                if char_kind == FullCodeCharKind::InString {
                    buffer.push_str(&space_buffer);
                }
                space_buffer.clear();
                buffer.push('\n');
            }
            _ if c.is_whitespace() => {
                space_buffer.push(c);
            }
            _ => {
                if !space_buffer.is_empty() {
                    buffer.push_str(&space_buffer);
                    space_buffer.clear();
                }
                buffer.push(c);
            }
        }
    }
    buffer
}

/// A lexer over an iterator of characters (or `(index, char)` pairs) that
/// tags each item with a `FullCodeCharKind`: code, string literal, or comment.
pub struct CharClasses<T>
where
    T: Iterator,
    T::Item: RichChar,
{
    base: MultiPeek<T>,
    // Current lexer state; also inspected by `find_comment_end` to detect
    // unterminated comments.
    status: CharClassesStatus,
}

/// Abstraction over `char` and `(usize, char)` so that `CharClasses` can wrap
/// both `Chars` and `CharIndices`.
pub trait RichChar {
    fn get_char(&self) -> char;
}

impl RichChar for char {
    fn get_char(&self) -> char {
        *self
    }
}

impl RichChar for (usize, char) {
    fn get_char(&self) -> char {
        self.1
    }
}

/// Lexer state for `CharClasses`.
#[derive(PartialEq, Eq, Debug, Clone, Copy)]
enum CharClassesStatus {
    Normal,
    LitString,
    LitStringEscape,
    LitChar,
    LitCharEscape,
    // The u32 is the nesting deepness of the comment
    BlockComment(u32),
    // Status when the '/' has been consumed, but not yet the '*', deepness is
    // the new deepness (after the comment opening).
    BlockCommentOpening(u32),
    // Status when the '*' has been consumed, but not yet the '/', deepness is
    // the new deepness (after the comment closing).
    BlockCommentClosing(u32),
    LineComment,
}

/// Distinguish between functional part of code and comments
#[derive(PartialEq, Eq, Debug, Clone, Copy)]
pub enum CodeCharKind {
    Normal,
    Comment,
}

/// Distinguish between functional part of code and comments,
/// describing opening and closing of comments for ease when chunking
/// code from tagged characters
#[derive(PartialEq, Eq, Debug, Clone, Copy)]
pub enum FullCodeCharKind {
    Normal,
    /// The first character of a comment, there is only one for a comment (always '/')
    StartComment,
    /// Any character inside a comment including the second character of comment
    /// marks ("//", "/*")
    InComment,
    /// Last character of a comment, '\n' for a line comment, '/' for a block comment.
    EndComment,
    /// Inside a string.
    InString,
}

impl FullCodeCharKind {
    pub fn is_comment(&self) -> bool {
        match *self {
            FullCodeCharKind::StartComment
            | FullCodeCharKind::InComment
            | FullCodeCharKind::EndComment => true,
            _ => false,
        }
    }

    pub fn is_string(&self) -> bool {
        *self == FullCodeCharKind::InString
    }

    /// Collapses the five-state kind into the binary code/comment distinction;
    /// string characters count as `Normal`.
    fn to_codecharkind(&self) -> CodeCharKind {
        if self.is_comment() {
            CodeCharKind::Comment
        } else {
            CodeCharKind::Normal
        }
    }
}

impl<T> CharClasses<T>
where
    T: Iterator,
    T::Item: RichChar,
{
    pub fn new(base: T) -> CharClasses<T> {
        CharClasses {
            base: multipeek(base),
            status: CharClassesStatus::Normal,
        }
    }
}

impl<T> Iterator for CharClasses<T>
where
    T: Iterator,
    T::Item: RichChar,
{
    type Item = (FullCodeCharKind, T::Item);

    fn next(&mut self) -> Option<(FullCodeCharKind, T::Item)> {
        let item = self.base.next()?;
        let chr = item.get_char();
        let mut char_kind = FullCodeCharKind::Normal;
        // Advance the state machine; arms that `return` set `self.status`
        // themselves, the others produce the next status as the match value.
        self.status = match self.status {
            CharClassesStatus::LitString => match chr {
                '"' => CharClassesStatus::Normal,
                '\\' => {
                    char_kind = FullCodeCharKind::InString;
                    CharClassesStatus::LitStringEscape
                }
                _ => {
                    char_kind = FullCodeCharKind::InString;
                    CharClassesStatus::LitString
                }
            },
            CharClassesStatus::LitStringEscape => {
                char_kind = FullCodeCharKind::InString;
                CharClassesStatus::LitString
            }
            CharClassesStatus::LitChar => match chr {
                '\\' => CharClassesStatus::LitCharEscape,
                '\'' => CharClassesStatus::Normal,
                _ => CharClassesStatus::LitChar,
            },
            CharClassesStatus::LitCharEscape => CharClassesStatus::LitChar,
            CharClassesStatus::Normal => match chr {
                '"' => {
                    char_kind = FullCodeCharKind::InString;
                    CharClassesStatus::LitString
                }
                '\'' => {
                    // A single quote is a char literal only when followed by
                    // an escape or a closing quote; otherwise it is a lifetime.
                    // HACK: Work around mut borrow.
                    match self.base.peek() {
                        Some(next) if next.get_char() == '\\' => {
                            self.status = CharClassesStatus::LitChar;
                            return Some((char_kind, item));
                        }
                        _ => (),
                    }

                    match self.base.peek() {
                        Some(next) if next.get_char() == '\'' => CharClassesStatus::LitChar,
                        _ => CharClassesStatus::Normal,
                    }
                }
                '/' => match self.base.peek() {
                    Some(next) if next.get_char() == '*' => {
                        self.status = CharClassesStatus::BlockCommentOpening(1);
                        return Some((FullCodeCharKind::StartComment, item));
                    }
                    Some(next) if next.get_char() == '/' => {
                        self.status = CharClassesStatus::LineComment;
                        return Some((FullCodeCharKind::StartComment, item));
                    }
                    _ => CharClassesStatus::Normal,
                },
                _ => CharClassesStatus::Normal,
            },
            CharClassesStatus::BlockComment(deepness) => {
                assert_ne!(deepness, 0);
                // Peek ahead for "*/" (closing) or "/*" (nested opening).
                self.status = match self.base.peek() {
                    Some(next) if next.get_char() == '/' && chr == '*' => {
                        CharClassesStatus::BlockCommentClosing(deepness - 1)
                    }
                    Some(next) if next.get_char() == '*' && chr == '/' => {
                        CharClassesStatus::BlockCommentOpening(deepness + 1)
                    }
                    _ => CharClassesStatus::BlockComment(deepness),
                };
                return Some((FullCodeCharKind::InComment, item));
            }
            CharClassesStatus::BlockCommentOpening(deepness) => {
                assert_eq!(chr, '*');
                self.status = CharClassesStatus::BlockComment(deepness);
                return Some((FullCodeCharKind::InComment, item));
            }
            CharClassesStatus::BlockCommentClosing(deepness) => {
                assert_eq!(chr, '/');
                if deepness == 0 {
                    // Outermost block comment just closed.
                    self.status = CharClassesStatus::Normal;
                    return Some((FullCodeCharKind::EndComment, item));
                } else {
                    self.status = CharClassesStatus::BlockComment(deepness);
                    return Some((FullCodeCharKind::InComment, item));
                }
            }
            CharClassesStatus::LineComment => match chr {
                '\n' => {
                    self.status = CharClassesStatus::Normal;
                    return Some((FullCodeCharKind::EndComment, item));
                }
                _ => {
                    self.status = CharClassesStatus::LineComment;
                    return Some((FullCodeCharKind::InComment, item));
                }
            },
        };
        Some((char_kind, item))
    }
}

/// An iterator over the lines of a string, paired with the char kind at the
/// end of the line.
pub struct LineClasses<'a> {
    base: iter::Peekable<CharClasses<std::str::Chars<'a>>>,
    kind: FullCodeCharKind,
}

impl<'a> LineClasses<'a> {
    pub fn new(s: &'a str) -> Self {
        LineClasses {
            base: CharClasses::new(s.chars()).peekable(),
            kind: FullCodeCharKind::Normal,
        }
    }
}

impl<'a> Iterator for LineClasses<'a> {
    type Item = (FullCodeCharKind, String);

    fn next(&mut self) -> Option<Self::Item> {
        if self.base.peek().is_none() {
            return None;
        }

        let mut line = String::new();

        // Collect up to (and excluding) the next newline; `self.kind` ends up
        // holding the kind of the last character consumed.
        while let Some((kind, c)) = self.base.next() {
            self.kind = kind;
            if c == '\n' {
                break;
            } else {
                line.push(c);
            }
        }

        Some((self.kind, line))
    }
}

/// Iterator over functional and commented parts of a string. Any part of a string is either
/// functional code, either *one* block comment, either *one* line comment. Whitespace between
/// comments is functional code. Line comments contain their ending newlines.
struct UngroupedCommentCodeSlices<'a> {
    // The full input; items are subslices of it.
    slice: &'a str,
    iter: iter::Peekable<CharClasses<std::str::CharIndices<'a>>>,
}

impl<'a> UngroupedCommentCodeSlices<'a> {
    fn new(code: &'a str) -> UngroupedCommentCodeSlices<'a> {
        UngroupedCommentCodeSlices {
            slice: code,
            iter: CharClasses::new(code.char_indices()).peekable(),
        }
    }
}

impl<'a> Iterator for UngroupedCommentCodeSlices<'a> {
    // (kind, byte offset of the slice start, the slice itself)
    type Item = (CodeCharKind, usize, &'a str);

    fn next(&mut self) -> Option<Self::Item> {
        let (kind, (start_idx, _)) = self.iter.next()?;
        match kind {
            FullCodeCharKind::Normal | FullCodeCharKind::InString => {
                // Consume all the Normal code
                while let Some(&(char_kind, _)) = self.iter.peek() {
                    if char_kind.is_comment() {
                        break;
                    }
                    let _ = self.iter.next();
                }
            }
            FullCodeCharKind::StartComment => {
                // Consume the whole comment
                while let Some((FullCodeCharKind::InComment, (_, _))) = self.iter.next() {}
            }
            // An item can never begin with `InComment`/`EndComment`: the arms
            // above always consume a comment through its end.
            _ => panic!(),
        }
        // The slice ends where the next (unconsumed) character begins, or at
        // the end of the input.
        let slice = match self.iter.peek() {
            Some(&(_, (end_idx, _))) => &self.slice[start_idx..end_idx],
            None => &self.slice[start_idx..],
        };
        Some((
            if kind.is_comment() {
                CodeCharKind::Comment
            } else {
                CodeCharKind::Normal
            },
            start_idx,
            slice,
        ))
    }
}

/// Iterator over an alternating sequence of functional and commented parts of
/// a string. The first item is always a, possibly zero length, subslice of
/// functional text. Line style comments contain their ending newlines.
pub struct CommentCodeSlices<'a> {
    slice: &'a str,
    // Kind of the slice returned last; the kinds strictly alternate, and the
    // initial value `Comment` makes the first returned slice `Normal`.
    last_slice_kind: CodeCharKind,
    // Byte offset just past the last returned slice.
    last_slice_end: usize,
}

impl<'a> CommentCodeSlices<'a> {
    pub fn new(slice: &'a str) -> CommentCodeSlices<'a> {
        CommentCodeSlices {
            slice,
            last_slice_kind: CodeCharKind::Comment,
            last_slice_end: 0,
        }
    }
}

impl<'a> Iterator for CommentCodeSlices<'a> {
    // (kind, byte offset of the slice start, the slice itself)
    type Item = (CodeCharKind, usize, &'a str);

    fn next(&mut self) -> Option<Self::Item> {
        if self.last_slice_end == self.slice.len() {
            return None;
        }

        let mut sub_slice_end = self.last_slice_end;
        let mut first_whitespace = None;
        let subslice = &self.slice[self.last_slice_end..];
        let mut iter = CharClasses::new(subslice.char_indices());

        for (kind, (i, c)) in &mut iter {
            // Whitespace between consecutive `//` lines is treated as a
            // "connector" so that a run of line comments stays in one slice.
            let is_comment_connector = self.last_slice_kind == CodeCharKind::Normal
                && &subslice[..2] == "//"
                && [' ', '\t'].contains(&c);

            if is_comment_connector && first_whitespace.is_none() {
                first_whitespace = Some(i);
            }

            // The current slice ends when the char kind flips back to the
            // previously returned kind.
            if kind.to_codecharkind() == self.last_slice_kind && !is_comment_connector {
                let last_index = match first_whitespace {
                    Some(j) => j,
                    None => i,
                };
                sub_slice_end = self.last_slice_end + last_index;
                break;
            }

            if !is_comment_connector {
                first_whitespace = None;
            }
        }

        if let (None, true) = (iter.next(), sub_slice_end == self.last_slice_end) {
            // This was the last subslice.
            sub_slice_end = match first_whitespace {
                Some(i) => self.last_slice_end + i,
                None => self.slice.len(),
            };
        }

        let kind = match self.last_slice_kind {
            CodeCharKind::Comment => CodeCharKind::Normal,
            CodeCharKind::Normal => CodeCharKind::Comment,
        };
        let res = (
            kind,
            self.last_slice_end,
            &self.slice[self.last_slice_end..sub_slice_end],
        );
        self.last_slice_end = sub_slice_end;
        self.last_slice_kind = kind;

        Some(res)
    }
}

/// Checks if `new` didn't miss any comment from `span`, if it removed any, return previous text
/// (if it fits in the width/offset, else return None), else return `new`
pub fn recover_comment_removed(
    new: String,
    span: Span,
    context: &RewriteContext,
) -> Option<String> {
    let snippet = context.snippet(span);
    if snippet != new && changed_comment_content(snippet, &new) {
        // We missed some comments. Keep the original text.
        Some(snippet.to_owned())
    } else {
        Some(new)
    }
}

/// Returns true if the two strings of code have *different* payloads of comments.
/// The payload of comments is everything in the string except:
/// - actual code (not comments)
/// - comment start/end marks
/// - whitespace
/// - '*' at the beginning of lines in block comments
fn changed_comment_content(orig: &str, new: &str) -> bool {
    // Cannot write this as a fn since we cannot return types containing closures
    let code_comment_content = |code| {
        let slices = UngroupedCommentCodeSlices::new(code);
        slices
            .filter(|&(ref kind, _, _)| *kind == CodeCharKind::Comment)
            .flat_map(|(_, _, s)| CommentReducer::new(s))
    };
    // `ne` compares the two payload character streams element-wise.
    let res = code_comment_content(orig).ne(code_comment_content(new));
    debug!(
        "comment::changed_comment_content: {}\norig: '{}'\nnew: '{}'\nraw_old: {}\nraw_new: {}",
        res,
        orig,
        new,
        code_comment_content(orig).collect::<String>(),
        code_comment_content(new).collect::<String>()
    );
    res
}

/// Iterator over the 'payload' characters of a comment.
/// It skips whitespace, comment start/end marks, and '*' at the beginning of lines.
/// The comment must be one comment, ie not more than one start mark (no multiple line comments,
/// for example).
struct CommentReducer<'a> {
    // True for `/* ... */` comments; enables stripping of the decorative
    // leading '*' on continuation lines in `next` below.
    is_block: bool,
    // Set once a '\n' has been seen; note the code never clears it again.
    at_start_line: bool,
    // Remaining characters of the comment, with its start mark already removed.
    iter: std::str::Chars<'a>,
}

impl<'a> CommentReducer<'a> {
    // Builds a reducer over `comment` after stripping its start/end marks
    // via `remove_comment_header`.
    fn new(comment: &'a str) -> CommentReducer<'a> {
        let is_block = comment.starts_with("/*");
        let comment = remove_comment_header(comment);
        CommentReducer {
            is_block,
            at_start_line: false, // There are no supplementary '*' on the first line
            iter: comment.chars(),
        }
    }
}

impl<'a> Iterator for CommentReducer<'a> {
    type Item = char;

    // Yields the next non-whitespace payload character. Inside block comments,
    // once a line break has been seen, leading whitespace and a single leading
    // '*' on each line are skipped as decoration.
    fn next(&mut self) -> Option<Self::Item> {
        loop {
            let mut c = self.iter.next()?;
            if self.is_block && self.at_start_line {
                while c.is_whitespace() {
                    c = self.iter.next()?;
                }
                // Ignore leading '*'
                if c == '*' {
                    c = self.iter.next()?;
                }
            } else if c == '\n' {
                self.at_start_line = true;
            }
            if !c.is_whitespace() {
                return Some(c);
            }
        }
    }
}

// Strips the comment's start mark (and, for block comments, the trailing
// end mark), returning the raw interior text.
fn remove_comment_header(comment: &str) -> &str {
    if comment.starts_with("///") || comment.starts_with("//!") {
        &comment[3..]
    } else if comment.starts_with("//") {
        &comment[2..]
} else if (comment.starts_with("/**") && !comment.starts_with("/**/"))
    || comment.starts_with("/*!")
{
    &comment[3..comment.len() - 2]
} else {
    // Was `assert!(cond, format!(..))`: passing a pre-formatted String as the
    // panic message triggers the `non_fmt_panics` lint (a hard error in
    // edition 2021). Hand the format string and arguments to `assert!`
    // directly instead; they are only evaluated on failure.
    assert!(
        comment.starts_with("/*"),
        "string '{}' is not a comment",
        comment
    );
    &comment[2..comment.len() - 2]
}
}

#[cfg(test)]
mod test {
    use super::*;
    use shape::{Indent, Shape};

    #[test]
    fn char_classes() {
        let mut iter = CharClasses::new("//\n\n".chars());
        assert_eq!((FullCodeCharKind::StartComment, '/'), iter.next().unwrap());
        assert_eq!((FullCodeCharKind::InComment, '/'), iter.next().unwrap());
        assert_eq!((FullCodeCharKind::EndComment, '\n'), iter.next().unwrap());
        assert_eq!((FullCodeCharKind::Normal, '\n'), iter.next().unwrap());
        assert_eq!(None, iter.next());
    }

    #[test]
    fn comment_code_slices() {
        let input = "code(); /* test */ 1 + 1";
        let mut iter = CommentCodeSlices::new(input);

        assert_eq!((CodeCharKind::Normal, 0, "code(); "), iter.next().unwrap());
        assert_eq!(
            (CodeCharKind::Comment, 8, "/* test */"),
            iter.next().unwrap()
        );
        assert_eq!((CodeCharKind::Normal, 18, " 1 + 1"), iter.next().unwrap());
        assert_eq!(None, iter.next());
    }

    #[test]
    fn comment_code_slices_two() {
        let input = "// comment\n test();";
        let mut iter = CommentCodeSlices::new(input);

        assert_eq!((CodeCharKind::Normal, 0, ""), iter.next().unwrap());
        assert_eq!(
            (CodeCharKind::Comment, 0, "// comment\n"),
            iter.next().unwrap()
        );
        assert_eq!(
            (CodeCharKind::Normal, 11, " test();"),
            iter.next().unwrap()
        );
        assert_eq!(None, iter.next());
    }

    #[test]
    fn comment_code_slices_three() {
        // The asserted byte offset 29 of the trailing `Normal` slice pins the
        // indent before `// comment2` at three spaces (2 + 11 + 16 = 29); the
        // fixture's whitespace had been collapsed to a single space, which
        // contradicted the offsets below.
        let input = "1 // comment\n   // comment2\n\n";
        let mut iter = CommentCodeSlices::new(input);

        assert_eq!((CodeCharKind::Normal, 0, "1 "), iter.next().unwrap());
        assert_eq!(
            (CodeCharKind::Comment, 2, "// comment\n   // comment2\n"),
            iter.next().unwrap()
        );
        assert_eq!((CodeCharKind::Normal, 29, "\n"), iter.next().unwrap());
        assert_eq!(None, iter.next());
    }

    #[test]
    #[rustfmt::skip]
    fn format_comments() {
        let mut config: ::config::Config = Default::default();
        config.set().wrap_comments(true);
        config.set().normalize_comments(true);

        let comment = rewrite_comment(" //test",
                                      true,
                                      Shape::legacy(100, Indent::new(0, 100)),
                                      &config).unwrap();
        assert_eq!("/* test */", comment);

        let comment = rewrite_comment("// comment on a",
                                      false,
                                      Shape::legacy(10, Indent::empty()),
                                      &config).unwrap();
        assert_eq!("// comment\n// on a", comment);

        // NOTE(review): the whitespace inside this multi-line fixture looks
        // collapsed by extraction; the expected continuation indent should
        // presumably match `Indent::new(0, 12)` (12 spaces) — verify against
        // upstream before relying on it.
        let comment = rewrite_comment("// A multi line comment\n // between args.",
                                      false,
                                      Shape::legacy(60, Indent::new(0, 12)),
                                      &config).unwrap();
        assert_eq!("// A multi line comment\n // between args.", comment);

        let input = "// comment";
        let expected = "/* comment */";
        let comment = rewrite_comment(input,
                                      true,
                                      Shape::legacy(9, Indent::new(0, 69)),
                                      &config).unwrap();
        assert_eq!(expected, comment);

        let comment = rewrite_comment("/* trimmed */",
                                      true,
                                      Shape::legacy(100, Indent::new(0, 100)),
                                      &config).unwrap();
        assert_eq!("/* trimmed */", comment);
    }

    // This is probably intended to be a non-test fn, but it is not used. I'm
    // keeping it around unless it helps us test stuff.
    fn uncommented(text: &str) -> String {
        CharClasses::new(text.chars())
            .filter_map(|(s, c)| match s {
                FullCodeCharKind::Normal | FullCodeCharKind::InString => Some(c),
                _ => None,
            })
            .collect()
    }

    #[test]
    fn test_uncommented() {
        assert_eq!(&uncommented("abc/*...*/"), "abc");
        // The `\n` ending the line comment must be an escape *inside* the
        // literal, not a physical source line break: a physical break would
        // terminate the `//` comment early and the leading ".." would be
        // swallowed by the following block comment, so "..ac\n" could never
        // match.
        assert_eq!(
            &uncommented("// .... /* \n../* /* *** / */ */a/* // */c\n"),
            "..ac\n"
        );
        assert_eq!(&uncommented("abc \" /* */\" qsdf"), "abc \" /* */\" qsdf");
    }

    #[test]
    fn test_contains_comment() {
        assert_eq!(contains_comment("abc"), false);
        assert_eq!(contains_comment("abc // qsdf"), true);
        assert_eq!(contains_comment("abc /* kqsdf"), true);
        assert_eq!(contains_comment("abc \" /* */\" qsdf"), false);
    }

    #[test]
    fn test_find_uncommented() {
        fn check(haystack: &str, needle: &str, expected: Option<usize>) {
            assert_eq!(expected, haystack.find_uncommented(needle));
        }

        check("/*/ */test", "test", Some(6));
        check("//test\ntest", "test", Some(7));
        check("/* comment only */", "whatever", None);
        check(
            "/* comment */ some text /* more commentary */ result",
            "result",
            Some(46),
        );
        check("sup // sup", "p", Some(2));
        check("sup", "x", None);
        check(r#"π? /**/ π is nice!"#, r#"π is nice"#, Some(9));
        check("/*sup yo? \n sup*/ sup", "p", Some(20));
        check("hel/*lohello*/lo", "hello", None);
        check("acb", "ab", None);
        check(",/*A*/ ", ",", Some(0));
        check("abc", "abc", Some(0));
        check("/* abc */", "abc", None);
        check("/**/abc/* */", "abc", Some(4));
        check("\"/* abc */\"", "abc", Some(4));
        check("\"/* abc", "abc", Some(4));
    }

    #[test]
    fn test_remove_trailing_white_spaces() {
        let s = format!(" r#\"\n test\n \"#");
        assert_eq!(remove_trailing_white_spaces(&s), s);
    }
}
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Complex numbers. use std::fmt; use std::ops::{Add, Div, Mul, Neg, Sub}; use {Zero, One, Num, Float}; // FIXME #1284: handle complex NaN & infinity etc. This // probably doesn't map to C's _Complex correctly. /// A complex number in Cartesian form. #[derive(PartialEq, Copy, Clone, Hash, Debug)] #[cfg_attr(feature = "rustc-serialize", derive(RustcEncodable, RustcDecodable))] pub struct Complex<T> { /// Real portion of the complex number pub re: T, /// Imaginary portion of the complex number pub im: T } pub type Complex32 = Complex<f32>; pub type Complex64 = Complex<f64>; impl<T: Clone + Num> Complex<T> { /// Create a new Complex #[inline] pub fn new(re: T, im: T) -> Complex<T> { Complex { re: re, im: im } } /// Returns the square of the norm (since `T` doesn't necessarily /// have a sqrt function), i.e. `re^2 + im^2`. #[inline] pub fn norm_sqr(&self) -> T { self.re.clone() * self.re.clone() + self.im.clone() * self.im.clone() } /// Multiplies `self` by the scalar `t`. #[inline] pub fn scale(&self, t: T) -> Complex<T> { Complex::new(self.re.clone() * t.clone(), self.im.clone() * t) } /// Divides `self` by the scalar `t`. #[inline] pub fn unscale(&self, t: T) -> Complex<T> { Complex::new(self.re.clone() / t.clone(), self.im.clone() / t) } } impl<T: Clone + Num + Neg<Output = T>> Complex<T> { /// Returns the complex conjugate. i.e. 
`re - i im` #[inline] pub fn conj(&self) -> Complex<T> { Complex::new(self.re.clone(), -self.im.clone()) } /// Returns `1/self` #[inline] pub fn inv(&self) -> Complex<T> { let norm_sqr = self.norm_sqr(); Complex::new(self.re.clone() / norm_sqr.clone(), -self.im.clone() / norm_sqr) } } impl<T: Clone + Float> Complex<T> { /// Calculate |self| #[inline] pub fn norm(&self) -> T { self.re.clone().hypot(self.im.clone()) } } impl<T: Clone + Float + Num> Complex<T> { /// Calculate the principal Arg of self. #[inline] pub fn arg(&self) -> T { self.im.clone().atan2(self.re.clone()) } /// Convert to polar form (r, theta), such that `self = r * exp(i /// * theta)` #[inline] pub fn to_polar(&self) -> (T, T) { (self.norm(), self.arg()) } /// Convert a polar representation into a complex number. #[inline] pub fn from_polar(r: &T, theta: &T) -> Complex<T> { Complex::new(*r * theta.cos(), *r * theta.sin()) } } macro_rules! forward_val_val_binop { (impl $imp:ident, $method:ident) => { impl<T: Clone + Num> $imp<Complex<T>> for Complex<T> { type Output = Complex<T>; #[inline] fn $method(self, other: Complex<T>) -> Complex<T> { (&self).$method(&other) } } } } macro_rules! forward_ref_val_binop { (impl $imp:ident, $method:ident) => { impl<'a, T: Clone + Num> $imp<Complex<T>> for &'a Complex<T> { type Output = Complex<T>; #[inline] fn $method(self, other: Complex<T>) -> Complex<T> { self.$method(&other) } } } } macro_rules! forward_val_ref_binop { (impl $imp:ident, $method:ident) => { impl<'a, T: Clone + Num> $imp<&'a Complex<T>> for Complex<T> { type Output = Complex<T>; #[inline] fn $method(self, other: &Complex<T>) -> Complex<T> { (&self).$method(other) } } } } macro_rules! 
forward_all_binop { (impl $imp:ident, $method:ident) => { forward_val_val_binop!(impl $imp, $method); forward_ref_val_binop!(impl $imp, $method); forward_val_ref_binop!(impl $imp, $method); }; } /* arithmetic */ forward_all_binop!(impl Add, add); // (a + i b) + (c + i d) == (a + c) + i (b + d) impl<'a, 'b, T: Clone + Num> Add<&'b Complex<T>> for &'a Complex<T> { type Output = Complex<T>; #[inline] fn add(self, other: &Complex<T>) -> Complex<T> { Complex::new(self.re.clone() + other.re.clone(), self.im.clone() + other.im.clone()) } } forward_all_binop!(impl Sub, sub); // (a + i b) - (c + i d) == (a - c) + i (b - d) impl<'a, 'b, T: Clone + Num> Sub<&'b Complex<T>> for &'a Complex<T> { type Output = Complex<T>; #[inline] fn sub(self, other: &Complex<T>) -> Complex<T> { Complex::new(self.re.clone() - other.re.clone(), self.im.clone() - other.im.clone()) } } forward_all_binop!(impl Mul, mul); // (a + i b) * (c + i d) == (a*c - b*d) + i (a*d + b*c) impl<'a, 'b, T: Clone + Num> Mul<&'b Complex<T>> for &'a Complex<T> { type Output = Complex<T>; #[inline] fn mul(self, other: &Complex<T>) -> Complex<T> { Complex::new(self.re.clone() * other.re.clone() - self.im.clone() * other.im.clone(), self.re.clone() * other.im.clone() + self.im.clone() * other.re.clone()) } } forward_all_binop!(impl Div, div); // (a + i b) / (c + i d) == [(a + i b) * (c - i d)] / (c*c + d*d) // == [(a*c + b*d) / (c*c + d*d)] + i [(b*c - a*d) / (c*c + d*d)] impl<'a, 'b, T: Clone + Num> Div<&'b Complex<T>> for &'a Complex<T> { type Output = Complex<T>; #[inline] fn div(self, other: &Complex<T>) -> Complex<T> { let norm_sqr = other.norm_sqr(); Complex::new((self.re.clone() * other.re.clone() + self.im.clone() * other.im.clone()) / norm_sqr.clone(), (self.im.clone() * other.re.clone() - self.re.clone() * other.im.clone()) / norm_sqr) } } impl<T: Clone + Num + Neg<Output = T>> Neg for Complex<T> { type Output = Complex<T>; #[inline] fn neg(self) -> Complex<T> { -&self } } impl<'a, T: Clone + Num + Neg<Output 
= T>> Neg for &'a Complex<T> { type Output = Complex<T>; #[inline] fn neg(self) -> Complex<T> { Complex::new(-self.re.clone(), -self.im.clone()) } } /* constants */ impl<T: Clone + Num> Zero for Complex<T> { #[inline] fn zero() -> Complex<T> { Complex::new(Zero::zero(), Zero::zero()) } #[inline] fn is_zero(&self) -> bool { self.re.is_zero() && self.im.is_zero() } } impl<T: Clone + Num> One for Complex<T> { #[inline] fn one() -> Complex<T> { Complex::new(One::one(), Zero::zero()) } } /* string conversions */ impl<T> fmt::Display for Complex<T> where T: fmt::Display + Num + PartialOrd + Clone { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if self.im < Zero::zero() { write!(f, "{}-{}i", self.re, T::zero() - self.im.clone()) } else { write!(f, "{}+{}i", self.re, self.im) } } } #[cfg(test)] mod test { #![allow(non_upper_case_globals)] use super::{Complex64, Complex}; use std::f64; use {Zero, One, Float}; pub const _0_0i : Complex64 = Complex { re: 0.0, im: 0.0 }; pub const _1_0i : Complex64 = Complex { re: 1.0, im: 0.0 }; pub const _1_1i : Complex64 = Complex { re: 1.0, im: 1.0 }; pub const _0_1i : Complex64 = Complex { re: 0.0, im: 1.0 }; pub const _neg1_1i : Complex64 = Complex { re: -1.0, im: 1.0 }; pub const _05_05i : Complex64 = Complex { re: 0.5, im: 0.5 }; pub const all_consts : [Complex64; 5] = [_0_0i, _1_0i, _1_1i, _neg1_1i, _05_05i]; #[test] fn test_consts() { // check our constants are what Complex::new creates fn test(c : Complex64, r : f64, i: f64) { assert_eq!(c, Complex::new(r,i)); } test(_0_0i, 0.0, 0.0); test(_1_0i, 1.0, 0.0); test(_1_1i, 1.0, 1.0); test(_neg1_1i, -1.0, 1.0); test(_05_05i, 0.5, 0.5); assert_eq!(_0_0i, Zero::zero()); assert_eq!(_1_0i, One::one()); } #[test] #[cfg_attr(target_arch = "x86", ignore)] // FIXME #7158: (maybe?) currently failing on x86. 
fn test_norm() { fn test(c: Complex64, ns: f64) { assert_eq!(c.norm_sqr(), ns); assert_eq!(c.norm(), ns.sqrt()) } test(_0_0i, 0.0); test(_1_0i, 1.0); test(_1_1i, 2.0); test(_neg1_1i, 2.0); test(_05_05i, 0.5); } #[test] fn test_scale_unscale() { assert_eq!(_05_05i.scale(2.0), _1_1i); assert_eq!(_1_1i.unscale(2.0), _05_05i); for &c in all_consts.iter() { assert_eq!(c.scale(2.0).unscale(2.0), c); } } #[test] fn test_conj() { for &c in all_consts.iter() { assert_eq!(c.conj(), Complex::new(c.re, -c.im)); assert_eq!(c.conj().conj(), c); } } #[test] fn test_inv() { assert_eq!(_1_1i.inv(), _05_05i.conj()); assert_eq!(_1_0i.inv(), _1_0i.inv()); } #[test] #[should_panic] fn test_divide_by_zero_natural() { let n = Complex::new(2, 3); let d = Complex::new(0, 0); let _x = n / d; } #[test] #[should_panic] #[ignore] fn test_inv_zero() { // FIXME #5736: should this really fail, or just NaN? _0_0i.inv(); } #[test] fn test_arg() { fn test(c: Complex64, arg: f64) { assert!((c.arg() - arg).abs() < 1.0e-6) } test(_1_0i, 0.0); test(_1_1i, 0.25 * f64::consts::PI); test(_neg1_1i, 0.75 * f64::consts::PI); test(_05_05i, 0.25 * f64::consts::PI); } #[test] fn test_polar_conv() { fn test(c: Complex64) { let (r, theta) = c.to_polar(); assert!((c - Complex::from_polar(&r, &theta)).norm() < 1e-6); } for &c in all_consts.iter() { test(c); } } mod arith { use super::{_0_0i, _1_0i, _1_1i, _0_1i, _neg1_1i, _05_05i, all_consts}; use Zero; #[test] fn test_add() { assert_eq!(_05_05i + _05_05i, _1_1i); assert_eq!(_0_1i + _1_0i, _1_1i); assert_eq!(_1_0i + _neg1_1i, _0_1i); for &c in all_consts.iter() { assert_eq!(_0_0i + c, c); assert_eq!(c + _0_0i, c); } } #[test] fn test_sub() { assert_eq!(_05_05i - _05_05i, _0_0i); assert_eq!(_0_1i - _1_0i, _neg1_1i); assert_eq!(_0_1i - _neg1_1i, _1_0i); for &c in all_consts.iter() { assert_eq!(c - _0_0i, c); assert_eq!(c - c, _0_0i); } } #[test] fn test_mul() { assert_eq!(_05_05i * _05_05i, _0_1i.unscale(2.0)); assert_eq!(_1_1i * _0_1i, _neg1_1i); // i^2 & i^4 
assert_eq!(_0_1i * _0_1i, -_1_0i); assert_eq!(_0_1i * _0_1i * _0_1i * _0_1i, _1_0i); for &c in all_consts.iter() { assert_eq!(c * _1_0i, c); assert_eq!(_1_0i * c, c); } } #[test] fn test_div() { assert_eq!(_neg1_1i / _0_1i, _1_1i); for &c in all_consts.iter() { if c != Zero::zero() { assert_eq!(c / c, _1_0i); } } } #[test] fn test_neg() { assert_eq!(-_1_0i + _0_1i, _neg1_1i); assert_eq!((-_0_1i) * _0_1i, _1_0i); for &c in all_consts.iter() { assert_eq!(-(-c), c); } } } #[test] fn test_to_string() { fn test(c : Complex64, s: String) { assert_eq!(c.to_string(), s); } test(_0_0i, "0+0i".to_string()); test(_1_0i, "1+0i".to_string()); test(_0_1i, "0+1i".to_string()); test(_1_1i, "1+1i".to_string()); test(_neg1_1i, "-1+1i".to_string()); test(-_neg1_1i, "1-1i".to_string()); test(_05_05i, "0.5+0.5i".to_string()); } #[test] fn test_hash() { let a = Complex::new(0i32, 0i32); let b = Complex::new(1i32, 0i32); let c = Complex::new(0i32, 1i32); assert!(::hash(&a) != ::hash(&b)); assert!(::hash(&b) != ::hash(&c)); assert!(::hash(&c) != ::hash(&a)); } } Add mathematical functions for complex numbers. For Complex<T: Clone + Float>, the following functions along with corresponding tests were added: - exp - sin, cos, tan - sinh, cosh, tanh // Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Complex numbers. use std::fmt; use std::ops::{Add, Div, Mul, Neg, Sub}; use {Zero, One, Num, Float}; // FIXME #1284: handle complex NaN & infinity etc. This // probably doesn't map to C's _Complex correctly. /// A complex number in Cartesian form. 
#[derive(PartialEq, Copy, Clone, Hash, Debug)] #[cfg_attr(feature = "rustc-serialize", derive(RustcEncodable, RustcDecodable))] pub struct Complex<T> { /// Real portion of the complex number pub re: T, /// Imaginary portion of the complex number pub im: T } pub type Complex32 = Complex<f32>; pub type Complex64 = Complex<f64>; impl<T: Clone + Num> Complex<T> { /// Create a new Complex #[inline] pub fn new(re: T, im: T) -> Complex<T> { Complex { re: re, im: im } } /// Returns the square of the norm (since `T` doesn't necessarily /// have a sqrt function), i.e. `re^2 + im^2`. #[inline] pub fn norm_sqr(&self) -> T { self.re.clone() * self.re.clone() + self.im.clone() * self.im.clone() } /// Multiplies `self` by the scalar `t`. #[inline] pub fn scale(&self, t: T) -> Complex<T> { Complex::new(self.re.clone() * t.clone(), self.im.clone() * t) } /// Divides `self` by the scalar `t`. #[inline] pub fn unscale(&self, t: T) -> Complex<T> { Complex::new(self.re.clone() / t.clone(), self.im.clone() / t) } } impl<T: Clone + Num + Neg<Output = T>> Complex<T> { /// Returns the complex conjugate. i.e. `re - i im` #[inline] pub fn conj(&self) -> Complex<T> { Complex::new(self.re.clone(), -self.im.clone()) } /// Returns `1/self` #[inline] pub fn inv(&self) -> Complex<T> { let norm_sqr = self.norm_sqr(); Complex::new(self.re.clone() / norm_sqr.clone(), -self.im.clone() / norm_sqr) } } impl<T: Clone + Float> Complex<T> { /// Calculate |self| #[inline] pub fn norm(&self) -> T { self.re.clone().hypot(self.im.clone()) } /// Computes e^(self), where e is the base of the natural logarithm. #[inline] pub fn exp(&self) -> Complex<T> { // formula: e^(a + bi) = e^a * (cos(b) + isin(b)) let exp = self.re.exp(); Complex::new(exp * self.im.cos(), exp * self.im.sin()) } /// Computes the sine of self. 
#[inline]
pub fn sin(&self) -> Complex<T> {
    // formula: sin(z) = (e^(iz) - e^(-iz)) / 2i
    let i = Complex::new(Zero::zero(), One::one());
    let two_i = i + i;
    let e_iz = (self*i).exp();
    // e^(-iz) = 1 / e^(iz)
    let e_neg_iz = e_iz.inv();
    (e_iz - e_neg_iz) / two_i
}

/// Computes the cosine of self.
#[inline]
pub fn cos(&self) -> Complex<T> {
    // formula: cos(z) = (e^(iz) + e^(-iz)) / 2
    let i = Complex::new(Zero::zero(), One::one());
    let two = Complex::one() + Complex::one();
    let e_iz = (self*i).exp();
    let e_neg_iz = e_iz.inv();
    (e_iz + e_neg_iz) / two
}

/// Computes the tangent of self.
#[inline]
pub fn tan(&self) -> Complex<T> {
    // formula: tan(z) = i (e^(-iz) - e^(iz)) / (e^(-iz) + e^(iz))
    let i = Complex::new(Zero::zero(), One::one());
    let e_iz = (self*i).exp();
    let e_neg_iz = e_iz.inv();
    i * (e_neg_iz - e_iz) / (e_neg_iz + e_iz)
}

/// Computes the hyperbolic sine of self.
#[inline]
pub fn sinh(&self) -> Complex<T> {
    // formula: sinh(z) = (e^(z) - e^(-z)) / 2
    let two = Complex::one() + Complex::one();
    let e_z = self.exp();
    let e_neg_z = e_z.inv();
    (e_z - e_neg_z) / two
}

/// Computes the hyperbolic cosine of self.
#[inline]
pub fn cosh(&self) -> Complex<T> {
    // formula: cosh(z) = (e^(z) + e^(-z)) / 2
    // (the comment previously said "sinh"; the code computes cosh)
    let two = Complex::one() + Complex::one();
    let e_z = self.exp();
    let e_neg_z = e_z.inv();
    (e_z + e_neg_z) / two
}

/// Computes the hyperbolic tangent of self.
#[inline]
pub fn tanh(&self) -> Complex<T> {
    // formula: tanh(z) = (e^(z) - e^(-z)) / (e^(z) + e^(-z))
    let e_z = self.exp();
    let e_neg_z = e_z.inv();
    (e_z - e_neg_z) / (e_z + e_neg_z)
}
}

impl<T: Clone + Float + Num> Complex<T> {
/// Calculate the principal Arg of self.
#[inline]
pub fn arg(&self) -> T {
    self.im.clone().atan2(self.re.clone())
}

/// Convert to polar form (r, theta), such that `self = r * exp(i
/// * theta)`
#[inline]
pub fn to_polar(&self) -> (T, T) {
    (self.norm(), self.arg())
}

/// Convert a polar representation into a complex number.
#[inline] pub fn from_polar(r: &T, theta: &T) -> Complex<T> { Complex::new(*r * theta.cos(), *r * theta.sin()) } } macro_rules! forward_val_val_binop { (impl $imp:ident, $method:ident) => { impl<T: Clone + Num> $imp<Complex<T>> for Complex<T> { type Output = Complex<T>; #[inline] fn $method(self, other: Complex<T>) -> Complex<T> { (&self).$method(&other) } } } } macro_rules! forward_ref_val_binop { (impl $imp:ident, $method:ident) => { impl<'a, T: Clone + Num> $imp<Complex<T>> for &'a Complex<T> { type Output = Complex<T>; #[inline] fn $method(self, other: Complex<T>) -> Complex<T> { self.$method(&other) } } } } macro_rules! forward_val_ref_binop { (impl $imp:ident, $method:ident) => { impl<'a, T: Clone + Num> $imp<&'a Complex<T>> for Complex<T> { type Output = Complex<T>; #[inline] fn $method(self, other: &Complex<T>) -> Complex<T> { (&self).$method(other) } } } } macro_rules! forward_all_binop { (impl $imp:ident, $method:ident) => { forward_val_val_binop!(impl $imp, $method); forward_ref_val_binop!(impl $imp, $method); forward_val_ref_binop!(impl $imp, $method); }; } /* arithmetic */ forward_all_binop!(impl Add, add); // (a + i b) + (c + i d) == (a + c) + i (b + d) impl<'a, 'b, T: Clone + Num> Add<&'b Complex<T>> for &'a Complex<T> { type Output = Complex<T>; #[inline] fn add(self, other: &Complex<T>) -> Complex<T> { Complex::new(self.re.clone() + other.re.clone(), self.im.clone() + other.im.clone()) } } forward_all_binop!(impl Sub, sub); // (a + i b) - (c + i d) == (a - c) + i (b - d) impl<'a, 'b, T: Clone + Num> Sub<&'b Complex<T>> for &'a Complex<T> { type Output = Complex<T>; #[inline] fn sub(self, other: &Complex<T>) -> Complex<T> { Complex::new(self.re.clone() - other.re.clone(), self.im.clone() - other.im.clone()) } } forward_all_binop!(impl Mul, mul); // (a + i b) * (c + i d) == (a*c - b*d) + i (a*d + b*c) impl<'a, 'b, T: Clone + Num> Mul<&'b Complex<T>> for &'a Complex<T> { type Output = Complex<T>; #[inline] fn mul(self, other: &Complex<T>) -> 
Complex<T> { Complex::new(self.re.clone() * other.re.clone() - self.im.clone() * other.im.clone(), self.re.clone() * other.im.clone() + self.im.clone() * other.re.clone()) } } forward_all_binop!(impl Div, div); // (a + i b) / (c + i d) == [(a + i b) * (c - i d)] / (c*c + d*d) // == [(a*c + b*d) / (c*c + d*d)] + i [(b*c - a*d) / (c*c + d*d)] impl<'a, 'b, T: Clone + Num> Div<&'b Complex<T>> for &'a Complex<T> { type Output = Complex<T>; #[inline] fn div(self, other: &Complex<T>) -> Complex<T> { let norm_sqr = other.norm_sqr(); Complex::new((self.re.clone() * other.re.clone() + self.im.clone() * other.im.clone()) / norm_sqr.clone(), (self.im.clone() * other.re.clone() - self.re.clone() * other.im.clone()) / norm_sqr) } } impl<T: Clone + Num + Neg<Output = T>> Neg for Complex<T> { type Output = Complex<T>; #[inline] fn neg(self) -> Complex<T> { -&self } } impl<'a, T: Clone + Num + Neg<Output = T>> Neg for &'a Complex<T> { type Output = Complex<T>; #[inline] fn neg(self) -> Complex<T> { Complex::new(-self.re.clone(), -self.im.clone()) } } /* constants */ impl<T: Clone + Num> Zero for Complex<T> { #[inline] fn zero() -> Complex<T> { Complex::new(Zero::zero(), Zero::zero()) } #[inline] fn is_zero(&self) -> bool { self.re.is_zero() && self.im.is_zero() } } impl<T: Clone + Num> One for Complex<T> { #[inline] fn one() -> Complex<T> { Complex::new(One::one(), Zero::zero()) } } /* string conversions */ impl<T> fmt::Display for Complex<T> where T: fmt::Display + Num + PartialOrd + Clone { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if self.im < Zero::zero() { write!(f, "{}-{}i", self.re, T::zero() - self.im.clone()) } else { write!(f, "{}+{}i", self.re, self.im) } } } #[cfg(test)] mod test { #![allow(non_upper_case_globals)] use super::{Complex64, Complex}; use std::f64; use {Zero, One, Float}; pub const _0_0i : Complex64 = Complex { re: 0.0, im: 0.0 }; pub const _1_0i : Complex64 = Complex { re: 1.0, im: 0.0 }; pub const _1_1i : Complex64 = Complex { re: 1.0, im: 
1.0 }; pub const _0_1i : Complex64 = Complex { re: 0.0, im: 1.0 }; pub const _neg1_1i : Complex64 = Complex { re: -1.0, im: 1.0 }; pub const _05_05i : Complex64 = Complex { re: 0.5, im: 0.5 }; pub const all_consts : [Complex64; 5] = [_0_0i, _1_0i, _1_1i, _neg1_1i, _05_05i]; #[test] fn test_consts() { // check our constants are what Complex::new creates fn test(c : Complex64, r : f64, i: f64) { assert_eq!(c, Complex::new(r,i)); } test(_0_0i, 0.0, 0.0); test(_1_0i, 1.0, 0.0); test(_1_1i, 1.0, 1.0); test(_neg1_1i, -1.0, 1.0); test(_05_05i, 0.5, 0.5); assert_eq!(_0_0i, Zero::zero()); assert_eq!(_1_0i, One::one()); } #[test] #[cfg_attr(target_arch = "x86", ignore)] // FIXME #7158: (maybe?) currently failing on x86. fn test_norm() { fn test(c: Complex64, ns: f64) { assert_eq!(c.norm_sqr(), ns); assert_eq!(c.norm(), ns.sqrt()) } test(_0_0i, 0.0); test(_1_0i, 1.0); test(_1_1i, 2.0); test(_neg1_1i, 2.0); test(_05_05i, 0.5); } #[test] fn test_scale_unscale() { assert_eq!(_05_05i.scale(2.0), _1_1i); assert_eq!(_1_1i.unscale(2.0), _05_05i); for &c in all_consts.iter() { assert_eq!(c.scale(2.0).unscale(2.0), c); } } #[test] fn test_conj() { for &c in all_consts.iter() { assert_eq!(c.conj(), Complex::new(c.re, -c.im)); assert_eq!(c.conj().conj(), c); } } #[test] fn test_inv() { assert_eq!(_1_1i.inv(), _05_05i.conj()); assert_eq!(_1_0i.inv(), _1_0i.inv()); } #[test] #[should_panic] fn test_divide_by_zero_natural() { let n = Complex::new(2, 3); let d = Complex::new(0, 0); let _x = n / d; } #[test] #[should_panic] #[ignore] fn test_inv_zero() { // FIXME #5736: should this really fail, or just NaN? 
_0_0i.inv(); } #[test] fn test_arg() { fn test(c: Complex64, arg: f64) { assert!((c.arg() - arg).abs() < 1.0e-6) } test(_1_0i, 0.0); test(_1_1i, 0.25 * f64::consts::PI); test(_neg1_1i, 0.75 * f64::consts::PI); test(_05_05i, 0.25 * f64::consts::PI); } #[test] fn test_polar_conv() { fn test(c: Complex64) { let (r, theta) = c.to_polar(); assert!((c - Complex::from_polar(&r, &theta)).norm() < 1e-6); } for &c in all_consts.iter() { test(c); } } fn very_close(a: Complex64, b: Complex64) -> bool { // returns true if a and b are reasonably close (a-b).norm() < 1e-10 } #[test] fn test_exp() { assert_eq!(_1_0i.exp(), Complex::new(f64::consts::E, 0.0)); assert_eq!(_0_0i.exp(), _1_0i); assert_eq!(_0_1i.exp(), Complex::new(1.0.cos(), 1.0.sin())); assert!(very_close(_05_05i.exp()*_05_05i.exp(), _1_1i.exp())); assert!(very_close(Complex::new(0.0, -f64::consts::PI).exp(), _1_0i.scale(-1.0))); for &c in all_consts.iter() { assert!(very_close(c.exp(), (c + Complex::new(0.0, f64::consts::PI*2.0)).exp())); } } #[test] fn test_sin() { assert_eq!(_0_0i.sin(), _0_0i); assert!(very_close(_1_0i.scale(f64::consts::PI*2.0).sin(), _0_0i)); assert_eq!(_0_1i.sin(), _0_1i.scale(1.0.sinh())); for &c in all_consts.iter() { assert!(very_close(c.conj().sin(), c.sin().conj())); assert!(very_close(c.scale(-1.0).sin(), c.sin().scale(-1.0))); } } #[test] fn test_cos() { assert_eq!(_0_0i.cos(), _1_0i); assert!(very_close(_1_0i.scale(f64::consts::PI*2.0).cos(), _1_0i)); assert_eq!(_0_1i.cos(), _1_0i.scale(1.0.cosh())); for &c in all_consts.iter() { assert!(very_close(c.conj().cos(), c.cos().conj())); assert!(very_close(c.scale(-1.0).cos(), c.cos())); } } #[test] fn test_tan() { assert_eq!(_0_0i.tan(), _0_0i); assert!(very_close(_1_0i.scale(f64::consts::PI).tan(), _0_0i)); for &c in all_consts.iter() { assert!(very_close(c.conj().tan(), c.tan().conj())); assert!(very_close(c.scale(-1.0).tan(), c.tan().scale(-1.0))); assert!(very_close(c.tan(), c.sin()/c.cos())); } } #[test] fn test_sinh() { 
assert_eq!(_0_0i.sinh(), _0_0i);
assert_eq!(_1_0i.sinh(), _1_0i.scale((f64::consts::E - 1.0/f64::consts::E)/2.0));
assert_eq!(_0_1i.sinh(), _0_1i.scale(1.0.sin()));
for &c in all_consts.iter() {
    // sinh commutes with conjugation and is an odd function.
    assert!(very_close(c.conj().sinh(), c.sinh().conj()));
    assert!(very_close(c.scale(-1.0).sinh(), c.sinh().scale(-1.0)));
}
}

#[test]
fn test_cosh() {
    assert_eq!(_0_0i.cosh(), _1_0i);
    assert_eq!(_1_0i.cosh(), _1_0i.scale((f64::consts::E + 1.0/f64::consts::E)/2.0));
    assert_eq!(_0_1i.cosh(), _1_0i.scale(1.0.cos()));
    for &c in all_consts.iter() {
        // cosh commutes with conjugation and is an even function.
        assert!(very_close(c.conj().cosh(), c.cosh().conj()));
        assert!(very_close(c.scale(-1.0).cosh(), c.cosh()));
    }
}

#[test]
fn test_tanh() {
    assert_eq!(_0_0i.tanh(), _0_0i);
    assert!(very_close(_1_0i.tanh(), _1_0i.scale((f64::consts::E.powi(2) - 1.0)/(f64::consts::E.powi(2) + 1.0))));
    assert!(very_close(_0_1i.tanh(), _0_1i.scale(1.0.tan())));
    for &c in all_consts.iter() {
        // BUG FIX: this previously compared `c.conj().tanh()` with itself,
        // which is vacuously true. Like the sin/cos/sinh/cosh tests above,
        // it must check that tanh commutes with conjugation:
        // conj(tanh(z)) == tanh(conj(z)).
        assert!(very_close(c.conj().tanh(), c.tanh().conj()));
        assert!(very_close(c.scale(-1.0).tanh(), c.tanh().scale(-1.0)));
        assert!(very_close(c.tanh(), c.sinh()/c.cosh()));
    }
}

mod arith {
    use super::{_0_0i, _1_0i, _1_1i, _0_1i, _neg1_1i, _05_05i, all_consts};
    use Zero;

    #[test]
    fn test_add() {
        assert_eq!(_05_05i + _05_05i, _1_1i);
        assert_eq!(_0_1i + _1_0i, _1_1i);
        assert_eq!(_1_0i + _neg1_1i, _0_1i);
        for &c in all_consts.iter() {
            assert_eq!(_0_0i + c, c);
            assert_eq!(c + _0_0i, c);
        }
    }

    #[test]
    fn test_sub() {
        assert_eq!(_05_05i - _05_05i, _0_0i);
        assert_eq!(_0_1i - _1_0i, _neg1_1i);
        assert_eq!(_0_1i - _neg1_1i, _1_0i);
        for &c in all_consts.iter() {
            assert_eq!(c - _0_0i, c);
            assert_eq!(c - c, _0_0i);
        }
    }

    #[test]
    fn test_mul() {
        assert_eq!(_05_05i * _05_05i, _0_1i.unscale(2.0));
        assert_eq!(_1_1i * _0_1i, _neg1_1i);

        // i^2 & i^4
        assert_eq!(_0_1i * _0_1i, -_1_0i);
        assert_eq!(_0_1i * _0_1i * _0_1i * _0_1i, _1_0i);
        for &c in all_consts.iter() {
            assert_eq!(c * _1_0i, c);
            assert_eq!(_1_0i * c, c);
        }
    }

    #[test]
    fn test_div() {
        assert_eq!(_neg1_1i / _0_1i, _1_1i);
        for &c in all_consts.iter() {
            if c != Zero::zero() {
                assert_eq!(c / c, _1_0i);
            }
        }
    }

    #[test]
    fn test_neg() {
        assert_eq!(-_1_0i + _0_1i, _neg1_1i);
        assert_eq!((-_0_1i) * _0_1i, _1_0i);
        for &c in all_consts.iter() {
            assert_eq!(-(-c), c);
        }
    }
}

#[test]
fn test_to_string() {
    fn test(c : Complex64, s: String) {
        assert_eq!(c.to_string(), s);
    }
    test(_0_0i, "0+0i".to_string());
    test(_1_0i, "1+0i".to_string());
    test(_0_1i, "0+1i".to_string());
    test(_1_1i, "1+1i".to_string());
    test(_neg1_1i, "-1+1i".to_string());
    test(-_neg1_1i, "1-1i".to_string());
    test(_05_05i, "0.5+0.5i".to_string());
}

#[test]
fn test_hash() {
    let a = Complex::new(0i32, 0i32);
    let b = Complex::new(1i32, 0i32);
    let c = Complex::new(0i32, 1i32);
    assert!(::hash(&a) != ::hash(&b));
    assert!(::hash(&b) != ::hash(&c));
    assert!(::hash(&c) != ::hash(&a));
}
}
extern crate semver; use std::collections::HashMap; use std::io::net::ip::IpAddr; use std::hash::Hash; use std::any::Any; use std::fmt::Show; #[deriving(PartialEq, Show, Clone)] pub enum Scheme { Http, Https } #[deriving(PartialEq, Show, Clone)] pub enum Host<'a> { HostName(&'a str), HostIp(IpAddr) } #[deriving(PartialEq, Hash, Eq, Show, Clone)] pub enum Method { Get, Post, Put, Delete, Head, Connect, Options, Trace, // RFC-5789 Patch, Purge, // WebDAV, Subversion, UPNP Other(&'static str) } /// A Dictionary for extensions provided by the server or middleware pub type Extensions = HashMap<&'static str, Box<Any>>; pub trait Request { /// The version of HTTP being used fn http_version(&self) -> semver::Version; /// The version of the conduit spec being used fn conduit_version(&self) -> semver::Version; /// The request method, such as GET, POST, PUT, DELETE or PATCH fn method(&self) -> Method; /// The scheme part of the request URL fn scheme(&self) -> Scheme; /// The host part of the requested URL fn host<'a>(&'a self) -> Host<'a>; /// The initial part of the request URL's path that corresponds /// to a virtual root. This allows an application to have a /// virtual location that consumes part of the path. fn virtual_root<'a>(&'a self) -> Option<&'a str>; /// The remainder of the path. fn path<'a>(&'a self) -> &'a str; /// The portion of the request URL that follows the "?" fn query_string<'a>(&'a self) -> Option<&'a str>; /// The remote IP address of the client or the last proxy that /// sent the request. fn remote_ip(&self) -> IpAddr; /// The byte-size of the body, if any fn content_length(&self) -> Option<uint>; /// The request's headers, as conduit::Headers. 
fn headers<'a>(&'a self) -> &'a Headers; /// A Reader for the body of the request fn body<'a>(&'a mut self) -> &'a mut Reader; /// A readable map of extensions fn extensions<'a>(&'a self) -> &'a Extensions; /// A mutable map of extensions fn mut_extensions<'a>(&'a mut self) -> &'a mut Extensions; } pub type HeaderEntries<'a> = Box<Iterator<(&'a str, Vec<&'a str>)>>; pub trait Headers { /// Find the value of a given header. Multi-line headers are represented /// as an array. fn find<'a>(&'a self, key: &str) -> Option<Vec<&'a str>>; /// Returns true if a particular header exists fn has(&self, key: &str) -> bool; /// Iterate over all of the available headers. fn iter<'a>(&'a self) -> HeaderEntries<'a>; } pub struct Response { /// The status code as a tuple of the return code and status string pub status: (uint, &'static str), /// A Map of the headers pub headers: HashMap<String, Vec<String>>, /// A Writer for body of the response pub body: Box<Reader + Send> } /// A Handler takes a request and returns a response or an error. /// By default, a bare function implements `Handler`. 
pub trait Handler { fn call(&self, request: &mut Request) -> Result<Response, Box<Show>>; } impl<T: 'static + Show> Handler for fn(&mut Request) -> Result<Response, T> { fn call(&self, request: &mut Request) -> Result<Response, Box<Show>> { { (*self)(request) }.map_err(|e| box e as Box<Show>) } } Don't try to be so clever extern crate semver; use std::collections::HashMap; use std::io::net::ip::IpAddr; use std::hash::Hash; use std::any::Any; use std::fmt::Show; #[deriving(PartialEq, Show, Clone)] pub enum Scheme { Http, Https } #[deriving(PartialEq, Show, Clone)] pub enum Host<'a> { HostName(&'a str), HostIp(IpAddr) } #[deriving(PartialEq, Hash, Eq, Show, Clone)] pub enum Method { Get, Post, Put, Delete, Head, Connect, Options, Trace, // RFC-5789 Patch, Purge, // WebDAV, Subversion, UPNP Other(&'static str) } /// A Dictionary for extensions provided by the server or middleware pub type Extensions = HashMap<&'static str, Box<Any>>; pub trait Request { /// The version of HTTP being used fn http_version(&self) -> semver::Version; /// The version of the conduit spec being used fn conduit_version(&self) -> semver::Version; /// The request method, such as GET, POST, PUT, DELETE or PATCH fn method(&self) -> Method; /// The scheme part of the request URL fn scheme(&self) -> Scheme; /// The host part of the requested URL fn host<'a>(&'a self) -> Host<'a>; /// The initial part of the request URL's path that corresponds /// to a virtual root. This allows an application to have a /// virtual location that consumes part of the path. fn virtual_root<'a>(&'a self) -> Option<&'a str>; /// The remainder of the path. fn path<'a>(&'a self) -> &'a str; /// The portion of the request URL that follows the "?" fn query_string<'a>(&'a self) -> Option<&'a str>; /// The remote IP address of the client or the last proxy that /// sent the request. 
fn remote_ip(&self) -> IpAddr; /// The byte-size of the body, if any fn content_length(&self) -> Option<uint>; /// The request's headers, as conduit::Headers. fn headers<'a>(&'a self) -> &'a Headers; /// A Reader for the body of the request fn body<'a>(&'a mut self) -> &'a mut Reader; /// A readable map of extensions fn extensions<'a>(&'a self) -> &'a Extensions; /// A mutable map of extensions fn mut_extensions<'a>(&'a mut self) -> &'a mut Extensions; } pub type HeaderEntries<'a> = Box<Iterator<(&'a str, Vec<&'a str>)>>; pub trait Headers { /// Find the value of a given header. Multi-line headers are represented /// as an array. fn find<'a>(&'a self, key: &str) -> Option<Vec<&'a str>>; /// Returns true if a particular header exists fn has(&self, key: &str) -> bool; /// Iterate over all of the available headers. fn iter<'a>(&'a self) -> HeaderEntries<'a>; } pub struct Response { /// The status code as a tuple of the return code and status string pub status: (uint, &'static str), /// A Map of the headers pub headers: HashMap<String, Vec<String>>, /// A Writer for body of the response pub body: Box<Reader + Send> } /// A Handler takes a request and returns a response or an error. /// By default, a bare function implements `Handler`. pub trait Handler { fn call(&self, request: &mut Request) -> Result<Response, Box<Show>>; } impl Handler for fn(&mut Request) -> Result<Response, Box<Show>> { fn call(&self, request: &mut Request) -> Result<Response, Box<Show>> { { (*self)(request) } } }
// Copyright 2016 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. use deadlock; use lock_api::RawMutex as RawMutexTrait; use mutex::MutexGuard; use parking_lot_core::{self, ParkResult, RequeueOp, UnparkResult, DEFAULT_PARK_TOKEN}; use raw_mutex::{RawMutex, TOKEN_HANDOFF, TOKEN_NORMAL}; use std::sync::atomic::{AtomicPtr, Ordering}; use std::time::{Duration, Instant}; use std::{fmt, ptr}; /// A type indicating whether a timed wait on a condition variable returned /// due to a time out or not. #[derive(Debug, PartialEq, Eq, Copy, Clone)] pub struct WaitTimeoutResult(bool); impl WaitTimeoutResult { /// Returns whether the wait was known to have timed out. #[inline] pub fn timed_out(&self) -> bool { self.0 } } /// A Condition Variable /// /// Condition variables represent the ability to block a thread such that it /// consumes no CPU time while waiting for an event to occur. Condition /// variables are typically associated with a boolean predicate (a condition) /// and a mutex. The predicate is always verified inside of the mutex before /// determining that thread must block. /// /// Note that this module places one additional restriction over the system /// condition variables: each condvar can be used with only one mutex at a /// time. Any attempt to use multiple mutexes on the same condition variable /// simultaneously will result in a runtime panic. However it is possible to /// switch to a different mutex if there are no threads currently waiting on /// the condition variable. /// /// # Differences from the standard library `Condvar` /// /// - No spurious wakeups: A wait will only return a non-timeout result if it /// was woken up by `notify_one` or `notify_all`. 
/// - `Condvar::notify_all` will only wake up a single thread, the rest are /// requeued to wait for the `Mutex` to be unlocked by the thread that was /// woken up. /// - Only requires 1 word of space, whereas the standard library boxes the /// `Condvar` due to platform limitations. /// - Can be statically constructed (requires the `const_fn` nightly feature). /// - Does not require any drop glue when dropped. /// - Inline fast path for the uncontended case. /// /// # Examples /// /// ``` /// use parking_lot::{Mutex, Condvar}; /// use std::sync::Arc; /// use std::thread; /// /// let pair = Arc::new((Mutex::new(false), Condvar::new())); /// let pair2 = pair.clone(); /// /// // Inside of our lock, spawn a new thread, and then wait for it to start /// thread::spawn(move|| { /// let &(ref lock, ref cvar) = &*pair2; /// let mut started = lock.lock(); /// *started = true; /// cvar.notify_one(); /// }); /// /// // wait for the thread to start up /// let &(ref lock, ref cvar) = &*pair; /// let mut started = lock.lock(); /// while !*started { /// cvar.wait(&mut started); /// } /// ``` pub struct Condvar { state: AtomicPtr<RawMutex>, } impl Condvar { /// Creates a new condition variable which is ready to be waited on and /// notified. #[cfg(feature = "nightly")] #[inline] pub const fn new() -> Condvar { Condvar { state: AtomicPtr::new(ptr::null_mut()), } } /// Creates a new condition variable which is ready to be waited on and /// notified. #[cfg(not(feature = "nightly"))] #[inline] pub fn new() -> Condvar { Condvar { state: AtomicPtr::new(ptr::null_mut()), } } /// Wakes up one blocked thread on this condvar. /// /// Returns whether a thread was woken up. /// /// If there is a blocked thread on this condition variable, then it will /// be woken up from its call to `wait` or `wait_timeout`. Calls to /// `notify_one` are not buffered in any way. /// /// To wake up all threads, see `notify_all()`. 
/// /// # Examples /// /// ``` /// use parking_lot::Condvar; /// /// let condvar = Condvar::new(); /// /// // do something with condvar, share it with other threads /// /// if !condvar.notify_one() { /// println!("Nobody was listening for this."); /// } /// ``` #[inline] pub fn notify_one(&self) -> bool { // Nothing to do if there are no waiting threads if self.state.load(Ordering::Relaxed).is_null() { return false; } self.notify_one_slow() } #[cold] #[inline(never)] fn notify_one_slow(&self) -> bool { unsafe { // Unpark one thread let addr = self as *const _ as usize; let callback = |result: UnparkResult| { // Clear our state if there are no more waiting threads if !result.have_more_threads { self.state.store(ptr::null_mut(), Ordering::Relaxed); } TOKEN_NORMAL }; let res = parking_lot_core::unpark_one(addr, callback); res.unparked_threads != 0 } } /// Wakes up all blocked threads on this condvar. /// /// Returns the number of threads woken up. /// /// This method will ensure that any current waiters on the condition /// variable are awoken. Calls to `notify_all()` are not buffered in any /// way. /// /// To wake up only one thread, see `notify_one()`. #[inline] pub fn notify_all(&self) -> usize { // Nothing to do if there are no waiting threads let state = self.state.load(Ordering::Relaxed); if state.is_null() { return 0; } self.notify_all_slow(state) } #[cold] #[inline(never)] fn notify_all_slow(&self, mutex: *mut RawMutex) -> usize { unsafe { // Unpark one thread and requeue the rest onto the mutex let from = self as *const _ as usize; let to = mutex as usize; let validate = || { // Make sure that our atomic state still points to the same // mutex. If not then it means that all threads on the current // mutex were woken up and a new waiting thread switched to a // different mutex. In that case we can get away with doing // nothing. 
if self.state.load(Ordering::Relaxed) != mutex { return RequeueOp::Abort; } // Clear our state since we are going to unpark or requeue all // threads. self.state.store(ptr::null_mut(), Ordering::Relaxed); // Unpark one thread if the mutex is unlocked, otherwise just // requeue everything to the mutex. This is safe to do here // since unlocking the mutex when the parked bit is set requires // locking the queue. There is the possibility of a race if the // mutex gets locked after we check, but that doesn't matter in // this case. if (*mutex).mark_parked_if_locked() { RequeueOp::RequeueAll } else { RequeueOp::UnparkOneRequeueRest } }; let callback = |op, result: UnparkResult| { // If we requeued threads to the mutex, mark it as having // parked threads. The RequeueAll case is already handled above. if op == RequeueOp::UnparkOneRequeueRest && result.have_more_threads { (*mutex).mark_parked(); } TOKEN_NORMAL }; let res = parking_lot_core::unpark_requeue(from, to, validate, callback); res.unparked_threads } } /// Blocks the current thread until this condition variable receives a /// notification. /// /// This function will atomically unlock the mutex specified (represented by /// `mutex_guard`) and block the current thread. This means that any calls /// to `notify_*()` which happen logically after the mutex is unlocked are /// candidates to wake this thread up. When this function call returns, the /// lock specified will have been re-acquired. /// /// # Panics /// /// This function will panic if another thread is waiting on the `Condvar` /// with a different `Mutex` object. #[inline] pub fn wait<T: ?Sized>(&self, mutex_guard: &mut MutexGuard<T>) { self.wait_until_internal(unsafe { MutexGuard::mutex(mutex_guard).raw() }, None); } /// Waits on this condition variable for a notification, timing out after /// the specified time instant. 
/// /// The semantics of this function are equivalent to `wait()` except that /// the thread will be blocked roughly until `timeout` is reached. This /// method should not be used for precise timing due to anomalies such as /// preemption or platform differences that may not cause the maximum /// amount of time waited to be precisely `timeout`. /// /// Note that the best effort is made to ensure that the time waited is /// measured with a monotonic clock, and not affected by the changes made to /// the system time. /// /// The returned `WaitTimeoutResult` value indicates if the timeout is /// known to have elapsed. /// /// Like `wait`, the lock specified will be re-acquired when this function /// returns, regardless of whether the timeout elapsed or not. /// /// # Panics /// /// This function will panic if another thread is waiting on the `Condvar` /// with a different `Mutex` object. #[inline] pub fn wait_until<T: ?Sized>( &self, mutex_guard: &mut MutexGuard<T>, timeout: Instant, ) -> WaitTimeoutResult { self.wait_until_internal( unsafe { MutexGuard::mutex(mutex_guard).raw() }, Some(timeout), ) } // This is a non-generic function to reduce the monomorphization cost of // using `wait_until`. fn wait_until_internal(&self, mutex: &RawMutex, timeout: Option<Instant>) -> WaitTimeoutResult { unsafe { let result; let mut bad_mutex = false; let mut requeued = false; { let addr = self as *const _ as usize; let lock_addr = mutex as *const _ as *mut _; let validate = || { // Ensure we don't use two different mutexes with the same // Condvar at the same time. This is done while locked to // avoid races with notify_one let state = self.state.load(Ordering::Relaxed); if state.is_null() { self.state.store(lock_addr, Ordering::Relaxed); } else if state != lock_addr { bad_mutex = true; return false; } true }; let before_sleep = || { // Unlock the mutex before sleeping... 
mutex.unlock(); }; let timed_out = |k, was_last_thread| { // If we were requeued to a mutex, then we did not time out. // We'll just park ourselves on the mutex again when we try // to lock it later. requeued = k != addr; // If we were the last thread on the queue then we need to // clear our state. This is normally done by the // notify_{one,all} functions when not timing out. if !requeued && was_last_thread { self.state.store(ptr::null_mut(), Ordering::Relaxed); } }; result = parking_lot_core::park( addr, validate, before_sleep, timed_out, DEFAULT_PARK_TOKEN, timeout, ); } // Panic if we tried to use multiple mutexes with a Condvar. Note // that at this point the MutexGuard is still locked. It will be // unlocked by the unwinding logic. if bad_mutex { panic!("attempted to use a condition variable with more than one mutex"); } // ... and re-lock it once we are done sleeping if result == ParkResult::Unparked(TOKEN_HANDOFF) { deadlock::acquire_resource(mutex as *const _ as usize); } else { mutex.lock(); } WaitTimeoutResult(!(result.is_unparked() || requeued)) } } /// Waits on this condition variable for a notification, timing out after a /// specified duration. /// /// The semantics of this function are equivalent to `wait()` except that /// the thread will be blocked for roughly no longer than `timeout`. This /// method should not be used for precise timing due to anomalies such as /// preemption or platform differences that may not cause the maximum /// amount of time waited to be precisely `timeout`. /// /// Note that the best effort is made to ensure that the time waited is /// measured with a monotonic clock, and not affected by the changes made to /// the system time. /// /// The returned `WaitTimeoutResult` value indicates if the timeout is /// known to have elapsed. /// /// Like `wait`, the lock specified will be re-acquired when this function /// returns, regardless of whether the timeout elapsed or not. 
#[inline] pub fn wait_for<T: ?Sized>( &self, guard: &mut MutexGuard<T>, timeout: Duration, ) -> WaitTimeoutResult { self.wait_until(guard, Instant::now() + timeout) } } impl Default for Condvar { #[inline] fn default() -> Condvar { Condvar::new() } } impl fmt::Debug for Condvar { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.pad("Condvar { .. }") } } #[cfg(test)] mod tests { use std::sync::mpsc::channel; use std::sync::Arc; use std::thread; use std::time::{Duration, Instant}; use {Condvar, Mutex}; #[test] fn smoke() { let c = Condvar::new(); c.notify_one(); c.notify_all(); } #[test] fn notify_one() { let m = Arc::new(Mutex::new(())); let m2 = m.clone(); let c = Arc::new(Condvar::new()); let c2 = c.clone(); let mut g = m.lock(); let _t = thread::spawn(move || { let _g = m2.lock(); c2.notify_one(); }); c.wait(&mut g); } #[test] fn notify_all() { const N: usize = 10; let data = Arc::new((Mutex::new(0), Condvar::new())); let (tx, rx) = channel(); for _ in 0..N { let data = data.clone(); let tx = tx.clone(); thread::spawn(move || { let &(ref lock, ref cond) = &*data; let mut cnt = lock.lock(); *cnt += 1; if *cnt == N { tx.send(()).unwrap(); } while *cnt != 0 { cond.wait(&mut cnt); } tx.send(()).unwrap(); }); } drop(tx); let &(ref lock, ref cond) = &*data; rx.recv().unwrap(); let mut cnt = lock.lock(); *cnt = 0; cond.notify_all(); drop(cnt); for _ in 0..N { rx.recv().unwrap(); } } #[test] fn wait_for() { let m = Arc::new(Mutex::new(())); let m2 = m.clone(); let c = Arc::new(Condvar::new()); let c2 = c.clone(); let mut g = m.lock(); let no_timeout = c.wait_for(&mut g, Duration::from_millis(1)); assert!(no_timeout.timed_out()); let _t = thread::spawn(move || { let _g = m2.lock(); c2.notify_one(); }); let timeout_res = c.wait_for(&mut g, Duration::from_millis(u32::max_value() as u64)); assert!(!timeout_res.timed_out()); drop(g); } #[test] fn wait_until() { let m = Arc::new(Mutex::new(())); let m2 = m.clone(); let c = Arc::new(Condvar::new()); let c2 = c.clone(); 
let mut g = m.lock(); let no_timeout = c.wait_until(&mut g, Instant::now() + Duration::from_millis(1)); assert!(no_timeout.timed_out()); let _t = thread::spawn(move || { let _g = m2.lock(); c2.notify_one(); }); let timeout_res = c.wait_until( &mut g, Instant::now() + Duration::from_millis(u32::max_value() as u64), ); assert!(!timeout_res.timed_out()); drop(g); } #[test] #[should_panic] fn two_mutexes() { let m = Arc::new(Mutex::new(())); let m2 = m.clone(); let m3 = Arc::new(Mutex::new(())); let c = Arc::new(Condvar::new()); let c2 = c.clone(); // Make sure we don't leave the child thread dangling struct PanicGuard<'a>(&'a Condvar); impl<'a> Drop for PanicGuard<'a> { fn drop(&mut self) { self.0.notify_one(); } } let (tx, rx) = channel(); let g = m.lock(); let _t = thread::spawn(move || { let mut g = m2.lock(); tx.send(()).unwrap(); c2.wait(&mut g); }); drop(g); rx.recv().unwrap(); let _g = m.lock(); let _guard = PanicGuard(&*c); let _ = c.wait(&mut m3.lock()); } #[test] fn two_mutexes_disjoint() { let m = Arc::new(Mutex::new(())); let m2 = m.clone(); let m3 = Arc::new(Mutex::new(())); let c = Arc::new(Condvar::new()); let c2 = c.clone(); let mut g = m.lock(); let _t = thread::spawn(move || { let _g = m2.lock(); c2.notify_one(); }); c.wait(&mut g); drop(g); let _ = c.wait_for(&mut m3.lock(), Duration::from_millis(1)); } #[test] fn test_debug_condvar() { let c = Condvar::new(); assert_eq!(format!("{:?}", c), "Condvar { .. }"); } } Fix notify_all return and add tests // Copyright 2016 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. 
use deadlock; use lock_api::RawMutex as RawMutexTrait; use mutex::MutexGuard; use parking_lot_core::{self, ParkResult, RequeueOp, UnparkResult, DEFAULT_PARK_TOKEN}; use raw_mutex::{RawMutex, TOKEN_HANDOFF, TOKEN_NORMAL}; use std::sync::atomic::{AtomicPtr, Ordering}; use std::time::{Duration, Instant}; use std::{fmt, ptr}; /// A type indicating whether a timed wait on a condition variable returned /// due to a time out or not. #[derive(Debug, PartialEq, Eq, Copy, Clone)] pub struct WaitTimeoutResult(bool); impl WaitTimeoutResult { /// Returns whether the wait was known to have timed out. #[inline] pub fn timed_out(&self) -> bool { self.0 } } /// A Condition Variable /// /// Condition variables represent the ability to block a thread such that it /// consumes no CPU time while waiting for an event to occur. Condition /// variables are typically associated with a boolean predicate (a condition) /// and a mutex. The predicate is always verified inside of the mutex before /// determining that thread must block. /// /// Note that this module places one additional restriction over the system /// condition variables: each condvar can be used with only one mutex at a /// time. Any attempt to use multiple mutexes on the same condition variable /// simultaneously will result in a runtime panic. However it is possible to /// switch to a different mutex if there are no threads currently waiting on /// the condition variable. /// /// # Differences from the standard library `Condvar` /// /// - No spurious wakeups: A wait will only return a non-timeout result if it /// was woken up by `notify_one` or `notify_all`. /// - `Condvar::notify_all` will only wake up a single thread, the rest are /// requeued to wait for the `Mutex` to be unlocked by the thread that was /// woken up. /// - Only requires 1 word of space, whereas the standard library boxes the /// `Condvar` due to platform limitations. /// - Can be statically constructed (requires the `const_fn` nightly feature). 
/// - Does not require any drop glue when dropped. /// - Inline fast path for the uncontended case. /// /// # Examples /// /// ``` /// use parking_lot::{Mutex, Condvar}; /// use std::sync::Arc; /// use std::thread; /// /// let pair = Arc::new((Mutex::new(false), Condvar::new())); /// let pair2 = pair.clone(); /// /// // Inside of our lock, spawn a new thread, and then wait for it to start /// thread::spawn(move|| { /// let &(ref lock, ref cvar) = &*pair2; /// let mut started = lock.lock(); /// *started = true; /// cvar.notify_one(); /// }); /// /// // wait for the thread to start up /// let &(ref lock, ref cvar) = &*pair; /// let mut started = lock.lock(); /// while !*started { /// cvar.wait(&mut started); /// } /// ``` pub struct Condvar { state: AtomicPtr<RawMutex>, } impl Condvar { /// Creates a new condition variable which is ready to be waited on and /// notified. #[cfg(feature = "nightly")] #[inline] pub const fn new() -> Condvar { Condvar { state: AtomicPtr::new(ptr::null_mut()), } } /// Creates a new condition variable which is ready to be waited on and /// notified. #[cfg(not(feature = "nightly"))] #[inline] pub fn new() -> Condvar { Condvar { state: AtomicPtr::new(ptr::null_mut()), } } /// Wakes up one blocked thread on this condvar. /// /// Returns whether a thread was woken up. /// /// If there is a blocked thread on this condition variable, then it will /// be woken up from its call to `wait` or `wait_timeout`. Calls to /// `notify_one` are not buffered in any way. /// /// To wake up all threads, see `notify_all()`. 
/// /// # Examples /// /// ``` /// use parking_lot::Condvar; /// /// let condvar = Condvar::new(); /// /// // do something with condvar, share it with other threads /// /// if !condvar.notify_one() { /// println!("Nobody was listening for this."); /// } /// ``` #[inline] pub fn notify_one(&self) -> bool { // Nothing to do if there are no waiting threads if self.state.load(Ordering::Relaxed).is_null() { return false; } self.notify_one_slow() } #[cold] #[inline(never)] fn notify_one_slow(&self) -> bool { unsafe { // Unpark one thread let addr = self as *const _ as usize; let callback = |result: UnparkResult| { // Clear our state if there are no more waiting threads if !result.have_more_threads { self.state.store(ptr::null_mut(), Ordering::Relaxed); } TOKEN_NORMAL }; let res = parking_lot_core::unpark_one(addr, callback); res.unparked_threads != 0 } } /// Wakes up all blocked threads on this condvar. /// /// Returns the number of threads woken up. /// /// This method will ensure that any current waiters on the condition /// variable are awoken. Calls to `notify_all()` are not buffered in any /// way. /// /// To wake up only one thread, see `notify_one()`. #[inline] pub fn notify_all(&self) -> usize { // Nothing to do if there are no waiting threads let state = self.state.load(Ordering::Relaxed); if state.is_null() { return 0; } self.notify_all_slow(state) } #[cold] #[inline(never)] fn notify_all_slow(&self, mutex: *mut RawMutex) -> usize { unsafe { // Unpark one thread and requeue the rest onto the mutex let from = self as *const _ as usize; let to = mutex as usize; let validate = || { // Make sure that our atomic state still points to the same // mutex. If not then it means that all threads on the current // mutex were woken up and a new waiting thread switched to a // different mutex. In that case we can get away with doing // nothing. 
if self.state.load(Ordering::Relaxed) != mutex { return RequeueOp::Abort; } // Clear our state since we are going to unpark or requeue all // threads. self.state.store(ptr::null_mut(), Ordering::Relaxed); // Unpark one thread if the mutex is unlocked, otherwise just // requeue everything to the mutex. This is safe to do here // since unlocking the mutex when the parked bit is set requires // locking the queue. There is the possibility of a race if the // mutex gets locked after we check, but that doesn't matter in // this case. if (*mutex).mark_parked_if_locked() { RequeueOp::RequeueAll } else { RequeueOp::UnparkOneRequeueRest } }; let callback = |op, result: UnparkResult| { // If we requeued threads to the mutex, mark it as having // parked threads. The RequeueAll case is already handled above. if op == RequeueOp::UnparkOneRequeueRest && result.have_more_threads { (*mutex).mark_parked(); } TOKEN_NORMAL }; let res = parking_lot_core::unpark_requeue(from, to, validate, callback); res.unparked_threads + res.requeued_threads } } /// Blocks the current thread until this condition variable receives a /// notification. /// /// This function will atomically unlock the mutex specified (represented by /// `mutex_guard`) and block the current thread. This means that any calls /// to `notify_*()` which happen logically after the mutex is unlocked are /// candidates to wake this thread up. When this function call returns, the /// lock specified will have been re-acquired. /// /// # Panics /// /// This function will panic if another thread is waiting on the `Condvar` /// with a different `Mutex` object. #[inline] pub fn wait<T: ?Sized>(&self, mutex_guard: &mut MutexGuard<T>) { self.wait_until_internal(unsafe { MutexGuard::mutex(mutex_guard).raw() }, None); } /// Waits on this condition variable for a notification, timing out after /// the specified time instant. 
    ///
    /// The semantics of this function are equivalent to `wait()` except that
    /// the thread will be blocked roughly until `timeout` is reached. This
    /// method should not be used for precise timing due to anomalies such as
    /// preemption or platform differences that may not cause the maximum
    /// amount of time waited to be precisely `timeout`.
    ///
    /// Note that the best effort is made to ensure that the time waited is
    /// measured with a monotonic clock, and not affected by the changes made to
    /// the system time.
    ///
    /// The returned `WaitTimeoutResult` value indicates if the timeout is
    /// known to have elapsed.
    ///
    /// Like `wait`, the lock specified will be re-acquired when this function
    /// returns, regardless of whether the timeout elapsed or not.
    ///
    /// # Panics
    ///
    /// This function will panic if another thread is waiting on the `Condvar`
    /// with a different `Mutex` object.
    #[inline]
    pub fn wait_until<T: ?Sized>(
        &self,
        mutex_guard: &mut MutexGuard<T>,
        timeout: Instant,
    ) -> WaitTimeoutResult {
        self.wait_until_internal(
            unsafe { MutexGuard::mutex(mutex_guard).raw() },
            Some(timeout),
        )
    }

    // This is a non-generic function to reduce the monomorphization cost of
    // using `wait_until`.
    fn wait_until_internal(&self, mutex: &RawMutex, timeout: Option<Instant>) -> WaitTimeoutResult {
        unsafe {
            let result;
            let mut bad_mutex = false;
            let mut requeued = false;
            {
                let addr = self as *const _ as usize;
                let lock_addr = mutex as *const _ as *mut _;
                let validate = || {
                    // Ensure we don't use two different mutexes with the same
                    // Condvar at the same time. This is done while locked to
                    // avoid races with notify_one
                    let state = self.state.load(Ordering::Relaxed);
                    if state.is_null() {
                        // First waiter: claim the condvar for this mutex.
                        self.state.store(lock_addr, Ordering::Relaxed);
                    } else if state != lock_addr {
                        // A different mutex is already associated; abort the
                        // park and report the misuse (panic below).
                        bad_mutex = true;
                        return false;
                    }
                    true
                };
                let before_sleep = || {
                    // Unlock the mutex before sleeping...
                    mutex.unlock();
                };
                let timed_out = |k, was_last_thread| {
                    // If we were requeued to a mutex, then we did not time out.
                    // We'll just park ourselves on the mutex again when we try
                    // to lock it later.
                    requeued = k != addr;

                    // If we were the last thread on the queue then we need to
                    // clear our state. This is normally done by the
                    // notify_{one,all} functions when not timing out.
                    if !requeued && was_last_thread {
                        self.state.store(ptr::null_mut(), Ordering::Relaxed);
                    }
                };
                result = parking_lot_core::park(
                    addr,
                    validate,
                    before_sleep,
                    timed_out,
                    DEFAULT_PARK_TOKEN,
                    timeout,
                );
            }

            // Panic if we tried to use multiple mutexes with a Condvar. Note
            // that at this point the MutexGuard is still locked. It will be
            // unlocked by the unwinding logic.
            if bad_mutex {
                panic!("attempted to use a condition variable with more than one mutex");
            }

            // ... and re-lock it once we are done sleeping
            // NOTE(review): a TOKEN_HANDOFF result appears to mean the lock
            // was handed to us directly, so only the deadlock-detection
            // bookkeeping runs instead of `mutex.lock()` — confirm against
            // the RawMutex unlock path.
            if result == ParkResult::Unparked(TOKEN_HANDOFF) {
                deadlock::acquire_resource(mutex as *const _ as usize);
            } else {
                mutex.lock();
            }

            // Timed out only if we neither woke from an unpark nor were
            // requeued onto the mutex.
            WaitTimeoutResult(!(result.is_unparked() || requeued))
        }
    }

    /// Waits on this condition variable for a notification, timing out after a
    /// specified duration.
    ///
    /// The semantics of this function are equivalent to `wait()` except that
    /// the thread will be blocked for roughly no longer than `timeout`. This
    /// method should not be used for precise timing due to anomalies such as
    /// preemption or platform differences that may not cause the maximum
    /// amount of time waited to be precisely `timeout`.
    ///
    /// Note that the best effort is made to ensure that the time waited is
    /// measured with a monotonic clock, and not affected by the changes made to
    /// the system time.
    ///
    /// The returned `WaitTimeoutResult` value indicates if the timeout is
    /// known to have elapsed.
    ///
    /// Like `wait`, the lock specified will be re-acquired when this function
    /// returns, regardless of whether the timeout elapsed or not.
#[inline] pub fn wait_for<T: ?Sized>( &self, guard: &mut MutexGuard<T>, timeout: Duration, ) -> WaitTimeoutResult { self.wait_until(guard, Instant::now() + timeout) } } impl Default for Condvar { #[inline] fn default() -> Condvar { Condvar::new() } } impl fmt::Debug for Condvar { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.pad("Condvar { .. }") } } #[cfg(test)] mod tests { use std::sync::mpsc::channel; use std::sync::Arc; use std::thread; use std::time::{Duration, Instant}; use {Condvar, Mutex}; #[test] fn smoke() { let c = Condvar::new(); c.notify_one(); c.notify_all(); } #[test] fn notify_one() { let m = Arc::new(Mutex::new(())); let m2 = m.clone(); let c = Arc::new(Condvar::new()); let c2 = c.clone(); let mut g = m.lock(); let _t = thread::spawn(move || { let _g = m2.lock(); c2.notify_one(); }); c.wait(&mut g); } #[test] fn notify_all() { const N: usize = 10; let data = Arc::new((Mutex::new(0), Condvar::new())); let (tx, rx) = channel(); for _ in 0..N { let data = data.clone(); let tx = tx.clone(); thread::spawn(move || { let &(ref lock, ref cond) = &*data; let mut cnt = lock.lock(); *cnt += 1; if *cnt == N { tx.send(()).unwrap(); } while *cnt != 0 { cond.wait(&mut cnt); } tx.send(()).unwrap(); }); } drop(tx); let &(ref lock, ref cond) = &*data; rx.recv().unwrap(); let mut cnt = lock.lock(); *cnt = 0; cond.notify_all(); drop(cnt); for _ in 0..N { rx.recv().unwrap(); } } #[test] fn notify_one_return_true() { let m = Arc::new(Mutex::new(())); let m2 = m.clone(); let c = Arc::new(Condvar::new()); let c2 = c.clone(); let mut g = m.lock(); let _t = thread::spawn(move || { let _g = m2.lock(); assert!(c2.notify_one()); }); c.wait(&mut g); } #[test] fn notify_one_return_false() { let m = Arc::new(Mutex::new(())); let c = Arc::new(Condvar::new()); let _t = thread::spawn(move || { let _g = m.lock(); assert!(!c.notify_one()); }); } #[test] fn notify_all_return() { const N: usize = 10; let data = Arc::new((Mutex::new(0), Condvar::new())); let (tx, rx) = 
channel(); for _ in 0..N { let data = data.clone(); let tx = tx.clone(); thread::spawn(move || { let &(ref lock, ref cond) = &*data; let mut cnt = lock.lock(); *cnt += 1; if *cnt == N { tx.send(()).unwrap(); } while *cnt != 0 { cond.wait(&mut cnt); } tx.send(()).unwrap(); }); } drop(tx); let &(ref lock, ref cond) = &*data; rx.recv().unwrap(); let mut cnt = lock.lock(); *cnt = 0; assert_eq!(cond.notify_all(), N); drop(cnt); for _ in 0..N { rx.recv().unwrap(); } assert_eq!(cond.notify_all(), 0); } #[test] fn wait_for() { let m = Arc::new(Mutex::new(())); let m2 = m.clone(); let c = Arc::new(Condvar::new()); let c2 = c.clone(); let mut g = m.lock(); let no_timeout = c.wait_for(&mut g, Duration::from_millis(1)); assert!(no_timeout.timed_out()); let _t = thread::spawn(move || { let _g = m2.lock(); c2.notify_one(); }); let timeout_res = c.wait_for(&mut g, Duration::from_millis(u32::max_value() as u64)); assert!(!timeout_res.timed_out()); drop(g); } #[test] fn wait_until() { let m = Arc::new(Mutex::new(())); let m2 = m.clone(); let c = Arc::new(Condvar::new()); let c2 = c.clone(); let mut g = m.lock(); let no_timeout = c.wait_until(&mut g, Instant::now() + Duration::from_millis(1)); assert!(no_timeout.timed_out()); let _t = thread::spawn(move || { let _g = m2.lock(); c2.notify_one(); }); let timeout_res = c.wait_until( &mut g, Instant::now() + Duration::from_millis(u32::max_value() as u64), ); assert!(!timeout_res.timed_out()); drop(g); } #[test] #[should_panic] fn two_mutexes() { let m = Arc::new(Mutex::new(())); let m2 = m.clone(); let m3 = Arc::new(Mutex::new(())); let c = Arc::new(Condvar::new()); let c2 = c.clone(); // Make sure we don't leave the child thread dangling struct PanicGuard<'a>(&'a Condvar); impl<'a> Drop for PanicGuard<'a> { fn drop(&mut self) { self.0.notify_one(); } } let (tx, rx) = channel(); let g = m.lock(); let _t = thread::spawn(move || { let mut g = m2.lock(); tx.send(()).unwrap(); c2.wait(&mut g); }); drop(g); rx.recv().unwrap(); let _g = 
m.lock(); let _guard = PanicGuard(&*c); let _ = c.wait(&mut m3.lock()); } #[test] fn two_mutexes_disjoint() { let m = Arc::new(Mutex::new(())); let m2 = m.clone(); let m3 = Arc::new(Mutex::new(())); let c = Arc::new(Condvar::new()); let c2 = c.clone(); let mut g = m.lock(); let _t = thread::spawn(move || { let _g = m2.lock(); c2.notify_one(); }); c.wait(&mut g); drop(g); let _ = c.wait_for(&mut m3.lock(), Duration::from_millis(1)); } #[test] fn test_debug_condvar() { let c = Condvar::new(); assert_eq!(format!("{:?}", c), "Condvar { .. }"); } }
/*
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library. If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Sahid Orentino Ferdjaoui <sahid.ferdjaoui@redhat.com>
 */

#![allow(improper_ctypes)]

extern crate libc;

use std::ffi::{CString, CStr};
use std::{str, ptr, mem};

use domain::Domain;
use error::Error;
use network::Network;
use interface::Interface;
use storage_pool::StoragePool;

// Opaque struct mirroring libvirt's `virConnect`; never constructed from
// Rust, only handled behind a raw pointer.
#[allow(non_camel_case_types)]
#[repr(C)]
pub struct virConnect {
}

#[allow(non_camel_case_types)]
pub type virConnectPtr = *const virConnect;

// Raw FFI declarations for the subset of the libvirt C API used by this
// module.
// NOTE(review): several of these take `*const` where the C API writes
// through the pointer (e.g. `virGetVersion`'s version out-params and the
// `ids`/`names` result arrays) — the declarations work, but `*mut` would
// describe the contract more honestly; confirm against libvirt headers.
#[link(name = "virt")]
extern {
    fn virGetVersion(hyver: *const libc::c_ulong,
                     ctype: *const libc::c_char,
                     typever: *const libc::c_ulong) -> libc::c_int;
    fn virConnectOpen(uri: *const libc::c_char) -> virConnectPtr;
    fn virConnectOpenReadOnly(uri: *const libc::c_char) -> virConnectPtr;
    fn virConnectClose(c: virConnectPtr) -> libc::c_int;
    fn virConnectGetVersion(c: virConnectPtr,
                            hyver: *const libc::c_ulong) -> libc::c_int;
    fn virConnectGetHostname(c: virConnectPtr) -> *const libc::c_char;
    fn virConnectGetLibVersion(c: virConnectPtr,
                               ver: *const libc::c_ulong) -> libc::c_int;
    fn virConnectGetType(c: virConnectPtr) -> *const libc::c_char;
    fn virConnectIsAlive(c: virConnectPtr) -> libc::c_int;
    fn virConnectIsEncrypted(c: virConnectPtr) -> libc::c_int;
    fn virConnectIsSecure(c: virConnectPtr) -> libc::c_int;
    fn virConnectListDomains(c: virConnectPtr,
                             ids: *const libc::c_int,
                             maxids: libc::c_int) -> libc::c_int;
    fn virConnectListDefinedDomains(c: virConnectPtr,
                                    names: *const *const libc::c_char,
                                    maxnames: libc::c_int) -> libc::c_int;
    fn virConnectListInterfaces(c: virConnectPtr,
                                names: *const *const libc::c_char,
                                maxnames: libc::c_int) -> libc::c_int;
    fn virConnectListNetworks(c: virConnectPtr,
                              names: *const *const libc::c_char,
                              maxnames: libc::c_int) -> libc::c_int;
    fn virConnectListNWFilters(c: virConnectPtr,
                               names: *const *const libc::c_char,
                               maxnames: libc::c_int) -> libc::c_int;
    fn virConnectListStoragePools(c: virConnectPtr,
                                  names: *const *const libc::c_char,
                                  maxnames: libc::c_int) -> libc::c_int;
    fn virConnectListSecrets(c: virConnectPtr,
                             names: *const *const libc::c_char,
                             maxnames: libc::c_int) -> libc::c_int;
    fn virConnectListDefinedInterfaces(c: virConnectPtr,
                                       names: *const *const libc::c_char,
                                       maxifaces: libc::c_int) -> libc::c_int;
    fn virConnectListDefinedNetworks(c: virConnectPtr,
                                     names: *const *const libc::c_char,
                                     maxnets: libc::c_int) -> libc::c_int;
    fn virConnectListDefinedStoragePools(c: virConnectPtr,
                                         names: *const *const libc::c_char,
                                         maxpools: libc::c_int) -> libc::c_int;
    fn virConnectNumOfDomains(c: virConnectPtr) -> libc::c_int;
    fn virConnectNumOfInterfaces(c: virConnectPtr) -> libc::c_int;
    fn virConnectNumOfNetworks(c: virConnectPtr) -> libc::c_int;
    fn virConnectNumOfStoragePools(c: virConnectPtr) -> libc::c_int;
    fn virConnectNumOfNWFilters(c: virConnectPtr) -> libc::c_int;
    fn virConnectNumOfSecrets(c: virConnectPtr) -> libc::c_int;
    fn virConnectNumOfDefinedDomains(c: virConnectPtr) -> libc::c_int;
    fn virConnectNumOfDefinedInterfaces(c: virConnectPtr) -> libc::c_int;
    fn virConnectNumOfDefinedNetworks(c: virConnectPtr) -> libc::c_int;
    fn virConnectNumOfDefinedStoragePools(c: virConnectPtr) -> libc::c_int;
}

// Safe-ish wrapper around a raw libvirt connection handle. The caller is
// responsible for calling `close` when done; dropping does not close.
pub struct Connect {
    pub c: virConnectPtr
}

impl Connect {

    // Expose the raw handle for sibling modules (Domain, Network, ...).
    pub fn as_ptr(&self) -> virConnectPtr {
        self.c
    }

    // Returns the libvirt library version number (no connection required).
    pub fn get_version() -> Result<u32, Error> {
        unsafe {
            let ver: libc::c_ulong = 0;
            // NOTE(review): the C side writes through `&ver` even though it
            // is declared `*const`; works in practice but relies on the FFI
            // declaration above.
            if virGetVersion(&ver, ptr::null(), ptr::null()) == -1 {
                return Err(Error::new());
            }
            return Ok(ver as u32);
        }
    }

    /// This function should be called first to get a connection to
    /// the Hypervisor and xen store.
    ///
    /// If @uri is "", if the LIBVIRT_DEFAULT_URI environment
    /// variable is set, then it will be used. Otherwise if the client
    /// configuration file has the "uri_default" parameter set, then
    /// it will be used. Finally probing will be done to determine a
    /// suitable default driver to activate. This involves trying each
    /// hypervisor in turn until one successfully opens.
    ///
    /// If connecting to an unprivileged hypervisor driver which
    /// requires the libvirtd daemon to be active, it will
    /// automatically be launched if not already running. This can be
    /// prevented by setting the environment variable
    /// LIBVIRT_AUTOSTART=0
    ///
    /// URIs are documented at http://libvirt.org/uri.html
    ///
    /// Connect.close should be used to release the resources after the
    /// connection is no longer needed.
    ///
    /// # Examples
    ///
    /// ```
    /// use virt::connect::Connect;
    ///
    /// match Connect::new("test:///default") {
    ///     Ok(conn) => {
    ///         conn.close();
    ///         return
    ///     },
    ///     Err(e) => panic!(
    ///         "failed with code {}, message: {}", e.code, e.message)
    /// }
    /// ```
    pub fn new(uri: &str) -> Result<Connect, Error> {
        unsafe {
            // The temporary CString lives until the end of this statement,
            // so the pointer stays valid for the duration of the call.
            let c = virConnectOpen(CString::new(uri).unwrap().as_ptr());
            if c.is_null() {
                return Err(Error::new());
            }
            return Ok(Connect{c: c});
        }
    }

    /// This function should be called first to get a restricted
    /// connection to the library functionalities. The set of APIs
    /// usable are then restricted on the available methods to control
    /// the domains.
    ///
    /// See 'new' for notes about environment variables which can have
    /// an effect on opening drivers and freeing the connection
    /// resources.
    ///
    /// # Examples
    ///
    /// ```
    /// use virt::connect::Connect;
    ///
    /// match Connect::new_read_only("test:///default") {
    ///     Ok(conn) => {
    ///         conn.close();
    ///         return
    ///     },
    ///     Err(e) => panic!(
    ///         "failed with code {}, message: {}", e.code, e.message)
    /// }
    /// ```
    pub fn new_read_only(uri: &str) -> Result<Connect, Error> {
        unsafe {
            let c = virConnectOpenReadOnly(CString::new(uri).unwrap().as_ptr());
            if c.is_null() {
                return Err(Error::new());
            }
            return Ok(Connect{c: c});
        }
    }

    /// This function closes the connection to the hypervisor. This
    /// should not be called if further interaction with the
    /// hypervisor are needed especially if there is running domain
    /// which need further monitoring by the application.
    pub fn close(&self) {
        unsafe {
            virConnectClose(self.c);
        }
    }

    /// This returns a system hostname on which the hypervisor is
    /// running (based on the result of the gethostname system call,
    /// but possibly expanded to a fully-qualified domain name via
    /// getaddrinfo). If we are connected to a remote system, then
    /// this returns the hostname of the remote system.
pub fn get_hostname(&self) -> Result<&str, Error> { unsafe { let n = virConnectGetHostname(self.c); if n.is_null() { return Err(Error::new()) } return Ok(str::from_utf8( CStr::from_ptr(n).to_bytes()).unwrap()) } } pub fn get_lib_version(&self) -> Result<u32, Error> { unsafe { let ver: libc::c_ulong = 0; if virConnectGetLibVersion(self.c, &ver) == -1 { return Err(Error::new()); } return Ok(ver as u32); } } pub fn get_type(&self) -> Result<&str, Error> { unsafe { let t = virConnectGetType(self.c); if t.is_null() { return Err(Error::new()) } return Ok(str::from_utf8( CStr::from_ptr(t).to_bytes()).unwrap()) } } pub fn is_alive(&self) -> Result<bool, Error> { unsafe { let t = virConnectIsAlive(self.c); if t == -1 { return Err(Error::new()) } return Ok(t == 1) } } pub fn is_enscrypted(&self) -> Result<bool, Error> { unsafe { let t = virConnectIsEncrypted(self.c); if t == -1 { return Err(Error::new()) } return Ok(t == 1) } } pub fn is_secure(&self) -> Result<bool, Error> { unsafe { let t = virConnectIsSecure(self.c); if t == -1 { return Err(Error::new()) } return Ok(t == 1) } } /// /// /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.list_domains() { /// Ok(arr) => assert_eq!(1, arr.len()), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn list_domains(&self) -> Result<Vec<u32>, Error> { unsafe { let ids: [libc::c_int; 512] = mem::uninitialized(); let size = virConnectListDomains(self.c, ids.as_ptr(), 512); if size == -1 { return Err(Error::new()) } let mut array: Vec<u32> = Vec::new(); for x in 0..size as usize { array.push(ids[x] as u32); } return Ok(array) } } /// /// /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match 
conn.list_interfaces() { /// Ok(arr) => assert_eq!(1, arr.len()), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn list_interfaces(&self) -> Result<Vec<&str>, Error> { unsafe { let names: [*const libc::c_char; 1024] = mem::uninitialized(); let size = virConnectListInterfaces(self.c, names.as_ptr(), 1024); if size == -1 { return Err(Error::new()) } let mut array: Vec<&str> = Vec::new(); for x in 0..size as usize { array.push(str::from_utf8( CStr::from_ptr(names[x]).to_bytes()).unwrap()); } return Ok(array) } } /// /// /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.list_networks() { /// Ok(arr) => assert_eq!(1, arr.len()), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn list_networks(&self) -> Result<Vec<&str>, Error> { unsafe { let names: [*const libc::c_char; 1024] = mem::uninitialized(); let size = virConnectListNetworks(self.c, names.as_ptr(), 1024); if size == -1 { return Err(Error::new()) } let mut array: Vec<&str> = Vec::new(); for x in 0..size as usize { array.push(str::from_utf8( CStr::from_ptr(names[x]).to_bytes()).unwrap()); } return Ok(array) } } pub fn list_nw_filters(&self) -> Result<Vec<&str>, Error> { unsafe { let names: [*const libc::c_char; 1024] = mem::uninitialized(); let size = virConnectListNWFilters(self.c, names.as_ptr(), 1024); if size == -1 { return Err(Error::new()) } let mut array: Vec<&str> = Vec::new(); for x in 0..size as usize { array.push(str::from_utf8( CStr::from_ptr(names[x]).to_bytes()).unwrap()); } return Ok(array) } } pub fn list_secrets(&self) -> Result<Vec<&str>, Error> { unsafe { let names: [*const 
libc::c_char; 1024] = mem::uninitialized(); let size = virConnectListSecrets(self.c, names.as_ptr(), 1024); if size == -1 { return Err(Error::new()) } let mut array: Vec<&str> = Vec::new(); for x in 0..size as usize { array.push(str::from_utf8( CStr::from_ptr(names[x]).to_bytes()).unwrap()); } return Ok(array) } } /// /// /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.list_storage_pools() { /// Ok(arr) => assert_eq!(1, arr.len()), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn list_storage_pools(&self) -> Result<Vec<&str>, Error> { unsafe { let names: [*const libc::c_char; 1024] = mem::uninitialized(); let size = virConnectListStoragePools(self.c, names.as_ptr(), 1024); if size == -1 { return Err(Error::new()) } let mut array: Vec<&str> = Vec::new(); for x in 0..size as usize { array.push(str::from_utf8( CStr::from_ptr(names[x]).to_bytes()).unwrap()); } return Ok(array) } } /// /// /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.list_defined_domains() { /// Ok(arr) => assert_eq!(0, arr.len()), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn list_defined_domains(&self) -> Result<Vec<&str>, Error> { unsafe { let names: [*const libc::c_char; 1024] = mem::uninitialized(); let size = virConnectListDefinedDomains(self.c, names.as_ptr(), 1024); if size == -1 { return Err(Error::new()) } let mut array: Vec<&str> = Vec::new(); for x in 0..size as usize { array.push(str::from_utf8( CStr::from_ptr(names[x]).to_bytes()).unwrap()); } return Ok(array) } } /// /// /// # 
Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.list_defined_interfaces() { /// Ok(arr) => assert_eq!(0, arr.len()), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn list_defined_interfaces(&self) -> Result<Vec<&str>, Error> { unsafe { let names: [*const libc::c_char; 1024] = mem::uninitialized(); let size = virConnectListDefinedInterfaces(self.c, names.as_ptr(), 1024); if size == -1 { return Err(Error::new()) } let mut array: Vec<&str> = Vec::new(); for x in 0..size as usize { array.push(str::from_utf8( CStr::from_ptr(names[x]).to_bytes()).unwrap()); } return Ok(array) } } /// /// /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.list_defined_storage_pools() { /// Ok(arr) => assert_eq!(0, arr.len()), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn list_defined_storage_pools(&self) -> Result<Vec<&str>, Error> { unsafe { let names: [*const libc::c_char; 1024] = mem::uninitialized(); let size = virConnectListDefinedStoragePools( self.c, names.as_ptr(), 1024); if size == -1 { return Err(Error::new()) } let mut array: Vec<&str> = Vec::new(); for x in 0..size as usize { array.push(str::from_utf8( CStr::from_ptr(names[x]).to_bytes()).unwrap()); } return Ok(array) } } /// /// /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.list_networks() { /// Ok(arr) => assert_eq!(1, arr.len()), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); 
/// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn list_defined_networks(&self) -> Result<Vec<&str>, Error> { unsafe { let names: [*const libc::c_char; 1024] = mem::uninitialized(); let size = virConnectListDefinedNetworks(self.c, names.as_ptr(), 1024); if size == -1 { return Err(Error::new()) } let mut array: Vec<&str> = Vec::new(); for x in 0..size as usize { array.push(str::from_utf8( CStr::from_ptr(names[x]).to_bytes()).unwrap()); } return Ok(array) } } /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.num_of_domains() { /// Ok(n) => assert_eq!(1, n), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn num_of_domains(&self) -> Result<u32, Error> { unsafe { let num = virConnectNumOfDomains(self.c); if num == -1 { return Err(Error::new()) } return Ok(num as u32) } } /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.num_of_interfaces() { /// Ok(n) => assert_eq!(1, n), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn num_of_interfaces(&self) -> Result<u32, Error> { unsafe { let num = virConnectNumOfInterfaces(self.c); if num == -1 { return Err(Error::new()) } return Ok(num as u32) } } /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.num_of_networks() { /// Ok(n) => assert_eq!(1, n), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// 
"failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn num_of_networks(&self) -> Result<u32, Error> { unsafe { let num = virConnectNumOfNetworks(self.c); if num == -1 { return Err(Error::new()) } return Ok(num as u32) } } /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.num_of_storage_pools() { /// Ok(n) => assert_eq!(1, n), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn num_of_storage_pools(&self) -> Result<u32, Error> { unsafe { let num = virConnectNumOfStoragePools(self.c); if num == -1 { return Err(Error::new()) } return Ok(num as u32) } } pub fn num_of_nw_filters(&self) -> Result<u32, Error> { unsafe { let num = virConnectNumOfNWFilters(self.c); if num == -1 { return Err(Error::new()) } return Ok(num as u32) } } pub fn num_of_secrets(&self) -> Result<u32, Error> { unsafe { let num = virConnectNumOfSecrets(self.c); if num == -1 { return Err(Error::new()) } return Ok(num as u32) } } /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.num_of_defined_domains() { /// Ok(n) => assert_eq!(0, n), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn num_of_defined_domains(&self) -> Result<u32, Error> { unsafe { let num = virConnectNumOfDefinedDomains(self.c); if num == -1 { return Err(Error::new()) } return Ok(num as u32) } } /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.num_of_defined_interfaces() { /// Ok(n) => assert_eq!(0, n), /// Err(e) => panic!( /// 
"failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn num_of_defined_interfaces(&self) -> Result<u32, Error> { unsafe { let num = virConnectNumOfDefinedInterfaces(self.c); if num == -1 { return Err(Error::new()) } return Ok(num as u32) } } /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.num_of_defined_networks() { /// Ok(n) => assert_eq!(0, n), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn num_of_defined_networks(&self) -> Result<u32, Error> { unsafe { let num = virConnectNumOfDefinedNetworks(self.c); if num == -1 { return Err(Error::new()) } return Ok(num as u32) } } /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.num_of_defined_storage_pools() { /// Ok(n) => assert_eq!(0, n), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn num_of_defined_storage_pools(&self) -> Result<u32, Error> { unsafe { let num = virConnectNumOfDefinedStoragePools(self.c); if num == -1 { return Err(Error::new()) } return Ok(num as u32) } } /// Connect.close should be used to release the resources after the /// connection is no longer needed. 
/// /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.get_hyp_version() { /// Ok(hyver) => assert_eq!(2, hyver), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// return /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn get_hyp_version(&self) -> Result<u32, Error> { unsafe { let hyver: libc::c_ulong = 0; if virConnectGetVersion(self.c, &hyver) == -1 { return Err(Error::new()); } return Ok(hyver as u32); } } pub fn domain_lookup_by_id(&self, id: u32) -> Result<Domain, Error> { Domain::lookup_by_id(self, id) } pub fn domain_lookup_by_name(&self, id: &str) -> Result<Domain, Error> { Domain::lookup_by_name(self, id) } pub fn network_lookup_by_id(&self, id: u32) -> Result<Network, Error> { Network::lookup_by_id(self, id) } pub fn network_lookup_by_name(&self, id: &str) -> Result<Network, Error> { Network::lookup_by_name(self, id) } pub fn interface_lookup_by_id(&self, id: u32) -> Result<Interface, Error> { Interface::lookup_by_id(self, id) } pub fn interface_lookup_by_name(&self, id: &str) -> Result<Interface, Error> { Interface::lookup_by_name(self, id) } pub fn storage_pool_lookup_by_id(&self, id: u32) -> Result<StoragePool, Error> { StoragePool::lookup_by_id(self, id) } pub fn storage_pool_lookup_by_name(&self, id: &str) -> Result<StoragePool, Error> { StoragePool::lookup_by_name(self, id) } } #[test] fn exercices() { match Connect::new("test:///default") { Ok(conn) => { println!("hostname={}", conn.get_hostname().unwrap_or("unknow")); println!("is alive={}", conn.is_alive().unwrap_or(false)); // default false println!("is secure={}", conn.is_secure().unwrap_or(false)); // default false println!("is encrypted={}", conn.is_enscrypted().unwrap_or(true)); // default true println!("version={}", Connect::get_version().unwrap_or(0)); println!("hyp version={}", 
conn.get_hyp_version().unwrap_or(0)); println!("lib version={}", conn.get_lib_version().unwrap_or(0)); println!("type={}", conn.get_type().unwrap_or("unknow")); conn.close(); }, Err(e) => panic!( "failed with code {}, message: {}", e.code, e.message) } } #[test] fn list_domains() { match Connect::new("test:///default") { Ok(conn) => { let doms = conn.list_domains().unwrap_or(vec![]); assert_eq!(1, doms.len()); let domid = doms[0]; match conn.domain_lookup_by_id(domid) { Ok(domain) => println!("A domain name: {}", domain.get_name().unwrap_or("noname")), Err(e) => panic!( "failed with code {}, message: {}", e.code, e.message) } conn.close(); }, Err(e) => panic!( "failed with code {}, message: {}", e.code, e.message) } } #[test] fn list_networks() { match Connect::new("test:///default") { Ok(conn) => { let nets = conn.list_networks().unwrap_or(vec![]); assert_eq!(1, nets.len()); let netid = nets[0]; match conn.network_lookup_by_name(netid) { Ok(network) => println!("A network name: {}", network.get_name().unwrap_or("noname")), Err(e) => panic!( "failed with code {}, message: {}", e.code, e.message) } conn.close(); }, Err(e) => panic!( "failed with code {}, message: {}", e.code, e.message) } } #[test] fn list_interface() { match Connect::new("test:///default") { Ok(conn) => { let ints = conn.list_interfaces().unwrap_or(vec![]); assert_eq!(1, ints.len()); let intid = ints[0]; match conn.interface_lookup_by_name(intid) { Ok(interface) => println!("An interface name: {}", interface.get_name().unwrap_or("noname")), Err(e) => panic!( "failed with code {}, message: {}", e.code, e.message) } conn.close(); }, Err(e) => panic!( "failed with code {}, message: {}", e.code, e.message) } } #[test] fn list_storage_pool() { match Connect::new("test:///default") { Ok(conn) => { let ints = conn.list_storage_pools().unwrap_or(vec![]); assert_eq!(1, ints.len()); let intid = ints[0]; match conn.storage_pool_lookup_by_name(intid) { Ok(storage_pool) => println!("A storage pool name: {}", 
storage_pool.get_name().unwrap_or("noname")), Err(e) => panic!( "failed with code {}, message: {}", e.code, e.message) } conn.close(); }, Err(e) => panic!( "failed with code {}, message: {}", e.code, e.message) } } adds get all node devices /* * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library. If not, see * <http://www.gnu.org/licenses/>. * * Sahid Orentino Ferdjaoui <sahid.ferdjaoui@redhat.com> */ #![allow(improper_ctypes)] extern crate libc; use std::ffi::{CString, CStr}; use std::{str, ptr, mem}; use domain::Domain; use error::Error; use network::Network; use interface::Interface; use storage_pool::StoragePool; #[allow(non_camel_case_types)] #[repr(C)] pub struct virConnect { } #[allow(non_camel_case_types)] pub type virConnectPtr = *const virConnect; #[link(name = "virt")] extern { fn virGetVersion(hyver: *const libc::c_ulong, ctype: *const libc::c_char, typever: *const libc::c_ulong) -> libc::c_int; fn virConnectOpen(uri: *const libc::c_char) -> virConnectPtr; fn virConnectOpenReadOnly(uri: *const libc::c_char) -> virConnectPtr; fn virConnectClose(c: virConnectPtr) -> libc::c_int; fn virConnectGetVersion(c: virConnectPtr, hyver: *const libc::c_ulong) -> libc::c_int; fn virConnectGetHostname(c: virConnectPtr) -> *const libc::c_char; fn virConnectGetCapabilities(c: virConnectPtr) -> *const libc::c_char; fn virConnectGetLibVersion(c: virConnectPtr, ver: *const libc::c_ulong) -> libc::c_int; fn virConnectGetType(c: 
virConnectPtr) -> *const libc::c_char; fn virConnectIsAlive(c: virConnectPtr) -> libc::c_int; fn virConnectIsEncrypted(c: virConnectPtr) -> libc::c_int; fn virConnectIsSecure(c: virConnectPtr) -> libc::c_int; fn virConnectListDomains(c: virConnectPtr, ids: *const libc::c_int, maxids: libc::c_int) -> libc::c_int; fn virConnectListDefinedDomains(c: virConnectPtr, names: *const *const libc::c_char, maxnames: libc::c_int) -> libc::c_int; fn virConnectListInterfaces(c: virConnectPtr, names: *const *const libc::c_char, maxnames: libc::c_int) -> libc::c_int; fn virConnectListNetworks(c: virConnectPtr, names: *const *const libc::c_char, maxnames: libc::c_int) -> libc::c_int; fn virConnectListAllNodeDevices(c: virConnectPtr, devices: *const *const libc::c_char, flags: libc::c_uint) -> libc::c_int; fn virConnectListNWFilters(c: virConnectPtr, names: *const *const libc::c_char, maxnames: libc::c_int) -> libc::c_int; fn virConnectListStoragePools(c: virConnectPtr, names: *const *const libc::c_char, maxnames: libc::c_int) -> libc::c_int; fn virConnectListSecrets(c: virConnectPtr, names: *const *const libc::c_char, maxnames: libc::c_int) -> libc::c_int; fn virConnectListDefinedInterfaces(c: virConnectPtr, names: *const *const libc::c_char, maxifaces: libc::c_int) -> libc::c_int; fn virConnectListDefinedNetworks(c: virConnectPtr, names: *const *const libc::c_char, maxnets: libc::c_int) -> libc::c_int; fn virConnectListDefinedStoragePools(c: virConnectPtr, names: *const *const libc::c_char, maxpools: libc::c_int) -> libc::c_int; fn virConnectNumOfDomains(c: virConnectPtr) -> libc::c_int; fn virConnectNumOfInterfaces(c: virConnectPtr) -> libc::c_int; fn virConnectNumOfNetworks(c: virConnectPtr) -> libc::c_int; fn virConnectNumOfStoragePools(c: virConnectPtr) -> libc::c_int; fn virConnectNumOfNWFilters(c: virConnectPtr) -> libc::c_int; fn virConnectNumOfSecrets(c: virConnectPtr) -> libc::c_int; fn virConnectNumOfDefinedDomains(c: virConnectPtr) -> libc::c_int; fn 
virConnectNumOfDefinedInterfaces(c: virConnectPtr) -> libc::c_int; fn virConnectNumOfDefinedNetworks(c: virConnectPtr) -> libc::c_int; fn virConnectNumOfDefinedStoragePools(c: virConnectPtr) -> libc::c_int; } pub type ConnectListAllNodeDeviceFlags = self::libc::c_uint; pub const VIR_CONNECT_LIST_NODE_DEVICES_CAP_SYSTEM: ConnectListAllNodeDeviceFlags = 1 << 0; pub const VIR_CONNECT_LIST_NODE_DEVICES_CAP_PCI_DEV: ConnectListAllNodeDeviceFlags = 1 << 1; pub const VIR_CONNECT_LIST_NODE_DEVICES_CAP_USB_DEV: ConnectListAllNodeDeviceFlags = 1 << 2; pub const VIR_CONNECT_LIST_NODE_DEVICES_CAP_USB_INTERFACE: ConnectListAllNodeDeviceFlags = 1 << 3; pub const VIR_CONNECT_LIST_NODE_DEVICES_CAP_NET: ConnectListAllNodeDeviceFlags = 1 << 4; pub const VIR_CONNECT_LIST_NODE_DEVICES_CAP_SCSI_HOST: ConnectListAllNodeDeviceFlags = 1 << 5; pub const VIR_CONNECT_LIST_NODE_DEVICES_CAP_SCSI_TARGET: ConnectListAllNodeDeviceFlags = 1 << 6; pub const VIR_CONNECT_LIST_NODE_DEVICES_CAP_SCSI: ConnectListAllNodeDeviceFlags = 1 << 7; pub const VIR_CONNECT_LIST_NODE_DEVICES_CAP_STORAGE: ConnectListAllNodeDeviceFlags = 1 << 8; pub const VIR_CONNECT_LIST_NODE_DEVICES_CAP_FC_HOST: ConnectListAllNodeDeviceFlags = 1 << 9; pub const VIR_CONNECT_LIST_NODE_DEVICES_CAP_VPORTS: ConnectListAllNodeDeviceFlags = 1 << 10; pub const VIR_CONNECT_LIST_NODE_DEVICES_CAP_SCSI_GENERIC: ConnectListAllNodeDeviceFlags = 1 << 11; pub const VIR_CONNECT_LIST_NODE_DEVICES_CAP_DRM: ConnectListAllNodeDeviceFlags = 1 << 12; pub struct Connect { pub c: virConnectPtr } impl Connect { pub fn as_ptr(&self) -> virConnectPtr { self.c } pub fn get_version() -> Result<u32, Error> { unsafe { let ver: libc::c_ulong = 0; if virGetVersion(&ver, ptr::null(), ptr::null()) == -1 { return Err(Error::new()); } return Ok(ver as u32); } } /// This function should be called first to get a connection to /// the Hypervisor and xen store. /// /// If @uri is "", if the LIBVIRT_DEFAULT_URI environment /// variable is set, then it will be used. 
Otherwise if the client /// configuration file has the "uri_default" parameter set, then /// it will be used. Finally probing will be done to determine a /// suitable default driver to activate. This involves trying each /// hypervisor in turn until one successfully opens. /// /// If connecting to an unprivileged hypervisor driver which /// requires the libvirtd daemon to be active, it will /// automatically be launched if not already running. This can be /// prevented by setting the environment variable /// LIBVIRT_AUTOSTART=0 /// /// URIs are documented at http://libvirt.org/uri.html /// /// Connect.close should be used to release the resources after the /// connection is no longer needed. /// /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// conn.close(); /// return /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn new(uri: &str) -> Result<Connect, Error> { unsafe { let c = virConnectOpen(CString::new(uri).unwrap().as_ptr()); if c.is_null() { return Err(Error::new()); } return Ok(Connect{c: c}); } } /// This function should be called first to get a restricted /// connection to the library functionalities. The set of APIs /// usable are then restricted on the available methods to control /// the domains. /// /// See 'new' for notes about environment variables which can have /// an effect on opening drivers and freeing the connection /// resources. 
/// /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new_read_only("test:///default") { /// Ok(conn) => { /// conn.close(); /// return /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn new_read_only(uri: &str) -> Result<Connect, Error> { unsafe { let c = virConnectOpenReadOnly(CString::new(uri).unwrap().as_ptr()); if c.is_null() { return Err(Error::new()); } return Ok(Connect{c: c}); } } /// This function closes the connection to the hypervisor. This /// should not be called if further interaction with the /// hypervisor are needed especially if there is running domain /// which need further monitoring by the application. pub fn close(&self) { unsafe { virConnectClose(self.c); } } /// This returns a system hostname on which the hypervisor is /// running (based on the result of the gethostname system call, /// but possibly expanded to a fully-qualified domain name via /// getaddrinfo). If we are connected to a remote system, then /// this returns the hostname of the remote system. 
pub fn get_hostname(&self) -> Result<&str, Error> { unsafe { let n = virConnectGetHostname(self.c); if n.is_null() { return Err(Error::new()) } return Ok(str::from_utf8( CStr::from_ptr(n).to_bytes()).unwrap()) } } pub fn get_capabilities(&self) -> Result<&str, Error> { unsafe { let n = virConnectGetCapabilities(self.c); if n.is_null() { return Err(Error::new()) } return Ok(str::from_utf8( CStr::from_ptr(n).to_bytes()).unwrap()) } } pub fn get_lib_version(&self) -> Result<u32, Error> { unsafe { let ver: libc::c_ulong = 0; if virConnectGetLibVersion(self.c, &ver) == -1 { return Err(Error::new()); } return Ok(ver as u32); } } pub fn get_type(&self) -> Result<&str, Error> { unsafe { let t = virConnectGetType(self.c); if t.is_null() { return Err(Error::new()) } return Ok(str::from_utf8( CStr::from_ptr(t).to_bytes()).unwrap()) } } pub fn is_alive(&self) -> Result<bool, Error> { unsafe { let t = virConnectIsAlive(self.c); if t == -1 { return Err(Error::new()) } return Ok(t == 1) } } pub fn is_enscrypted(&self) -> Result<bool, Error> { unsafe { let t = virConnectIsEncrypted(self.c); if t == -1 { return Err(Error::new()) } return Ok(t == 1) } } pub fn is_secure(&self) -> Result<bool, Error> { unsafe { let t = virConnectIsSecure(self.c); if t == -1 { return Err(Error::new()) } return Ok(t == 1) } } /// /// /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.list_domains() { /// Ok(arr) => assert_eq!(1, arr.len()), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn list_domains(&self) -> Result<Vec<u32>, Error> { unsafe { let ids: [libc::c_int; 512] = mem::uninitialized(); let size = virConnectListDomains(self.c, ids.as_ptr(), 512); if size == -1 { return Err(Error::new()) } let mut array: Vec<u32> = Vec::new(); for x in 0..size 
as usize { array.push(ids[x] as u32); } return Ok(array) } } /// /// /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.list_interfaces() { /// Ok(arr) => assert_eq!(1, arr.len()), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn list_interfaces(&self) -> Result<Vec<&str>, Error> { unsafe { let names: [*const libc::c_char; 1024] = mem::uninitialized(); let size = virConnectListInterfaces(self.c, names.as_ptr(), 1024); if size == -1 { return Err(Error::new()) } let mut array: Vec<&str> = Vec::new(); for x in 0..size as usize { array.push(str::from_utf8( CStr::from_ptr(names[x]).to_bytes()).unwrap()); } return Ok(array) } } /// /// /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.list_networks() { /// Ok(arr) => assert_eq!(1, arr.len()), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn list_networks(&self) -> Result<Vec<&str>, Error> { unsafe { let names: [*const libc::c_char; 1024] = mem::uninitialized(); let size = virConnectListNetworks(self.c, names.as_ptr(), 1024); if size == -1 { return Err(Error::new()) } let mut array: Vec<&str> = Vec::new(); for x in 0..size as usize { array.push(str::from_utf8( CStr::from_ptr(names[x]).to_bytes()).unwrap()); } return Ok(array) } } pub fn list_all_node_devices( &self, flags: ConnectListAllNodeDeviceFlags) -> Result<Vec<&str>, Error> { unsafe { let names: [*const libc::c_char; 1024] = mem::uninitialized(); let size = virConnectListAllNodeDevices(self.c, names.as_ptr(), flags); if size == -1 { return Err(Error::new()) } let 
mut array: Vec<&str> = Vec::new(); for x in 0..size as usize { array.push(str::from_utf8( CStr::from_ptr(names[x]).to_bytes()).unwrap()); } return Ok(array) } } pub fn list_nw_filters(&self) -> Result<Vec<&str>, Error> { unsafe { let names: [*const libc::c_char; 1024] = mem::uninitialized(); let size = virConnectListNWFilters(self.c, names.as_ptr(), 1024); if size == -1 { return Err(Error::new()) } let mut array: Vec<&str> = Vec::new(); for x in 0..size as usize { array.push(str::from_utf8( CStr::from_ptr(names[x]).to_bytes()).unwrap()); } return Ok(array) } } pub fn list_secrets(&self) -> Result<Vec<&str>, Error> { unsafe { let names: [*const libc::c_char; 1024] = mem::uninitialized(); let size = virConnectListSecrets(self.c, names.as_ptr(), 1024); if size == -1 { return Err(Error::new()) } let mut array: Vec<&str> = Vec::new(); for x in 0..size as usize { array.push(str::from_utf8( CStr::from_ptr(names[x]).to_bytes()).unwrap()); } return Ok(array) } } /// /// /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.list_storage_pools() { /// Ok(arr) => assert_eq!(1, arr.len()), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn list_storage_pools(&self) -> Result<Vec<&str>, Error> { unsafe { let names: [*const libc::c_char; 1024] = mem::uninitialized(); let size = virConnectListStoragePools(self.c, names.as_ptr(), 1024); if size == -1 { return Err(Error::new()) } let mut array: Vec<&str> = Vec::new(); for x in 0..size as usize { array.push(str::from_utf8( CStr::from_ptr(names[x]).to_bytes()).unwrap()); } return Ok(array) } } /// /// /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.list_defined_domains() { /// Ok(arr) => 
assert_eq!(0, arr.len()), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn list_defined_domains(&self) -> Result<Vec<&str>, Error> { unsafe { let names: [*const libc::c_char; 1024] = mem::uninitialized(); let size = virConnectListDefinedDomains(self.c, names.as_ptr(), 1024); if size == -1 { return Err(Error::new()) } let mut array: Vec<&str> = Vec::new(); for x in 0..size as usize { array.push(str::from_utf8( CStr::from_ptr(names[x]).to_bytes()).unwrap()); } return Ok(array) } } /// /// /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.list_defined_interfaces() { /// Ok(arr) => assert_eq!(0, arr.len()), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn list_defined_interfaces(&self) -> Result<Vec<&str>, Error> { unsafe { let names: [*const libc::c_char; 1024] = mem::uninitialized(); let size = virConnectListDefinedInterfaces(self.c, names.as_ptr(), 1024); if size == -1 { return Err(Error::new()) } let mut array: Vec<&str> = Vec::new(); for x in 0..size as usize { array.push(str::from_utf8( CStr::from_ptr(names[x]).to_bytes()).unwrap()); } return Ok(array) } } /// /// /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.list_defined_storage_pools() { /// Ok(arr) => assert_eq!(0, arr.len()), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn list_defined_storage_pools(&self) -> Result<Vec<&str>, Error> { unsafe 
{ let names: [*const libc::c_char; 1024] = mem::uninitialized(); let size = virConnectListDefinedStoragePools( self.c, names.as_ptr(), 1024); if size == -1 { return Err(Error::new()) } let mut array: Vec<&str> = Vec::new(); for x in 0..size as usize { array.push(str::from_utf8( CStr::from_ptr(names[x]).to_bytes()).unwrap()); } return Ok(array) } } /// /// /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.list_networks() { /// Ok(arr) => assert_eq!(1, arr.len()), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn list_defined_networks(&self) -> Result<Vec<&str>, Error> { unsafe { let names: [*const libc::c_char; 1024] = mem::uninitialized(); let size = virConnectListDefinedNetworks(self.c, names.as_ptr(), 1024); if size == -1 { return Err(Error::new()) } let mut array: Vec<&str> = Vec::new(); for x in 0..size as usize { array.push(str::from_utf8( CStr::from_ptr(names[x]).to_bytes()).unwrap()); } return Ok(array) } } /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.num_of_domains() { /// Ok(n) => assert_eq!(1, n), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn num_of_domains(&self) -> Result<u32, Error> { unsafe { let num = virConnectNumOfDomains(self.c); if num == -1 { return Err(Error::new()) } return Ok(num as u32) } } /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.num_of_interfaces() { /// Ok(n) => assert_eq!(1, n), /// Err(e) => panic!( /// "failed with code {}, 
message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn num_of_interfaces(&self) -> Result<u32, Error> { unsafe { let num = virConnectNumOfInterfaces(self.c); if num == -1 { return Err(Error::new()) } return Ok(num as u32) } } /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.num_of_networks() { /// Ok(n) => assert_eq!(1, n), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn num_of_networks(&self) -> Result<u32, Error> { unsafe { let num = virConnectNumOfNetworks(self.c); if num == -1 { return Err(Error::new()) } return Ok(num as u32) } } /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.num_of_storage_pools() { /// Ok(n) => assert_eq!(1, n), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn num_of_storage_pools(&self) -> Result<u32, Error> { unsafe { let num = virConnectNumOfStoragePools(self.c); if num == -1 { return Err(Error::new()) } return Ok(num as u32) } } pub fn num_of_nw_filters(&self) -> Result<u32, Error> { unsafe { let num = virConnectNumOfNWFilters(self.c); if num == -1 { return Err(Error::new()) } return Ok(num as u32) } } pub fn num_of_secrets(&self) -> Result<u32, Error> { unsafe { let num = virConnectNumOfSecrets(self.c); if num == -1 { return Err(Error::new()) } return Ok(num as u32) } } /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match 
conn.num_of_defined_domains() { /// Ok(n) => assert_eq!(0, n), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn num_of_defined_domains(&self) -> Result<u32, Error> { unsafe { let num = virConnectNumOfDefinedDomains(self.c); if num == -1 { return Err(Error::new()) } return Ok(num as u32) } } /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.num_of_defined_interfaces() { /// Ok(n) => assert_eq!(0, n), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn num_of_defined_interfaces(&self) -> Result<u32, Error> { unsafe { let num = virConnectNumOfDefinedInterfaces(self.c); if num == -1 { return Err(Error::new()) } return Ok(num as u32) } } /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.num_of_defined_networks() { /// Ok(n) => assert_eq!(0, n), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn num_of_defined_networks(&self) -> Result<u32, Error> { unsafe { let num = virConnectNumOfDefinedNetworks(self.c); if num == -1 { return Err(Error::new()) } return Ok(num as u32) } } /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.num_of_defined_storage_pools() { /// Ok(n) => assert_eq!(0, n), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// conn.close(); /// }, /// Err(e) => panic!( /// 
"failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn num_of_defined_storage_pools(&self) -> Result<u32, Error> { unsafe { let num = virConnectNumOfDefinedStoragePools(self.c); if num == -1 { return Err(Error::new()) } return Ok(num as u32) } } /// Connect.close should be used to release the resources after the /// connection is no longer needed. /// /// # Examples /// /// ``` /// use virt::connect::Connect; /// /// match Connect::new("test:///default") { /// Ok(conn) => { /// match conn.get_hyp_version() { /// Ok(hyver) => assert_eq!(2, hyver), /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// return /// }, /// Err(e) => panic!( /// "failed with code {}, message: {}", e.code, e.message) /// } /// ``` pub fn get_hyp_version(&self) -> Result<u32, Error> { unsafe { let hyver: libc::c_ulong = 0; if virConnectGetVersion(self.c, &hyver) == -1 { return Err(Error::new()); } return Ok(hyver as u32); } } pub fn domain_lookup_by_id(&self, id: u32) -> Result<Domain, Error> { Domain::lookup_by_id(self, id) } pub fn domain_lookup_by_name(&self, id: &str) -> Result<Domain, Error> { Domain::lookup_by_name(self, id) } pub fn network_lookup_by_id(&self, id: u32) -> Result<Network, Error> { Network::lookup_by_id(self, id) } pub fn network_lookup_by_name(&self, id: &str) -> Result<Network, Error> { Network::lookup_by_name(self, id) } pub fn interface_lookup_by_id(&self, id: u32) -> Result<Interface, Error> { Interface::lookup_by_id(self, id) } pub fn interface_lookup_by_name(&self, id: &str) -> Result<Interface, Error> { Interface::lookup_by_name(self, id) } pub fn storage_pool_lookup_by_id(&self, id: u32) -> Result<StoragePool, Error> { StoragePool::lookup_by_id(self, id) } pub fn storage_pool_lookup_by_name(&self, id: &str) -> Result<StoragePool, Error> { StoragePool::lookup_by_name(self, id) } } #[test] fn exercices() { match Connect::new("test:///default") { Ok(conn) => { println!("hostname={}", 
conn.get_hostname().unwrap_or("unknow")); println!("is alive={}", conn.is_alive().unwrap_or(false)); // default false println!("is secure={}", conn.is_secure().unwrap_or(false)); // default false println!("is encrypted={}", conn.is_enscrypted().unwrap_or(true)); // default true println!("version={}", Connect::get_version().unwrap_or(0)); println!("hyp version={}", conn.get_hyp_version().unwrap_or(0)); println!("lib version={}", conn.get_lib_version().unwrap_or(0)); println!("type={}", conn.get_type().unwrap_or("unknow")); conn.close(); }, Err(e) => panic!( "failed with code {}, message: {}", e.code, e.message) } } #[test] fn list_domains() { match Connect::new("test:///default") { Ok(conn) => { let doms = conn.list_domains().unwrap_or(vec![]); assert_eq!(1, doms.len()); let domid = doms[0]; match conn.domain_lookup_by_id(domid) { Ok(domain) => println!("A domain name: {}", domain.get_name().unwrap_or("noname")), Err(e) => panic!( "failed with code {}, message: {}", e.code, e.message) } conn.close(); }, Err(e) => panic!( "failed with code {}, message: {}", e.code, e.message) } } #[test] fn list_networks() { match Connect::new("test:///default") { Ok(conn) => { let nets = conn.list_networks().unwrap_or(vec![]); assert_eq!(1, nets.len()); let netid = nets[0]; match conn.network_lookup_by_name(netid) { Ok(network) => println!("A network name: {}", network.get_name().unwrap_or("noname")), Err(e) => panic!( "failed with code {}, message: {}", e.code, e.message) } conn.close(); }, Err(e) => panic!( "failed with code {}, message: {}", e.code, e.message) } } #[test] fn list_interface() { match Connect::new("test:///default") { Ok(conn) => { let ints = conn.list_interfaces().unwrap_or(vec![]); assert_eq!(1, ints.len()); let intid = ints[0]; match conn.interface_lookup_by_name(intid) { Ok(interface) => println!("An interface name: {}", interface.get_name().unwrap_or("noname")), Err(e) => panic!( "failed with code {}, message: {}", e.code, e.message) } conn.close(); }, Err(e) 
=> panic!( "failed with code {}, message: {}", e.code, e.message) } } #[test] fn list_storage_pool() { match Connect::new("test:///default") { Ok(conn) => { let ints = conn.list_storage_pools().unwrap_or(vec![]); assert_eq!(1, ints.len()); let intid = ints[0]; match conn.storage_pool_lookup_by_name(intid) { Ok(storage_pool) => println!("A storage pool name: {}", storage_pool.get_name().unwrap_or("noname")), Err(e) => panic!( "failed with code {}, message: {}", e.code, e.message) } conn.close(); }, Err(e) => panic!( "failed with code {}, message: {}", e.code, e.message) } }
use std::cmp::Ordering; extern crate serde; extern crate serde_json; use base64_vlq; static SOURCE_MAP_VERSION: u32 = 3; #[allow(non_snake_case)] #[derive(Deserialize, Debug)] struct SourceMap { version: u32, sources: Vec<String>, names: Vec<String>, sourceRoot: Option<String>, mappings: String, file: Option<String> // We skip this. Keeping megabytes of data that we do not care about // in memory seems reckless to caches. //sourcesContent: Option<vec<String>>, } #[derive(Clone, Eq, PartialEq, Debug)] pub struct CodePosition { /** Line number in a code file, starting from 1 */ pub line: u32, /** Column number in a code file, starting from 0 */ pub column: u32 } #[derive(Clone, Eq, PartialEq, Debug)] pub struct Mapping { /** The position in the generated file */ pub generated: CodePosition, /** The position in the corresponding original source file */ pub original: CodePosition, /** The original source file */ pub source: String, /** The original source name of the function/class, if applicable */ pub name: String } #[derive(Debug)] pub struct Cache { generated_mappings: Vec<Mapping>, /** The path prefix of mapping source paths */ pub source_root: String } /** * consume parses a SourceMap into a cache that can be queried for mappings * * The only parameter is the raw source map as a JSON string. * According to the [source map spec][source-map-spec], source maps have the following attributes: * * - version: Which version of the source map spec this map is following. * - sources: An array of URLs to the original source files. * - names: An array of identifiers which can be referrenced by individual mappings. * - sourceRoot: Optional. The URL root from which all sources are relative. * - sourcesContent: Optional. An array of contents of the original source files. * - mappings: A string of base64 VLQs which contain the actual mappings. * - file: Optional. The generated file this source map is associated with. 
* * Here is an example source map: * * ```json * { * "version": 3, * "file": "out.js", * "sourceRoot" : "", * "sources": ["foo.js", "bar.js"], * "names": ["src", "maps", "are", "fun"], * "mappings": "AA,AB;;ABCDE;" * } * ``` * * [source-map-spec]: https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit?pli=1# */ pub fn consume(source_map_json: &str) -> Result<Cache, String> { let source_map: SourceMap = match serde_json::from_str(source_map_json) { Ok(x) => x, Err(err) => return Err(format!("{}", err)) }; parse_mappings(&source_map) } fn parse_mappings(source_map: &SourceMap) -> Result<Cache, String>{ if source_map.version != SOURCE_MAP_VERSION { return Err("Only Source Map version 3 is implemented".into()) } let sources_length = source_map.sources.len() as u32; let names_length = source_map.names.len() as u32; let mut generated_mappings: Vec<Mapping> = Vec::new(); let mut generated_line: u32 = 0; let mut previous_original_line: u32 = 0; let mut previous_original_column: u32 = 0; let mut previous_source: u32 = 0; let mut previous_name: u32 = 0; for line in source_map.mappings.as_bytes().split(|&x| x == (';' as u8)) { generated_line += 1; let mut previous_generated_column: u32 = 0; for segment in line.split(|&x| x == (',' as u8)) { let segment_length = segment.len(); let mut fields: Vec<i32> = Vec::new(); let mut character_index = 0; while character_index < segment_length { match base64_vlq::decode(&segment[character_index..segment_length]) { Some((value, field_length)) => { fields.push(value); character_index += field_length; }, None => return Err("Invalid VLQ mapping field".into()) }; } if fields.len() < 1 { continue; } if fields.len() == 2 { return Err("Found a source, but no line and column".into()); } if fields.len() == 3 { return Err("Found a source and line, but no column".into()); } let mut mapping = Mapping { generated: CodePosition { line: generated_line, column: ((previous_generated_column as i32) + fields[0]) as u32 }, 
original: CodePosition { line: 0, column: 0 }, source: "".into(), name: "".into() }; previous_generated_column = mapping.generated.column; if fields.len() > 1 { // Original source. previous_source = ((previous_source as i32) + fields[1]) as u32; if previous_source < sources_length { mapping.source = source_map.sources[previous_source as usize].to_owned(); } else { return Err(format!("Invalid source map: reference to source index {} when source list length is {}", previous_source, sources_length)); } // Original line. previous_original_line = ((previous_original_line as i32) + fields[2]) as u32; // Lines are stored 0-based mapping.original.line = previous_original_line + 1; // Original column. previous_original_column = ((previous_original_column as i32) + fields[3]) as u32; mapping.original.column = previous_original_column; if fields.len() > 4 { // Original name. previous_name = ((previous_name as i32) + fields[4]) as u32; if previous_name < names_length { mapping.name = source_map.names[previous_name as usize].to_owned(); } else { return Err(format!("Invalid source map: reference to name index {} when name list length is {}", previous_name, names_length)); } } } generated_mappings.push(mapping); } } if generated_mappings.len() < 1 { return Err("Source Map contains no mappings".to_owned()); } fn sort_key(mapping: &Mapping) -> (u32, u32) { (mapping.generated.line, mapping.generated.column) } generated_mappings.sort_by(|a, b| sort_key(a).cmp(&sort_key(b))); Ok(Cache { generated_mappings: generated_mappings, source_root: match &source_map.sourceRoot { &Some(ref x) => x.to_owned(), &None => "".into() } }) } impl Cache { /** * Returns the original source, line, column and name information for the generated * source's line and column positions provided. * * # Arguments * * * line: The line number in the generated source. * * column: The column number in the generated source. 
* * # Examples * * ``` * use js_source_mapper::consume; * * let cache = consume(r#"{ "version": 3, "file": "foo.js", "sources": ["source.js"], "names": ["name1", "name1", "name3"], "mappings": ";EAACA;;IAEEA;;MAEEE", "sourceRoot": "http://example.com" }"#).unwrap(); * * println!("{:?}", cache.mapping_for_generated_position(2, 2)); * // => Mapping { * // generated: CodePosition { line: 2, column: 2 }, * // original: CodePosition { line: 1, column: 1 }, * // source: "source.js" * // name: "name1" * // } * ``` * */ pub fn mapping_for_generated_position(&self, line: u32, column: u32) -> Mapping { let matcher = |mapping: &Mapping| -> Ordering { (mapping.generated.line, mapping.generated.column).cmp(&(line, column)) }; let mappings = &self.generated_mappings; match mappings.binary_search_by(matcher) { Ok(index) => &self.generated_mappings[index], Err(index) => &self.generated_mappings[if index >= mappings.len() { mappings.len() - 1 } else { index }] }.clone() } } macro_rules! assert_equal_mappings( ($a:expr, $b:expr) => ( if $a != $b { panic!(format!("\n\n{:?}\n\n!=\n\n{:?}\n\n", $a, $b)); } ); ); #[test] fn test_source_map_issue_64() { let cache = consume(r#"{ "version": 3, "file": "foo.js", "sourceRoot": "http://example.com/", "sources": ["/a"], "names": [], "mappings": "AACA", "sourcesContent": ["foo"] }"#).unwrap(); let expected = Mapping { generated: CodePosition { line: 1, column: 0 }, original: CodePosition { line: 2, column: 0 }, source: "/a".into(), name: "".into() }; let actual = cache.mapping_for_generated_position(1, 0); assert_equal_mappings!(actual, expected); } #[test] fn test_source_map_issue_72_duplicate_sources() { let cache = consume(r#"{ "version": 3, "file": "foo.js", "sources": ["source1.js", "source1.js", "source3.js"], "names": [], "mappings": ";EAAC;;IAEE;;MEEE", "sourceRoot": "http://example.com" }"#).unwrap(); { let expected = Mapping { generated: CodePosition { line: 2, column: 2 }, original: CodePosition { line: 1, column: 1 }, source: 
"source1.js".into(), name: "".into() }; let actual = cache.mapping_for_generated_position(2, 2); assert_equal_mappings!(actual, expected); } { let expected = Mapping { generated: CodePosition { line: 4, column: 4 }, original: CodePosition { line: 3, column: 3 }, source: "source1.js".into(), name: "".into() }; let actual = cache.mapping_for_generated_position(4, 4); assert_equal_mappings!(actual, expected); } { let expected = Mapping { generated: CodePosition { line: 6, column: 6 }, original: CodePosition { line: 5, column: 5 }, source: "source3.js".into(), name: "".into() }; let actual = cache.mapping_for_generated_position(6, 6); assert_equal_mappings!(actual, expected); } } #[test] fn test_source_map_issue_72_duplicate_names() { let cache = consume(r#"{ "version": 3, "file": "foo.js", "sources": ["source.js"], "names": ["name1", "name1", "name3"], "mappings": ";EAACA;;IAEEA;;MAEEE", "sourceRoot": "http://example.com" }"#).unwrap(); { let expected = Mapping { generated: CodePosition { line: 2, column: 2 }, original: CodePosition { line: 1, column: 1 }, source: "source.js".into(), name: "name1".into() }; let actual = cache.mapping_for_generated_position(2, 2); assert_equal_mappings!(actual, expected); } { let expected = Mapping { generated: CodePosition { line: 4, column: 4 }, original: CodePosition { line: 3, column: 3 }, source: "source.js".into(), name: "name1".into() }; let actual = cache.mapping_for_generated_position(4, 4); assert_equal_mappings!(actual, expected); } { let expected = Mapping { generated: CodePosition { line: 6, column: 6 }, original: CodePosition { line: 5, column: 5 }, source: "source.js".into(), name: "name3".into() }; let actual = cache.mapping_for_generated_position(6, 6); assert_equal_mappings!(actual, expected); } } #[test] fn it_allows_omitting_source_root() { let cache_result: Result<Cache, String> = consume(r#"{ "version": 3, "file": "foo.js", "sources": ["source.js"], "names": ["name1", "name1", "name3"], "mappings": 
";EAACA;;IAEEA;;MAEEE" }"#); match cache_result { Ok(_) => {}, Err(s) => panic!(format!("Error due to omitting: '{}'", s)) } } #[test] fn it_rejects_older_source_map_revisions() { let cache_result = consume(r#"{ "version": 2, "file": "", "sources": ["source.js"], "names": ["name1", "name1", "name3"], "mappings": ";EAACA;;IAEEA;;MAEEE", "sourceRoot": "http://example.com" }"#); match cache_result { Ok(_) => panic!("Source Map revision < 3 should be rejected"), Err(_) => {} } } #[test] fn it_does_not_panic_due_to_malformed_source_maps() { let cache_result = consume(r#"{ "version": 3, "file": "", "sources": [], "names": [], "mappings": ";EAACA;;IAEEA;;MAEEE" }"#); match cache_result { Ok(_) => panic!("Invalid source maps should be rejected"), Err(_) => {} } } #[test] fn it_returns_error_when_there_are_no_mappings() { let cache_result = consume(r#"{ "version": 3, "file": "foo.js", "sources": ["source.js"], "names": ["name1", "name1", "name3"], "mappings": ";;;" }"#); match cache_result { Ok(_) => panic!("Source maps with no mappings should be rejected"), Err(_) => {} } } #[test] fn it_does_not_panic_when_querying_for_position_2() { let cache = consume(r#"{ "version": 3, "file": "foo.js", "sources": ["source.js"], "names": ["name1", "name1", "name3"], "mappings": "Z", "sourceRoot": "http://example.com" }"#).unwrap(); cache.mapping_for_generated_position(2, 2); } #[test] fn it_does_not_panic_on_invalid_bit_shifts() { consume(r#"{ "version": 3, "file": "foo.js", "sources": ["source.js"], "names": ["name1", "name1", "name3"], "mappings": "00000001", "sourceRoot": "http://example.com" }"#).expect_err("Invalid VLQ mapping field"); } Fix line number overflow (fuzz) use std::cmp::Ordering; extern crate serde; extern crate serde_json; use base64_vlq; static SOURCE_MAP_VERSION: u32 = 3; #[allow(non_snake_case)] #[derive(Deserialize, Debug)] struct SourceMap { version: u32, sources: Vec<String>, names: Vec<String>, sourceRoot: Option<String>, mappings: String, file: Option<String> 
// We skip this. Keeping megabytes of data that we do not care about // in memory seems reckless to caches. //sourcesContent: Option<vec<String>>, } #[derive(Clone, Eq, PartialEq, Debug)] pub struct CodePosition { /** Line number in a code file, starting from 1 */ pub line: u32, /** Column number in a code file, starting from 0 */ pub column: u32 } #[derive(Clone, Eq, PartialEq, Debug)] pub struct Mapping { /** The position in the generated file */ pub generated: CodePosition, /** The position in the corresponding original source file */ pub original: CodePosition, /** The original source file */ pub source: String, /** The original source name of the function/class, if applicable */ pub name: String } #[derive(Debug)] pub struct Cache { generated_mappings: Vec<Mapping>, /** The path prefix of mapping source paths */ pub source_root: String } /** * consume parses a SourceMap into a cache that can be queried for mappings * * The only parameter is the raw source map as a JSON string. * According to the [source map spec][source-map-spec], source maps have the following attributes: * * - version: Which version of the source map spec this map is following. * - sources: An array of URLs to the original source files. * - names: An array of identifiers which can be referrenced by individual mappings. * - sourceRoot: Optional. The URL root from which all sources are relative. * - sourcesContent: Optional. An array of contents of the original source files. * - mappings: A string of base64 VLQs which contain the actual mappings. * - file: Optional. The generated file this source map is associated with. 
* * Here is an example source map: * * ```json * { * "version": 3, * "file": "out.js", * "sourceRoot" : "", * "sources": ["foo.js", "bar.js"], * "names": ["src", "maps", "are", "fun"], * "mappings": "AA,AB;;ABCDE;" * } * ``` * * [source-map-spec]: https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit?pli=1# */ pub fn consume(source_map_json: &str) -> Result<Cache, String> { let source_map: SourceMap = match serde_json::from_str(source_map_json) { Ok(x) => x, Err(err) => return Err(format!("{}", err)) }; parse_mappings(&source_map) } fn parse_mappings(source_map: &SourceMap) -> Result<Cache, String>{ if source_map.version != SOURCE_MAP_VERSION { return Err("Only Source Map version 3 is implemented".into()) } let sources_length = source_map.sources.len() as u32; let names_length = source_map.names.len() as u32; let mut generated_mappings: Vec<Mapping> = Vec::new(); let mut generated_line: u32 = 0; let mut previous_original_line: u32 = 0; let mut previous_original_column: u32 = 0; let mut previous_source: u32 = 0; let mut previous_name: u32 = 0; for line in source_map.mappings.as_bytes().split(|&x| x == (';' as u8)) { generated_line += 1; let mut previous_generated_column: u32 = 0; for segment in line.split(|&x| x == (',' as u8)) { let segment_length = segment.len(); let mut fields: Vec<i32> = Vec::new(); let mut character_index = 0; while character_index < segment_length { match base64_vlq::decode(&segment[character_index..segment_length]) { Some((value, field_length)) => { fields.push(value); character_index += field_length; }, None => return Err("Invalid VLQ mapping field".into()) }; } if fields.len() < 1 { continue; } if fields.len() == 2 { return Err("Found a source, but no line and column".into()); } if fields.len() == 3 { return Err("Found a source and line, but no column".into()); } let mut mapping = Mapping { generated: CodePosition { line: generated_line, column: ((previous_generated_column as i32) + fields[0]) as u32 }, 
original: CodePosition { line: 0, column: 0 }, source: "".into(), name: "".into() }; previous_generated_column = mapping.generated.column; if fields.len() > 1 { // Original source. previous_source = ((previous_source as i32) + fields[1]) as u32; if previous_source < sources_length { mapping.source = source_map.sources[previous_source as usize].to_owned(); } else { return Err(format!("Invalid source map: reference to source index {} when source list length is {}", previous_source, sources_length)); } // Original line. previous_original_line = ((previous_original_line as i32) + fields[2]) as u32; // Lines are stored 0-based mapping.original.line = previous_original_line.checked_add(1).ok_or("Line number overflowed")?; // Original column. previous_original_column = ((previous_original_column as i32) + fields[3]) as u32; mapping.original.column = previous_original_column; if fields.len() > 4 { // Original name. previous_name = ((previous_name as i32) + fields[4]) as u32; if previous_name < names_length { mapping.name = source_map.names[previous_name as usize].to_owned(); } else { return Err(format!("Invalid source map: reference to name index {} when name list length is {}", previous_name, names_length)); } } } generated_mappings.push(mapping); } } if generated_mappings.len() < 1 { return Err("Source Map contains no mappings".to_owned()); } fn sort_key(mapping: &Mapping) -> (u32, u32) { (mapping.generated.line, mapping.generated.column) } generated_mappings.sort_by(|a, b| sort_key(a).cmp(&sort_key(b))); Ok(Cache { generated_mappings: generated_mappings, source_root: match &source_map.sourceRoot { &Some(ref x) => x.to_owned(), &None => "".into() } }) } impl Cache { /** * Returns the original source, line, column and name information for the generated * source's line and column positions provided. * * # Arguments * * * line: The line number in the generated source. * * column: The column number in the generated source. 
* * # Examples * * ``` * use js_source_mapper::consume; * * let cache = consume(r#"{ "version": 3, "file": "foo.js", "sources": ["source.js"], "names": ["name1", "name1", "name3"], "mappings": ";EAACA;;IAEEA;;MAEEE", "sourceRoot": "http://example.com" }"#).unwrap(); * * println!("{:?}", cache.mapping_for_generated_position(2, 2)); * // => Mapping { * // generated: CodePosition { line: 2, column: 2 }, * // original: CodePosition { line: 1, column: 1 }, * // source: "source.js" * // name: "name1" * // } * ``` * */ pub fn mapping_for_generated_position(&self, line: u32, column: u32) -> Mapping { let matcher = |mapping: &Mapping| -> Ordering { (mapping.generated.line, mapping.generated.column).cmp(&(line, column)) }; let mappings = &self.generated_mappings; match mappings.binary_search_by(matcher) { Ok(index) => &self.generated_mappings[index], Err(index) => &self.generated_mappings[if index >= mappings.len() { mappings.len() - 1 } else { index }] }.clone() } } macro_rules! assert_equal_mappings( ($a:expr, $b:expr) => ( if $a != $b { panic!(format!("\n\n{:?}\n\n!=\n\n{:?}\n\n", $a, $b)); } ); ); #[test] fn test_source_map_issue_64() { let cache = consume(r#"{ "version": 3, "file": "foo.js", "sourceRoot": "http://example.com/", "sources": ["/a"], "names": [], "mappings": "AACA", "sourcesContent": ["foo"] }"#).unwrap(); let expected = Mapping { generated: CodePosition { line: 1, column: 0 }, original: CodePosition { line: 2, column: 0 }, source: "/a".into(), name: "".into() }; let actual = cache.mapping_for_generated_position(1, 0); assert_equal_mappings!(actual, expected); } #[test] fn test_source_map_issue_72_duplicate_sources() { let cache = consume(r#"{ "version": 3, "file": "foo.js", "sources": ["source1.js", "source1.js", "source3.js"], "names": [], "mappings": ";EAAC;;IAEE;;MEEE", "sourceRoot": "http://example.com" }"#).unwrap(); { let expected = Mapping { generated: CodePosition { line: 2, column: 2 }, original: CodePosition { line: 1, column: 1 }, source: 
"source1.js".into(), name: "".into() }; let actual = cache.mapping_for_generated_position(2, 2); assert_equal_mappings!(actual, expected); } { let expected = Mapping { generated: CodePosition { line: 4, column: 4 }, original: CodePosition { line: 3, column: 3 }, source: "source1.js".into(), name: "".into() }; let actual = cache.mapping_for_generated_position(4, 4); assert_equal_mappings!(actual, expected); } { let expected = Mapping { generated: CodePosition { line: 6, column: 6 }, original: CodePosition { line: 5, column: 5 }, source: "source3.js".into(), name: "".into() }; let actual = cache.mapping_for_generated_position(6, 6); assert_equal_mappings!(actual, expected); } } #[test] fn test_source_map_issue_72_duplicate_names() { let cache = consume(r#"{ "version": 3, "file": "foo.js", "sources": ["source.js"], "names": ["name1", "name1", "name3"], "mappings": ";EAACA;;IAEEA;;MAEEE", "sourceRoot": "http://example.com" }"#).unwrap(); { let expected = Mapping { generated: CodePosition { line: 2, column: 2 }, original: CodePosition { line: 1, column: 1 }, source: "source.js".into(), name: "name1".into() }; let actual = cache.mapping_for_generated_position(2, 2); assert_equal_mappings!(actual, expected); } { let expected = Mapping { generated: CodePosition { line: 4, column: 4 }, original: CodePosition { line: 3, column: 3 }, source: "source.js".into(), name: "name1".into() }; let actual = cache.mapping_for_generated_position(4, 4); assert_equal_mappings!(actual, expected); } { let expected = Mapping { generated: CodePosition { line: 6, column: 6 }, original: CodePosition { line: 5, column: 5 }, source: "source.js".into(), name: "name3".into() }; let actual = cache.mapping_for_generated_position(6, 6); assert_equal_mappings!(actual, expected); } } #[test] fn it_allows_omitting_source_root() { let cache_result: Result<Cache, String> = consume(r#"{ "version": 3, "file": "foo.js", "sources": ["source.js"], "names": ["name1", "name1", "name3"], "mappings": 
";EAACA;;IAEEA;;MAEEE" }"#); match cache_result { Ok(_) => {}, Err(s) => panic!(format!("Error due to omitting: '{}'", s)) } } #[test] fn it_rejects_older_source_map_revisions() { let cache_result = consume(r#"{ "version": 2, "file": "", "sources": ["source.js"], "names": ["name1", "name1", "name3"], "mappings": ";EAACA;;IAEEA;;MAEEE", "sourceRoot": "http://example.com" }"#); match cache_result { Ok(_) => panic!("Source Map revision < 3 should be rejected"), Err(_) => {} } } #[test] fn it_does_not_panic_due_to_malformed_source_maps() { let cache_result = consume(r#"{ "version": 3, "file": "", "sources": [], "names": [], "mappings": ";EAACA;;IAEEA;;MAEEE" }"#); match cache_result { Ok(_) => panic!("Invalid source maps should be rejected"), Err(_) => {} } } #[test] fn it_returns_error_when_there_are_no_mappings() { let cache_result = consume(r#"{ "version": 3, "file": "foo.js", "sources": ["source.js"], "names": ["name1", "name1", "name3"], "mappings": ";;;" }"#); match cache_result { Ok(_) => panic!("Source maps with no mappings should be rejected"), Err(_) => {} } } #[test] fn it_does_not_panic_when_querying_for_position_2() { let cache = consume(r#"{ "version": 3, "file": "foo.js", "sources": ["source.js"], "names": ["name1", "name1", "name3"], "mappings": "Z", "sourceRoot": "http://example.com" }"#).unwrap(); cache.mapping_for_generated_position(2, 2); } #[test] fn it_does_not_panic_on_invalid_bit_shifts() { match consume(r#"{ "version": 3, "file": "foo.js", "sources": ["source.js"], "names": ["name1", "name1", "name3"], "mappings": "00000001", "sourceRoot": "http://example.com" }"#) { Err(s) => assert!(s == "Invalid VLQ mapping field"), _ => panic!("Invalid source map should fail to consume") }; } #[test] fn it_does_not_panic_from_add_overflow() { match consume(r#"{ "version": 3, "file": "foo.js", "sources": ["source.js"], "names": ["name1", "name1", "name3"], "mappings": "BBDDDDDDBBBBBBBc;*;ZZBBBBBBBBBBv", "sourceRoot": "http://example.com" }"#) { Err(s) => 
assert!(s == "Line number overflowed"), _ => panic!("Invalid source map should fail to consume") }; }
// NOTE(review): this span contains TWO consecutive revisions of the same
// libjit `context` module, separated by the bare commit title
// "Implement `NoSync` & `NoSend` for `Builder`". The duplicated items and
// the stray title line are preserved byte-for-byte (this span is a dump,
// not a compilable unit); only comments were added.

// ---- Revision 1 (before the commit) ----
use raw::*;
use std::mem;
use util::NativeRef;
use {CompiledFunction, Type, UncompiledFunction};

/// Holds all of the functions you have built and compiled. There can be
/// multiple, but normally there is only one.
pub struct Context {
    _context: jit_context_t
}
native_ref!(Context, _context, jit_context_t)

/// A context that is in the build phase while generating IR
#[allow(missing_copy_implementations)]
pub struct Builder {
    _context: jit_context_t
}
native_ref!(Builder, _context, jit_context_t)

impl Context {
    #[inline(always)]
    /// Create a new JIT Context
    pub fn new() -> Context {
        // NOTE(review): wraps the raw pointer from `jit_context_create()`;
        // presumably non-null on success — confirm upstream behavior.
        unsafe {
            NativeRef::from_ptr(jit_context_create())
        }
    }
    #[inline(always)]
    /// Lock the context so you can safely generate IR
    pub fn build<'a, R, F:FnOnce(&'a Builder) -> R>(&'a self, cb: F) -> R {
        unsafe {
            // Bracket the callback between build_start/build_end; the
            // `transmute` reinterprets `&Context` as `&Builder`, which the
            // identical single-field layout of the two structs relies on.
            jit_context_build_start(self.as_ptr());
            let r = cb(mem::transmute(self));
            jit_context_build_end(self.as_ptr());
            r
        }
    }
    #[inline(always)]
    /// Lock the context so you can safely generate IR in a new function on the context which is
    /// compiled for you
    pub fn build_func<'a, F:FnOnce(&UncompiledFunction<'a>)>(&'a self, signature: Type, cb: F) -> CompiledFunction<'a> {
        unsafe {
            jit_context_build_start(self.as_ptr());
            let func = UncompiledFunction::new(mem::transmute(self), signature.clone());
            // The callback receives the context transmuted to the function
            // wrapper type; `func` is compiled only after build_end.
            cb(mem::transmute(self));
            jit_context_build_end(self.as_ptr());
            func.compile()
        }
    }
}
#[unsafe_destructor]
impl Drop for Context {
    #[inline(always)]
    fn drop(&mut self) {
        // Releases the underlying libjit context when the wrapper is dropped.
        unsafe {
            jit_context_destroy(self.as_ptr());
        }
    }
}

// ---- Embedded commit title (not Rust code; preserved verbatim) ----
Implement `NoSync` & `NoSend` for `Builder`

// ---- Revision 2 (after the commit): `Builder` gains NoSync/NoSend marker
// fields (pre-1.0 `std::kinds::marker`) so it opts out of the Sync/Send
// kinds, and therefore implements `NativeRef` by hand instead of via the
// `native_ref!` macro. ----
use raw::*;
use std::mem;
use std::kinds::marker::{NoSync, NoSend};
use util::NativeRef;
use {CompiledFunction, Type, UncompiledFunction};

/// Holds all of the functions you have built and compiled. There can be
/// multiple, but normally there is only one.
pub struct Context {
    _context: jit_context_t
}
native_ref!(Context, _context, jit_context_t)

/// A context that is in the build phase while generating IR
pub struct Builder {
    _context: jit_context_t,
    // Zero-sized markers: their presence removes the auto-derived
    // Sync/Send kinds from `Builder`.
    no_sync: NoSync,
    no_send: NoSend
}
impl NativeRef for Builder {
    #[inline(always)]
    unsafe fn as_ptr(&self) -> jit_context_t {
        self._context
    }
    #[inline(always)]
    unsafe fn from_ptr(ptr:jit_context_t) -> Builder {
        Builder {
            _context: ptr,
            no_sync: NoSync,
            no_send: NoSend
        }
    }
}
impl Context {
    #[inline(always)]
    /// Create a new JIT Context
    pub fn new() -> Context {
        unsafe {
            NativeRef::from_ptr(jit_context_create())
        }
    }
    #[inline(always)]
    /// Lock the context so you can safely generate IR
    pub fn build<'a, R, F:FnOnce(&'a Builder) -> R>(&'a self, cb: F) -> R {
        unsafe {
            jit_context_build_start(self.as_ptr());
            let r = cb(mem::transmute(self));
            jit_context_build_end(self.as_ptr());
            r
        }
    }
    #[inline(always)]
    /// Lock the context so you can safely generate IR in a new function on the context which is
    /// compiled for you
    pub fn build_func<'a, F:FnOnce(&UncompiledFunction<'a>)>(&'a self, signature: Type, cb: F) -> CompiledFunction<'a> {
        unsafe {
            jit_context_build_start(self.as_ptr());
            let func = UncompiledFunction::new(mem::transmute(self), signature.clone());
            cb(mem::transmute(self));
            jit_context_build_end(self.as_ptr());
            func.compile()
        }
    }
}
#[unsafe_destructor]
impl Drop for Context {
    #[inline(always)]
    fn drop(&mut self) {
        unsafe {
            jit_context_destroy(self.as_ptr());
        }
    }
}
// Copyright 2015 The Servo Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use base::CGFloat; use color_space::CGColorSpace; use core_foundation::base::{CFRelease, CFRetain, CFTypeID}; use libc::{c_void, c_int, size_t}; use std::ptr; use std::slice; use geometry::CGRect; use image::CGImage; use foreign_types::ForeignType; #[repr(C)] pub enum CGTextDrawingMode { CGTextFill, CGTextStroke, CGTextFillStroke, CGTextInvisible, CGTextFillClip, CGTextStrokeClip, CGTextClip } foreign_type! { #[doc(hidden)] type CType = ::sys::CGContext; fn drop = |cs| CFRelease(cs as *mut _); fn clone = |p| CFRetain(p as *const _) as *mut _; pub struct CGContext; pub struct CGContextRef; } impl CGContext { pub fn type_id() -> CFTypeID { unsafe { CGContextGetTypeID() } } pub fn create_bitmap_context(data: Option<*mut c_void>, width: size_t, height: size_t, bits_per_component: size_t, bytes_per_row: size_t, space: &CGColorSpace, bitmap_info: u32) -> CGContext { unsafe { let result = CGBitmapContextCreate(data.unwrap_or(ptr::null_mut()), width, height, bits_per_component, bytes_per_row, space.as_ptr(), bitmap_info); assert!(!result.is_null()); Self::from_ptr(result) } } pub fn data(&mut self) -> &mut [u8] { unsafe { slice::from_raw_parts_mut( CGBitmapContextGetData(self.as_ptr()) as *mut u8, (self.height() * self.bytes_per_row()) as usize) } } pub fn width(&self) -> size_t { unsafe { CGBitmapContextGetWidth(self.as_ptr()) } } pub fn height(&self) -> size_t { unsafe { CGBitmapContextGetHeight(self.as_ptr()) } } pub fn bytes_per_row(&self) -> size_t { unsafe { CGBitmapContextGetBytesPerRow(self.as_ptr()) } } pub fn set_rgb_fill_color(&self, red: 
CGFloat, green: CGFloat, blue: CGFloat, alpha: CGFloat) { unsafe { CGContextSetRGBFillColor(self.as_ptr(), red, green, blue, alpha) } } pub fn set_allows_font_smoothing(&self, allows_font_smoothing: bool) { unsafe { CGContextSetAllowsFontSmoothing(self.as_ptr(), allows_font_smoothing) } } pub fn set_font_smoothing_style(&self, style: i32) { unsafe { CGContextSetFontSmoothingStyle(self.as_ptr(), style as _); } } pub fn set_should_smooth_fonts(&self, should_smooth_fonts: bool) { unsafe { CGContextSetShouldSmoothFonts(self.as_ptr(), should_smooth_fonts) } } pub fn set_allows_antialiasing(&self, allows_antialiasing: bool) { unsafe { CGContextSetAllowsAntialiasing(self.as_ptr(), allows_antialiasing) } } pub fn set_should_antialias(&self, should_antialias: bool) { unsafe { CGContextSetShouldAntialias(self.as_ptr(), should_antialias) } } pub fn set_allows_font_subpixel_quantization(&self, allows_font_subpixel_quantization: bool) { unsafe { CGContextSetAllowsFontSubpixelQuantization(self.as_ptr(), allows_font_subpixel_quantization) } } pub fn set_should_subpixel_quantize_fonts(&self, should_subpixel_quantize_fonts: bool) { unsafe { CGContextSetShouldSubpixelQuantizeFonts(self.as_ptr(), should_subpixel_quantize_fonts) } } pub fn set_allows_font_subpixel_positioning(&self, allows_font_subpixel_positioning: bool) { unsafe { CGContextSetAllowsFontSubpixelPositioning(self.as_ptr(), allows_font_subpixel_positioning) } } pub fn set_should_subpixel_position_fonts(&self, should_subpixel_position_fonts: bool) { unsafe { CGContextSetShouldSubpixelPositionFonts(self.as_ptr(), should_subpixel_position_fonts) } } pub fn set_text_drawing_mode(&self, mode: CGTextDrawingMode) { unsafe { CGContextSetTextDrawingMode(self.as_ptr(), mode) } } pub fn fill_rect(&self, rect: CGRect) { unsafe { CGContextFillRect(self.as_ptr(), rect) } } pub fn draw_image(&self, rect: CGRect, image: &CGImage) { unsafe { CGContextDrawImage(self.as_ptr(), rect, image.as_ptr()); } } pub fn create_image(&self) -> 
Option<CGImage> { let image = unsafe { CGBitmapContextCreateImage(self.as_ptr()) }; if !image.is_null() { Some(unsafe { CGImage::from_ptr(image) }) } else { None } } } #[test] fn create_bitmap_context_test() { use geometry::*; let cs = CGColorSpace::create_device_rgb(); let ctx = CGContext::create_bitmap_context(None, 16, 8, 8, 0, &cs, ::base::kCGImageAlphaPremultipliedLast); ctx.set_rgb_fill_color(1.,0.,1.,1.); ctx.fill_rect(CGRect::new(&CGPoint::new(0.,0.), &CGSize::new(8.,8.))); let img = ctx.create_image().unwrap(); assert_eq!(16, img.width()); assert_eq!(8, img.height()); assert_eq!(8, img.bits_per_component()); assert_eq!(32, img.bits_per_pixel()); let data = img.data(); assert_eq!(255, data.bytes()[0]); assert_eq!(0, data.bytes()[1]); assert_eq!(255, data.bytes()[2]); assert_eq!(255, data.bytes()[3]); } #[link(name = "CoreGraphics", kind = "framework")] extern { fn CGBitmapContextCreate(data: *mut c_void, width: size_t, height: size_t, bitsPerComponent: size_t, bytesPerRow: size_t, space: ::sys::CGColorSpaceRef, bitmapInfo: u32) -> ::sys::CGContextRef; fn CGBitmapContextGetData(context: ::sys::CGContextRef) -> *mut c_void; fn CGBitmapContextGetWidth(context: ::sys::CGContextRef) -> size_t; fn CGBitmapContextGetHeight(context: ::sys::CGContextRef) -> size_t; fn CGBitmapContextGetBytesPerRow(context: ::sys::CGContextRef) -> size_t; fn CGBitmapContextCreateImage(context: ::sys::CGContextRef) -> ::sys::CGImageRef; fn CGContextGetTypeID() -> CFTypeID; fn CGContextSetAllowsFontSmoothing(c: ::sys::CGContextRef, allowsFontSmoothing: bool); fn CGContextSetShouldSmoothFonts(c: ::sys::CGContextRef, shouldSmoothFonts: bool); fn CGContextSetFontSmoothingStyle(c: ::sys::CGContextRef, style: c_int); fn CGContextSetAllowsAntialiasing(c: ::sys::CGContextRef, allowsAntialiasing: bool); fn CGContextSetShouldAntialias(c: ::sys::CGContextRef, shouldAntialias: bool); fn CGContextSetAllowsFontSubpixelQuantization(c: ::sys::CGContextRef, allowsFontSubpixelQuantization: bool); fn 
CGContextSetShouldSubpixelQuantizeFonts(c: ::sys::CGContextRef, shouldSubpixelQuantizeFonts: bool); fn CGContextSetAllowsFontSubpixelPositioning(c: ::sys::CGContextRef, allowsFontSubpixelPositioning: bool); fn CGContextSetShouldSubpixelPositionFonts(c: ::sys::CGContextRef, shouldSubpixelPositionFonts: bool); fn CGContextSetTextDrawingMode(c: ::sys::CGContextRef, mode: CGTextDrawingMode); fn CGContextSetRGBFillColor(context: ::sys::CGContextRef, red: CGFloat, green: CGFloat, blue: CGFloat, alpha: CGFloat); fn CGContextFillRect(context: ::sys::CGContextRef, rect: CGRect); fn CGContextDrawImage(c: ::sys::CGContextRef, rect: CGRect, image: ::sys::CGImageRef); } Auto merge of #102 - pcwalton:show-glyphs, r=jdm Add bindings to `CGContextSetFont(Size)` and `CGContextShowGlyphsAtPositions`. These functions allow for a minor time savings by avoiding Core Text font creation in the Pathfinder native rasterization functionality. r? @jdm <!-- Reviewable:start --> --- This change is [<img src="https://reviewable.io/review_button.svg" height="34" align="absmiddle" alt="Reviewable"/>](https://reviewable.io/reviews/servo/core-graphics-rs/102) <!-- Reviewable:end --> // Copyright 2015 The Servo Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. 
// Safe Rust wrappers around CoreGraphics bitmap drawing contexts (CGContext),
// post-commit version. Structure: imports; the `CGTextDrawingMode` C enum;
// the `foreign_type!`-generated `CGContext`/`CGContextRef` pair whose drop and
// clone hook into CFRelease/CFRetain; an `impl CGContext` of thin unsafe
// wrappers that forward to the C API (including the new `set_font`,
// `set_font_size` and `show_glyphs_at_positions` bindings); a bitmap-context
// smoke test; and the raw `extern` FFI declarations.
use base::CGFloat; use color_space::CGColorSpace; use core_foundation::base::{CFRelease, CFRetain, CFTypeID}; use font::{CGFont, CGGlyph}; use geometry::CGPoint; use libc::{c_void, c_int, size_t}; use std::cmp; use std::ptr; use std::slice; use geometry::CGRect; use image::CGImage; use foreign_types::ForeignType; #[repr(C)] pub enum CGTextDrawingMode { CGTextFill, CGTextStroke, CGTextFillStroke, CGTextInvisible, CGTextFillClip, CGTextStrokeClip, CGTextClip } foreign_type! { #[doc(hidden)] type CType = ::sys::CGContext; fn drop = |cs| CFRelease(cs as *mut _); fn clone = |p| CFRetain(p as *const _) as *mut _; pub struct CGContext; pub struct CGContextRef; } impl CGContext { pub fn type_id() -> CFTypeID { unsafe { CGContextGetTypeID() } } pub fn create_bitmap_context(data: Option<*mut c_void>, width: size_t, height: size_t, bits_per_component: size_t, bytes_per_row: size_t, space: &CGColorSpace, bitmap_info: u32) -> CGContext { unsafe { let result = CGBitmapContextCreate(data.unwrap_or(ptr::null_mut()), width, height, bits_per_component, bytes_per_row, space.as_ptr(), bitmap_info); assert!(!result.is_null()); Self::from_ptr(result) } } pub fn data(&mut self) -> &mut [u8] { unsafe { slice::from_raw_parts_mut( CGBitmapContextGetData(self.as_ptr()) as *mut u8, (self.height() * self.bytes_per_row()) as usize) } } pub fn width(&self) -> size_t { unsafe { CGBitmapContextGetWidth(self.as_ptr()) } } pub fn height(&self) -> size_t { unsafe { CGBitmapContextGetHeight(self.as_ptr()) } } pub fn bytes_per_row(&self) -> size_t { unsafe { CGBitmapContextGetBytesPerRow(self.as_ptr()) } } pub fn set_rgb_fill_color(&self, red: CGFloat, green: CGFloat, blue: CGFloat, alpha: CGFloat) { unsafe { CGContextSetRGBFillColor(self.as_ptr(), red, green, blue, alpha) } } pub fn set_allows_font_smoothing(&self, allows_font_smoothing: bool) { unsafe { CGContextSetAllowsFontSmoothing(self.as_ptr(), allows_font_smoothing) } } pub fn set_font_smoothing_style(&self, style: i32) { unsafe { 
// Remaining methods below are all one-line unsafe forwards to the C API; the
// only non-trivial one is `show_glyphs_at_positions`, which clamps the glyph
// count to the shorter of the two input slices via `cmp::min`.
CGContextSetFontSmoothingStyle(self.as_ptr(), style as _); } } pub fn set_should_smooth_fonts(&self, should_smooth_fonts: bool) { unsafe { CGContextSetShouldSmoothFonts(self.as_ptr(), should_smooth_fonts) } } pub fn set_allows_antialiasing(&self, allows_antialiasing: bool) { unsafe { CGContextSetAllowsAntialiasing(self.as_ptr(), allows_antialiasing) } } pub fn set_should_antialias(&self, should_antialias: bool) { unsafe { CGContextSetShouldAntialias(self.as_ptr(), should_antialias) } } pub fn set_allows_font_subpixel_quantization(&self, allows_font_subpixel_quantization: bool) { unsafe { CGContextSetAllowsFontSubpixelQuantization(self.as_ptr(), allows_font_subpixel_quantization) } } pub fn set_should_subpixel_quantize_fonts(&self, should_subpixel_quantize_fonts: bool) { unsafe { CGContextSetShouldSubpixelQuantizeFonts(self.as_ptr(), should_subpixel_quantize_fonts) } } pub fn set_allows_font_subpixel_positioning(&self, allows_font_subpixel_positioning: bool) { unsafe { CGContextSetAllowsFontSubpixelPositioning(self.as_ptr(), allows_font_subpixel_positioning) } } pub fn set_should_subpixel_position_fonts(&self, should_subpixel_position_fonts: bool) { unsafe { CGContextSetShouldSubpixelPositionFonts(self.as_ptr(), should_subpixel_position_fonts) } } pub fn set_text_drawing_mode(&self, mode: CGTextDrawingMode) { unsafe { CGContextSetTextDrawingMode(self.as_ptr(), mode) } } pub fn fill_rect(&self, rect: CGRect) { unsafe { CGContextFillRect(self.as_ptr(), rect) } } pub fn draw_image(&self, rect: CGRect, image: &CGImage) { unsafe { CGContextDrawImage(self.as_ptr(), rect, image.as_ptr()); } } pub fn create_image(&self) -> Option<CGImage> { let image = unsafe { CGBitmapContextCreateImage(self.as_ptr()) }; if !image.is_null() { Some(unsafe { CGImage::from_ptr(image) }) } else { None } } pub fn set_font(&self, font: &CGFont) { unsafe { CGContextSetFont(self.as_ptr(), font.as_ptr()) } } pub fn set_font_size(&self, size: CGFloat) { unsafe { CGContextSetFontSize(self.as_ptr(), 
size) } } pub fn show_glyphs_at_positions(&self, glyphs: &[CGGlyph], positions: &[CGPoint]) { unsafe { let count = cmp::min(glyphs.len(), positions.len()); CGContextShowGlyphsAtPositions(self.as_ptr(), glyphs.as_ptr(), positions.as_ptr(), count) } } } #[test] fn create_bitmap_context_test() { use geometry::*; let cs = CGColorSpace::create_device_rgb(); let ctx = CGContext::create_bitmap_context(None, 16, 8, 8, 0, &cs, ::base::kCGImageAlphaPremultipliedLast); ctx.set_rgb_fill_color(1.,0.,1.,1.); ctx.fill_rect(CGRect::new(&CGPoint::new(0.,0.), &CGSize::new(8.,8.))); let img = ctx.create_image().unwrap(); assert_eq!(16, img.width()); assert_eq!(8, img.height()); assert_eq!(8, img.bits_per_component()); assert_eq!(32, img.bits_per_pixel()); let data = img.data(); assert_eq!(255, data.bytes()[0]); assert_eq!(0, data.bytes()[1]); assert_eq!(255, data.bytes()[2]); assert_eq!(255, data.bytes()[3]); } #[link(name = "CoreGraphics", kind = "framework")] extern { fn CGBitmapContextCreate(data: *mut c_void, width: size_t, height: size_t, bitsPerComponent: size_t, bytesPerRow: size_t, space: ::sys::CGColorSpaceRef, bitmapInfo: u32) -> ::sys::CGContextRef; fn CGBitmapContextGetData(context: ::sys::CGContextRef) -> *mut c_void; fn CGBitmapContextGetWidth(context: ::sys::CGContextRef) -> size_t; fn CGBitmapContextGetHeight(context: ::sys::CGContextRef) -> size_t; fn CGBitmapContextGetBytesPerRow(context: ::sys::CGContextRef) -> size_t; fn CGBitmapContextCreateImage(context: ::sys::CGContextRef) -> ::sys::CGImageRef; fn CGContextGetTypeID() -> CFTypeID; fn CGContextSetAllowsFontSmoothing(c: ::sys::CGContextRef, allowsFontSmoothing: bool); fn CGContextSetShouldSmoothFonts(c: ::sys::CGContextRef, shouldSmoothFonts: bool); fn CGContextSetFontSmoothingStyle(c: ::sys::CGContextRef, style: c_int); fn CGContextSetAllowsAntialiasing(c: ::sys::CGContextRef, allowsAntialiasing: bool); fn CGContextSetShouldAntialias(c: ::sys::CGContextRef, shouldAntialias: bool); fn 
CGContextSetAllowsFontSubpixelQuantization(c: ::sys::CGContextRef, allowsFontSubpixelQuantization: bool); fn CGContextSetShouldSubpixelQuantizeFonts(c: ::sys::CGContextRef, shouldSubpixelQuantizeFonts: bool); fn CGContextSetAllowsFontSubpixelPositioning(c: ::sys::CGContextRef, allowsFontSubpixelPositioning: bool); fn CGContextSetShouldSubpixelPositionFonts(c: ::sys::CGContextRef, shouldSubpixelPositionFonts: bool); fn CGContextSetTextDrawingMode(c: ::sys::CGContextRef, mode: CGTextDrawingMode); fn CGContextSetRGBFillColor(context: ::sys::CGContextRef, red: CGFloat, green: CGFloat, blue: CGFloat, alpha: CGFloat); fn CGContextFillRect(context: ::sys::CGContextRef, rect: CGRect); fn CGContextDrawImage(c: ::sys::CGContextRef, rect: CGRect, image: ::sys::CGImageRef); fn CGContextSetFont(c: ::sys::CGContextRef, font: ::sys::CGFontRef); fn CGContextSetFontSize(c: ::sys::CGContextRef, size: CGFloat); fn CGContextShowGlyphsAtPositions(c: ::sys::CGContextRef, glyphs: *const CGGlyph, positions: *const CGPoint, count: size_t); }
// File-storage module head (pre-commit version): directory walking
// (`get_file_list_from_dir` recurses; `get_file_list` returns paths relative
// to the given root, or an error if the path does not exist), the `Blob`
// record (path, mime, update timestamp, raw bytes), `get_path` (fetch a blob
// from S3 when credentials are configured, else from the Postgres `files`
// table), and `s3_client` (None unless AWS_ACCESS_KEY_ID or FORCE_S3 is set).
//! Simple module to store files in database. //! //! cratesfyi is generating more than 5 million files, they are small and mostly html files. //! They are using so many inodes and it is better to store them in database instead of //! filesystem. This module is adding files into database and retrieving them. use std::path::{PathBuf, Path}; use postgres::Connection; use rustc_serialize::json::{Json, ToJson}; use std::cmp; use std::fs; use std::io::Read; use error::Result; use failure::err_msg; use rusoto_s3::{S3, PutObjectRequest, GetObjectRequest, S3Client}; use rusoto_core::region::Region; use rusoto_credential::DefaultCredentialsProvider; use std::ffi::OsStr; const MAX_CONCURRENT_UPLOADS: usize = 1000; pub(super) static S3_BUCKET_NAME: &str = "rust-docs-rs"; fn get_file_list_from_dir<P: AsRef<Path>>(path: P, files: &mut Vec<PathBuf>) -> Result<()> { let path = path.as_ref(); for file in path.read_dir()? { let file = file?; if file.file_type()?.is_file() { files.push(file.path()); } else if file.file_type()?.is_dir() { get_file_list_from_dir(file.path(), files)?; } } Ok(()) } fn get_file_list<P: AsRef<Path>>(path: P) -> Result<Vec<PathBuf>> { let path = path.as_ref(); let mut files = Vec::new(); if !path.exists() { return Err(err_msg("File not found")); } else if path.is_file() { files.push(PathBuf::from(path.file_name().unwrap())); } else if path.is_dir() { get_file_list_from_dir(path, &mut files)?; for file_path in &mut files { // We want the paths in this list to not be {path}/bar.txt but just bar.txt *file_path = PathBuf::from(file_path.strip_prefix(path).unwrap()); } } Ok(files) } pub struct Blob { pub path: String, pub mime: String, pub date_updated: time::Timespec, pub content: Vec<u8>, } pub fn get_path(conn: &Connection, path: &str) -> Option<Blob> { if let Some(client) = s3_client() { let res = client.get_object(GetObjectRequest { bucket: S3_BUCKET_NAME.into(), key: path.into(), ..Default::default() }).sync(); let res = match res { Ok(r) => r, Err(_) => 
// Any S3 error is collapsed to None (caller treats it as "not found").
{ return None; } }; let mut b = res.body.unwrap().into_blocking_read(); let mut content = Vec::new(); b.read_to_end(&mut content).unwrap(); let last_modified = res.last_modified.unwrap(); let last_modified = time::strptime(&last_modified, "%a, %d %b %Y %H:%M:%S %Z") .unwrap_or_else(|e| panic!("failed to parse {:?} as timespec: {:?}", last_modified, e)) .to_timespec(); Some(Blob { path: path.into(), mime: res.content_type.unwrap(), date_updated: last_modified, content, }) } else { let rows = conn.query("SELECT path, mime, date_updated, content FROM files WHERE path = $1", &[&path]).unwrap(); if rows.len() == 0 { None } else { let row = rows.get(0); Some(Blob { path: row.get(0), mime: row.get(1), date_updated: row.get(2), content: row.get(3), }) } } } pub(super) fn s3_client() -> Option<S3Client> { // If AWS keys aren't configured, then presume we should use the DB exclusively // for file storage. if std::env::var_os("AWS_ACCESS_KEY_ID").is_none() && std::env::var_os("FORCE_S3").is_none() { return None; } let creds = match DefaultCredentialsProvider::new() { Ok(creds) => creds, Err(err) => { warn!("failed to retrieve AWS credentials: {}", err); return None; } }; Some(S3Client::new_with( rusoto_core::request::HttpClient::new().unwrap(), creds, std::env::var("S3_ENDPOINT").ok().map(|e| Region::Custom { name: std::env::var("S3_REGION") .unwrap_or_else(|_| "us-west-1".to_owned()), endpoint: e, }).unwrap_or(Region::UsWest1), )) } /// Store all files in a directory and return [[mimetype, filename]] as Json /// /// If there is an S3 Client configured, store files into an S3 bucket; /// otherwise, stores files into the 'files' table of the local database. /// /// The mimetype is detected using `magic`. /// /// Note that this function is used for uploading both sources /// and files generated by rustdoc. 
// NOTE(review): this span contains the rest of the pre-commit file-storage
// module, the commit message "Remove unnecessary allocation", and then the
// ENTIRE post-commit version of the same module. The commit's change:
// `detect_mime` now returns `&'static str` with the mime-correction table
// inlined (the separate `correct_mime` helper and its per-call `String`
// allocation are gone), and callers use `mime.to_owned()` only where an owned
// value is actually stored. Kept byte-identical. Some physical line breaks
// below fall inside string literals (the INSERT statement, a panic! format
// string) — an artifact of extraction, so no text may be moved.
// NOTE(review): in BOTH versions the test helper `check_mime` calls
// `detect_mime(&content.as_bytes().to_vec(), Path::new(&path))` with two
// arguments, but `detect_mime` takes only the path — as shown this test would
// not compile; confirm against the upstream repository.
pub fn add_path_into_database<P: AsRef<Path>>(conn: &Connection, prefix: &str, path: P) -> Result<Json> { use std::collections::HashMap; use futures::future::Future; let trans = conn.transaction()?; let mut file_paths_and_mimes: HashMap<PathBuf, String> = HashMap::new(); let mut rt = ::tokio::runtime::Runtime::new().unwrap(); let mut to_upload = get_file_list(&path)?; let mut batch_size = cmp::min(to_upload.len(), MAX_CONCURRENT_UPLOADS); let mut currently_uploading: Vec<_> = to_upload.drain(..batch_size).collect(); let mut attempts = 0; while !to_upload.is_empty() || !currently_uploading.is_empty() { let mut futures = Vec::new(); let client = s3_client(); for file_path in &currently_uploading { let path = Path::new(path.as_ref()).join(&file_path); // Some files have insufficient permissions (like .lock file created by cargo in // documentation directory). We are skipping this files. let mut file = match fs::File::open(path) { Ok(f) => f, Err(_) => continue, }; let mut content: Vec<u8> = Vec::new(); file.read_to_end(&mut content)?; let bucket_path = Path::new(prefix).join(&file_path); #[cfg(windows)] // On windows, we need to normalize \\ to / so the route logic works let bucket_path = path_slash::PathBufExt::to_slash(&bucket_path).unwrap(); #[cfg(not(windows))] let bucket_path = bucket_path.into_os_string().into_string().unwrap(); let mime = detect_mime(&file_path)?; if let Some(client) = &client { futures.push(client.put_object(PutObjectRequest { bucket: S3_BUCKET_NAME.into(), key: bucket_path.clone(), body: Some(content.clone().into()), content_type: Some(mime.clone()), ..Default::default() }).inspect(|_| { crate::web::metrics::UPLOADED_FILES_TOTAL.inc_by(1); })); } else { // If AWS credentials are configured, don't insert/update the database // check if file already exists in database let rows = conn.query("SELECT COUNT(*) FROM files WHERE path = $1", &[&bucket_path])?; if rows.get(0).get::<usize, i64>(0) == 0 { trans.query("INSERT INTO files (path, mime, 
content) VALUES ($1, $2, $3)", &[&bucket_path, &mime, &content])?; } else { trans.query("UPDATE files SET mime = $2, content = $3, date_updated = NOW() \ WHERE path = $1", &[&bucket_path, &mime, &content])?; } } file_paths_and_mimes.insert(file_path.clone(), mime.clone()); } if !futures.is_empty() { attempts += 1; match rt.block_on(::futures::future::join_all(futures)) { Ok(_) => { // this batch was successful, start another batch if there are still more files batch_size = cmp::min(to_upload.len(), MAX_CONCURRENT_UPLOADS); currently_uploading = to_upload.drain(..batch_size).collect(); attempts = 0; }, Err(err) => { error!("failed to upload to s3: {:?}", err); // if any futures error, leave `currently_uploading` in tact so that we can retry the batch if attempts > 2 { panic!("failed to upload 3 times, exiting"); } } } } else { batch_size = cmp::min(to_upload.len(), MAX_CONCURRENT_UPLOADS); currently_uploading = to_upload.drain(..batch_size).collect(); } } trans.commit()?; let file_list_with_mimes: Vec<(String, PathBuf)> = file_paths_and_mimes .into_iter() .map(|(file_path, mime)| (mime, file_path)) .collect(); file_list_to_json(file_list_with_mimes) } fn detect_mime(file_path: &Path) -> Result<String> { let mime = mime_guess::from_path(file_path).first_raw().map(|m| m).unwrap_or("text/plain"); correct_mime(&mime, &file_path) } fn correct_mime(mime: &str, file_path: &Path) -> Result<String> { Ok(match mime { "text/plain" | "text/troff" | "text/x-markdown" | "text/x-rust" | "text/x-toml" => { match file_path.extension().and_then(OsStr::to_str) { Some("md") => "text/markdown", Some("rs") => "text/rust", Some("markdown") => "text/markdown", Some("css") => "text/css", Some("toml") => "text/toml", Some("js") => "application/javascript", Some("json") => "application/json", _ => mime } }, "image/svg" => "image/svg+xml", _ => mime }.to_owned()) } fn file_list_to_json(file_list: Vec<(String, PathBuf)>) -> Result<Json> { let mut file_list_json: Vec<Json> = Vec::new(); for file 
in file_list { let mut v: Vec<String> = Vec::new(); v.push(file.0.clone()); v.push(file.1.into_os_string().into_string().unwrap()); file_list_json.push(v.to_json()); } Ok(file_list_json.to_json()) } pub fn move_to_s3(conn: &Connection, n: usize) -> Result<usize> { let trans = conn.transaction()?; let client = s3_client().expect("configured s3"); let rows = trans.query( &format!("SELECT path, mime, content FROM files WHERE content != E'in-s3' LIMIT {}", n), &[])?; let count = rows.len(); let mut rt = ::tokio::runtime::Runtime::new().unwrap(); let mut futures = Vec::new(); for row in &rows { let path: String = row.get(0); let mime: String = row.get(1); let content: Vec<u8> = row.get(2); let path_1 = path.clone(); futures.push(client.put_object(PutObjectRequest { bucket: S3_BUCKET_NAME.into(), key: path.clone(), body: Some(content.into()), content_type: Some(mime), ..Default::default() }).map(move |_| { path_1 }).map_err(move |e| { panic!("failed to upload to {}: {:?}", path, e) })); } use ::futures::future::Future; match rt.block_on(::futures::future::join_all(futures)) { Ok(paths) => { let statement = trans.prepare("DELETE FROM files WHERE path = $1").unwrap(); for path in paths { statement.execute(&[&path]).unwrap(); } } Err(e) => { panic!("results err: {:?}", e); } } trans.commit()?; Ok(count) } #[cfg(test)] mod test { extern crate env_logger; use std::env; use super::*; #[test] fn test_get_file_list() { let _ = env_logger::try_init(); let files = get_file_list(env::current_dir().unwrap()); assert!(files.is_ok()); assert!(files.unwrap().len() > 0); let files = get_file_list(env::current_dir().unwrap().join("Cargo.toml")).unwrap(); assert_eq!(files[0], std::path::Path::new("Cargo.toml")); } #[test] fn test_mime_types() { check_mime("/ignored", ".gitignore", "text/plain"); check_mime("[package]", "hello.toml","text/toml"); check_mime(".ok { color:red; }", "hello.css","text/css"); check_mime("var x = 1", "hello.js","application/javascript"); check_mime("<html>", 
"hello.html","text/html"); check_mime("## HELLO", "hello.hello.md","text/markdown"); check_mime("## WORLD", "hello.markdown","text/markdown"); check_mime("{}", "hello.json","application/json"); check_mime("hello world", "hello.txt","text/plain"); check_mime("//! Simple module to ...", "file.rs", "text/rust"); check_mime("<svg></svg>", "important.svg", "image/svg+xml"); } fn check_mime(content: &str, path: &str, expected_mime: &str) { let detected_mime = detect_mime(&content.as_bytes().to_vec(), Path::new(&path)); let detected_mime = detected_mime.expect("no mime was given"); assert_eq!(detected_mime, expected_mime); } } Remove unnecessary allocation //! Simple module to store files in database. //! //! cratesfyi is generating more than 5 million files, they are small and mostly html files. //! They are using so many inodes and it is better to store them in database instead of //! filesystem. This module is adding files into database and retrieving them. use std::path::{PathBuf, Path}; use postgres::Connection; use rustc_serialize::json::{Json, ToJson}; use std::cmp; use std::fs; use std::io::Read; use error::Result; use failure::err_msg; use rusoto_s3::{S3, PutObjectRequest, GetObjectRequest, S3Client}; use rusoto_core::region::Region; use rusoto_credential::DefaultCredentialsProvider; use std::ffi::OsStr; const MAX_CONCURRENT_UPLOADS: usize = 1000; pub(super) static S3_BUCKET_NAME: &str = "rust-docs-rs"; fn get_file_list_from_dir<P: AsRef<Path>>(path: P, files: &mut Vec<PathBuf>) -> Result<()> { let path = path.as_ref(); for file in path.read_dir()? 
// Post-commit version of the module continues from here (it began at the
// `//! Simple module` header just above); its logic matches the pre-commit
// version except for the `detect_mime` -> `&'static str` change noted at the
// top of this span.
{ let file = file?; if file.file_type()?.is_file() { files.push(file.path()); } else if file.file_type()?.is_dir() { get_file_list_from_dir(file.path(), files)?; } } Ok(()) } fn get_file_list<P: AsRef<Path>>(path: P) -> Result<Vec<PathBuf>> { let path = path.as_ref(); let mut files = Vec::new(); if !path.exists() { return Err(err_msg("File not found")); } else if path.is_file() { files.push(PathBuf::from(path.file_name().unwrap())); } else if path.is_dir() { get_file_list_from_dir(path, &mut files)?; for file_path in &mut files { // We want the paths in this list to not be {path}/bar.txt but just bar.txt *file_path = PathBuf::from(file_path.strip_prefix(path).unwrap()); } } Ok(files) } pub struct Blob { pub path: String, pub mime: String, pub date_updated: time::Timespec, pub content: Vec<u8>, } pub fn get_path(conn: &Connection, path: &str) -> Option<Blob> { if let Some(client) = s3_client() { let res = client.get_object(GetObjectRequest { bucket: S3_BUCKET_NAME.into(), key: path.into(), ..Default::default() }).sync(); let res = match res { Ok(r) => r, Err(_) => { return None; } }; let mut b = res.body.unwrap().into_blocking_read(); let mut content = Vec::new(); b.read_to_end(&mut content).unwrap(); let last_modified = res.last_modified.unwrap(); let last_modified = time::strptime(&last_modified, "%a, %d %b %Y %H:%M:%S %Z") .unwrap_or_else(|e| panic!("failed to parse {:?} as timespec: {:?}", last_modified, e)) .to_timespec(); Some(Blob { path: path.into(), mime: res.content_type.unwrap(), date_updated: last_modified, content, }) } else { let rows = conn.query("SELECT path, mime, date_updated, content FROM files WHERE path = $1", &[&path]).unwrap(); if rows.len() == 0 { None } else { let row = rows.get(0); Some(Blob { path: row.get(0), mime: row.get(1), date_updated: row.get(2), content: row.get(3), }) } } } pub(super) fn s3_client() -> Option<S3Client> { // If AWS keys aren't configured, then presume we should use the DB exclusively // for file storage. 
if std::env::var_os("AWS_ACCESS_KEY_ID").is_none() && std::env::var_os("FORCE_S3").is_none() { return None; } let creds = match DefaultCredentialsProvider::new() { Ok(creds) => creds, Err(err) => { warn!("failed to retrieve AWS credentials: {}", err); return None; } }; Some(S3Client::new_with( rusoto_core::request::HttpClient::new().unwrap(), creds, std::env::var("S3_ENDPOINT").ok().map(|e| Region::Custom { name: std::env::var("S3_REGION") .unwrap_or_else(|_| "us-west-1".to_owned()), endpoint: e, }).unwrap_or(Region::UsWest1), )) } /// Store all files in a directory and return [[mimetype, filename]] as Json /// /// If there is an S3 Client configured, store files into an S3 bucket; /// otherwise, stores files into the 'files' table of the local database. /// /// The mimetype is detected using `magic`. /// /// Note that this function is used for uploading both sources /// and files generated by rustdoc. pub fn add_path_into_database<P: AsRef<Path>>(conn: &Connection, prefix: &str, path: P) -> Result<Json> { use std::collections::HashMap; use futures::future::Future; let trans = conn.transaction()?; let mut file_paths_and_mimes: HashMap<PathBuf, String> = HashMap::new(); let mut rt = ::tokio::runtime::Runtime::new().unwrap(); let mut to_upload = get_file_list(&path)?; let mut batch_size = cmp::min(to_upload.len(), MAX_CONCURRENT_UPLOADS); let mut currently_uploading: Vec<_> = to_upload.drain(..batch_size).collect(); let mut attempts = 0; while !to_upload.is_empty() || !currently_uploading.is_empty() { let mut futures = Vec::new(); let client = s3_client(); for file_path in &currently_uploading { let path = Path::new(path.as_ref()).join(&file_path); // Some files have insufficient permissions (like .lock file created by cargo in // documentation directory). We are skipping this files. 
let mut file = match fs::File::open(path) { Ok(f) => f, Err(_) => continue, }; let mut content: Vec<u8> = Vec::new(); file.read_to_end(&mut content)?; let bucket_path = Path::new(prefix).join(&file_path); #[cfg(windows)] // On windows, we need to normalize \\ to / so the route logic works let bucket_path = path_slash::PathBufExt::to_slash(&bucket_path).unwrap(); #[cfg(not(windows))] let bucket_path = bucket_path.into_os_string().into_string().unwrap(); let mime = detect_mime(&file_path)?; if let Some(client) = &client { futures.push(client.put_object(PutObjectRequest { bucket: S3_BUCKET_NAME.into(), key: bucket_path.clone(), body: Some(content.clone().into()), content_type: Some(mime.to_owned()), ..Default::default() }).inspect(|_| { crate::web::metrics::UPLOADED_FILES_TOTAL.inc_by(1); })); } else { // If AWS credentials are configured, don't insert/update the database // check if file already exists in database let rows = conn.query("SELECT COUNT(*) FROM files WHERE path = $1", &[&bucket_path])?; if rows.get(0).get::<usize, i64>(0) == 0 { trans.query("INSERT INTO files (path, mime, content) VALUES ($1, $2, $3)", &[&bucket_path, &mime, &content])?; } else { trans.query("UPDATE files SET mime = $2, content = $3, date_updated = NOW() \ WHERE path = $1", &[&bucket_path, &mime, &content])?; } } file_paths_and_mimes.insert(file_path.clone(), mime.to_owned()); } if !futures.is_empty() { attempts += 1; match rt.block_on(::futures::future::join_all(futures)) { Ok(_) => { // this batch was successful, start another batch if there are still more files batch_size = cmp::min(to_upload.len(), MAX_CONCURRENT_UPLOADS); currently_uploading = to_upload.drain(..batch_size).collect(); attempts = 0; }, Err(err) => { error!("failed to upload to s3: {:?}", err); // if any futures error, leave `currently_uploading` in tact so that we can retry the batch if attempts > 2 { panic!("failed to upload 3 times, exiting"); } } } } else { batch_size = cmp::min(to_upload.len(), 
MAX_CONCURRENT_UPLOADS); currently_uploading = to_upload.drain(..batch_size).collect(); } } trans.commit()?; let file_list_with_mimes: Vec<(String, PathBuf)> = file_paths_and_mimes .into_iter() .map(|(file_path, mime)| (mime, file_path)) .collect(); file_list_to_json(file_list_with_mimes) } fn detect_mime(file_path: &Path) -> Result<&'static str> { let mime = mime_guess::from_path(file_path).first_raw().map(|m| m).unwrap_or("text/plain"); Ok(match mime { "text/plain" | "text/troff" | "text/x-markdown" | "text/x-rust" | "text/x-toml" => { match file_path.extension().and_then(OsStr::to_str) { Some("md") => "text/markdown", Some("rs") => "text/rust", Some("markdown") => "text/markdown", Some("css") => "text/css", Some("toml") => "text/toml", Some("js") => "application/javascript", Some("json") => "application/json", _ => mime } }, "image/svg" => "image/svg+xml", _ => mime }) } fn file_list_to_json(file_list: Vec<(String, PathBuf)>) -> Result<Json> { let mut file_list_json: Vec<Json> = Vec::new(); for file in file_list { let mut v: Vec<String> = Vec::new(); v.push(file.0.clone()); v.push(file.1.into_os_string().into_string().unwrap()); file_list_json.push(v.to_json()); } Ok(file_list_json.to_json()) } pub fn move_to_s3(conn: &Connection, n: usize) -> Result<usize> { let trans = conn.transaction()?; let client = s3_client().expect("configured s3"); let rows = trans.query( &format!("SELECT path, mime, content FROM files WHERE content != E'in-s3' LIMIT {}", n), &[])?; let count = rows.len(); let mut rt = ::tokio::runtime::Runtime::new().unwrap(); let mut futures = Vec::new(); for row in &rows { let path: String = row.get(0); let mime: String = row.get(1); let content: Vec<u8> = row.get(2); let path_1 = path.clone(); futures.push(client.put_object(PutObjectRequest { bucket: S3_BUCKET_NAME.into(), key: path.clone(), body: Some(content.into()), content_type: Some(mime), ..Default::default() }).map(move |_| { path_1 }).map_err(move |e| { panic!("failed to upload to {}: {:?}", 
path, e) })); } use ::futures::future::Future; match rt.block_on(::futures::future::join_all(futures)) { Ok(paths) => { let statement = trans.prepare("DELETE FROM files WHERE path = $1").unwrap(); for path in paths { statement.execute(&[&path]).unwrap(); } } Err(e) => { panic!("results err: {:?}", e); } } trans.commit()?; Ok(count) } #[cfg(test)] mod test { extern crate env_logger; use std::env; use super::*; #[test] fn test_get_file_list() { let _ = env_logger::try_init(); let files = get_file_list(env::current_dir().unwrap()); assert!(files.is_ok()); assert!(files.unwrap().len() > 0); let files = get_file_list(env::current_dir().unwrap().join("Cargo.toml")).unwrap(); assert_eq!(files[0], std::path::Path::new("Cargo.toml")); } #[test] fn test_mime_types() { check_mime("/ignored", ".gitignore", "text/plain"); check_mime("[package]", "hello.toml","text/toml"); check_mime(".ok { color:red; }", "hello.css","text/css"); check_mime("var x = 1", "hello.js","application/javascript"); check_mime("<html>", "hello.html","text/html"); check_mime("## HELLO", "hello.hello.md","text/markdown"); check_mime("## WORLD", "hello.markdown","text/markdown"); check_mime("{}", "hello.json","application/json"); check_mime("hello world", "hello.txt","text/plain"); check_mime("//! Simple module to ...", "file.rs", "text/rust"); check_mime("<svg></svg>", "important.svg", "image/svg+xml"); } fn check_mime(content: &str, path: &str, expected_mime: &str) { let detected_mime = detect_mime(&content.as_bytes().to_vec(), Path::new(&path)); let detected_mime = detected_mime.expect("no mime was given"); assert_eq!(detected_mime, expected_mime); } }
// rav1e deblocking-filter helpers (head of the module).
// deblock_adjusted_level: resolves the effective loop-filter level for one
// block/plane/direction: picks the frame-level strength index (Y-vertical,
// Y-horizontal, U, V), optionally applies the per-block delta (single or
// multi, scaled by block_delta_shift, clamped to [0, MAX_LOOP_FILTER]), then
// optionally applies reference-frame and mode-type deltas scaled by
// `level >> 5`, clamping again. deblock_left / deblock_up: fetch the
// neighboring Block to the left/above, rounding the mi coordinate to odd
// values for subsampled chroma (`x | xdec`, `y | ydec`) exactly as the spec
// describes; callers guarantee we are not on the frame edge.
// Copyright (c) 2018-2019, The rav1e contributors. All rights reserved // // This source code is subject to the terms of the BSD 2 Clause License and // the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License // was not distributed with this source code in the LICENSE file, you can // obtain it at www.aomedia.org/license/software. If the Alliance for Open // Media Patent License 1.0 was not distributed with this source code in the // PATENTS file, you can obtain it at www.aomedia.org/license/patent. use crate::api::FrameType; use crate::context::*; use crate::encoder::FrameInvariants; use crate::encoder::FrameState; use crate::frame::*; use crate::partition::RefType::*; use crate::predict::PredictionMode::*; use crate::quantize::*; use crate::util::Pixel; use crate::util::{clamp, ILog}; use crate::DeblockState; use std::cmp; use std::sync::Arc; fn deblock_adjusted_level( deblock: &DeblockState, block: &Block, pli: usize, vertical: bool, ) -> usize { let idx = if pli == 0 { if vertical { 0 } else { 1 } } else { pli + 1 }; let level = if deblock.block_deltas_enabled { // By-block filter strength delta, if the feature is active. let block_delta = if deblock.block_delta_multi { block.deblock_deltas[idx] << deblock.block_delta_shift } else { block.deblock_deltas[0] << deblock.block_delta_shift }; // Add to frame-specified filter strength (Y-vertical, Y-horizontal, U, V) clamp(block_delta + deblock.levels[idx] as i8, 0, MAX_LOOP_FILTER as i8) as u8 } else { deblock.levels[idx] }; // if fi.seg_feaure_active { // rav1e does not yet support segments or segment features // } // Are delta modifiers for specific references and modes active? If so, add them too. 
if deblock.deltas_enabled { let mode = block.mode; let reference = block.ref_frames[0]; let mode_type = if mode >= NEARESTMV && mode != GLOBALMV && mode != GLOBAL_GLOBALMV { 1 } else { 0 }; let l5 = level >> 5; clamp( level as i32 + ((deblock.ref_deltas[reference.to_index()] as i32) << l5) + if reference == INTRA_FRAME { 0 } else { (deblock.mode_deltas[mode_type] as i32) << l5 }, 0, MAX_LOOP_FILTER as i32, ) as usize } else { level as usize } } fn deblock_left<'a, T: Pixel>( blocks: &'a FrameBlocks, in_bo: PlaneBlockOffset, p: &Plane<T>, ) -> &'a Block { let xdec = p.cfg.xdec; let ydec = p.cfg.ydec; // This little bit of weirdness is straight out of the spec; // subsampled chroma uses odd mi row/col let bo = PlaneBlockOffset(BlockOffset { x: in_bo.0.x | xdec, y: in_bo.0.y | ydec }); // We already know we're not at the upper/left corner, so prev_block is in frame &blocks[bo.with_offset(-1 << xdec, 0)] } fn deblock_up<'a, T: Pixel>( blocks: &'a FrameBlocks, in_bo: PlaneBlockOffset, p: &Plane<T>, ) -> &'a Block { let xdec = p.cfg.xdec; let ydec = p.cfg.ydec; // This little bit of weirdness is straight out of the spec; // subsampled chroma uses odd mi row/col let bo = PlaneBlockOffset(BlockOffset { x: in_bo.0.x | xdec, y: in_bo.0.y | ydec }); // We already know we're not at the upper/left corner, so prev_block is in frame &blocks[bo.with_offset(0, -1 << ydec)] } // Must be called on a tx edge, and not on a frame edge. This is enforced above the call. 
/// Chooses the deblocking filter length (0 = no filter, 4/6/8/14 taps) for
/// the edge between `block` and `prev_block`, limited by both blocks'
/// transform sizes and the plane (luma max 14, chroma max 6).
fn deblock_size<T: Pixel>(
  block: &Block, prev_block: &Block, p: &Plane<T>, pli: usize, vertical: bool,
  block_edge: bool,
) -> usize {
  let xdec = p.cfg.xdec;
  let ydec = p.cfg.ydec;

  // filter application is conditional on skip and block edge
  if !(block_edge
    || !block.skip
    || !prev_block.skip
    || block.ref_frames[0] == INTRA_FRAME
    || prev_block.ref_frames[0] == INTRA_FRAME)
  {
    0
  } else {
    // Chroma uses the largest tx size that fits the (subsampled) block.
    let (txsize, prev_txsize) = if pli == 0 {
      (block.txsize, prev_block.txsize)
    } else {
      (
        block.bsize.largest_chroma_tx_size(xdec, ydec),
        prev_block.bsize.largest_chroma_tx_size(xdec, ydec),
      )
    };
    // Vertical edges are limited by tx width, horizontal edges by tx height.
    let (tx_n, prev_tx_n) = if vertical {
      (cmp::max(txsize.width_mi(), 1), cmp::max(prev_txsize.width_mi(), 1))
    } else {
      (cmp::max(txsize.height_mi(), 1), cmp::max(prev_txsize.height_mi(), 1))
    };
    cmp::min(
      if pli == 0 { 14 } else { 6 },
      cmp::min(tx_n, prev_tx_n) << MI_SIZE_LOG2,
    )
  }
}

// Must be called on a tx edge
/// Effective filter level for an edge: the current block's adjusted level,
/// falling back to the previous block's level when it is zero.
fn deblock_level(
  deblock: &DeblockState, block: &Block, prev_block: &Block, pli: usize,
  vertical: bool,
) -> usize {
  let level = deblock_adjusted_level(deblock, block, pli, vertical);
  if level == 0 {
    deblock_adjusted_level(deblock, prev_block, pli, vertical)
  } else {
    level
  }
}

// four taps, 4 outputs (two are trivial)
// Narrow filter that modifies only p0/q0; `shift` = bd - 8 scales the
// clamp ranges for high bit depth.
fn filter_narrow2_4(
  p1: i32, p0: i32, q0: i32, q1: i32, shift: usize,
) -> [i32; 4] {
  let filter0 = clamp(p1 - q1, -128 << shift, (128 << shift) - 1);
  let filter1 =
    clamp(filter0 + 3 * (q0 - p0) + 4, -128 << shift, (128 << shift) - 1) >> 3;
  // be certain our optimization removing a clamp is sound
  debug_assert!({
    let base =
      clamp(filter0 + 3 * (q0 - p0), -128 << shift, (128 << shift) - 1);
    let test = clamp(base + 4, -128 << shift, (128 << shift) - 1) >> 3;
    filter1 == test
  });
  let filter2 =
    clamp(filter0 + 3 * (q0 - p0) + 3, -128 << shift, (128 << shift) - 1) >> 3;
  // be certain our optimization removing a clamp is sound
  debug_assert!({
    let base =
      clamp(filter0 + 3 * (q0 - p0), -128 << shift, (128 << shift) - 1);
    let test = clamp(base + 3, -128 << shift, (128 << shift) - 1) >> 3;
    filter2 == test
  });
  [
    p1,
    clamp(p0 + filter2, 0, (256 << shift) - 1),
    clamp(q0 - filter1, 0, (256 << shift) - 1),
    q1,
  ]
}

// six taps, 6 outputs (four are trivial)
fn filter_narrow2_6(
  p2: i32, p1: i32, p0: i32, q0: i32, q1: i32, q2: i32, shift: usize,
) -> [i32; 6] {
  let x = filter_narrow2_4(p1, p0, q0, q1, shift);
  [p2, x[0], x[1], x[2], x[3], q2]
}

// 12 taps, 12 outputs (ten are trivial)
fn filter_narrow2_12(
  p5: i32, p4: i32, p3: i32, p2: i32, p1: i32, p0: i32, q0: i32, q1: i32,
  q2: i32, q3: i32, q4: i32, q5: i32, shift: usize,
) -> [i32; 12] {
  let x = filter_narrow2_4(p1, p0, q0, q1, shift);
  [p5, p4, p3, p2, x[0], x[1], x[2], x[3], q2, q3, q4, q5]
}

// four taps, 4 outputs
// Narrow filter that also modifies p1/q1 (used when the edge is not a
// "high edge variance" case).
fn filter_narrow4_4(
  p1: i32, p0: i32, q0: i32, q1: i32, shift: usize,
) -> [i32; 4] {
  let filter1 =
    clamp(3 * (q0 - p0) + 4, -128 << shift, (128 << shift) - 1) >> 3;
  // be certain our optimization removing a clamp is sound
  debug_assert!({
    let base = clamp(3 * (q0 - p0), -128 << shift, (128 << shift) - 1);
    let test = clamp(base + 4, -128 << shift, (128 << shift) - 1) >> 3;
    filter1 == test
  });
  let filter2 =
    clamp(3 * (q0 - p0) + 3, -128 << shift, (128 << shift) - 1) >> 3;
  // be certain our optimization removing a clamp is sound
  debug_assert!({
    let base = clamp(3 * (q0 - p0), -128 << shift, (128 << shift) - 1);
    let test = clamp(base + 3, -128 << shift, (128 << shift) - 1) >> 3;
    filter2 == test
  });
  let filter3 = (filter1 + 1) >> 1;
  [
    clamp(p1 + filter3, 0, (256 << shift) - 1),
    clamp(p0 + filter2, 0, (256 << shift) - 1),
    clamp(q0 - filter1, 0, (256 << shift) - 1),
    clamp(q1 - filter3, 0, (256 << shift) - 1),
  ]
}

// six taps, 6 outputs (two are trivial)
fn filter_narrow4_6(
  p2: i32, p1: i32, p0: i32, q0: i32, q1: i32, q2: i32, shift: usize,
) -> [i32; 6] {
  let x = filter_narrow4_4(p1, p0, q0, q1, shift);
  [p2, x[0], x[1], x[2], x[3], q2]
}

// 12 taps, 12 outputs (eight are trivial)
fn filter_narrow4_12(
  p5: i32, p4: i32, p3: i32, p2: i32, p1: i32, p0: i32, q0: i32, q1: i32,
  q2: i32, q3: i32, q4: i32, q5: i32, shift: usize,
) -> [i32; 12] {
  let x = filter_narrow4_4(p1, p0, q0, q1, shift);
  [p5, p4, p3, p2, x[0], x[1], x[2], x[3], q2, q3, q4, q5]
}

// six taps, 4 outputs
#[rustfmt::skip]
const fn filter_wide6_4(
  p2: i32, p1: i32, p0: i32, q0: i32, q1: i32, q2: i32
) -> [i32; 4] {
  [
    (p2*3 + p1*2 + p0*2 + q0   + (1<<2)) >> 3,
    (p2   + p1*2 + p0*2 + q0*2 + q1   + (1<<2)) >> 3,
    (p1   + p0*2 + q0*2 + q1*2 + q2 + (1<<2)) >> 3,
    (p0   + q0*2 + q1*2 + q2*3 + (1<<2)) >> 3
  ]
}

// eight taps, 6 outputs
#[rustfmt::skip]
const fn filter_wide8_6(
  p3: i32, p2: i32, p1: i32, p0: i32, q0: i32, q1: i32, q2: i32, q3: i32
) -> [i32; 6] {
  [
    (p3*3 + p2*2 + p1 + p0 + q0 + (1<<2)) >> 3,
    (p3*2 + p2 + p1*2 + p0 + q0 + q1 + (1<<2)) >> 3,
    (p3 + p2 + p1 + p0*2 + q0 + q1 + q2 +(1<<2)) >> 3,
    (p2 + p1 + p0 + q0*2 + q1 + q2 + q3 + (1<<2)) >> 3,
    (p1 + p0 + q0 + q1*2 + q2 + q3*2 + (1<<2)) >> 3,
    (p0 + q0 + q1 + q2*2 + q3*3 + (1<<2)) >> 3
  ]
}

// 12 taps, 12 outputs (six are trivial)
const fn filter_wide8_12(
  p5: i32, p4: i32, p3: i32, p2: i32, p1: i32, p0: i32, q0: i32, q1: i32,
  q2: i32, q3: i32, q4: i32, q5: i32,
) -> [i32; 12] {
  let x = filter_wide8_6(p3, p2, p1, p0, q0, q1, q2, q3);
  [p5, p4, p3, x[0], x[1], x[2], x[3], x[4], x[5], q3, q4, q5]
}

// fourteen taps, 12 outputs
#[rustfmt::skip]
const fn filter_wide14_12(
  p6: i32, p5: i32, p4: i32, p3: i32, p2: i32, p1: i32, p0: i32,
  q0: i32, q1: i32, q2: i32, q3: i32, q4: i32, q5: i32, q6: i32
) -> [i32; 12] {
  [
    (p6*7 + p5*2 + p4*2 + p3 + p2 + p1 + p0 + q0 + (1<<3)) >> 4,
    (p6*5 + p5*2 + p4*2 + p3*2 + p2 + p1 + p0 + q0 + q1 + (1<<3)) >> 4,
    (p6*4 + p5 + p4*2 + p3*2 + p2*2 + p1 + p0 + q0 + q1 + q2 + (1<<3)) >> 4,
    (p6*3 + p5 + p4 + p3*2 + p2*2 + p1*2 + p0 + q0 + q1 + q2 + q3 + (1<<3)) >> 4,
    (p6*2 + p5 + p4 + p3 + p2*2 + p1*2 + p0*2 + q0 + q1 + q2 + q3 + q4 + (1<<3)) >> 4,
    (p6 + p5 + p4 + p3 + p2 + p1*2 + p0*2 + q0*2 + q1 + q2 + q3 + q4 + q5 + (1<<3)) >> 4,
    (p5 + p4 + p3 + p2 + p1 + p0*2 + q0*2 + q1*2 + q2 + q3 + q4 + q5 + q6 + (1<<3)) >> 4,
    (p4 + p3 + p2 + p1 + p0 + q0*2 + q1*2 + q2*2 + q3 + q4 + q5 + q6*2 + (1<<3)) >> 4,
    (p3 + p2 + p1 + p0 + q0 + q1*2 + q2*2 + q3*2 + q4 + q5 + q6*3 + (1<<3)) >> 4,
    (p2 + p1 + p0 + q0 + q1 + q2*2 + q3*2 + q4*2 + q5 + q6*4 + (1<<3)) >> 4,
    (p1 + p0 + q0 + q1 + q2 + q3*2 + q4*2 + q5*2 + q6*5 + (1<<3)) >> 4,
    (p0 + q0 + q1 + q2 + q3 + q4*2 + q5*2 + q6*7 + (1<<3)) >> 4
  ]
}

/// Writes the filtered values `src` back into row `y` of `dst`, starting at
/// column `x` (used after filtering a vertical edge).
#[inline]
fn copy_horizontal<T: Pixel>(
  dst: &mut PlaneMutSlice<'_, T>, x: usize, y: usize, src: &[i32],
) {
  let row = &mut dst[y][x..];
  for (dst, src) in row.iter_mut().take(src.len()).zip(src) {
    *dst = T::cast_from(*src);
  }
}

/// Writes the filtered values `src` back into column `x` of `dst`, starting
/// at row `y` (used after filtering a horizontal edge).
#[inline]
fn copy_vertical<T: Pixel>(
  dst: &mut PlaneMutSlice<'_, T>, x: usize, y: usize, src: &[i32],
) {
  for (i, v) in src.iter().enumerate() {
    let p = &mut dst[y + i][x];
    *p = T::cast_from(*v);
  }
}

/// Sum of squared differences between two candidate pixel runs,
/// truncated to `b.len()` elements.
fn stride_sse(a: &[i32], b: &[i32]) -> i64 {
  let mut acc: i32 = 0;
  for (a, b) in a.iter().take(b.len()).zip(b) {
    acc += (*a - *b) * (*a - *b)
  }
  acc as i64
}

// Conversions between filter level and the spec's limit/blimit/thresh
// quantities; the `_level_to_*` forms document the inverse mapping and are
// intentionally unused.
const fn _level_to_limit(level: i32, shift: usize) -> i32 {
  level << shift
}

const fn limit_to_level(limit: i32, shift: usize) -> i32 {
  (limit + (1 << shift) - 1) >> shift
}

const fn _level_to_blimit(level: i32, shift: usize) -> i32 {
  (3 * level + 4) << shift
}

const fn blimit_to_level(blimit: i32, shift: usize) -> i32 {
  (((blimit + (1 << shift) - 1) >> shift) - 2) / 3
}

const fn _level_to_thresh(level: i32, shift: usize) -> i32 {
  level >> 4 << shift
}

const fn thresh_to_level(thresh: i32, shift: usize) -> i32 {
  (thresh + (1 << shift) - 1) >> shift << 4
}

/// Lowest filter level at which this edge counts as "high edge variance"
/// (selects the weaker narrow2 filter over narrow4).
fn nhev4(p1: i32, p0: i32, q0: i32, q1: i32, shift: usize) -> usize {
  thresh_to_level(cmp::max((p1 - p0).abs(), (q1 - q0).abs()), shift) as usize
}

/// Lowest filter level at which the 4-tap filter would be applied at all.
fn mask4(p1: i32, p0: i32, q0: i32, q1: i32, shift: usize) -> usize {
  cmp::max(
    limit_to_level(cmp::max((p1 - p0).abs(), (q1 - q0).abs()), shift),
    blimit_to_level((p0 - q0).abs() * 2 + (p1 - q1).abs() / 2, shift),
  ) as usize
}

/// Applies the 4-tap filter decision to one pixel run; returns the filtered
/// run, or None when the edge is left untouched at this level.
#[inline]
fn deblock_size4_inner(
  [p1, p0, q0, q1]: [i32; 4], level: usize, bd: usize,
) -> Option<[i32; 4]> {
  if mask4(p1, p0, q0, q1, bd - 8) <= level {
    let x = if nhev4(p1, p0, q0, q1, bd - 8) <= level {
      filter_narrow4_4(p1, p0, q0, q1, bd - 8)
    } else {
      filter_narrow2_4(p1, p0, q0, q1, bd - 8)
    };
    Some(x)
  } else {
    None
  }
}

// Assumes rec[0] is set 2 taps back from the edge
fn deblock_v_size4<T: Pixel>(
  rec: &mut PlaneMutSlice<'_, T>, level: usize, bd: usize,
) {
  for y in 0..4 {
    let p = &rec[y];
    let vals = [p[0].as_(), p[1].as_(), p[2].as_(), p[3].as_()];
    if let Some(data) = deblock_size4_inner(vals, level, bd) {
      copy_horizontal(rec, 0, y, &data);
    }
  }
}

// Assumes rec[0] is set 2 taps back from the edge
fn deblock_h_size4<T: Pixel>(
  rec: &mut PlaneMutSlice<'_, T>, level: usize, bd: usize,
) {
  for x in 0..4 {
    let vals =
      [rec[0][x].as_(), rec[1][x].as_(), rec[2][x].as_(), rec[3][x].as_()];
    if let Some(data) = deblock_size4_inner(vals, level, bd) {
      copy_vertical(rec, x, 0, &data);
    }
  }
}

// Assumes rec[0] and src[0] are set 2 taps back from the edge.
// Accesses four taps, accumulates four pixels into the tally
fn sse_size4<T: Pixel>(
  rec: &PlaneSlice<'_, T>, src: &PlaneSlice<'_, T>,
  tally: &mut [i64; MAX_LOOP_FILTER + 2], horizontal_p: bool, bd: usize,
) {
  for i in 0..4 {
    let (p1, p0, q0, q1, a) = if horizontal_p {
      (
        rec[0][i].as_(),
        rec[1][i].as_(),
        rec[2][i].as_(),
        rec[3][i].as_(),
        [src[0][i].as_(), src[1][i].as_(), src[2][i].as_(), src[3][i].as_()],
      )
    } else {
      (
        rec[i][0].as_(),
        rec[i][1].as_(),
        rec[i][2].as_(),
        rec[i][3].as_(),
        [src[i][0].as_(), src[i][1].as_(), src[i][2].as_(), src[i][3].as_()],
      )
    };

    // three possibilities: no filter, narrow2 and narrow4
    // All possibilities produce four outputs
    let none: [_; 4] = [p1, p0, q0, q1];
    let narrow2 = filter_narrow2_4(p1, p0, q0, q1, bd - 8);
    let narrow4 = filter_narrow4_4(p1, p0, q0, q1, bd - 8);

    // mask4 sets the dividing line for filter vs no filter
    // nhev4 sets the dividing line between narrow2 and narrow4
    let mask =
      clamp(mask4(p1, p0, q0, q1, bd - 8), 1, MAX_LOOP_FILTER + 1) as usize;
    let nhev =
      clamp(nhev4(p1, p0, q0, q1, bd - 8), mask, MAX_LOOP_FILTER + 1) as usize;

    // sse for each; short-circuit the 'special' no-op cases.
    let sse_none = stride_sse(&a, &none);
    let sse_narrow2 =
      if nhev != mask { stride_sse(&a, &narrow2) } else { sse_none };
    let sse_narrow4 = if nhev <= MAX_LOOP_FILTER {
      stride_sse(&a, &narrow4)
    } else {
      sse_none
    };

    // accumulate possible filter values into the tally
    // level 0 is a special case
    tally[0] += sse_none;
    tally[mask] -= sse_none;
    tally[mask] += sse_narrow2;
    tally[nhev] -= sse_narrow2;
    tally[nhev] += sse_narrow4;
  }
}

/// Lowest filter level at which the 6-tap filter would be applied at all.
fn mask6(
  p2: i32, p1: i32, p0: i32, q0: i32, q1: i32, q2: i32, shift: usize,
) -> usize {
  cmp::max(
    limit_to_level(
      cmp::max(
        (p2 - p1).abs(),
        cmp::max((p1 - p0).abs(), cmp::max((q2 - q1).abs(), (q1 - q0).abs())),
      ),
      shift,
    ),
    blimit_to_level((p0 - q0).abs() * 2 + (p1 - q1).abs() / 2, shift),
  ) as usize
}

/// Flatness measure over six taps; small values select the wide filter.
fn flat6(p2: i32, p1: i32, p0: i32, q0: i32, q1: i32, q2: i32) -> usize {
  cmp::max(
    (p1 - p0).abs(),
    cmp::max((q1 - q0).abs(), cmp::max((p2 - p0).abs(), (q2 - q0).abs())),
  ) as usize
}

/// Applies the 6-tap filter decision to one pixel run; returns the four
/// filtered center pixels, or None when left untouched at this level.
#[inline]
fn deblock_size6_inner(
  [p2, p1, p0, q0, q1, q2]: [i32; 6], level: usize, bd: usize,
) -> Option<[i32; 4]> {
  if mask6(p2, p1, p0, q0, q1, q2, bd - 8) <= level {
    let flat = 1 << (bd - 8);
    let x = if flat6(p2, p1, p0, q0, q1, q2) <= flat {
      filter_wide6_4(p2, p1, p0, q0, q1, q2)
    } else if nhev4(p1, p0, q0, q1, bd - 8) <= level {
      filter_narrow4_4(p1, p0, q0, q1, bd - 8)
    } else {
      filter_narrow2_4(p1, p0, q0, q1, bd - 8)
    };
    Some(x)
  } else {
    None
  }
}

// Assumes slice[0] is set 3 taps back from the edge
fn deblock_v_size6<T: Pixel>(
  rec: &mut PlaneMutSlice<'_, T>, level: usize, bd: usize,
) {
  for y in 0..4 {
    let p = &rec[y];
    let vals =
      [p[0].as_(), p[1].as_(), p[2].as_(), p[3].as_(), p[4].as_(), p[5].as_()];
    if let Some(data) = deblock_size6_inner(vals, level, bd) {
      copy_horizontal(rec, 1, y, &data);
    }
  }
}

// Assumes slice[0] is set 3 taps back from the edge
fn deblock_h_size6<T: Pixel>(
  rec: &mut PlaneMutSlice<'_, T>, level: usize, bd: usize,
) {
  for x in 0..4 {
    let vals = [
      rec[0][x].as_(),
      rec[1][x].as_(),
      rec[2][x].as_(),
      rec[3][x].as_(),
      rec[4][x].as_(),
      rec[5][x].as_(),
    ];
    if let Some(data) = deblock_size6_inner(vals, level, bd) {
      copy_vertical(rec, x, 1, &data);
    }
  }
}

// Assumes rec[0] and src[0] are set 3 taps back from the edge.
// Accesses six taps, accumulates four pixels into the tally
fn sse_size6<T: Pixel>(
  rec: &PlaneSlice<'_, T>, src: &PlaneSlice<'_, T>,
  tally: &mut [i64; MAX_LOOP_FILTER + 2], horizontal_p: bool, bd: usize,
) {
  let flat = 1 << (bd - 8);
  for i in 0..4 {
    let (p2, p1, p0, q0, q1, q2, a) = if horizontal_p {
      // six taps
      (
        rec[0][i].as_(),
        rec[1][i].as_(),
        rec[2][i].as_(),
        rec[3][i].as_(),
        rec[4][i].as_(),
        rec[5][i].as_(),
        // four pixels to compare so offset one forward
        [src[1][i].as_(), src[2][i].as_(), src[3][i].as_(), src[4][i].as_()],
      )
    } else {
      // six taps
      (
        rec[i][0].as_(),
        rec[i][1].as_(),
        rec[i][2].as_(),
        rec[i][3].as_(),
        rec[i][4].as_(),
        rec[i][5].as_(),
        // four pixels to compare so offset one forward
        [src[i][1].as_(), src[i][2].as_(), src[i][3].as_(), src[i][4].as_()],
      )
    };

    // Four possibilities: no filter, wide6, narrow2 and narrow4
    // All possibilities produce four outputs
    let none: [_; 4] = [p1, p0, q0, q1];
    let wide6 = filter_wide6_4(p2, p1, p0, q0, q1, q2);
    let narrow2 = filter_narrow2_4(p1, p0, q0, q1, bd - 8);
    let narrow4 = filter_narrow4_4(p1, p0, q0, q1, bd - 8);

    // mask6 sets the dividing line for filter vs no filter
    // flat6 decides between wide and narrow filters (unrelated to level)
    // nhev4 sets the dividing line between narrow2 and narrow4
    let mask =
      clamp(mask6(p2, p1, p0, q0, q1, q2, bd - 8), 1, MAX_LOOP_FILTER + 1)
        as usize;
    let flatp = flat6(p2, p1, p0, q0, q1, q2) <= flat;
    let nhev =
      clamp(nhev4(p1, p0, q0, q1, bd - 8), mask, MAX_LOOP_FILTER + 1) as usize;

    // sse for each; short-circuit the 'special' no-op cases.
    let sse_none = stride_sse(&a, &none);
    let sse_wide6 = if flatp && mask <= MAX_LOOP_FILTER {
      stride_sse(&a, &wide6)
    } else {
      sse_none
    };
    let sse_narrow2 =
      if !flatp && nhev != mask { stride_sse(&a, &narrow2) } else { sse_none };
    let sse_narrow4 = if !flatp && nhev <= MAX_LOOP_FILTER {
      stride_sse(&a, &narrow4)
    } else {
      sse_none
    };

    // accumulate possible filter values into the tally
    tally[0] += sse_none;
    tally[mask] -= sse_none;
    if flatp {
      tally[mask] += sse_wide6;
    } else {
      tally[mask] += sse_narrow2;
      tally[nhev] -= sse_narrow2;
      tally[nhev] += sse_narrow4;
    }
  }
}

/// Lowest filter level at which the 8-tap filter would be applied at all.
fn mask8(
  p3: i32, p2: i32, p1: i32, p0: i32, q0: i32, q1: i32, q2: i32, q3: i32,
  shift: usize,
) -> usize {
  cmp::max(
    limit_to_level(
      cmp::max(
        (p3 - p2).abs(),
        cmp::max(
          (p2 - p1).abs(),
          cmp::max(
            (p1 - p0).abs(),
            cmp::max(
              (q3 - q2).abs(),
              cmp::max((q2 - q1).abs(), (q1 - q0).abs()),
            ),
          ),
        ),
      ),
      shift,
    ),
    blimit_to_level((p0 - q0).abs() * 2 + (p1 - q1).abs() / 2, shift),
  ) as usize
}

/// Flatness measure over eight taps; small values select the wide filter.
fn flat8(
  p3: i32, p2: i32, p1: i32, p0: i32, q0: i32, q1: i32, q2: i32, q3: i32,
) -> usize {
  cmp::max(
    (p1 - p0).abs(),
    cmp::max(
      (q1 - q0).abs(),
      cmp::max(
        (p2 - p0).abs(),
        cmp::max((q2 - q0).abs(), cmp::max((p3 - p0).abs(), (q3 - q0).abs())),
      ),
    ),
  ) as usize
}

/// Applies the 8-tap filter decision to one pixel run; returns the six
/// filtered center pixels, or None when left untouched at this level.
#[inline]
fn deblock_size8_inner(
  [p3, p2, p1, p0, q0, q1, q2, q3]: [i32; 8], level: usize, bd: usize,
) -> Option<[i32; 6]> {
  if mask8(p3, p2, p1, p0, q0, q1, q2, q3, bd - 8) <= level {
    let flat = 1 << (bd - 8);
    let x = if flat8(p3, p2, p1, p0, q0, q1, q2, q3) <= flat {
      filter_wide8_6(p3, p2, p1, p0, q0, q1, q2, q3)
    } else if nhev4(p1, p0, q0, q1, bd - 8) <= level {
      filter_narrow4_6(p2, p1, p0, q0, q1, q2, bd - 8)
    } else {
      filter_narrow2_6(p2, p1, p0, q0, q1, q2, bd - 8)
    };
    Some(x)
  } else {
    None
  }
}

// Assumes rec[0] is set 4 taps back from the edge
fn deblock_v_size8<T: Pixel>(
  rec: &mut PlaneMutSlice<'_, T>, level: usize, bd: usize,
) {
  for y in 0..4 {
    let p = &rec[y];
    let vals = [
      p[0].as_(),
      p[1].as_(),
      p[2].as_(),
      p[3].as_(),
      p[4].as_(),
      p[5].as_(),
      p[6].as_(),
      p[7].as_(),
    ];
    if let Some(data) = deblock_size8_inner(vals, level, bd) {
      copy_horizontal(rec, 1, y, &data);
    }
  }
}

// Assumes rec[0] is set 4 taps back from the edge
fn deblock_h_size8<T: Pixel>(
  rec: &mut PlaneMutSlice<'_, T>, level: usize, bd: usize,
) {
  for x in 0..4 {
    let vals = [
      rec[0][x].as_(),
      rec[1][x].as_(),
      rec[2][x].as_(),
      rec[3][x].as_(),
      rec[4][x].as_(),
      rec[5][x].as_(),
      rec[6][x].as_(),
      rec[7][x].as_(),
    ];
    if let Some(data) = deblock_size8_inner(vals, level, bd) {
      copy_vertical(rec, x, 1, &data);
    }
  }
}

// Assumes rec[0] and src[0] are set 4 taps back from the edge.
// Accesses eight taps, accumulates six pixels into the tally
fn sse_size8<T: Pixel>(
  rec: &PlaneSlice<'_, T>, src: &PlaneSlice<'_, T>,
  tally: &mut [i64; MAX_LOOP_FILTER + 2], horizontal_p: bool, bd: usize,
) {
  let flat = 1 << (bd - 8);
  for i in 0..4 {
    let (p3, p2, p1, p0, q0, q1, q2, q3, a) = if horizontal_p {
      // eight taps
      (
        rec[0][i].as_(),
        rec[1][i].as_(),
        rec[2][i].as_(),
        rec[3][i].as_(),
        rec[4][i].as_(),
        rec[5][i].as_(),
        rec[6][i].as_(),
        rec[7][i].as_(),
        // six pixels to compare so offset one forward
        [
          src[1][i].as_(),
          src[2][i].as_(),
          src[3][i].as_(),
          src[4][i].as_(),
          src[5][i].as_(),
          src[6][i].as_(),
        ],
      )
    } else {
      // eight taps
      (
        rec[i][0].as_(),
        rec[i][1].as_(),
        rec[i][2].as_(),
        rec[i][3].as_(),
        rec[i][4].as_(),
        rec[i][5].as_(),
        rec[i][6].as_(),
        rec[i][7].as_(),
        // six pixels to compare so offset one forward
        [
          src[i][1].as_(),
          src[i][2].as_(),
          src[i][3].as_(),
          src[i][4].as_(),
          src[i][5].as_(),
          src[i][6].as_(),
        ],
      )
    };

    // Four possibilities: no filter, wide8, narrow2 and narrow4
    let none: [_; 6] = [p2, p1, p0, q0, q1, q2];
    let wide8: [_; 6] = filter_wide8_6(p3, p2, p1, p0, q0, q1, q2, q3);
    let narrow2: [_; 6] = filter_narrow2_6(p2, p1, p0, q0, q1, q2, bd - 8);
    let narrow4: [_; 6] = filter_narrow4_6(p2, p1, p0, q0, q1, q2, bd - 8);

    // mask8 sets the dividing line for filter vs no filter
    // flat8 decides between wide and narrow filters (unrelated to level)
    // nhev4 sets the dividing line between narrow2 and narrow4
    let mask = clamp(
      mask8(p3, p2, p1, p0, q0, q1, q2, q3, bd - 8),
      1,
      MAX_LOOP_FILTER + 1,
    ) as usize;
    let flatp = flat8(p3, p2, p1, p0, q0, q1, q2, q3) <= flat;
    let nhev =
      clamp(nhev4(p1, p0, q0, q1, bd - 8), mask, MAX_LOOP_FILTER + 1) as usize;

    // sse for each; short-circuit the 'special' no-op cases.
    let sse_none = stride_sse(&a, &none);
    let sse_wide8 = if flatp && mask <= MAX_LOOP_FILTER {
      stride_sse(&a, &wide8)
    } else {
      sse_none
    };
    let sse_narrow2 =
      if !flatp && nhev != mask { stride_sse(&a, &narrow2) } else { sse_none };
    let sse_narrow4 = if !flatp && nhev <= MAX_LOOP_FILTER {
      stride_sse(&a, &narrow4)
    } else {
      sse_none
    };

    // accumulate possible filter values into the tally
    tally[0] += sse_none;
    tally[mask] -= sse_none;
    if flatp {
      tally[mask] += sse_wide8;
    } else {
      tally[mask] += sse_narrow2;
      tally[nhev] -= sse_narrow2;
      tally[nhev] += sse_narrow4;
    }
  }
}

/// Flatness measure over the outer taps of the 14-tap span; small values
/// allow the full-width filter.
fn flat14_outer(
  p6: i32, p5: i32, p4: i32, p0: i32, q0: i32, q4: i32, q5: i32, q6: i32,
) -> usize {
  cmp::max(
    (p4 - p0).abs(),
    cmp::max(
      (q4 - q0).abs(),
      cmp::max(
        (p5 - p0).abs(),
        cmp::max((q5 - q0).abs(), cmp::max((p6 - p0).abs(), (q6 - q0).abs())),
      ),
    ),
  ) as usize
}

/// Applies the 14-tap filter decision to one pixel run; returns the twelve
/// filtered center pixels, or None when left untouched at this level.
#[inline]
fn deblock_size14_inner(
  [p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6]: [i32; 14],
  level: usize, bd: usize,
) -> Option<[i32; 12]> {
  // 'mask' test
  if mask8(p3, p2, p1, p0, q0, q1, q2, q3, bd - 8) <= level {
    let flat = 1 << (bd - 8);
    // inner flatness test
    let x = if flat8(p3, p2, p1, p0, q0, q1, q2, q3) <= flat {
      // outer flatness test
      if flat14_outer(p6, p5, p4, p0, q0, q4, q5, q6) <= flat {
        // sufficient flatness across 14 pixel width; run full-width filter
        filter_wide14_12(
          p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6,
        )
      } else {
        // only flat in inner area, run 8-tap
        filter_wide8_12(p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5)
      }
    } else if nhev4(p1, p0, q0, q1, bd - 8) <= level {
      // not flat, run narrow filter
      filter_narrow4_12(p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, bd - 8)
    } else {
      filter_narrow2_12(p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, bd - 8)
    };
    Some(x)
  } else {
    None
  }
}

// Assumes rec[0] is set 7 taps back from the edge
fn deblock_v_size14<T: Pixel>(
  rec: &mut PlaneMutSlice<'_, T>, level: usize, bd: usize,
) {
  for y in 0..4 {
    let p = &rec[y];
    let vals = [
      p[0].as_(),
      p[1].as_(),
      p[2].as_(),
      p[3].as_(),
      p[4].as_(),
      p[5].as_(),
      p[6].as_(),
      p[7].as_(),
      p[8].as_(),
      p[9].as_(),
      p[10].as_(),
      p[11].as_(),
      p[12].as_(),
      p[13].as_(),
    ];
    if let Some(data) = deblock_size14_inner(vals, level, bd) {
      copy_horizontal(rec, 1, y, &data);
    }
  }
}

// Assumes rec[0] is set 7 taps back from the edge
fn deblock_h_size14<T: Pixel>(
  rec: &mut PlaneMutSlice<'_, T>, level: usize, bd: usize,
) {
  for x in 0..4 {
    let vals = [
      rec[0][x].as_(),
      rec[1][x].as_(),
      rec[2][x].as_(),
      rec[3][x].as_(),
      rec[4][x].as_(),
      rec[5][x].as_(),
      rec[6][x].as_(),
      rec[7][x].as_(),
      rec[8][x].as_(),
      rec[9][x].as_(),
      rec[10][x].as_(),
      rec[11][x].as_(),
      rec[12][x].as_(),
      rec[13][x].as_(),
    ];
    if let Some(data) = deblock_size14_inner(vals, level, bd) {
      copy_vertical(rec, x, 1, &data);
    }
  }
}

// Assumes rec[0] and src[0] are set 7 taps back from the edge.
// Accesses fourteen taps, accumulates twelve pixels into the tally fn sse_size14<T: Pixel>( rec: &PlaneSlice<'_, T>, src: &PlaneSlice<'_, T>, tally: &mut [i64; MAX_LOOP_FILTER + 2], horizontal_p: bool, bd: usize, ) { let flat = 1 << (bd - 8); for i in 0..4 { let (p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, a) = if horizontal_p { // 14 taps (rec[0][i].as_(), rec[1][i].as_(), rec[2][i].as_(), rec[3][i].as_(), rec[4][i].as_(), rec[5][i].as_(), rec[6][i].as_(), rec[7][i].as_(), rec[8][i].as_(), rec[9][i].as_(), rec[10][i].as_(), rec[11][i].as_(), rec[12][i].as_(), rec[13][i].as_(), // 12 pixels to compare so offset one forward [src[1][i].as_(), src[2][i].as_(), src[3][i].as_(), src[4][i].as_(), src[5][i].as_(), src[6][i].as_(), src[7][i].as_(), src[8][i].as_(), src[9][i].as_(), src[10][i].as_(), src[11][i].as_(), src[12][i].as_(), ]) } else { // 14 taps (rec[i][0].as_(), rec[i][1].as_(), rec[i][2].as_(), rec[i][3].as_(), rec[i][4].as_(), rec[i][5].as_(), rec[i][6].as_(), rec[i][7].as_(), rec[i][8].as_(), rec[i][9].as_(), rec[i][10].as_(), rec[i][11].as_(), rec[i][12].as_(), rec[i][13].as_(), // 12 pixels to compare so offset one forward [src[i][1].as_(), src[i][2].as_(), src[i][3].as_(), src[i][4].as_(), src[i][5].as_(), src[i][6].as_(), src[i][7].as_(), src[i][8].as_(), src[i][9].as_(), src[i][10].as_(), src[i][11].as_(), src[i][12].as_(), ]) }; // Five possibilities: no filter, wide14, wide8, narrow2 and narrow4 let none: [i32; 12] = [p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5]; let wide14 = filter_wide14_12(p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6); let wide8 = filter_wide8_12(p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5); let narrow2 = filter_narrow2_12( p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, bd - 8, ); let narrow4 = filter_narrow4_12( p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, bd - 8, ); // mask8 sets the dividing line for filter vs no filter // flat8 decides between wide and narrow filters (unrelated to level) // 
flat14 decides between wide14 and wide8 filters // nhev4 sets the dividing line between narrow2 and narrow4 let mask = clamp( mask8(p3, p2, p1, p0, q0, q1, q2, q3, bd - 8), 1, MAX_LOOP_FILTER + 1, ) as usize; let flat8p = flat8(p3, p2, p1, p0, q0, q1, q2, q3) <= flat; let flat14p = flat14_outer(p6, p5, p4, p0, q0, q4, q5, q6) <= flat; let nhev = clamp(nhev4(p1, p0, q0, q1, bd - 8), mask, MAX_LOOP_FILTER + 1) as usize; // sse for each; short-circuit the 'special' no-op cases. let sse_none = stride_sse(&a, &none); let sse_wide8 = if flat8p && !flat14p && mask <= MAX_LOOP_FILTER { stride_sse(&a, &wide8) } else { sse_none }; let sse_wide14 = if flat8p && flat14p && mask <= MAX_LOOP_FILTER { stride_sse(&a, &wide14) } else { sse_none }; let sse_narrow2 = if !flat8p && nhev != mask { stride_sse(&a, &narrow2) } else { sse_none }; let sse_narrow4 = if !flat8p && nhev <= MAX_LOOP_FILTER { stride_sse(&a, &narrow4) } else { sse_none }; // accumulate possible filter values into the tally tally[0] += sse_none; tally[mask] -= sse_none; if flat8p { if flat14p { tally[mask] += sse_wide14; } else { tally[mask] += sse_wide8; } } else { tally[mask] += sse_narrow2; tally[nhev] -= sse_narrow2; tally[nhev] += sse_narrow4; } } } fn filter_v_edge<T: Pixel>( deblock: &DeblockState, blocks: &FrameBlocks, bo: PlaneBlockOffset, p: &mut Plane<T>, pli: usize, bd: usize, xdec: usize, ydec: usize, ) { let block = &blocks[bo]; let txsize = if pli == 0 { block.txsize } else { block.bsize.largest_chroma_tx_size(xdec, ydec) }; let tx_edge = bo.0.x >> xdec & (txsize.width_mi() - 1) == 0; if tx_edge { let prev_block = deblock_left(blocks, bo, p); let block_edge = bo.0.x & (block.n4_w - 1) == 0; let filter_size = deblock_size(block, prev_block, p, pli, true, block_edge); if filter_size > 0 { let level = deblock_level(deblock, block, prev_block, pli, true); if level > 0 { let po = bo.plane_offset(&p.cfg); let mut plane_slice = p.mut_slice(po); plane_slice.x -= (filter_size >> 1) as isize; match 
filter_size { 4 => { deblock_v_size4(&mut plane_slice, level, bd); } 6 => { deblock_v_size6(&mut plane_slice, level, bd); } 8 => { deblock_v_size8(&mut plane_slice, level, bd); } 14 => { deblock_v_size14(&mut plane_slice, level, bd); } _ => unreachable!(), } } } } } fn sse_v_edge<T: Pixel>( blocks: &FrameBlocks, bo: PlaneBlockOffset, rec_plane: &Plane<T>, src_plane: &Plane<T>, tally: &mut [i64; MAX_LOOP_FILTER + 2], pli: usize, bd: usize, xdec: usize, ydec: usize, ) { let block = &blocks[bo]; let txsize = if pli == 0 { block.txsize } else { block.bsize.largest_chroma_tx_size(xdec, ydec) }; let tx_edge = bo.0.x >> xdec & (txsize.width_mi() - 1) == 0; if tx_edge { let prev_block = deblock_left(blocks, bo, rec_plane); let block_edge = bo.0.x & (block.n4_w - 1) == 0; let filter_size = deblock_size(block, prev_block, rec_plane, pli, true, block_edge); if filter_size > 0 { let po = { let mut po = bo.plane_offset(&rec_plane.cfg); // rec and src have identical subsampling po.x -= (filter_size >> 1) as isize; po }; let rec_slice = rec_plane.slice(po); let src_slice = src_plane.slice(po); match filter_size { 4 => { sse_size4(&rec_slice, &src_slice, tally, false, bd); } 6 => { sse_size6(&rec_slice, &src_slice, tally, false, bd); } 8 => { sse_size8(&rec_slice, &src_slice, tally, false, bd); } 14 => { sse_size14(&rec_slice, &src_slice, tally, false, bd); } _ => unreachable!(), } } } } fn filter_h_edge<T: Pixel>( deblock: &DeblockState, blocks: &FrameBlocks, bo: PlaneBlockOffset, p: &mut Plane<T>, pli: usize, bd: usize, xdec: usize, ydec: usize, ) { let block = &blocks[bo]; let txsize = if pli == 0 { block.txsize } else { block.bsize.largest_chroma_tx_size(xdec, ydec) }; let tx_edge = bo.0.y >> ydec & (txsize.height_mi() - 1) == 0; if tx_edge { let prev_block = deblock_up(blocks, bo, p); let block_edge = bo.0.y & (block.n4_h - 1) == 0; let filter_size = deblock_size(block, prev_block, p, pli, false, block_edge); if filter_size > 0 { let level = deblock_level(deblock, block, 
prev_block, pli, false); if level > 0 { let po = bo.plane_offset(&p.cfg); let mut plane_slice = p.mut_slice(po); plane_slice.y -= (filter_size >> 1) as isize; match filter_size { 4 => { deblock_h_size4(&mut plane_slice, level, bd); } 6 => { deblock_h_size6(&mut plane_slice, level, bd); } 8 => { deblock_h_size8(&mut plane_slice, level, bd); } 14 => { deblock_h_size14(&mut plane_slice, level, bd); } _ => unreachable!(), } } } } } fn sse_h_edge<T: Pixel>( blocks: &FrameBlocks, bo: PlaneBlockOffset, rec_plane: &Plane<T>, src_plane: &Plane<T>, tally: &mut [i64; MAX_LOOP_FILTER + 2], pli: usize, bd: usize, xdec: usize, ydec: usize, ) { let block = &blocks[bo]; let txsize = if pli == 0 { block.txsize } else { block.bsize.largest_chroma_tx_size(xdec, ydec) }; let tx_edge = bo.0.y >> ydec & (txsize.height_mi() - 1) == 0; if tx_edge { let prev_block = deblock_up(blocks, bo, rec_plane); let block_edge = bo.0.y & (block.n4_h - 1) == 0; let filter_size = deblock_size(block, prev_block, rec_plane, pli, true, block_edge); if filter_size > 0 { let po = { let mut po = bo.plane_offset(&rec_plane.cfg); // rec and src have identical subsampling po.y -= (filter_size >> 1) as isize; po }; let rec_slice = rec_plane.slice(po); let src_slice = src_plane.slice(po); match filter_size { 4 => { sse_size4(&rec_slice, &src_slice, tally, true, bd); } 6 => { sse_size6(&rec_slice, &src_slice, tally, true, bd); } 8 => { sse_size8(&rec_slice, &src_slice, tally, true, bd); } 14 => { sse_size14(&rec_slice, &src_slice, tally, true, bd); } _ => unreachable!(), } } } } // Deblocks all edges, vertical and horizontal, in a single plane pub fn deblock_plane<T: Pixel>( fi: &FrameInvariants<T>, deblock: &DeblockState, p: &mut Plane<T>, pli: usize, blocks: &FrameBlocks, ) { let xdec = p.cfg.xdec; let ydec = p.cfg.ydec; let bd = fi.sequence.bit_depth; match pli { 0 => { if deblock.levels[0] == 0 && deblock.levels[1] == 0 { return; } } 1 => { if deblock.levels[2] == 0 { return; } } 2 => { if deblock.levels[3] == 
0 { return; } } _ => return, } // Deblocking happens in 4x4 (luma) units; luma x,y are clipped to // the *crop frame* by 4x4 block. Rounding is to handle chroma // fenceposts here instead of throughout the code. let cols = (((fi.width + MI_SIZE - 1) >> MI_SIZE_LOG2) + (1 << xdec >> 1)) >> xdec << xdec; // Clippy can go suck an egg let rows = (((fi.height + MI_SIZE - 1) >> MI_SIZE_LOG2) + (1 << ydec >> 1)) >> ydec << ydec; // Clippy can go suck an egg // vertical edge filtering leads horizonal by one full MI-sized // row (and horizontal filtering doesn't happen along the upper // edge). Unroll to avoid corner-cases. if rows > 0 { for x in (1 << xdec..cols).step_by(1 << xdec) { filter_v_edge( deblock, blocks, PlaneBlockOffset(BlockOffset { x, y: 0 }), p, pli, bd, xdec, ydec, ); } if rows > 1 << ydec { for x in (1 << xdec..cols).step_by(1 << xdec) { filter_v_edge( deblock, blocks, PlaneBlockOffset(BlockOffset { x, y: 1 << ydec }), p, pli, bd, xdec, ydec, ); } } } // filter rows where vertical and horizontal edge filtering both // happen (horizontal edge filtering lags vertical by one row). for y in ((2 << ydec)..rows).step_by(1 << ydec) { // Check for vertical edge at first MI block boundary on this row if cols > 1 << xdec { filter_v_edge( deblock, blocks, PlaneBlockOffset(BlockOffset { x: 1 << xdec, y }), p, pli, bd, xdec, ydec, ); } // run the rest of the row with both vertical and horizontal edge filtering. // Horizontal lags vertical edge by one row and two columns. 
for x in (2 << xdec..cols).step_by(1 << xdec) { filter_v_edge( deblock, blocks, PlaneBlockOffset(BlockOffset { x, y }), p, pli, bd, xdec, ydec, ); filter_h_edge( deblock, blocks, PlaneBlockOffset(BlockOffset { x: x - (2 << xdec), y: y - (1 << ydec), }), p, pli, bd, xdec, ydec, ); } // ..and the last two horizontal edges for the row if cols > 2 << xdec { filter_h_edge( deblock, blocks, PlaneBlockOffset(BlockOffset { x: cols - (2 << xdec), y: y - (1 << ydec), }), p, pli, bd, xdec, ydec, ); if cols > 1 << xdec { filter_h_edge( deblock, blocks, PlaneBlockOffset(BlockOffset { x: cols - (1 << xdec), y: y - (1 << ydec), }), p, pli, bd, xdec, ydec, ); } } } // Last horizontal row, vertical is already complete if rows > 1 << ydec { for x in (0..cols).step_by(1 << xdec) { filter_h_edge( deblock, blocks, PlaneBlockOffset(BlockOffset { x, y: rows - (1 << ydec) }), p, pli, bd, xdec, ydec, ); } } } // sse count of all edges in a single plane, accumulates into vertical and horizontal counts fn sse_plane<T: Pixel>( fi: &FrameInvariants<T>, rec: &Plane<T>, src: &Plane<T>, v_sse: &mut [i64; MAX_LOOP_FILTER + 2], h_sse: &mut [i64; MAX_LOOP_FILTER + 2], pli: usize, blocks: &FrameBlocks, ) { let xdec = rec.cfg.xdec; let ydec = rec.cfg.ydec; // Deblocking happens in 4x4 (luma) units; luma x,y are clipped to // the *crop frame* by 4x4 block. let cols = cmp::min(blocks.cols, (fi.width + MI_SIZE - 1) >> MI_SIZE_LOG2); let rows = cmp::min(blocks.rows, (fi.height + MI_SIZE - 1) >> MI_SIZE_LOG2); let bd = fi.sequence.bit_depth; // No horizontal edge filtering along top of frame for x in (1 << xdec..cols).step_by(1 << xdec) { sse_v_edge( blocks, PlaneBlockOffset(BlockOffset { x, y: 0 }), rec, src, v_sse, pli, bd, xdec, ydec, ); } // Unlike actual filtering, we're counting horizontal and vertical // as separable cases. No need to lag the horizontal processing // behind vertical. 
for y in (1 << ydec..rows).step_by(1 << ydec) { // No vertical filtering along left edge of frame sse_h_edge( blocks, PlaneBlockOffset(BlockOffset { x: 0, y }), rec, src, h_sse, pli, bd, xdec, ydec, ); for x in (1 << xdec..cols).step_by(1 << xdec) { sse_v_edge( blocks, PlaneBlockOffset(BlockOffset { x, y }), rec, src, v_sse, pli, bd, xdec, ydec, ); sse_h_edge( blocks, PlaneBlockOffset(BlockOffset { x, y }), rec, src, h_sse, pli, bd, xdec, ydec, ); } } } // Deblocks all edges in all planes of a frame pub fn deblock_filter_frame<T: Pixel>( fi: &FrameInvariants<T>, fs: &mut FrameState<T>, blocks: &FrameBlocks, ) { let fs_rec = Arc::make_mut(&mut fs.rec); for pli in 0..PLANES { deblock_plane(fi, &fs.deblock, &mut fs_rec.planes[pli], pli, blocks); } } fn sse_optimize<T: Pixel>( fi: &FrameInvariants<T>, fs: &mut FrameState<T>, blocks: &FrameBlocks, ) { // i64 allows us to accumulate a total of ~ 35 bits worth of pixels assert!( fs.input.planes[0].cfg.width.ilog() + fs.input.planes[0].cfg.height.ilog() < 35 ); for pli in 0..PLANES { let mut v_tally: [i64; MAX_LOOP_FILTER + 2] = [0; MAX_LOOP_FILTER + 2]; let mut h_tally: [i64; MAX_LOOP_FILTER + 2] = [0; MAX_LOOP_FILTER + 2]; sse_plane( fi, &fs.rec.planes[pli], &fs.input.planes[pli], &mut v_tally, &mut h_tally, pli, blocks, ); for i in 1..=MAX_LOOP_FILTER { v_tally[i] += v_tally[i - 1]; h_tally[i] += h_tally[i - 1]; } match pli { 0 => { let mut best_v = 999; let mut best_h = 999; for i in 0..=MAX_LOOP_FILTER { if best_v == 999 || v_tally[best_v] > v_tally[i] { best_v = i; }; if best_h == 999 || h_tally[best_h] > h_tally[i] { best_h = i; }; } fs.deblock.levels[0] = best_v as u8; fs.deblock.levels[1] = best_h as u8; } 1 | 2 => { let mut best = 999; for i in 0..=MAX_LOOP_FILTER { if best == 999 || v_tally[best] + h_tally[best] > v_tally[i] + h_tally[i] { best = i; }; } fs.deblock.levels[pli + 1] = best as u8; } _ => unreachable!(), } } } pub fn deblock_filter_optimize<T: Pixel>( fi: &FrameInvariants<T>, fs: &mut FrameState<T>, 
blocks: &FrameBlocks, ) { if fi.config.speed_settings.fast_deblock { let q = ac_q(fi.base_q_idx, 0, fi.sequence.bit_depth) as i32; let level = clamp( match fi.sequence.bit_depth { 8 => { if fi.frame_type == FrameType::KEY { (q * 17563 - 421_574 + (1 << 18 >> 1)) >> 18 } else { (q * 6017 + 650_707 + (1 << 18 >> 1)) >> 18 } } 10 => { if fi.frame_type == FrameType::KEY { ((q * 20723 + 4_060_632 + (1 << 20 >> 1)) >> 20) - 4 } else { (q * 20723 + 4_060_632 + (1 << 20 >> 1)) >> 20 } } 12 => { if fi.frame_type == FrameType::KEY { ((q * 20723 + 16_242_526 + (1 << 22 >> 1)) >> 22) - 4 } else { (q * 20723 + 16_242_526 + (1 << 22 >> 1)) >> 22 } } _ => unreachable!(), }, 0, MAX_LOOP_FILTER as i32, ) as u8; fs.deblock.levels[0] = level; fs.deblock.levels[1] = level; fs.deblock.levels[2] = level; fs.deblock.levels[3] = level; } else { sse_optimize(fi, fs, blocks); } } Rustfmt for deblocking fix, no functional change. // Copyright (c) 2018-2019, The rav1e contributors. All rights reserved // // This source code is subject to the terms of the BSD 2 Clause License and // the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License // was not distributed with this source code in the LICENSE file, you can // obtain it at www.aomedia.org/license/software. If the Alliance for Open // Media Patent License 1.0 was not distributed with this source code in the // PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
use crate::api::FrameType;
use crate::context::*;
use crate::encoder::FrameInvariants;
use crate::encoder::FrameState;
use crate::frame::*;
use crate::partition::RefType::*;
use crate::predict::PredictionMode::*;
use crate::quantize::*;
use crate::util::Pixel;
use crate::util::{clamp, ILog};
use crate::DeblockState;

use std::cmp;
use std::sync::Arc;

// Computes the effective deblocking filter strength for one block, plane and
// edge direction: starts from the frame-level strength and applies the
// optional per-block delta, then (below) the per-reference / per-mode deltas.
fn deblock_adjusted_level(
  deblock: &DeblockState, block: &Block, pli: usize, vertical: bool,
) -> usize {
  // Strength index: luma keeps separate vertical/horizontal levels (0 and 1),
  // chroma planes each use a single level (2 for pli==1, 3 for pli==2).
  let idx = if pli == 0 {
    if vertical {
      0
    } else {
      1
    }
  } else {
    pli + 1
  };

  let level = if deblock.block_deltas_enabled {
    // By-block filter strength delta, if the feature is active.
    // block_delta_multi selects one delta per strength index vs. a single
    // shared delta in slot 0.
    let block_delta = if deblock.block_delta_multi {
      block.deblock_deltas[idx] << deblock.block_delta_shift
    } else {
      block.deblock_deltas[0] << deblock.block_delta_shift
    };

    // Add to frame-specified filter strength (Y-vertical, Y-horizontal, U, V)
    clamp(block_delta + deblock.levels[idx] as i8, 0, MAX_LOOP_FILTER as i8)
      as u8
  } else {
    deblock.levels[idx]
  };

  // if fi.seg_feature_active {
  // rav1e does not yet support segments or segment features
  // }

  // Are delta modifiers for specific references and modes active? If so, add them too.
  if deblock.deltas_enabled {
    let mode = block.mode;
    let reference = block.ref_frames[0];
    // Inter modes other than the global-motion modes use mode-delta slot 1;
    // everything else (intra, GLOBALMV, GLOBAL_GLOBALMV) uses slot 0.
    let mode_type =
      if mode >= NEARESTMV && mode != GLOBALMV && mode != GLOBAL_GLOBALMV {
        1
      } else {
        0
      };
    // Deltas are shifted left by level >> 5, i.e. doubled once the base
    // level reaches 32 (MAX_LOOP_FILTER is 6 bits).
    let l5 = level >> 5;
    clamp(
      level as i32
        + ((deblock.ref_deltas[reference.to_index()] as i32) << l5)
        + if reference == INTRA_FRAME {
          0
        } else {
          (deblock.mode_deltas[mode_type] as i32) << l5
        },
      0,
      MAX_LOOP_FILTER as i32,
    ) as usize
  } else {
    level as usize
  }
}

// Returns the block immediately left of `in_bo`, for vertical-edge filtering.
// Caller guarantees we are not on the left edge of the frame.
fn deblock_left<'a, T: Pixel>(
  blocks: &'a FrameBlocks, in_bo: PlaneBlockOffset, p: &Plane<T>,
) -> &'a Block {
  let xdec = p.cfg.xdec;
  let ydec = p.cfg.ydec;

  // This little bit of weirdness is straight out of the spec;
  // subsampled chroma uses odd mi row/col
  let bo =
    PlaneBlockOffset(BlockOffset { x: in_bo.0.x | xdec, y: in_bo.0.y | ydec });

  // We already know we're not at the upper/left corner, so prev_block is in frame
  &blocks[bo.with_offset(-1 << xdec, 0)]
}

// Returns the block immediately above `in_bo`, for horizontal-edge filtering.
// Caller guarantees we are not on the top edge of the frame.
fn deblock_up<'a, T: Pixel>(
  blocks: &'a FrameBlocks, in_bo: PlaneBlockOffset, p: &Plane<T>,
) -> &'a Block {
  let xdec = p.cfg.xdec;
  let ydec = p.cfg.ydec;

  // This little bit of weirdness is straight out of the spec;
  // subsampled chroma uses odd mi row/col
  let bo =
    PlaneBlockOffset(BlockOffset { x: in_bo.0.x | xdec, y: in_bo.0.y | ydec });

  // We already know we're not at the upper/left corner, so prev_block is in frame
  &blocks[bo.with_offset(0, -1 << ydec)]
}

// Must be called on a tx edge, and not on a frame edge. This is enforced above the call.
// Returns the deblocking filter size (number of taps spanning the edge:
// 0, 4, 6, 8 or 14) for the edge between `block` and `prev_block`,
// or 0 when the edge is not filtered at all.
fn deblock_size<T: Pixel>(
  block: &Block, prev_block: &Block, p: &Plane<T>, pli: usize,
  vertical: bool, block_edge: bool,
) -> usize {
  let xdec = p.cfg.xdec;
  let ydec = p.cfg.ydec;

  // filter application is conditional on skip and block edge
  if !(block_edge
    || !block.skip
    || !prev_block.skip
    || block.ref_frames[0] == INTRA_FRAME
    || prev_block.ref_frames[0] == INTRA_FRAME)
  {
    0
  } else {
    // Chroma derives its filter size from the largest chroma tx size of the
    // block, luma uses the block's own tx size.
    let (txsize, prev_txsize) = if pli == 0 {
      (block.txsize, prev_block.txsize)
    } else {
      (
        block.bsize.largest_chroma_tx_size(xdec, ydec),
        prev_block.bsize.largest_chroma_tx_size(xdec, ydec),
      )
    };
    // Transform extent perpendicular to the edge, in MI units (min 1).
    let (tx_n, prev_tx_n) = if vertical {
      (cmp::max(txsize.width_mi(), 1), cmp::max(prev_txsize.width_mi(), 1))
    } else {
      (cmp::max(txsize.height_mi(), 1), cmp::max(prev_txsize.height_mi(), 1))
    };
    // Luma caps at a 14-tap filter, chroma at 6 taps; otherwise limited by
    // the smaller of the two adjoining transforms (converted MI -> pixels).
    cmp::min(
      if pli == 0 { 14 } else { 6 },
      cmp::min(tx_n, prev_tx_n) << MI_SIZE_LOG2,
    )
  }
}

// Must be called on a tx edge
// Filter level for an edge: this block's adjusted level, falling back to the
// neighboring block's level when this one is zero.
fn deblock_level(
  deblock: &DeblockState, block: &Block, prev_block: &Block, pli: usize,
  vertical: bool,
) -> usize {
  let level = deblock_adjusted_level(deblock, block, pli, vertical);
  if level == 0 {
    deblock_adjusted_level(deblock, prev_block, pli, vertical)
  } else {
    level
  }
}

// four taps, 4 outputs (two are trivial)
fn filter_narrow2_4(
  p1: i32, p0: i32, q0: i32, q1: i32, shift: usize,
) -> [i32; 4] {
  let filter0 = clamp(p1 - q1, -128 << shift, (128 << shift) - 1);
  let filter1 =
    clamp(filter0 + 3 * (q0 - p0) + 4, -128 << shift, (128 << shift) - 1)
      >> 3;
  // be certain our optimization removing a clamp is sound
  debug_assert!({
    let base =
      clamp(filter0 + 3 * (q0 - p0), -128 << shift, (128 << shift) - 1);
    let test = clamp(base + 4, -128 << shift, (128 << shift) - 1) >> 3;
    filter1 == test
  });
  let filter2 =
    clamp(filter0 + 3 * (q0 - p0) + 3, -128 << shift, (128 << shift) - 1)
      >> 3;
  // be certain our optimization removing a clamp is sound
  debug_assert!({
    let base =
      clamp(filter0 + 3 * (q0 - p0), -128 << shift, (128 << shift) - 1);
    let test = clamp(base + 3, -128 << shift, (128 << shift) - 1)
      >> 3;
    filter2 == test
  });
  [
    p1,
    clamp(p0 + filter2, 0, (256 << shift) - 1),
    clamp(q0 - filter1, 0, (256 << shift) - 1),
    q1,
  ]
}

// six taps, 6 outputs (four are trivial)
fn filter_narrow2_6(
  p2: i32, p1: i32, p0: i32, q0: i32, q1: i32, q2: i32, shift: usize,
) -> [i32; 6] {
  // Inner four pixels are filtered with the 4-tap narrow2; outer pass through.
  let x = filter_narrow2_4(p1, p0, q0, q1, shift);
  [p2, x[0], x[1], x[2], x[3], q2]
}

// 12 taps, 12 outputs (ten are trivial)
fn filter_narrow2_12(
  p5: i32, p4: i32, p3: i32, p2: i32, p1: i32, p0: i32, q0: i32, q1: i32,
  q2: i32, q3: i32, q4: i32, q5: i32, shift: usize,
) -> [i32; 12] {
  // Only the inner four pixels change; the outer eight pass through.
  let x = filter_narrow2_4(p1, p0, q0, q1, shift);
  [p5, p4, p3, p2, x[0], x[1], x[2], x[3], q2, q3, q4, q5]
}

// four taps, 4 outputs
fn filter_narrow4_4(
  p1: i32, p0: i32, q0: i32, q1: i32, shift: usize,
) -> [i32; 4] {
  let filter1 =
    clamp(3 * (q0 - p0) + 4, -128 << shift, (128 << shift) - 1) >> 3;
  // be certain our optimization removing a clamp is sound
  debug_assert!({
    let base = clamp(3 * (q0 - p0), -128 << shift, (128 << shift) - 1);
    let test = clamp(base + 4, -128 << shift, (128 << shift) - 1) >> 3;
    filter1 == test
  });
  let filter2 =
    clamp(3 * (q0 - p0) + 3, -128 << shift, (128 << shift) - 1) >> 3;
  // be certain our optimization removing a clamp is sound
  debug_assert!({
    let base = clamp(3 * (q0 - p0), -128 << shift, (128 << shift) - 1);
    let test = clamp(base + 3, -128 << shift, (128 << shift) - 1) >> 3;
    filter2 == test
  });
  // Half-strength correction applied to the outer pixel pair.
  let filter3 = (filter1 + 1) >> 1;
  [
    clamp(p1 + filter3, 0, (256 << shift) - 1),
    clamp(p0 + filter2, 0, (256 << shift) - 1),
    clamp(q0 - filter1, 0, (256 << shift) - 1),
    clamp(q1 - filter3, 0, (256 << shift) - 1),
  ]
}

// six taps, 6 outputs (two are trivial)
fn filter_narrow4_6(
  p2: i32, p1: i32, p0: i32, q0: i32, q1: i32, q2: i32, shift: usize,
) -> [i32; 6] {
  let x = filter_narrow4_4(p1, p0, q0, q1, shift);
  [p2, x[0], x[1], x[2], x[3], q2]
}

// 12 taps, 12 outputs (eight are trivial)
fn filter_narrow4_12(
  p5: i32, p4: i32, p3: i32, p2: i32, p1: i32, p0: i32, q0: i32, q1: i32,
  q2: i32, q3: i32,
  q4: i32, q5: i32, shift: usize,
) -> [i32; 12] {
  let x = filter_narrow4_4(p1, p0, q0, q1, shift);
  [p5, p4, p3, p2, x[0], x[1], x[2], x[3], q2, q3, q4, q5]
}

// six taps, 4 outputs
#[rustfmt::skip]
const fn filter_wide6_4(
  p2: i32, p1: i32, p0: i32, q0: i32, q1: i32, q2: i32
) -> [i32; 4] {
  [
    (p2*3 + p1*2 + p0*2 + q0 + (1<<2)) >> 3,
    (p2 + p1*2 + p0*2 + q0*2 + q1 + (1<<2)) >> 3,
    (p1 + p0*2 + q0*2 + q1*2 + q2 + (1<<2)) >> 3,
    (p0 + q0*2 + q1*2 + q2*3 + (1<<2)) >> 3
  ]
}

// eight taps, 6 outputs
#[rustfmt::skip]
const fn filter_wide8_6(
  p3: i32, p2: i32, p1: i32, p0: i32, q0: i32, q1: i32, q2: i32, q3: i32
) -> [i32; 6] {
  [
    (p3*3 + p2*2 + p1 + p0 + q0 + (1<<2)) >> 3,
    (p3*2 + p2 + p1*2 + p0 + q0 + q1 + (1<<2)) >> 3,
    (p3 + p2 + p1 + p0*2 + q0 + q1 + q2 +(1<<2)) >> 3,
    (p2 + p1 + p0 + q0*2 + q1 + q2 + q3 + (1<<2)) >> 3,
    (p1 + p0 + q0 + q1*2 + q2 + q3*2 + (1<<2)) >> 3,
    (p0 + q0 + q1 + q2*2 + q3*3 + (1<<2)) >> 3
  ]
}

// 12 taps, 12 outputs (six are trivial)
const fn filter_wide8_12(
  p5: i32, p4: i32, p3: i32, p2: i32, p1: i32, p0: i32, q0: i32, q1: i32,
  q2: i32, q3: i32, q4: i32, q5: i32,
) -> [i32; 12] {
  let x = filter_wide8_6(p3, p2, p1, p0, q0, q1, q2, q3);
  [p5, p4, p3, x[0], x[1], x[2], x[3], x[4], x[5], q3, q4, q5]
}

// fourteen taps, 12 outputs
#[rustfmt::skip]
const fn filter_wide14_12(
  p6: i32, p5: i32, p4: i32, p3: i32, p2: i32, p1: i32, p0: i32, q0: i32,
  q1: i32, q2: i32, q3: i32, q4: i32, q5: i32, q6: i32
) -> [i32; 12] {
  [
    (p6*7 + p5*2 + p4*2 + p3 + p2 + p1 + p0 + q0 + (1<<3)) >> 4,
    (p6*5 + p5*2 + p4*2 + p3*2 + p2 + p1 + p0 + q0 + q1 + (1<<3)) >> 4,
    (p6*4 + p5 + p4*2 + p3*2 + p2*2 + p1 + p0 + q0 + q1 + q2 + (1<<3)) >> 4,
    (p6*3 + p5 + p4 + p3*2 + p2*2 + p1*2 + p0 + q0 + q1 + q2 + q3 + (1<<3)) >> 4,
    (p6*2 + p5 + p4 + p3 + p2*2 + p1*2 + p0*2 + q0 + q1 + q2 + q3 + q4 + (1<<3)) >> 4,
    (p6 + p5 + p4 + p3 + p2 + p1*2 + p0*2 + q0*2 + q1 + q2 + q3 + q4 + q5 + (1<<3)) >> 4,
    (p5 + p4 + p3 + p2 + p1 + p0*2 + q0*2 + q1*2 + q2 + q3 + q4 + q5 + q6 + (1<<3)) >> 4,
    (p4 + p3 + p2 + p1
+ p0 + q0*2 + q1*2 + q2*2 + q3 + q4 + q5 + q6*2 + (1<<3)) >> 4, (p3 + p2 + p1 + p0 + q0 + q1*2 + q2*2 + q3*2 + q4 + q5 + q6*3 + (1<<3)) >> 4, (p2 + p1 + p0 + q0 + q1 + q2*2 + q3*2 + q4*2 + q5 + q6*4 + (1<<3)) >> 4, (p1 + p0 + q0 + q1 + q2 + q3*2 + q4*2 + q5*2 + q6*5 + (1<<3)) >> 4, (p0 + q0 + q1 + q2 + q3 + q4*2 + q5*2 + q6*7 + (1<<3)) >> 4 ] } #[inline] fn copy_horizontal<T: Pixel>( dst: &mut PlaneMutSlice<'_, T>, x: usize, y: usize, src: &[i32], ) { let row = &mut dst[y][x..]; for (dst, src) in row.iter_mut().take(src.len()).zip(src) { *dst = T::cast_from(*src); } } #[inline] fn copy_vertical<T: Pixel>( dst: &mut PlaneMutSlice<'_, T>, x: usize, y: usize, src: &[i32], ) { for (i, v) in src.iter().enumerate() { let p = &mut dst[y + i][x]; *p = T::cast_from(*v); } } fn stride_sse(a: &[i32], b: &[i32]) -> i64 { let mut acc: i32 = 0; for (a, b) in a.iter().take(b.len()).zip(b) { acc += (*a - *b) * (*a - *b) } acc as i64 } const fn _level_to_limit(level: i32, shift: usize) -> i32 { level << shift } const fn limit_to_level(limit: i32, shift: usize) -> i32 { (limit + (1 << shift) - 1) >> shift } const fn _level_to_blimit(level: i32, shift: usize) -> i32 { (3 * level + 4) << shift } const fn blimit_to_level(blimit: i32, shift: usize) -> i32 { (((blimit + (1 << shift) - 1) >> shift) - 2) / 3 } const fn _level_to_thresh(level: i32, shift: usize) -> i32 { level >> 4 << shift } const fn thresh_to_level(thresh: i32, shift: usize) -> i32 { (thresh + (1 << shift) - 1) >> shift << 4 } fn nhev4(p1: i32, p0: i32, q0: i32, q1: i32, shift: usize) -> usize { thresh_to_level(cmp::max((p1 - p0).abs(), (q1 - q0).abs()), shift) as usize } fn mask4(p1: i32, p0: i32, q0: i32, q1: i32, shift: usize) -> usize { cmp::max( limit_to_level(cmp::max((p1 - p0).abs(), (q1 - q0).abs()), shift), blimit_to_level((p0 - q0).abs() * 2 + (p1 - q1).abs() / 2, shift), ) as usize } #[inline] fn deblock_size4_inner( [p1, p0, q0, q1]: [i32; 4], level: usize, bd: usize, ) -> Option<[i32; 4]> { if mask4(p1, p0, 
q0, q1, bd - 8) <= level { let x = if nhev4(p1, p0, q0, q1, bd - 8) <= level { filter_narrow4_4(p1, p0, q0, q1, bd - 8) } else { filter_narrow2_4(p1, p0, q0, q1, bd - 8) }; Some(x) } else { None } } // Assumes rec[0] is set 2 taps back from the edge fn deblock_v_size4<T: Pixel>( rec: &mut PlaneMutSlice<'_, T>, level: usize, bd: usize, ) { for y in 0..4 { let p = &rec[y]; let vals = [p[0].as_(), p[1].as_(), p[2].as_(), p[3].as_()]; if let Some(data) = deblock_size4_inner(vals, level, bd) { copy_horizontal(rec, 0, y, &data); } } } // Assumes rec[0] is set 2 taps back from the edge fn deblock_h_size4<T: Pixel>( rec: &mut PlaneMutSlice<'_, T>, level: usize, bd: usize, ) { for x in 0..4 { let vals = [rec[0][x].as_(), rec[1][x].as_(), rec[2][x].as_(), rec[3][x].as_()]; if let Some(data) = deblock_size4_inner(vals, level, bd) { copy_vertical(rec, x, 0, &data); } } } // Assumes rec[0] and src[0] are set 2 taps back from the edge. // Accesses four taps, accumulates four pixels into the tally fn sse_size4<T: Pixel>( rec: &PlaneSlice<'_, T>, src: &PlaneSlice<'_, T>, tally: &mut [i64; MAX_LOOP_FILTER + 2], horizontal_p: bool, bd: usize, ) { for i in 0..4 { let (p1, p0, q0, q1, a) = if horizontal_p { ( rec[0][i].as_(), rec[1][i].as_(), rec[2][i].as_(), rec[3][i].as_(), [src[0][i].as_(), src[1][i].as_(), src[2][i].as_(), src[3][i].as_()], ) } else { ( rec[i][0].as_(), rec[i][1].as_(), rec[i][2].as_(), rec[i][3].as_(), [src[i][0].as_(), src[i][1].as_(), src[i][2].as_(), src[i][3].as_()], ) }; // three possibilities: no filter, narrow2 and narrow4 // All possibilities produce four outputs let none: [_; 4] = [p1, p0, q0, q1]; let narrow2 = filter_narrow2_4(p1, p0, q0, q1, bd - 8); let narrow4 = filter_narrow4_4(p1, p0, q0, q1, bd - 8); // mask4 sets the dividing line for filter vs no filter // nhev4 sets the dividing line between narrow2 and narrow4 let mask = clamp(mask4(p1, p0, q0, q1, bd - 8), 1, MAX_LOOP_FILTER + 1) as usize; let nhev = clamp(nhev4(p1, p0, q0, q1, bd - 8), 
mask, MAX_LOOP_FILTER + 1) as usize; // sse for each; short-circuit the 'special' no-op cases. let sse_none = stride_sse(&a, &none); let sse_narrow2 = if nhev != mask { stride_sse(&a, &narrow2) } else { sse_none }; let sse_narrow4 = if nhev <= MAX_LOOP_FILTER { stride_sse(&a, &narrow4) } else { sse_none }; // accumulate possible filter values into the tally // level 0 is a special case tally[0] += sse_none; tally[mask] -= sse_none; tally[mask] += sse_narrow2; tally[nhev] -= sse_narrow2; tally[nhev] += sse_narrow4; } } fn mask6( p2: i32, p1: i32, p0: i32, q0: i32, q1: i32, q2: i32, shift: usize, ) -> usize { cmp::max( limit_to_level( cmp::max( (p2 - p1).abs(), cmp::max((p1 - p0).abs(), cmp::max((q2 - q1).abs(), (q1 - q0).abs())), ), shift, ), blimit_to_level((p0 - q0).abs() * 2 + (p1 - q1).abs() / 2, shift), ) as usize } fn flat6(p2: i32, p1: i32, p0: i32, q0: i32, q1: i32, q2: i32) -> usize { cmp::max( (p1 - p0).abs(), cmp::max((q1 - q0).abs(), cmp::max((p2 - p0).abs(), (q2 - q0).abs())), ) as usize } #[inline] fn deblock_size6_inner( [p2, p1, p0, q0, q1, q2]: [i32; 6], level: usize, bd: usize, ) -> Option<[i32; 4]> { if mask6(p2, p1, p0, q0, q1, q2, bd - 8) <= level { let flat = 1 << (bd - 8); let x = if flat6(p2, p1, p0, q0, q1, q2) <= flat { filter_wide6_4(p2, p1, p0, q0, q1, q2) } else if nhev4(p1, p0, q0, q1, bd - 8) <= level { filter_narrow4_4(p1, p0, q0, q1, bd - 8) } else { filter_narrow2_4(p1, p0, q0, q1, bd - 8) }; Some(x) } else { None } } // Assumes slice[0] is set 3 taps back from the edge fn deblock_v_size6<T: Pixel>( rec: &mut PlaneMutSlice<'_, T>, level: usize, bd: usize, ) { for y in 0..4 { let p = &rec[y]; let vals = [p[0].as_(), p[1].as_(), p[2].as_(), p[3].as_(), p[4].as_(), p[5].as_()]; if let Some(data) = deblock_size6_inner(vals, level, bd) { copy_horizontal(rec, 1, y, &data); } } } // Assumes slice[0] is set 3 taps back from the edge fn deblock_h_size6<T: Pixel>( rec: &mut PlaneMutSlice<'_, T>, level: usize, bd: usize, ) { for x in 0..4 { 
let vals = [ rec[0][x].as_(), rec[1][x].as_(), rec[2][x].as_(), rec[3][x].as_(), rec[4][x].as_(), rec[5][x].as_(), ]; if let Some(data) = deblock_size6_inner(vals, level, bd) { copy_vertical(rec, x, 1, &data); } } } // Assumes rec[0] and src[0] are set 3 taps back from the edge. // Accesses six taps, accumulates four pixels into the tally fn sse_size6<T: Pixel>( rec: &PlaneSlice<'_, T>, src: &PlaneSlice<'_, T>, tally: &mut [i64; MAX_LOOP_FILTER + 2], horizontal_p: bool, bd: usize, ) { let flat = 1 << (bd - 8); for i in 0..4 { let (p2, p1, p0, q0, q1, q2, a) = if horizontal_p { // six taps ( rec[0][i].as_(), rec[1][i].as_(), rec[2][i].as_(), rec[3][i].as_(), rec[4][i].as_(), rec[5][i].as_(), // four pixels to compare so offset one forward [src[1][i].as_(), src[2][i].as_(), src[3][i].as_(), src[4][i].as_()], ) } else { // six taps ( rec[i][0].as_(), rec[i][1].as_(), rec[i][2].as_(), rec[i][3].as_(), rec[i][4].as_(), rec[i][5].as_(), // four pixels to compare so offset one forward [src[i][1].as_(), src[i][2].as_(), src[i][3].as_(), src[i][4].as_()], ) }; // Four possibilities: no filter, wide6, narrow2 and narrow4 // All possibilities produce four outputs let none: [_; 4] = [p1, p0, q0, q1]; let wide6 = filter_wide6_4(p2, p1, p0, q0, q1, q2); let narrow2 = filter_narrow2_4(p1, p0, q0, q1, bd - 8); let narrow4 = filter_narrow4_4(p1, p0, q0, q1, bd - 8); // mask6 sets the dividing line for filter vs no filter // flat6 decides between wide and narrow filters (unrelated to level) // nhev4 sets the dividing line between narrow2 and narrow4 let mask = clamp(mask6(p2, p1, p0, q0, q1, q2, bd - 8), 1, MAX_LOOP_FILTER + 1) as usize; let flatp = flat6(p2, p1, p0, q0, q1, q2) <= flat; let nhev = clamp(nhev4(p1, p0, q0, q1, bd - 8), mask, MAX_LOOP_FILTER + 1) as usize; // sse for each; short-circuit the 'special' no-op cases. 
let sse_none = stride_sse(&a, &none); let sse_wide6 = if flatp && mask <= MAX_LOOP_FILTER { stride_sse(&a, &wide6) } else { sse_none }; let sse_narrow2 = if !flatp && nhev != mask { stride_sse(&a, &narrow2) } else { sse_none }; let sse_narrow4 = if !flatp && nhev <= MAX_LOOP_FILTER { stride_sse(&a, &narrow4) } else { sse_none }; // accumulate possible filter values into the tally tally[0] += sse_none; tally[mask] -= sse_none; if flatp { tally[mask] += sse_wide6; } else { tally[mask] += sse_narrow2; tally[nhev] -= sse_narrow2; tally[nhev] += sse_narrow4; } } } fn mask8( p3: i32, p2: i32, p1: i32, p0: i32, q0: i32, q1: i32, q2: i32, q3: i32, shift: usize, ) -> usize { cmp::max( limit_to_level( cmp::max( (p3 - p2).abs(), cmp::max( (p2 - p1).abs(), cmp::max( (p1 - p0).abs(), cmp::max( (q3 - q2).abs(), cmp::max((q2 - q1).abs(), (q1 - q0).abs()), ), ), ), ), shift, ), blimit_to_level((p0 - q0).abs() * 2 + (p1 - q1).abs() / 2, shift), ) as usize } fn flat8( p3: i32, p2: i32, p1: i32, p0: i32, q0: i32, q1: i32, q2: i32, q3: i32, ) -> usize { cmp::max( (p1 - p0).abs(), cmp::max( (q1 - q0).abs(), cmp::max( (p2 - p0).abs(), cmp::max((q2 - q0).abs(), cmp::max((p3 - p0).abs(), (q3 - q0).abs())), ), ), ) as usize } #[inline] fn deblock_size8_inner( [p3, p2, p1, p0, q0, q1, q2, q3]: [i32; 8], level: usize, bd: usize, ) -> Option<[i32; 6]> { if mask8(p3, p2, p1, p0, q0, q1, q2, q3, bd - 8) <= level { let flat = 1 << (bd - 8); let x = if flat8(p3, p2, p1, p0, q0, q1, q2, q3) <= flat { filter_wide8_6(p3, p2, p1, p0, q0, q1, q2, q3) } else if nhev4(p1, p0, q0, q1, bd - 8) <= level { filter_narrow4_6(p2, p1, p0, q0, q1, q2, bd - 8) } else { filter_narrow2_6(p2, p1, p0, q0, q1, q2, bd - 8) }; Some(x) } else { None } } // Assumes rec[0] is set 4 taps back from the edge fn deblock_v_size8<T: Pixel>( rec: &mut PlaneMutSlice<'_, T>, level: usize, bd: usize, ) { for y in 0..4 { let p = &rec[y]; let vals = [ p[0].as_(), p[1].as_(), p[2].as_(), p[3].as_(), p[4].as_(), p[5].as_(), p[6].as_(), 
p[7].as_(), ]; if let Some(data) = deblock_size8_inner(vals, level, bd) { copy_horizontal(rec, 1, y, &data); } } } // Assumes rec[0] is set 4 taps back from the edge fn deblock_h_size8<T: Pixel>( rec: &mut PlaneMutSlice<'_, T>, level: usize, bd: usize, ) { for x in 0..4 { let vals = [ rec[0][x].as_(), rec[1][x].as_(), rec[2][x].as_(), rec[3][x].as_(), rec[4][x].as_(), rec[5][x].as_(), rec[6][x].as_(), rec[7][x].as_(), ]; if let Some(data) = deblock_size8_inner(vals, level, bd) { copy_vertical(rec, x, 1, &data); } } } // Assumes rec[0] and src[0] are set 4 taps back from the edge. // Accesses eight taps, accumulates six pixels into the tally fn sse_size8<T: Pixel>( rec: &PlaneSlice<'_, T>, src: &PlaneSlice<'_, T>, tally: &mut [i64; MAX_LOOP_FILTER + 2], horizontal_p: bool, bd: usize, ) { let flat = 1 << (bd - 8); for i in 0..4 { let (p3, p2, p1, p0, q0, q1, q2, q3, a) = if horizontal_p { // eight taps ( rec[0][i].as_(), rec[1][i].as_(), rec[2][i].as_(), rec[3][i].as_(), rec[4][i].as_(), rec[5][i].as_(), rec[6][i].as_(), rec[7][i].as_(), // six pixels to compare so offset one forward [ src[1][i].as_(), src[2][i].as_(), src[3][i].as_(), src[4][i].as_(), src[5][i].as_(), src[6][i].as_(), ], ) } else { // eight taps ( rec[i][0].as_(), rec[i][1].as_(), rec[i][2].as_(), rec[i][3].as_(), rec[i][4].as_(), rec[i][5].as_(), rec[i][6].as_(), rec[i][7].as_(), // six pixels to compare so offset one forward [ src[i][1].as_(), src[i][2].as_(), src[i][3].as_(), src[i][4].as_(), src[i][5].as_(), src[i][6].as_(), ], ) }; // Four possibilities: no filter, wide8, narrow2 and narrow4 let none: [_; 6] = [p2, p1, p0, q0, q1, q2]; let wide8: [_; 6] = filter_wide8_6(p3, p2, p1, p0, q0, q1, q2, q3); let narrow2: [_; 6] = filter_narrow2_6(p2, p1, p0, q0, q1, q2, bd - 8); let narrow4: [_; 6] = filter_narrow4_6(p2, p1, p0, q0, q1, q2, bd - 8); // mask8 sets the dividing line for filter vs no filter // flat8 decides between wide and narrow filters (unrelated to level) // nhev4 sets the dividing 
line between narrow2 and narrow4 let mask = clamp( mask8(p3, p2, p1, p0, q0, q1, q2, q3, bd - 8), 1, MAX_LOOP_FILTER + 1, ) as usize; let flatp = flat8(p3, p2, p1, p0, q0, q1, q2, q3) <= flat; let nhev = clamp(nhev4(p1, p0, q0, q1, bd - 8), mask, MAX_LOOP_FILTER + 1) as usize; // sse for each; short-circuit the 'special' no-op cases. let sse_none = stride_sse(&a, &none); let sse_wide8 = if flatp && mask <= MAX_LOOP_FILTER { stride_sse(&a, &wide8) } else { sse_none }; let sse_narrow2 = if !flatp && nhev != mask { stride_sse(&a, &narrow2) } else { sse_none }; let sse_narrow4 = if !flatp && nhev <= MAX_LOOP_FILTER { stride_sse(&a, &narrow4) } else { sse_none }; // accumulate possible filter values into the tally tally[0] += sse_none; tally[mask] -= sse_none; if flatp { tally[mask] += sse_wide8; } else { tally[mask] += sse_narrow2; tally[nhev] -= sse_narrow2; tally[nhev] += sse_narrow4; } } } fn flat14_outer( p6: i32, p5: i32, p4: i32, p0: i32, q0: i32, q4: i32, q5: i32, q6: i32, ) -> usize { cmp::max( (p4 - p0).abs(), cmp::max( (q4 - q0).abs(), cmp::max( (p5 - p0).abs(), cmp::max((q5 - q0).abs(), cmp::max((p6 - p0).abs(), (q6 - q0).abs())), ), ), ) as usize } #[inline] fn deblock_size14_inner( [p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6]: [i32; 14], level: usize, bd: usize, ) -> Option<[i32; 12]> { // 'mask' test if mask8(p3, p2, p1, p0, q0, q1, q2, q3, bd - 8) <= level { let flat = 1 << (bd - 8); // inner flatness test let x = if flat8(p3, p2, p1, p0, q0, q1, q2, q3) <= flat { // outer flatness test if flat14_outer(p6, p5, p4, p0, q0, q4, q5, q6) <= flat { // sufficient flatness across 14 pixel width; run full-width filter filter_wide14_12( p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, ) } else { // only flat in inner area, run 8-tap filter_wide8_12(p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5) } } else if nhev4(p1, p0, q0, q1, bd - 8) <= level { // not flat, run narrow filter filter_narrow4_12(p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, 
bd - 8) } else { filter_narrow2_12(p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, bd - 8) }; Some(x) } else { None } } // Assumes rec[0] is set 7 taps back from the edge fn deblock_v_size14<T: Pixel>( rec: &mut PlaneMutSlice<'_, T>, level: usize, bd: usize, ) { for y in 0..4 { let p = &rec[y]; let vals = [ p[0].as_(), p[1].as_(), p[2].as_(), p[3].as_(), p[4].as_(), p[5].as_(), p[6].as_(), p[7].as_(), p[8].as_(), p[9].as_(), p[10].as_(), p[11].as_(), p[12].as_(), p[13].as_(), ]; if let Some(data) = deblock_size14_inner(vals, level, bd) { copy_horizontal(rec, 1, y, &data); } } } // Assumes rec[0] is set 7 taps back from the edge fn deblock_h_size14<T: Pixel>( rec: &mut PlaneMutSlice<'_, T>, level: usize, bd: usize, ) { for x in 0..4 { let vals = [ rec[0][x].as_(), rec[1][x].as_(), rec[2][x].as_(), rec[3][x].as_(), rec[4][x].as_(), rec[5][x].as_(), rec[6][x].as_(), rec[7][x].as_(), rec[8][x].as_(), rec[9][x].as_(), rec[10][x].as_(), rec[11][x].as_(), rec[12][x].as_(), rec[13][x].as_(), ]; if let Some(data) = deblock_size14_inner(vals, level, bd) { copy_vertical(rec, x, 1, &data); } } } // Assumes rec[0] and src[0] are set 7 taps back from the edge. 
// Accesses fourteen taps, accumulates twelve pixels into the tally fn sse_size14<T: Pixel>( rec: &PlaneSlice<'_, T>, src: &PlaneSlice<'_, T>, tally: &mut [i64; MAX_LOOP_FILTER + 2], horizontal_p: bool, bd: usize, ) { let flat = 1 << (bd - 8); for i in 0..4 { let (p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, a) = if horizontal_p { // 14 taps ( rec[0][i].as_(), rec[1][i].as_(), rec[2][i].as_(), rec[3][i].as_(), rec[4][i].as_(), rec[5][i].as_(), rec[6][i].as_(), rec[7][i].as_(), rec[8][i].as_(), rec[9][i].as_(), rec[10][i].as_(), rec[11][i].as_(), rec[12][i].as_(), rec[13][i].as_(), // 12 pixels to compare so offset one forward [ src[1][i].as_(), src[2][i].as_(), src[3][i].as_(), src[4][i].as_(), src[5][i].as_(), src[6][i].as_(), src[7][i].as_(), src[8][i].as_(), src[9][i].as_(), src[10][i].as_(), src[11][i].as_(), src[12][i].as_(), ], ) } else { // 14 taps ( rec[i][0].as_(), rec[i][1].as_(), rec[i][2].as_(), rec[i][3].as_(), rec[i][4].as_(), rec[i][5].as_(), rec[i][6].as_(), rec[i][7].as_(), rec[i][8].as_(), rec[i][9].as_(), rec[i][10].as_(), rec[i][11].as_(), rec[i][12].as_(), rec[i][13].as_(), // 12 pixels to compare so offset one forward [ src[i][1].as_(), src[i][2].as_(), src[i][3].as_(), src[i][4].as_(), src[i][5].as_(), src[i][6].as_(), src[i][7].as_(), src[i][8].as_(), src[i][9].as_(), src[i][10].as_(), src[i][11].as_(), src[i][12].as_(), ], ) }; // Five possibilities: no filter, wide14, wide8, narrow2 and narrow4 let none: [i32; 12] = [p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5]; let wide14 = filter_wide14_12(p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6); let wide8 = filter_wide8_12(p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5); let narrow2 = filter_narrow2_12( p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, bd - 8, ); let narrow4 = filter_narrow4_12( p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, bd - 8, ); // mask8 sets the dividing line for filter vs no filter // flat8 decides between wide and narrow filters (unrelated to 
level) // flat14 decides between wide14 and wide8 filters // nhev4 sets the dividing line between narrow2 and narrow4 let mask = clamp( mask8(p3, p2, p1, p0, q0, q1, q2, q3, bd - 8), 1, MAX_LOOP_FILTER + 1, ) as usize; let flat8p = flat8(p3, p2, p1, p0, q0, q1, q2, q3) <= flat; let flat14p = flat14_outer(p6, p5, p4, p0, q0, q4, q5, q6) <= flat; let nhev = clamp(nhev4(p1, p0, q0, q1, bd - 8), mask, MAX_LOOP_FILTER + 1) as usize; // sse for each; short-circuit the 'special' no-op cases. let sse_none = stride_sse(&a, &none); let sse_wide8 = if flat8p && !flat14p && mask <= MAX_LOOP_FILTER { stride_sse(&a, &wide8) } else { sse_none }; let sse_wide14 = if flat8p && flat14p && mask <= MAX_LOOP_FILTER { stride_sse(&a, &wide14) } else { sse_none }; let sse_narrow2 = if !flat8p && nhev != mask { stride_sse(&a, &narrow2) } else { sse_none }; let sse_narrow4 = if !flat8p && nhev <= MAX_LOOP_FILTER { stride_sse(&a, &narrow4) } else { sse_none }; // accumulate possible filter values into the tally tally[0] += sse_none; tally[mask] -= sse_none; if flat8p { if flat14p { tally[mask] += sse_wide14; } else { tally[mask] += sse_wide8; } } else { tally[mask] += sse_narrow2; tally[nhev] -= sse_narrow2; tally[nhev] += sse_narrow4; } } } fn filter_v_edge<T: Pixel>( deblock: &DeblockState, blocks: &FrameBlocks, bo: PlaneBlockOffset, p: &mut Plane<T>, pli: usize, bd: usize, xdec: usize, ydec: usize, ) { let block = &blocks[bo]; let txsize = if pli == 0 { block.txsize } else { block.bsize.largest_chroma_tx_size(xdec, ydec) }; let tx_edge = bo.0.x >> xdec & (txsize.width_mi() - 1) == 0; if tx_edge { let prev_block = deblock_left(blocks, bo, p); let block_edge = bo.0.x & (block.n4_w - 1) == 0; let filter_size = deblock_size(block, prev_block, p, pli, true, block_edge); if filter_size > 0 { let level = deblock_level(deblock, block, prev_block, pli, true); if level > 0 { let po = bo.plane_offset(&p.cfg); let mut plane_slice = p.mut_slice(po); plane_slice.x -= (filter_size >> 1) as isize; match 
filter_size {
          4 => {
            deblock_v_size4(&mut plane_slice, level, bd);
          }
          6 => {
            deblock_v_size6(&mut plane_slice, level, bd);
          }
          8 => {
            deblock_v_size8(&mut plane_slice, level, bd);
          }
          14 => {
            deblock_v_size14(&mut plane_slice, level, bd);
          }
          _ => unreachable!(),
        }
      }
    }
  }
}

// Accumulates, into `tally`, the SSE contribution of filtering this vertical
// edge at each candidate filter level (used by the filter-level search).
fn sse_v_edge<T: Pixel>(
  blocks: &FrameBlocks, bo: PlaneBlockOffset, rec_plane: &Plane<T>,
  src_plane: &Plane<T>, tally: &mut [i64; MAX_LOOP_FILTER + 2], pli: usize,
  bd: usize, xdec: usize, ydec: usize,
) {
  let block = &blocks[bo];
  let txsize = if pli == 0 {
    block.txsize
  } else {
    block.bsize.largest_chroma_tx_size(xdec, ydec)
  };
  let tx_edge = bo.0.x >> xdec & (txsize.width_mi() - 1) == 0;
  if tx_edge {
    let prev_block = deblock_left(blocks, bo, rec_plane);
    let block_edge = bo.0.x & (block.n4_w - 1) == 0;
    // vertical == true: this is a vertical edge.
    let filter_size =
      deblock_size(block, prev_block, rec_plane, pli, true, block_edge);
    if filter_size > 0 {
      let po = {
        let mut po = bo.plane_offset(&rec_plane.cfg); // rec and src have identical subsampling
        // Step back to the first tap on the `p` side of the edge.
        po.x -= (filter_size >> 1) as isize;
        po
      };
      let rec_slice = rec_plane.slice(po);
      let src_slice = src_plane.slice(po);
      match filter_size {
        4 => {
          sse_size4(&rec_slice, &src_slice, tally, false, bd);
        }
        6 => {
          sse_size6(&rec_slice, &src_slice, tally, false, bd);
        }
        8 => {
          sse_size8(&rec_slice, &src_slice, tally, false, bd);
        }
        14 => {
          sse_size14(&rec_slice, &src_slice, tally, false, bd);
        }
        _ => unreachable!(),
      }
    }
  }
}

// Deblocks one horizontal edge of plane `pli`, if it is a filterable tx edge
// with a nonzero filter level.
fn filter_h_edge<T: Pixel>(
  deblock: &DeblockState, blocks: &FrameBlocks, bo: PlaneBlockOffset,
  p: &mut Plane<T>, pli: usize, bd: usize, xdec: usize, ydec: usize,
) {
  let block = &blocks[bo];
  let txsize = if pli == 0 {
    block.txsize
  } else {
    block.bsize.largest_chroma_tx_size(xdec, ydec)
  };
  let tx_edge = bo.0.y >> ydec & (txsize.height_mi() - 1) == 0;
  if tx_edge {
    let prev_block = deblock_up(blocks, bo, p);
    let block_edge = bo.0.y & (block.n4_h - 1) == 0;
    // vertical == false: this is a horizontal edge.
    let filter_size =
      deblock_size(block, prev_block, p, pli, false, block_edge);
    if filter_size > 0 {
      let level = deblock_level(deblock, block, prev_block, pli, false);
      if level > 0 {
        let po = bo.plane_offset(&p.cfg);
        let mut plane_slice = p.mut_slice(po);
        // Step back to the first tap above the edge.
        plane_slice.y -= (filter_size >> 1) as isize;
        match filter_size {
          4 => {
            deblock_h_size4(&mut plane_slice, level, bd);
          }
          6 => {
            deblock_h_size6(&mut plane_slice, level, bd);
          }
          8 => {
            deblock_h_size8(&mut plane_slice, level, bd);
          }
          14 => {
            deblock_h_size14(&mut plane_slice, level, bd);
          }
          _ => unreachable!(),
        }
      }
    }
  }
}

// Accumulates, into `tally`, the SSE contribution of filtering this horizontal
// edge at each candidate filter level (used by the filter-level search).
fn sse_h_edge<T: Pixel>(
  blocks: &FrameBlocks, bo: PlaneBlockOffset, rec_plane: &Plane<T>,
  src_plane: &Plane<T>, tally: &mut [i64; MAX_LOOP_FILTER + 2], pli: usize,
  bd: usize, xdec: usize, ydec: usize,
) {
  let block = &blocks[bo];
  let txsize = if pli == 0 {
    block.txsize
  } else {
    block.bsize.largest_chroma_tx_size(xdec, ydec)
  };
  let tx_edge = bo.0.y >> ydec & (txsize.height_mi() - 1) == 0;
  if tx_edge {
    let prev_block = deblock_up(blocks, bo, rec_plane);
    let block_edge = bo.0.y & (block.n4_h - 1) == 0;
    // Fix: pass vertical == false for this horizontal edge, matching
    // filter_h_edge above. The previous code passed `true`, so the SSE
    // search could measure a different filter size (width_mi vs height_mi
    // of the adjoining transforms) than the one actually applied.
    let filter_size =
      deblock_size(block, prev_block, rec_plane, pli, false, block_edge);
    if filter_size > 0 {
      let po = {
        let mut po = bo.plane_offset(&rec_plane.cfg); // rec and src have identical subsampling
        // Step back to the first tap above the edge.
        po.y -= (filter_size >> 1) as isize;
        po
      };
      let rec_slice = rec_plane.slice(po);
      let src_slice = src_plane.slice(po);
      match filter_size {
        4 => {
          sse_size4(&rec_slice, &src_slice, tally, true, bd);
        }
        6 => {
          sse_size6(&rec_slice, &src_slice, tally, true, bd);
        }
        8 => {
          sse_size8(&rec_slice, &src_slice, tally, true, bd);
        }
        14 => {
          sse_size14(&rec_slice, &src_slice, tally, true, bd);
        }
        _ => unreachable!(),
      }
    }
  }
}

// Deblocks all edges, vertical and horizontal, in a single plane
pub fn deblock_plane<T: Pixel>(
  fi: &FrameInvariants<T>, deblock: &DeblockState, p: &mut Plane<T>,
  pli: usize, blocks: &FrameBlocks,
) {
  let xdec = p.cfg.xdec;
  let ydec = p.cfg.ydec;
  let bd = fi.sequence.bit_depth;

  // Early out when all frame-level strengths for this plane are zero.
  match pli {
    0 => {
      if deblock.levels[0] == 0 && deblock.levels[1] == 0 {
        return;
      }
    }
    1 => {
      if deblock.levels[2] == 0 {
        return;
      }
    }
    2 => {
      if deblock.levels[3] ==
0 { return; } } _ => return, } // Deblocking happens in 4x4 (luma) units; luma x,y are clipped to // the *crop frame* by 4x4 block. Rounding is to handle chroma // fenceposts here instead of throughout the code. let cols = (((fi.width + MI_SIZE - 1) >> MI_SIZE_LOG2) + (1 << xdec >> 1)) >> xdec << xdec; // Clippy can go suck an egg let rows = (((fi.height + MI_SIZE - 1) >> MI_SIZE_LOG2) + (1 << ydec >> 1)) >> ydec << ydec; // Clippy can go suck an egg // vertical edge filtering leads horizonal by one full MI-sized // row (and horizontal filtering doesn't happen along the upper // edge). Unroll to avoid corner-cases. if rows > 0 { for x in (1 << xdec..cols).step_by(1 << xdec) { filter_v_edge( deblock, blocks, PlaneBlockOffset(BlockOffset { x, y: 0 }), p, pli, bd, xdec, ydec, ); } if rows > 1 << ydec { for x in (1 << xdec..cols).step_by(1 << xdec) { filter_v_edge( deblock, blocks, PlaneBlockOffset(BlockOffset { x, y: 1 << ydec }), p, pli, bd, xdec, ydec, ); } } } // filter rows where vertical and horizontal edge filtering both // happen (horizontal edge filtering lags vertical by one row). for y in ((2 << ydec)..rows).step_by(1 << ydec) { // Check for vertical edge at first MI block boundary on this row if cols > 1 << xdec { filter_v_edge( deblock, blocks, PlaneBlockOffset(BlockOffset { x: 1 << xdec, y }), p, pli, bd, xdec, ydec, ); } // run the rest of the row with both vertical and horizontal edge filtering. // Horizontal lags vertical edge by one row and two columns. 
for x in (2 << xdec..cols).step_by(1 << xdec) { filter_v_edge( deblock, blocks, PlaneBlockOffset(BlockOffset { x, y }), p, pli, bd, xdec, ydec, ); filter_h_edge( deblock, blocks, PlaneBlockOffset(BlockOffset { x: x - (2 << xdec), y: y - (1 << ydec), }), p, pli, bd, xdec, ydec, ); } // ..and the last two horizontal edges for the row if cols > 2 << xdec { filter_h_edge( deblock, blocks, PlaneBlockOffset(BlockOffset { x: cols - (2 << xdec), y: y - (1 << ydec), }), p, pli, bd, xdec, ydec, ); if cols > 1 << xdec { filter_h_edge( deblock, blocks, PlaneBlockOffset(BlockOffset { x: cols - (1 << xdec), y: y - (1 << ydec), }), p, pli, bd, xdec, ydec, ); } } } // Last horizontal row, vertical is already complete if rows > 1 << ydec { for x in (0..cols).step_by(1 << xdec) { filter_h_edge( deblock, blocks, PlaneBlockOffset(BlockOffset { x, y: rows - (1 << ydec) }), p, pli, bd, xdec, ydec, ); } } } // sse count of all edges in a single plane, accumulates into vertical and horizontal counts fn sse_plane<T: Pixel>( fi: &FrameInvariants<T>, rec: &Plane<T>, src: &Plane<T>, v_sse: &mut [i64; MAX_LOOP_FILTER + 2], h_sse: &mut [i64; MAX_LOOP_FILTER + 2], pli: usize, blocks: &FrameBlocks, ) { let xdec = rec.cfg.xdec; let ydec = rec.cfg.ydec; // Deblocking happens in 4x4 (luma) units; luma x,y are clipped to // the *crop frame* by 4x4 block. let cols = cmp::min(blocks.cols, (fi.width + MI_SIZE - 1) >> MI_SIZE_LOG2); let rows = cmp::min(blocks.rows, (fi.height + MI_SIZE - 1) >> MI_SIZE_LOG2); let bd = fi.sequence.bit_depth; // No horizontal edge filtering along top of frame for x in (1 << xdec..cols).step_by(1 << xdec) { sse_v_edge( blocks, PlaneBlockOffset(BlockOffset { x, y: 0 }), rec, src, v_sse, pli, bd, xdec, ydec, ); } // Unlike actual filtering, we're counting horizontal and vertical // as separable cases. No need to lag the horizontal processing // behind vertical. 
  // (continuation of sse_plane: remaining rows tally both edge directions)
  for y in (1 << ydec..rows).step_by(1 << ydec) {
    // No vertical filtering along left edge of frame
    sse_h_edge(
      blocks,
      PlaneBlockOffset(BlockOffset { x: 0, y }),
      rec,
      src,
      h_sse,
      pli,
      bd,
      xdec,
      ydec,
    );
    for x in (1 << xdec..cols).step_by(1 << xdec) {
      sse_v_edge(
        blocks,
        PlaneBlockOffset(BlockOffset { x, y }),
        rec,
        src,
        v_sse,
        pli,
        bd,
        xdec,
        ydec,
      );
      sse_h_edge(
        blocks,
        PlaneBlockOffset(BlockOffset { x, y }),
        rec,
        src,
        h_sse,
        pli,
        bd,
        xdec,
        ydec,
      );
    }
  }
}

// Deblocks all edges in all planes of a frame
pub fn deblock_filter_frame<T: Pixel>(
  fi: &FrameInvariants<T>, fs: &mut FrameState<T>, blocks: &FrameBlocks,
) {
  // Clone-on-write: get a mutable view of the reconstructed frame.
  let fs_rec = Arc::make_mut(&mut fs.rec);
  for pli in 0..PLANES {
    deblock_plane(fi, &fs.deblock, &mut fs_rec.planes[pli], pli, blocks);
  }
}

// Exhaustive search: picks, per plane, the filter level(s) minimizing the
// SSE tallies produced by sse_plane, and writes them into fs.deblock.levels.
fn sse_optimize<T: Pixel>(
  fi: &FrameInvariants<T>, fs: &mut FrameState<T>, blocks: &FrameBlocks,
) {
  // i64 allows us to accumulate a total of ~ 35 bits worth of pixels
  assert!(
    fs.input.planes[0].cfg.width.ilog() + fs.input.planes[0].cfg.height.ilog()
      < 35
  );

  for pli in 0..PLANES {
    let mut v_tally: [i64; MAX_LOOP_FILTER + 2] = [0; MAX_LOOP_FILTER + 2];
    let mut h_tally: [i64; MAX_LOOP_FILTER + 2] = [0; MAX_LOOP_FILTER + 2];

    sse_plane(
      fi,
      &fs.rec.planes[pli],
      &fs.input.planes[pli],
      &mut v_tally,
      &mut h_tally,
      pli,
      blocks,
    );

    // Prefix-sum the per-level buckets into cumulative totals per level.
    for i in 1..=MAX_LOOP_FILTER {
      v_tally[i] += v_tally[i - 1];
      h_tally[i] += h_tally[i - 1];
    }

    match pli {
      0 => {
        // Luma: vertical and horizontal levels chosen independently.
        // 999 is a sentinel meaning "no best level selected yet".
        let mut best_v = 999;
        let mut best_h = 999;
        for i in 0..=MAX_LOOP_FILTER {
          if best_v == 999 || v_tally[best_v] > v_tally[i] {
            best_v = i;
          };
          if best_h == 999 || h_tally[best_h] > h_tally[i] {
            best_h = i;
          };
        }
        fs.deblock.levels[0] = best_v as u8;
        fs.deblock.levels[1] = best_h as u8;
      }
      1 | 2 => {
        // Chroma: one shared level per plane, minimizing the combined SSE.
        let mut best = 999;
        for i in 0..=MAX_LOOP_FILTER {
          if best == 999
            || v_tally[best] + h_tally[best] > v_tally[i] + h_tally[i]
          {
            best = i;
          };
        }
        fs.deblock.levels[pli + 1] = best as u8;
      }
      _ => unreachable!(),
    }
  }
}

// Chooses deblock filter levels: a fast quantizer-derived estimate when
// speed settings allow, otherwise a full SSE search via sse_optimize.
pub fn deblock_filter_optimize<T: Pixel>(
  fi: &FrameInvariants<T>, fs: &mut FrameState<T>, blocks: &FrameBlocks,
) {
  if fi.config.speed_settings.fast_deblock {
    let q = ac_q(fi.base_q_idx, 0, fi.sequence.bit_depth) as i32;
    // Fixed-point linear fits of level vs. quantizer, per bit depth;
    // the (1 << N >> 1) terms round to nearest before the shift.
    let level = clamp(
      match fi.sequence.bit_depth {
        8 => {
          if fi.frame_type == FrameType::KEY {
            (q * 17563 - 421_574 + (1 << 18 >> 1)) >> 18
          } else {
            (q * 6017 + 650_707 + (1 << 18 >> 1)) >> 18
          }
        }
        10 => {
          if fi.frame_type == FrameType::KEY {
            ((q * 20723 + 4_060_632 + (1 << 20 >> 1)) >> 20) - 4
          } else {
            (q * 20723 + 4_060_632 + (1 << 20 >> 1)) >> 20
          }
        }
        12 => {
          if fi.frame_type == FrameType::KEY {
            ((q * 20723 + 16_242_526 + (1 << 22 >> 1)) >> 22) - 4
          } else {
            (q * 20723 + 16_242_526 + (1 << 22 >> 1)) >> 22
          }
        }
        _ => unreachable!(),
      },
      0,
      MAX_LOOP_FILTER as i32,
    ) as u8;
    // Fast path applies one shared level to all four slots.
    fs.deblock.levels[0] = level;
    fs.deblock.levels[1] = level;
    fs.deblock.levels[2] = level;
    fs.deblock.levels[3] = level;
  } else {
    sse_optimize(fi, fs, blocks);
  }
}
// NOTE(review): from here on this chunk is a different file — pre-1.0-era
// Rust FFI bindings to libmad's high-level MP3 decoder. The whole file
// appears TWICE, separated by the bare text "Switched to synchronous
// channel" (apparently a commit message pasted into the file); the second
// copy only swaps Sender for SyncSender. The duplication, the stray prose
// line, and the `async` field name (reserved keyword since Rust 2018) make
// this uncompilable on modern toolchains — TODO: keep exactly one copy.
use std::io;
use std::io::Read;
use std::default::Default;
use std::io::BufReader;
use std::path::Path;
use std::fs::File;
use std::sync::mpsc::{Sender, Receiver};
use std::sync::mpsc;
use std::thread;
use self::mad_decoder_mode::* ;

// Raw FFI surface of libmad's decoder API. Opaque pointers are smuggled
// through `isize` rather than typed raw pointers.
#[link(name = "mad")]
extern {
  fn mad_decoder_init(decoder: &mad_decoder,
                      message: &mad_message,
                      input_callback: extern fn(message: &mut mad_message, stream: isize) -> mad_flow,
                      header_callback: extern fn(),
                      filter_callback: extern fn(),
                      output_callback: extern fn(message: &mut mad_message, header: isize, pcm: &mad_pcm) -> mad_flow,
                      error_callback: extern fn(),
                      message_callback: extern fn());
  fn mad_decoder_run(input: &mut mad_decoder, mode: mad_decoder_mode) -> i32;
  fn mad_stream_buffer(stream: isize, buf_start: *const u8, buf_length: usize);
}

// Control-flow codes returned to libmad from the callbacks.
#[repr(C)]
enum mad_flow {
  mf_continue = 0x0000, /* continue normally */
  mf_stop = 0x0010, /* stop decoding normally */
  mf_break = 0x0011, /* stop decoding and signal an error */
  mf_ignore = 0x0020 /* ignore the current frame */
}

// One decoded PCM block; fixed 2 channels x 1152 samples.
#[repr(C)]
struct mad_pcm {
  sample_rate: u32,
  channels: u16,
  length: u16,
  samples: [[i32; 1152]; 2],
}

// Per-decode state handed to the C callbacks through libmad's cb_data slot.
#[repr(C)]
struct mad_message<'a> {
  buffer: &'a mut [u8; 4096],
  reader: &'a mut (io::Read + 'a),
  sender: &'a Sender<Frame>,
}

#[repr(C)]
enum mad_decoder_mode {
  MAD_DECODER_MODE_SYNC = 0,
  MAD_DECODER_MODE_ASYNC
}

impl Default for mad_decoder_mode {
  fn default() -> mad_decoder_mode {MAD_DECODER_MODE_SYNC}
}

#[derive(Default)]
#[repr(C)]
struct mad_async_parameters {
  pid: u32,
  ain: isize,
  aout: isize,
}

// Mirrors libmad's `struct mad_decoder`; the function-pointer slots are
// modelled as usize and zeroed via Default, then filled by mad_decoder_init.
#[derive(Default)]
#[repr(C)]
struct mad_decoder {
  mode: mad_decoder_mode,
  options: isize,
  async: mad_async_parameters,
  sync: usize,
  cb_data: usize,
  input_func: usize,
  header_func: usize,
  filter_func: usize,
  output_func: usize,
  error_func: usize,
  message_func: usize,
}

// No-op handler for the callbacks this binding does not use.
extern fn empty_callback() {
}

// Owned copy of one decoded PCM block, sent across the channel.
pub struct Frame {
  sample_rate: u32,
  channels: u16,
  length: u16,
  samples: [[i32; 1152]; 2],
}

// Spawns a decoder thread for the MP3 at `path_str` and returns the
// receiving end of a channel of decoded Frames.
pub fn decode(path_str: &'static str) -> Receiver<Frame> {
  let (tx, rx):
    (Sender<Frame>, Receiver<Frame>) = mpsc::channel();
  thread::spawn(move || {
    let path = Path::new(path_str);
    let f = File::open(&path).unwrap();
    let mut reader = BufReader::new(f);
    let mut input_buffer = [0u8; 4096];
    // NOTE(review): Result of read() ignored — a failed or short initial
    // read goes undetected.
    reader.read(&mut input_buffer);
    let message = &mut mad_message {
      buffer: &mut input_buffer,
      reader: &mut reader,
      sender: &tx,
    };
    let mut decoder: mad_decoder = Default::default();
    // NOTE(review): written once in the unsafe block below but never read.
    let mut decoding_result: i32 = 42;

    // Refills libmad's stream buffer from the reader; mf_stop at EOF.
    extern fn input_callback (msg: &mut mad_message, stream: isize) -> mad_flow {
      let read_result = msg.reader.read(msg.buffer).unwrap();
      unsafe {
        // NOTE(review): always passes the full 4096-byte buffer length even
        // when read() returned fewer bytes — a short final read feeds stale
        // tail bytes to the decoder; presumably should pass read_result.
        mad_stream_buffer(stream,
                          msg.buffer.as_ptr(),
                          msg.buffer.len());
      }
      if read_result == 0 {
        return mad_flow::mf_stop;
      }
      mad_flow::mf_continue
    }

    // Copies each decoded PCM block into a Frame and ships it to the consumer.
    extern fn output_callback(msg: &mut mad_message, header: isize, pcm: &mad_pcm) -> mad_flow {
      // NOTE(review): send() Result ignored — a dropped receiver is silently
      // lost instead of stopping the decode.
      msg.sender.send(Frame {sample_rate: pcm.sample_rate,
                             channels: pcm.channels,
                             length: pcm.length,
                             samples: pcm.samples});
      mad_flow::mf_continue
    }

    unsafe {
      mad_decoder_init(&mut decoder, message,
                       input_callback,
                       empty_callback,
                       empty_callback,
                       output_callback,
                       empty_callback,
                       empty_callback);
      decoding_result = mad_decoder_run(&mut decoder, mad_decoder_mode::MAD_DECODER_MODE_SYNC);
    }
  });
  rx
}

#[test]
fn test_open_file() {
  let decoder = self::decode("test_samples/fs-242.mp3");
  for frame in decoder.iter() {
    println!("Got frame.");
  }
  assert!(true);
}

Switched to synchronous channel

// NOTE(review): second copy of the file (post-"synchronous channel"
// revision). Identical to the first except Sender -> SyncSender and
// mpsc::channel() -> mpsc::sync_channel(2); see the notes above, which all
// apply here too.
use std::io;
use std::io::Read;
use std::default::Default;
use std::io::BufReader;
use std::path::Path;
use std::fs::File;
use std::sync::mpsc::{SyncSender, Receiver};
use std::sync::mpsc;
use std::thread;
use self::mad_decoder_mode::* ;

#[link(name = "mad")]
extern {
  fn mad_decoder_init(decoder: &mad_decoder,
                      message: &mad_message,
                      input_callback: extern fn(message: &mut mad_message, stream: isize) -> mad_flow,
                      header_callback: extern fn(),
                      filter_callback: extern fn(),
                      output_callback: extern fn(message: &mut mad_message, header: isize, pcm: &mad_pcm) -> mad_flow,
                      error_callback: extern fn(),
                      message_callback: extern fn());
  fn mad_decoder_run(input: &mut mad_decoder, mode: mad_decoder_mode) -> i32;
  fn mad_stream_buffer(stream: isize, buf_start: *const u8, buf_length: usize);
}

// Control-flow codes returned to libmad from the callbacks.
#[repr(C)]
enum mad_flow {
  mf_continue = 0x0000, /* continue normally */
  mf_stop = 0x0010, /* stop decoding normally */
  mf_break = 0x0011, /* stop decoding and signal an error */
  mf_ignore = 0x0020 /* ignore the current frame */
}

// One decoded PCM block; fixed 2 channels x 1152 samples.
#[repr(C)]
struct mad_pcm {
  sample_rate: u32,
  channels: u16,
  length: u16,
  samples: [[i32; 1152]; 2],
}

// Per-decode state handed to the C callbacks; this revision uses the
// bounded SyncSender so the decoder thread blocks when the consumer lags.
#[repr(C)]
struct mad_message<'a> {
  buffer: &'a mut [u8; 4096],
  reader: &'a mut (io::Read + 'a),
  sender: &'a SyncSender<Frame>,
}

#[repr(C)]
enum mad_decoder_mode {
  MAD_DECODER_MODE_SYNC = 0,
  MAD_DECODER_MODE_ASYNC
}

impl Default for mad_decoder_mode {
  fn default() -> mad_decoder_mode {MAD_DECODER_MODE_SYNC}
}

#[derive(Default)]
#[repr(C)]
struct mad_async_parameters {
  pid: u32,
  ain: isize,
  aout: isize,
}

// Mirrors libmad's `struct mad_decoder`; function-pointer slots as usize.
// NOTE(review): the field name `async` is a reserved keyword since Rust 2018.
#[derive(Default)]
#[repr(C)]
struct mad_decoder {
  mode: mad_decoder_mode,
  options: isize,
  async: mad_async_parameters,
  sync: usize,
  cb_data: usize,
  input_func: usize,
  header_func: usize,
  filter_func: usize,
  output_func: usize,
  error_func: usize,
  message_func: usize,
}

// No-op handler for the callbacks this binding does not use.
extern fn empty_callback() {
}

// Owned copy of one decoded PCM block, sent across the channel.
pub struct Frame {
  sample_rate: u32,
  channels: u16,
  length: u16,
  samples: [[i32; 1152]; 2],
}

// Spawns a decoder thread for the MP3 at `path_str` and returns the
// receiving end of a bounded (depth 2) channel of decoded Frames.
pub fn decode(path_str: &'static str) -> Receiver<Frame> {
  let (tx, rx) = mpsc::sync_channel::<Frame>(2);
  thread::spawn(move || {
    let path = Path::new(path_str);
    let f = File::open(&path).unwrap();
    let mut reader = BufReader::new(f);
    let mut input_buffer = [0u8; 4096];
    // NOTE(review): Result of read() ignored — a failed or short initial
    // read goes undetected.
    reader.read(&mut input_buffer);
    let message = &mut mad_message {
      buffer: &mut input_buffer,
      reader: &mut reader,
      sender: &tx,
    };
    let mut decoder: mad_decoder = Default::default();
    // NOTE(review): written once in the unsafe block below but never read.
    let mut decoding_result: i32 = 42;

    // Refills libmad's stream buffer from the reader; mf_stop at EOF.
    extern fn input_callback (msg: &mut mad_message, stream: isize) -> mad_flow {
      let read_result = msg.reader.read(msg.buffer).unwrap();
      unsafe {
        // NOTE(review): passes the full 4096-byte buffer length even when
        // read() returned fewer bytes — a short final read feeds stale tail
        // bytes to the decoder; presumably should pass read_result.
        mad_stream_buffer(stream,
                          msg.buffer.as_ptr(),
                          msg.buffer.len());
      }
      if read_result == 0 {
        return mad_flow::mf_stop;
      }
      mad_flow::mf_continue
    }

    // Copies each decoded PCM block into a Frame and ships it to the
    // consumer; blocks when the bounded channel is full.
    extern fn output_callback(msg: &mut mad_message, header: isize, pcm: &mad_pcm) -> mad_flow {
      // NOTE(review): send() Result ignored — a dropped receiver is silently
      // lost instead of stopping the decode.
      msg.sender.send(Frame {sample_rate: pcm.sample_rate,
                             channels: pcm.channels,
                             length: pcm.length,
                             samples: pcm.samples});
      mad_flow::mf_continue
    }

    unsafe {
      mad_decoder_init(&mut decoder, message,
                       input_callback,
                       empty_callback,
                       empty_callback,
                       output_callback,
                       empty_callback,
                       empty_callback);
      decoding_result = mad_decoder_run(&mut decoder, mad_decoder_mode::MAD_DECODER_MODE_SYNC);
    }
  });
  rx
}

#[test]
fn test_open_file() {
  let decoder = self::decode("test_samples/fs-242.mp3");
  for frame in decoder.iter() {
    println!("Got frame.");
  }
  assert!(true);
}