require 'date' module HelixWebServices # The Perforce server specification describes the high-level configuration and intended usage of a Perforce server.\nFor installations with only one Perforce server, the server specification is optional. class ServerCommand # A unique identifier for this server.\nThis must match the contents of the server’s `server.id` file as defined by the p4 serverid command.\nIf the server type is identifier, the server id specifies the name of the cluster. attr_accessor :server_id # Server executable type.\n\nOne of the following: `server`, `proxy`, `broker`, `identifier`, `admin`.\n\nEach type may offer one or more services, defined in the `services` property. attr_accessor :type # The `server` type server provides the following services:\n\n- standard - a standard Perforce server\n- replica - a read-only replica server\n- commit-server - central server in distributed installation\n- edge-server - node in distributed installation\n- forwarding-replica - a replica configured to forward commands that involve database writes to a master server\n- build-server - a replica that supports build automation and build farm integration\n- P4AUTH - a server that provides authentication\n- P4CHANGE - a server that provides change numbering\n- depot-master - commit-server with automated failover\n- depot-standby - standby replica of the depot-master\n- workspace-server - node in a cluster installation\n- standby - read-only replica server that uses p4 journalcopy\n- forwarding-standby - forwarding replica server that uses p4 journalcopy\n\nThe `proxy` type server provides a p4p caching proxy.\n\nThe `broker` type server provides the following services:\n\n- broker - a p4broker process\n- workspace-router - routing broker for a cluster\n\nThe services field for the `identifier` type server specifies the existence of the cluster, and has the value `cluster`.\nThe name of the cluster is then drawn from the ServerID field.\n\nThe `admin` type server provides the following services:\n\n- hxca-server - the admin server for a Helix cluster.\n- zookeeper-server - ZooKeeper server for a cluster attr_accessor :services # The P4NAME associated with this server.\nYou can leave this blank or you can set it to the same value as the serverid. attr_accessor :name # The P4PORT used by this server. attr_accessor :address # For an edge server, this optional field specifies the external address used for connections to a commit server.\nThis field must be set for the edge server to enable parallel submits in a federated environment. attr_accessor :external_address # An optional description for this server. attr_accessor :description # The service user name used by the server. attr_accessor :user # For a replica server, this optional field can contain one or more patterns describing how active client workspace metadata is to be filtered. Active client workspace data includes have lists, working records, and pending resolves.\n\nTo include client data, use the syntax:\n`//client-pattern/...`\n\nTo exclude client data, use the syntax:\n`-//client-pattern/...`\n\nAll patterns are specified in client syntax. attr_accessor :client_data_filter # For a replica server, this optional field can contain one or more patterns describing how submitted revision metadata is to be filtered. Submitted revision data includes revision records, integration records, label contents, and the files listed in submitted changelists.\n\nTo include depot data, use the syntax:\n//depot/pattern/...\n\nTo exclude depot data, use the syntax:\n-//depot/pattern/...\n\nAll patterns are specified in depot syntax. attr_accessor :revision_data_filter # For a replica server, this optional field can contain one or more patterns describing the policy for automatically scheduling the replication of file content. If this field is present, only those files described by the pattern are automatically transferred to the replica; other files are not transferred until they are referenced by a replica command that needs the file content.\n\nFiles specified in the ArchiveDataFilter: field are transferred to the replica regardless of whether any users of the replica have made requests for their content.\n\nTo automatically transfer files on submit, use the syntax:\n`//depot/pattern/...`\n\nTo exclude files from automatic transfer, use the syntax:\n`-//depot/pattern/...`\n\nAll patterns are specified in depot syntax. attr_accessor :archive_data_filter # For an edge or commit server, this optional field, which is displayed only when you use the -l or -c option, shows configuration settings for this server.\n\n`-l` flag shows the current configuration.\n`-c-` flag shows current configuration values, recommended default values for fields that are not set, or unset for fields that are not set and do not have default values.\n\nIf this field is present when invoked with -c, the configuration commands in this field are run on the current server using the scope of the server specified in the serverID field. attr_accessor :distributed_config # Attribute mapping from ruby-style variable name to JSON key. def self.attribute_map { :'server_id' => :'serverID', :'type' => :'type', :'services' => :'services', :'name' => :'name', :'address' => :'address', :'external_address' => :'externalAddress', :'description' => :'description', :'user' => :'user', :'client_data_filter' => :'clientDataFilter', :'revision_data_filter' => :'revisionDataFilter', :'archive_data_filter' => :'archiveDataFilter', :'distributed_config' => :'distributedConfig' } end # Attribute type mapping. def self.swagger_types { :'server_id' => :'String', :'type' => :'String', :'services' => :'String', :'name' => :'String', :'address' => :'String', :'external_address' => :'String', :'description' => :'String', :'user' => :'String', :'client_data_filter' => :'String', :'revision_data_filter' => :'String', :'archive_data_filter' => :'String', :'distributed_config' => :'String' } end def initialize(attributes = {}) return unless attributes.is_a?(Hash) # convert string to symbol for hash key attributes = attributes.inject({}){|memo,(k,v)| memo[k.to_sym] = v; memo} if attributes[:'serverID'] self.server_id = attributes[:'serverID'] end if attributes[:'type'] self.type = attributes[:'type'] end if attributes[:'services'] self.services = attributes[:'services'] end if attributes[:'name'] self.name = attributes[:'name'] end if attributes[:'address'] self.address = attributes[:'address'] end if attributes[:'externalAddress'] self.external_address = attributes[:'externalAddress'] end if attributes[:'description'] self.description = attributes[:'description'] end if attributes[:'user'] self.user = attributes[:'user'] end if attributes[:'clientDataFilter'] self.client_data_filter = attributes[:'clientDataFilter'] end if attributes[:'revisionDataFilter'] self.revision_data_filter = attributes[:'revisionDataFilter'] end if attributes[:'archiveDataFilter'] self.archive_data_filter = attributes[:'archiveDataFilter'] end if attributes[:'distributedConfig'] self.distributed_config = attributes[:'distributedConfig'] end end # Check equality by comparing each attribute. def ==(o) return true if self.equal?(o) self.class == o.class && server_id == o.server_id && type == o.type && services == o.services && name == o.name && address == o.address && external_address == o.external_address && description == o.description && user == o.user && client_data_filter == o.client_data_filter && revision_data_filter == o.revision_data_filter && archive_data_filter == o.archive_data_filter && distributed_config == o.distributed_config end # @see the `==` method def eql?(o) self == o end # Calculate hash code according to all attributes. def hash [server_id, type, services, name, address, external_address, description, user, client_data_filter, revision_data_filter, archive_data_filter, distributed_config].hash end # build the object from hash def build_from_hash(attributes) return nil unless attributes.is_a?(Hash) self.class.swagger_types.each_pair do |key, type| if type =~ /^Array<(.*)>/i if attributes[self.class.attribute_map[key]].is_a?(Array) self.send("#{key}=", attributes[self.class.attribute_map[key]].map{ |v| _deserialize($1, v) } ) else #TODO show warning in debug mode end elsif !attributes[self.class.attribute_map[key]].nil? self.send("#{key}=", _deserialize(type, attributes[self.class.attribute_map[key]])) else # data not found in attributes(hash), not an issue as the data can be optional end end self end def _deserialize(type, value) case type.to_sym when :DateTime DateTime.parse(value) when :Date Date.parse(value) when :String value.to_s when :Integer value.to_i when :Float value.to_f when :BOOLEAN if value.to_s =~ /^(true|t|yes|y|1)$/i true else false end when /\AArray<(?.+)>\z/ inner_type = Regexp.last_match[:inner_type] value.map { |v| _deserialize(inner_type, v) } when /\AHash<(?.+), (?.+)>\z/ k_type = Regexp.last_match[:k_type] v_type = Regexp.last_match[:v_type] {}.tap do |hash| value.each do |k, v| hash[_deserialize(k_type, k)] = _deserialize(v_type, v) end end when :Object value else # model _model = HelixWebServices.const_get(type).new _model.build_from_hash(value) end end def to_s to_hash.to_s end # to_body is an alias to to_body (backward compatibility)) def to_body to_hash end # return the object in the form of hash def to_hash hash = {} self.class.attribute_map.each_pair do |attr, param| value = self.send(attr) next if value.nil? hash[param] = _to_hash(value) end hash end # Method to output non-array value in the form of hash # For object, use to_hash. Otherwise, just return the value def _to_hash(value) if value.is_a?(Array) value.compact.map{ |v| _to_hash(v) } elsif value.is_a?(Hash) {}.tap do |hash| value.each { |k, v| hash[k] = _to_hash(v) } end elsif value.respond_to? :to_hash value.to_hash else value end end end end