diff --git a/Makefile b/Makefile
index 55cb27fb..1e73b45d 100644
--- a/Makefile
+++ b/Makefile
@@ -207,3 +207,8 @@ deploy-images:
@DIMAGE=${IMAGE_ORG}/cstor-volume-manager-${XC_ARCH} ./build/push;
@DIMAGE=${IMAGE_ORG}/cstor-pool-manager-${XC_ARCH} ./build/push;
@DIMAGE=${IMAGE_ORG}/cstor-webhook-${XC_ARCH} ./build/push;
+
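+# gen-api-docs renders docs/api-references/apis.md from the cstor v1 Go types.
+# NOTE: -api-dir points at ../api/pkg/apis/cstor/v1, i.e. it expects the
+# openebs/api repository to be checked out as a sibling of this repo.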
+.PHONY: gen-api-docs
+gen-api-docs:
+ @echo ">> generating cstor 'v1' apis docs"
+ go run github.com/ahmetb/gen-crd-api-reference-docs -api-dir ../api/pkg/apis/cstor/v1 -config hack/api-docs/config.json -template-dir hack/api-docs/template -out-file docs/api-references/apis.md
diff --git a/docs/api-references/apis.md b/docs/api-references/apis.md
new file mode 100644
index 00000000..03d67a61
--- /dev/null
+++ b/docs/api-references/apis.md
@@ -0,0 +1,3682 @@
+
+Packages:
+
+cstor.openebs.io/v1
+
+
+Package v1 is the v1 version of the API.
+
+Resource Types:
+
+CStorPoolCluster
+
+
+
+CStorPoolCluster describes a CStorPoolCluster custom resource.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+apiVersion
+string |
+
+
+cstor.openebs.io/v1
+
+ |
+
+
+
+kind
+string
+ |
+CStorPoolCluster |
+
+
+
+metadata
+
+
+Kubernetes meta/v1.ObjectMeta
+
+
+ |
+
+Refer to the Kubernetes API documentation for the fields of the
+metadata field.
+ |
+
+
+
+spec
+
+
+CStorPoolClusterSpec
+
+
+ |
+
+
+
+
+
+
+pools
+
+
+[]PoolSpec
+
+
+ |
+
+ Pools is the spec for the pools on the various nodes
+where they should be created.
+ |
+
+
+
+resources
+
+
+Kubernetes core/v1.ResourceRequirements
+
+
+ |
+
+ DefaultResources are the compute resources required by the cstor-pool
+container.
+If the resources at PoolConfig are not specified, these are written
+to the CSPI PoolConfig.
+ |
+
+
+
+auxResources
+
+
+Kubernetes core/v1.ResourceRequirements
+
+
+ |
+
+ AuxResources are the compute resources required by the cstor-pool pod
+sidecar containers.
+ |
+
+
+
+tolerations
+
+
+[]Kubernetes core/v1.Toleration
+
+
+ |
+
+ Tolerations, if specified, are the pool pod’s tolerations.
+If the tolerations at PoolConfig are empty, these are written to
+the CSPI PoolConfig.
+ |
+
+
+
+priorityClassName
+
+string
+
+ |
+
+ DefaultPriorityClassName, if specified, applies to all the pool pods
+in the pool spec if the priorityClass at the pool level is
+not specified.
+ |
+
+
+ |
+
+
+
+status
+
+
+CStorPoolClusterStatus
+
+
+ |
+
+ |
+
+
+
+versionDetails
+
+
+VersionDetails
+
+
+ |
+
+ |
+
+
+
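+For illustration, a minimal CStorPoolCluster manifest might look like the
+following sketch; the node label value and blockdevice name are hypothetical
+placeholders, and the raid-group field names follow the openebs v1 schema:
+
+```yaml
+apiVersion: cstor.openebs.io/v1
+kind: CStorPoolCluster
+metadata:
+  name: cspc-stripe              # hypothetical name
+  namespace: openebs
+spec:
+  pools:
+    - nodeSelector:
+        kubernetes.io/hostname: worker-node-1        # hypothetical node
+      dataRaidGroups:
+        - blockDevices:
+            - blockDeviceName: blockdevice-abc123    # hypothetical device
+      poolConfig:
+        dataRaidGroupType: stripe
+```
+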
+CStorPoolInstance
+
+
+
+CStorPoolInstance describes a cstor pool instance resource.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+apiVersion
+string |
+
+
+cstor.openebs.io/v1
+
+ |
+
+
+
+kind
+string
+ |
+CStorPoolInstance |
+
+
+
+metadata
+
+
+Kubernetes meta/v1.ObjectMeta
+
+
+ |
+
+Refer to the Kubernetes API documentation for the fields of the
+metadata field.
+ |
+
+
+
+spec
+
+
+CStorPoolInstanceSpec
+
+
+ |
+
+ Spec is the specification of the cstorpoolinstance resource.
+
+
+
+
+
+hostName
+
+string
+
+ |
+
+ HostName is the name of the Kubernetes node where the pool
+should be created.
+ |
+
+
+
+nodeSelector
+
+map[string]string
+
+ |
+
+ NodeSelector is the set of labels used to select
+a node for pool provisioning.
+Required field.
+ |
+
+
+
+poolConfig
+
+
+PoolConfig
+
+
+ |
+
+ PoolConfig is the default pool config that applies to the
+pool on node.
+ |
+
+
+
+dataRaidGroups
+
+
+[]RaidGroup
+
+
+ |
+
+ DataRaidGroups is the raid group configuration for the given pool.
+ |
+
+
+
+writeCacheRaidGroups
+
+
+[]RaidGroup
+
+
+ |
+
+ WriteCacheRaidGroups is the write cache raid group.
+ |
+
+
+ |
+
+
+
+status
+
+
+CStorPoolInstanceStatus
+
+
+ |
+
+ Status holds the status of the cstorpoolinstance resource.
+ |
+
+
+
+versionDetails
+
+
+VersionDetails
+
+
+ |
+
+ VersionDetails is the openebs version.
+ |
+
+
+
+CStorVolume
+
+
+
+CStorVolume describes a cstor volume resource created as a custom resource.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+apiVersion
+string |
+
+
+cstor.openebs.io/v1
+
+ |
+
+
+
+kind
+string
+ |
+CStorVolume |
+
+
+
+metadata
+
+
+Kubernetes meta/v1.ObjectMeta
+
+
+ |
+
+Refer to the Kubernetes API documentation for the fields of the
+metadata field.
+ |
+
+
+
+spec
+
+
+CStorVolumeSpec
+
+
+ |
+
+
+
+
+
+
+capacity
+
+k8s.io/apimachinery/pkg/api/resource.Quantity
+
+ |
+
+ Capacity represents the desired size of the underlying volume.
+ |
+
+
+
+targetIP
+
+string
+
+ |
+
+ TargetIP is the IP of the iSCSI target service.
+ |
+
+
+
+targetPort
+
+string
+
+ |
+
+ iSCSI target port, typically TCP port 3260.
+ |
+
+
+
+iqn
+
+string
+
+ |
+
+ Target iSCSI Qualified Name (a combination derived from the nodeBase).
+ |
+
+
+
+targetPortal
+
+string
+
+ |
+
+ iSCSI target portal. The portal is a combination of IP:port (typically TCP port 3260).
+ |
+
+
+
+replicationFactor
+
+int
+
+ |
+
+ ReplicationFactor represents the number of volume replicas, created during volume
+provisioning, that connect to the target.
+ |
+
+
+
+consistencyFactor
+
+int
+
+ |
+
+ ConsistencyFactor is the minimum number of volume replicas, i.e. RF/2 + 1,
+that must be connected to the target for write operations; in other words, more than
+50% of the replicas have to be connected to the target. For example, with a
+ReplicationFactor of 3, the ConsistencyFactor is 2.
+ |
+
+
+
+desiredReplicationFactor
+
+int
+
+ |
+
+ DesiredReplicationFactor represents the maximum number of replicas
+that are allowed to connect to the target. Required for scale operations.
+ |
+
+
+
+replicaDetails
+
+
+CStorVolumeReplicaDetails
+
+
+ |
+
+ ReplicaDetails refers to the trusted replica information.
+ |
+
+
+ |
+
+
+
+status
+
+
+CStorVolumeStatus
+
+
+ |
+
+ |
+
+
+
+versionDetails
+
+
+VersionDetails
+
+
+ |
+
+ |
+
+
+
+CStorVolumeConfig
+
+
+
+CStorVolumeConfig describes a cstor volume config resource created as a
+custom resource. CStorVolumeConfig is a request for creating cstor volume
+related resources like deployments, services, etc.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+apiVersion
+string |
+
+
+cstor.openebs.io/v1
+
+ |
+
+
+
+kind
+string
+ |
+CStorVolumeConfig |
+
+
+
+metadata
+
+
+Kubernetes meta/v1.ObjectMeta
+
+
+ |
+
+Refer to the Kubernetes API documentation for the fields of the
+metadata field.
+ |
+
+
+
+spec
+
+
+CStorVolumeConfigSpec
+
+
+ |
+
+ Spec defines the specification of a cstor volume config required
+to provision cstor volume resources.
+
+
+
+
+
+capacity
+
+
+Kubernetes core/v1.ResourceList
+
+
+ |
+
+ Capacity represents the actual resources of the underlying
+cstor volume.
+ |
+
+
+
+cstorVolumeRef
+
+
+Kubernetes core/v1.ObjectReference
+
+
+ |
+
+ CStorVolumeRef has the information about where the CstorVolumeClaim
+was created from.
+ |
+
+
+
+cstorVolumeSource
+
+string
+
+ |
+
+ CStorVolumeSource contains the source volumeName@snapShotname
+combination. This is filled only for clone creation.
+ |
+
+
+
+provision
+
+
+VolumeProvision
+
+
+ |
+
+ Provision represents the initial volume configuration for the underlying
+cstor volume, based on the user’s persistent volume request. Provision
+properties are immutable.
+ |
+
+
+
+policy
+
+
+CStorVolumePolicySpec
+
+
+ |
+
+ Policy contains the volume-specific policies required for the target and replicas.
+ |
+
+
+ |
+
+
+
+publish
+
+
+CStorVolumeConfigPublish
+
+
+ |
+
+ Publish contains info related to attachment of a volume to a node.
+i.e. NodeId etc.
+ |
+
+
+
+status
+
+
+CStorVolumeConfigStatus
+
+
+ |
+
+ Status represents the current information/status for the cstor volume
+config, populated by the controller.
+ |
+
+
+
+versionDetails
+
+
+VersionDetails
+
+
+ |
+
+ |
+
+
+
+CStorVolumePolicy
+
+
+
+CStorVolumePolicy describes a configuration required for cstor volume
+resources.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+apiVersion
+string |
+
+
+cstor.openebs.io/v1
+
+ |
+
+
+
+kind
+string
+ |
+CStorVolumePolicy |
+
+
+
+metadata
+
+
+Kubernetes meta/v1.ObjectMeta
+
+
+ |
+
+Refer to the Kubernetes API documentation for the fields of the
+metadata field.
+ |
+
+
+
+spec
+
+
+CStorVolumePolicySpec
+
+
+ |
+
+ Spec defines the configuration of a cstor volume required
+to provision cstor volume resources.
+
+
+
+
+
+provision
+
+
+Provision
+
+
+ |
+
+ If replicaAffinity is set to true, volume replica resources are
+distributed across the pool instances.
+ |
+
+
+
+target
+
+
+TargetSpec
+
+
+ |
+
+ TargetSpec represents configuration related to cstor target and its resources
+ |
+
+
+
+replica
+
+
+ReplicaSpec
+
+
+ |
+
+ ReplicaSpec represents configuration related to replicas resources
+ |
+
+
+
+replicaPoolInfo
+
+
+[]ReplicaPoolInfo
+
+
+ |
+
+ ReplicaPoolInfo holds the pool information of the volume replicas,
+i.e. the CStor pools on which the volume replicas exist.
+ |
+
+
+ |
+
+
+
+status
+
+
+CStorVolumePolicyStatus
+
+
+ |
+
+ |
+
+
+
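+For illustration, a CStorVolumePolicy sketch exercising the fields above
+(all names and values are illustrative, not defaults):
+
+```yaml
+apiVersion: cstor.openebs.io/v1
+kind: CStorVolumePolicy
+metadata:
+  name: example-volume-policy    # hypothetical name
+  namespace: openebs
+spec:
+  provision:
+    replicaAffinity: true
+  target:
+    luWorkers: 6
+    queueDepth: "32"
+  replica:
+    zvolWorkers: "4"
+```
+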
+CStorVolumeReplica
+
+
+
+CStorVolumeReplica describes a cstor volume replica resource created as a custom resource.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+apiVersion
+string |
+
+
+cstor.openebs.io/v1
+
+ |
+
+
+
+kind
+string
+ |
+CStorVolumeReplica |
+
+
+
+metadata
+
+
+Kubernetes meta/v1.ObjectMeta
+
+
+ |
+
+Refer to the Kubernetes API documentation for the fields of the
+metadata field.
+ |
+
+
+
+spec
+
+
+CStorVolumeReplicaSpec
+
+
+ |
+
+
+
+
+
+
+targetIP
+
+string
+
+ |
+
+ TargetIP represents the iSCSI target IP through which the replica communicates
+IO workloads and other volume operations like snapshot and resize requests.
+ |
+
+
+
+capacity
+
+string
+
+ |
+
+ Represents the actual capacity of the underlying volume
+ |
+
+
+
+zvolWorkers
+
+string
+
+ |
+
+ ZvolWorkers represents the number of threads that execute client IOs.
+ |
+
+
+
+replicaid
+
+string
+
+ |
+
+ ReplicaID is a unique number to identify the replica.
+ |
+
+
+
+compression
+
+string
+
+ |
+
+ Controls the compression algorithm used for this volume.
+Examples: on|off|gzip|gzip-N|lz4|lzjb|zle
+ |
+
+
+
+blockSize
+
+uint32
+
+ |
+
+ BlockSize is the logical block size in multiples of 512 bytes.
+BlockSize specifies the block size of the volume. The blocksize
+cannot be changed once the volume has been written, so it should be
+set at volume creation time. The default blocksize for volumes is 4 Kbytes.
+Any power of 2 from 512 bytes to 128 Kbytes is valid.
+ |
+
+
+ |
+
+
+
+status
+
+
+CStorVolumeReplicaStatus
+
+
+ |
+
+ |
+
+
+
+versionDetails
+
+
+VersionDetails
+
+
+ |
+
+ |
+
+
+
+CSPCConditionType
+(string alias)
+
+(Appears on:
+CStorPoolClusterCondition)
+
+
+
+CSPIPredicate
+
+
+
+Predicate defines an abstraction to determine conditional checks against the
+provided CStorPoolInstance.
+
+CStorPoolClusterCondition
+
+
+(Appears on:
+CStorPoolClusterStatus)
+
+
+
+CStorPoolClusterCondition describes the state of a CSPC at a certain point.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+type
+
+
+CSPCConditionType
+
+
+ |
+
+ Type of CSPC condition.
+ |
+
+
+
+status
+
+
+Kubernetes core/v1.ConditionStatus
+
+
+ |
+
+ Status of the condition, one of True, False, Unknown.
+ |
+
+
+
+lastUpdateTime
+
+
+Kubernetes meta/v1.Time
+
+
+ |
+
+ The last time this condition was updated.
+ |
+
+
+
+lastTransitionTime
+
+
+Kubernetes meta/v1.Time
+
+
+ |
+
+ Last time the condition transitioned from one status to another.
+ |
+
+
+
+reason
+
+string
+
+ |
+
+ The reason for the condition’s last transition.
+ |
+
+
+
+message
+
+string
+
+ |
+
+ A human readable message indicating details about the transition.
+ |
+
+
+
+CStorPoolClusterSpec
+
+
+(Appears on:
+CStorPoolCluster)
+
+
+
+CStorPoolClusterSpec is the spec for a CStorPoolCluster resource.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+pools
+
+
+[]PoolSpec
+
+
+ |
+
+ Pools is the spec for the pools on the various nodes
+where they should be created.
+ |
+
+
+
+resources
+
+
+Kubernetes core/v1.ResourceRequirements
+
+
+ |
+
+ DefaultResources are the compute resources required by the cstor-pool
+container.
+If the resources at PoolConfig are not specified, these are written
+to the CSPI PoolConfig.
+ |
+
+
+
+auxResources
+
+
+Kubernetes core/v1.ResourceRequirements
+
+
+ |
+
+ AuxResources are the compute resources required by the cstor-pool pod
+sidecar containers.
+ |
+
+
+
+tolerations
+
+
+[]Kubernetes core/v1.Toleration
+
+
+ |
+
+ Tolerations, if specified, are the pool pod’s tolerations.
+If the tolerations at PoolConfig are empty, these are written to
+the CSPI PoolConfig.
+ |
+
+
+
+priorityClassName
+
+string
+
+ |
+
+ DefaultPriorityClassName, if specified, applies to all the pool pods
+in the pool spec if the priorityClass at the pool level is
+not specified.
+ |
+
+
+
+CStorPoolClusterStatus
+
+
+(Appears on:
+CStorPoolCluster)
+
+
+
+CStorPoolClusterStatus represents the latest available observations of a CSPC’s current state.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+provisionedInstances
+
+int32
+
+ |
+
+ ProvisionedInstances is the number of CSPIs present in the current state.
+ |
+
+
+
+desiredInstances
+
+int32
+
+ |
+
+ DesiredInstances is the number of CSPI(s) that should be provisioned.
+ |
+
+
+
+healthyInstances
+
+int32
+
+ |
+
+ HealthyInstances is the number of CSPI(s) that are healthy.
+ |
+
+
+
+conditions
+
+
+[]CStorPoolClusterCondition
+
+
+ |
+
+ Current state of CSPC.
+ |
+
+
+
+CStorPoolInstanceBlockDevice
+
+
+(Appears on:
+RaidGroup)
+
+
+
+CStorPoolInstanceBlockDevice contains the details of the block devices that
+constitute a raid group.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+blockDeviceName
+
+string
+
+ |
+
+ BlockDeviceName is the name of the block device.
+ |
+
+
+
+capacity
+
+uint64
+
+ |
+
+ Capacity is the capacity of the block device.
+It is system generated
+ |
+
+
+
+devLink
+
+string
+
+ |
+
+ DevLink is the dev link for block devices
+ |
+
+
+
+CStorPoolInstanceCapacity
+
+
+(Appears on:
+CStorPoolInstanceStatus)
+
+
+
+CStorPoolInstanceCapacity stores the pool capacity related attributes.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+used
+
+k8s.io/apimachinery/pkg/api/resource.Quantity
+
+ |
+
+ Amount of physical data (and its metadata) written to the pool
+after applying compression, etc.
+ |
+
+
+
+free
+
+k8s.io/apimachinery/pkg/api/resource.Quantity
+
+ |
+
+ Amount of usable space in the pool after excluding
+metadata and raid parity
+ |
+
+
+
+total
+
+k8s.io/apimachinery/pkg/api/resource.Quantity
+
+ |
+
+ Sum of usable capacity in all the data raidgroups
+ |
+
+
+
+zfs
+
+
+ZFSCapacityAttributes
+
+
+ |
+
+ ZFSCapacityAttributes contains advanced information about pool capacity details
+ |
+
+
+
+CStorPoolInstanceCondition
+
+
+(Appears on:
+CStorPoolInstanceStatus)
+
+
+
+CSPIConditionType describes the state of a CSPI at a certain point.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+type
+
+
+CStorPoolInstanceConditionType
+
+
+ |
+
+ Type of CSPI condition.
+ |
+
+
+
+status
+
+
+Kubernetes core/v1.ConditionStatus
+
+
+ |
+
+ Status of the condition, one of True, False, Unknown.
+ |
+
+
+
+lastUpdateTime
+
+
+Kubernetes meta/v1.Time
+
+
+ |
+
+ The last time this condition was updated.
+ |
+
+
+
+lastTransitionTime
+
+
+Kubernetes meta/v1.Time
+
+
+ |
+
+ Last time the condition transitioned from one status to another.
+ |
+
+
+
+reason
+
+string
+
+ |
+
+ The reason for the condition’s last transition.
+ |
+
+
+
+message
+
+string
+
+ |
+
+ A human readable message indicating details about the transition.
+ |
+
+
+
+CStorPoolInstanceConditionType
+(string alias)
+
+(Appears on:
+CStorPoolInstanceCondition)
+
+
+
+CStorPoolInstancePhase
+(string alias)
+
+(Appears on:
+CStorPoolInstanceStatus)
+
+
+
+CStorPoolInstancePhase is the phase for a CStorPoolInstance resource.
+
+CStorPoolInstanceSpec
+
+
+(Appears on:
+CStorPoolInstance)
+
+
+
+CStorPoolInstanceSpec is the spec listing fields for a CStorPoolInstance resource.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+hostName
+
+string
+
+ |
+
+ HostName is the name of the Kubernetes node where the pool
+should be created.
+ |
+
+
+
+nodeSelector
+
+map[string]string
+
+ |
+
+ NodeSelector is the set of labels used to select
+a node for pool provisioning.
+Required field.
+ |
+
+
+
+poolConfig
+
+
+PoolConfig
+
+
+ |
+
+ PoolConfig is the default pool config that applies to the
+pool on node.
+ |
+
+
+
+dataRaidGroups
+
+
+[]RaidGroup
+
+
+ |
+
+ DataRaidGroups is the raid group configuration for the given pool.
+ |
+
+
+
+writeCacheRaidGroups
+
+
+[]RaidGroup
+
+
+ |
+
+ WriteCacheRaidGroups is the write cache raid group.
+ |
+
+
+
+CStorPoolInstanceStatus
+
+
+(Appears on:
+CStorPoolInstance)
+
+
+
+CStorPoolInstanceStatus is for handling the status of the pool.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+conditions
+
+
+[]CStorPoolInstanceCondition
+
+
+ |
+
+ Current state of CSPI with details.
+ |
+
+
+
+phase
+
+
+CStorPoolInstancePhase
+
+
+ |
+
+ The phase of a CStorPool is a simple, high-level summary of the pool state on the
+node.
+ |
+
+
+
+capacity
+
+
+CStorPoolInstanceCapacity
+
+
+ |
+
+ Capacity describes the capacity details of a cstor pool
+ |
+
+
+
+readOnly
+
+bool
+
+ |
+
+ ReadOnly indicates whether the pool is read-only or not.
+ |
+
+
+
+CStorSnapshotInfo
+
+
+(Appears on:
+CStorVolumeReplicaStatus)
+
+
+
+CStorSnapshotInfo represents the information related to a particular
+snapshot.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+logicalReferenced
+
+uint64
+
+ |
+
+ LogicalReferenced describes the amount of space that is “logically”
+accessible by this snapshot. This logical space ignores the
+effect of the compression and copies properties, giving a quantity
+closer to the amount of data that applications see. It also includes
+space consumed by metadata.
+ |
+
+
+
+CStorVolumeCondition
+
+
+(Appears on:
+CStorVolumeStatus)
+
+
+
+CStorVolumeCondition contains details about the state of a cstorvolume.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+type
+
+
+CStorVolumeConditionType
+
+
+ |
+
+ |
+
+
+
+status
+
+
+ConditionStatus
+
+
+ |
+
+ |
+
+
+
+lastProbeTime
+
+
+Kubernetes meta/v1.Time
+
+
+ |
+
+(Optional)
+ Last time we probed the condition.
+ |
+
+
+
+lastTransitionTime
+
+
+Kubernetes meta/v1.Time
+
+
+ |
+
+(Optional)
+ Last time the condition transitioned from one status to another.
+ |
+
+
+
+reason
+
+string
+
+ |
+
+(Optional)
+ Unique, this should be a short, machine-understandable string that gives the reason
+for the condition’s last transition. If it reports “ResizePending”, that means the underlying
+cstorvolume is being resized.
+ |
+
+
+
+message
+
+string
+
+ |
+
+(Optional)
+ Human-readable message indicating details about last transition.
+ |
+
+
+
+CStorVolumeConditionType
+(string alias)
+
+(Appears on:
+CStorVolumeCondition)
+
+
+
+CStorVolumeConditionType is a valid value of CStorVolumeCondition.Type.
+
+CStorVolumeConfigCondition
+
+
+(Appears on:
+CStorVolumeConfigStatus)
+
+
+
+CStorVolumeConfigCondition contains details about the state of a cstor volume config.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+type
+
+
+CStorVolumeConfigConditionType
+
+
+ |
+
+ Current Condition of the cstor volume config. If the underlying persistent volume is being
+resized, then the Condition will be set to ‘ResizeStarted’, etc.
+ |
+
+
+
+lastProbeTime
+
+
+Kubernetes meta/v1.Time
+
+
+ |
+
+(Optional)
+ Last time we probed the condition.
+ |
+
+
+
+lastTransitionTime
+
+
+Kubernetes meta/v1.Time
+
+
+ |
+
+(Optional)
+ Last time the condition transitioned from one status to another.
+ |
+
+
+
+reason
+
+string
+
+ |
+
+ Reason is a brief CamelCase string that describes any failure
+ |
+
+
+
+message
+
+string
+
+ |
+
+ Human-readable message indicating details about last transition.
+ |
+
+
+
+CStorVolumeConfigConditionType
+(string alias)
+
+(Appears on:
+CStorVolumeConfigCondition)
+
+
+
+CStorVolumeConfigConditionType is a valid value of CstorVolumeConfigCondition.Type.
+
+CStorVolumeConfigPhase
+(string alias)
+
+(Appears on:
+CStorVolumeConfigStatus)
+
+
+
+CStorVolumeConfigPhase represents the current phase of CStorVolumeConfig.
+
+CStorVolumeConfigPublish
+
+
+(Appears on:
+CStorVolumeConfig)
+
+
+
+CStorVolumeConfigPublish contains info related to the attachment of a volume to a node,
+i.e. NodeId etc.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+nodeId
+
+string
+
+ |
+
+ NodeID contains publish info related to attachment of a volume to a node.
+ |
+
+
+
+CStorVolumeConfigSpec
+
+
+(Appears on:
+CStorVolumeConfig)
+
+
+
+CStorVolumeConfigSpec is the spec for a CStorVolumeConfig resource.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+capacity
+
+
+Kubernetes core/v1.ResourceList
+
+
+ |
+
+ Capacity represents the actual resources of the underlying
+cstor volume.
+ |
+
+
+
+cstorVolumeRef
+
+
+Kubernetes core/v1.ObjectReference
+
+
+ |
+
+ CStorVolumeRef has the information about where the CstorVolumeClaim
+was created from.
+ |
+
+
+
+cstorVolumeSource
+
+string
+
+ |
+
+ CStorVolumeSource contains the source volumeName@snapShotname
+combination. This is filled only for clone creation.
+ |
+
+
+
+provision
+
+
+VolumeProvision
+
+
+ |
+
+ Provision represents the initial volume configuration for the underlying
+cstor volume, based on the user’s persistent volume request. Provision
+properties are immutable.
+ |
+
+
+
+policy
+
+
+CStorVolumePolicySpec
+
+
+ |
+
+ Policy contains the volume-specific policies required for the target and replicas.
+ |
+
+
+
+CStorVolumeConfigStatus
+
+
+(Appears on:
+CStorVolumeConfig)
+
+
+
+CStorVolumeConfigStatus is for handling the status of the CstorVolumeClaim;
+it defines the observed state of CStorVolumeConfig.
+
+
+CStorVolumePhase
+(string alias)
+
+(Appears on:
+CStorVolumeStatus)
+
+
+
+CStorVolumePhase is to hold the result of an action.
+
+CStorVolumePolicySpec
+
+
+(Appears on:
+CStorVolumePolicy,
+CStorVolumeConfigSpec)
+
+
+
+CStorVolumePolicySpec …
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+provision
+
+
+Provision
+
+
+ |
+
+ If replicaAffinity is set to true, volume replica resources are
+distributed across the pool instances.
+ |
+
+
+
+target
+
+
+TargetSpec
+
+
+ |
+
+ TargetSpec represents configuration related to cstor target and its resources
+ |
+
+
+
+replica
+
+
+ReplicaSpec
+
+
+ |
+
+ ReplicaSpec represents configuration related to replicas resources
+ |
+
+
+
+replicaPoolInfo
+
+
+[]ReplicaPoolInfo
+
+
+ |
+
+ ReplicaPoolInfo holds the pool information of the volume replicas,
+i.e. the CStor pools on which the volume replicas exist.
+ |
+
+
+
+CStorVolumePolicyStatus
+
+
+(Appears on:
+CStorVolumePolicy)
+
+
+
+CStorVolumePolicyStatus is for handling the status of a CstorVolumePolicy.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+phase
+
+string
+
+ |
+
+ |
+
+
+
+CStorVolumeReplicaCapacityDetails
+
+
+(Appears on:
+CStorVolumeReplicaStatus)
+
+
+
+CStorVolumeReplicaCapacityDetails represents capacity information related to a volume
+replica.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+total
+
+string
+
+ |
+
+ The amount of space consumed by this volume replica and all its descendents
+ |
+
+
+
+used
+
+string
+
+ |
+
+ The amount of space that is “logically” accessible by this dataset. The logical
+space ignores the effect of the compression and copies properties, giving a
+quantity closer to the amount of data that applications see. However, it does
+include space consumed by metadata
+ |
+
+
+
+CStorVolumeReplicaDetails
+
+
+(Appears on:
+CStorVolumeSpec,
+CStorVolumeStatus)
+
+
+
+CStorVolumeReplicaDetails contains the trusted replica information, which will be
+updated by the target.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+knownReplicas
+
+map[github.com/openebs/api/pkg/apis/cstor/v1.ReplicaID]string
+
+ |
+
+ KnownReplicas represents the replicas that the target can trust to read data.
+ |
+
+
+
+CStorVolumeReplicaPhase
+(string alias)
+
+(Appears on:
+CStorVolumeReplicaStatus)
+
+
+
+CStorVolumeReplicaPhase is to hold the result of an action.
+
+CStorVolumeReplicaSpec
+
+
+(Appears on:
+CStorVolumeReplica)
+
+
+
+CStorVolumeReplicaSpec is the spec for a CStorVolumeReplica resource.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+targetIP
+
+string
+
+ |
+
+ TargetIP represents the iSCSI target IP through which the replica communicates
+IO workloads and other volume operations like snapshot and resize requests.
+ |
+
+
+
+capacity
+
+string
+
+ |
+
+ Represents the actual capacity of the underlying volume
+ |
+
+
+
+zvolWorkers
+
+string
+
+ |
+
+ ZvolWorkers represents the number of threads that execute client IOs.
+ |
+
+
+
+replicaid
+
+string
+
+ |
+
+ ReplicaID is a unique number to identify the replica.
+ |
+
+
+
+compression
+
+string
+
+ |
+
+ Controls the compression algorithm used for this volume.
+Examples: on|off|gzip|gzip-N|lz4|lzjb|zle
+ |
+
+
+
+blockSize
+
+uint32
+
+ |
+
+ BlockSize is the logical block size in multiples of 512 bytes.
+BlockSize specifies the block size of the volume. The blocksize
+cannot be changed once the volume has been written, so it should be
+set at volume creation time. The default blocksize for volumes is 4 Kbytes.
+Any power of 2 from 512 bytes to 128 Kbytes is valid.
+ |
+
+
+
+CStorVolumeReplicaStatus
+
+
+(Appears on:
+CStorVolumeReplica)
+
+
+
+CStorVolumeReplicaStatus is for handling the status of a CVR.
+
+
+CStorVolumeSpec
+
+
+(Appears on:
+CStorVolume)
+
+
+
+CStorVolumeSpec is the spec for a CStorVolume resource.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+capacity
+
+k8s.io/apimachinery/pkg/api/resource.Quantity
+
+ |
+
+ Capacity represents the desired size of the underlying volume.
+ |
+
+
+
+targetIP
+
+string
+
+ |
+
+ TargetIP is the IP of the iSCSI target service.
+ |
+
+
+
+targetPort
+
+string
+
+ |
+
+ iSCSI target port, typically TCP port 3260.
+ |
+
+
+
+iqn
+
+string
+
+ |
+
+ Target iSCSI Qualified Name (a combination derived from the nodeBase).
+ |
+
+
+
+targetPortal
+
+string
+
+ |
+
+ iSCSI target portal. The portal is a combination of IP:port (typically TCP port 3260).
+ |
+
+
+
+replicationFactor
+
+int
+
+ |
+
+ ReplicationFactor represents the number of volume replicas, created during volume
+provisioning, that connect to the target.
+ |
+
+
+
+consistencyFactor
+
+int
+
+ |
+
+ ConsistencyFactor is the minimum number of volume replicas, i.e. RF/2 + 1,
+that must be connected to the target for write operations; in other words, more than
+50% of the replicas have to be connected to the target. For example, with a
+ReplicationFactor of 3, the ConsistencyFactor is 2.
+ |
+
+
+
+desiredReplicationFactor
+
+int
+
+ |
+
+ DesiredReplicationFactor represents the maximum number of replicas
+that are allowed to connect to the target. Required for scale operations.
+ |
+
+
+
+replicaDetails
+
+
+CStorVolumeReplicaDetails
+
+
+ |
+
+ ReplicaDetails refers to the trusted replica information.
+ |
+
+
+
+CStorVolumeStatus
+
+
+(Appears on:
+CStorVolume)
+
+
+
+CStorVolumeStatus is for handling the status of a cstor volume.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+phase
+
+
+CStorVolumePhase
+
+
+ |
+
+ |
+
+
+
+replicaStatuses
+
+
+[]ReplicaStatus
+
+
+ |
+
+ |
+
+
+
+capacity
+
+k8s.io/apimachinery/pkg/api/resource.Quantity
+
+ |
+
+ Represents the actual capacity of the underlying volume.
+ |
+
+
+
+lastTransitionTime
+
+
+Kubernetes meta/v1.Time
+
+
+ |
+
+ LastTransitionTime refers to the time when the phase changes
+ |
+
+
+
+lastUpdateTime
+
+
+Kubernetes meta/v1.Time
+
+
+ |
+
+ LastUpdateTime refers to the time when the status was last updated due to any
+operation.
+ |
+
+
+
+message
+
+string
+
+ |
+
+ A human-readable message indicating details about why the volume is in this state.
+ |
+
+
+
+conditions
+
+
+[]CStorVolumeCondition
+
+
+ |
+
+(Optional)
+ Current Condition of cstorvolume. If underlying persistent volume is being
+resized then the Condition will be set to ‘ResizePending’.
+ |
+
+
+
+replicaDetails
+
+
+CStorVolumeReplicaDetails
+
+
+ |
+
+ ReplicaDetails refers to the trusted replica information.
+ |
+
+
+
+CVRKey
+(string alias)
+
+
+CVRKey represents the properties of a cstorvolumereplica.
+
+CVStatus
+
+
+(Appears on:
+CVStatusResponse)
+
+
+
+CVStatus stores the status of a CstorVolume obtained from a response.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+name
+
+string
+
+ |
+
+ |
+
+
+
+status
+
+string
+
+ |
+
+ |
+
+
+
+replicaStatus
+
+
+[]ReplicaStatus
+
+
+ |
+
+ |
+
+
+
+CVStatusResponse
+
+
+
+CVStatusResponse stores the response of the istgt replica command output.
+It may contain several volumes.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+volumeStatus
+
+
+[]CVStatus
+
+
+ |
+
+ |
+
+
+
+ConditionStatus
+(string alias)
+
+(Appears on:
+CStorVolumeCondition)
+
+
+
+ConditionStatus indicates the state in which the condition is present.
+
+Conditions
+([]github.com/openebs/api/pkg/apis/cstor/v1.CStorVolumeCondition alias)
+
+
+Conditions enables building CRUD operations on cstorvolume conditions.
+
+PoolConfig
+
+
+(Appears on:
+CStorPoolInstanceSpec,
+PoolSpec)
+
+
+
+PoolConfig is the default pool config that applies to the
+pool on a node.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+dataRaidGroupType
+
+string
+
+ |
+
+ DataRaidGroupType is the raid type.
+ |
+
+
+
+writeCacheGroupType
+
+string
+
+ |
+
+ WriteCacheGroupType is the write cache raid type.
+ |
+
+
+
+thickProvision
+
+bool
+
+ |
+
+ ThickProvision enables thick provisioning.
+Optional – defaults to false.
+ |
+
+
+
+compression
+
+string
+
+ |
+
+ Compression enables compression.
+Optional – defaults to off.
+Possible values: lz, off.
+ |
+
+
+
+resources
+
+
+Kubernetes core/v1.ResourceRequirements
+
+
+ |
+
+ Resources are the compute resources required by the cstor-pool
+container.
+ |
+
+
+
+auxResources
+
+
+Kubernetes core/v1.ResourceRequirements
+
+
+ |
+
+ AuxResources are the compute resources required by the cstor-pool pod
+side car containers.
+ |
+
+
+
+tolerations
+
+
+[]Kubernetes core/v1.Toleration
+
+
+ |
+
+ Tolerations, if specified, are the pool pod’s tolerations.
+ |
+
+
+
+priorityClassName
+
+string
+
+ |
+
+ PriorityClassName, if specified, applies to this pool pod.
+If left empty, DefaultPriorityClassName is applied.
+(See CStorPoolClusterSpec.DefaultPriorityClassName)
+If both are empty, no priority class is applied.
+ |
+
+
+
+roThresholdLimit
+
+int
+
+ |
+
+ ROThresholdLimit is the threshold limit (percentage based)
+for pool read-only mode. If ROThresholdLimit(%) of the
+pool storage is used, the pool will be set to read-only.
+NOTE:
+1. If ROThresholdLimit is set to 100, the entire
+pool storage will be used; by default it is set to 85%.
+2. ROThresholdLimit must satisfy 0 <= ROThresholdLimit <= 100.
+ |
+
+
+
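+As a sketch, these knobs compose inside a pool spec like this (values are
+illustrative; the priority class name is hypothetical):
+
+```yaml
+poolConfig:
+  dataRaidGroupType: mirror
+  thickProvision: false
+  compression: lz
+  roThresholdLimit: 85
+  priorityClassName: openebs-pool-critical   # hypothetical class
+```
+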
+PoolSpec
+
+
+(Appears on:
+CStorPoolClusterSpec)
+
+
+
+PoolSpec is the spec for a pool on the node where it should be created.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+nodeSelector
+
+map[string]string
+
+ |
+
+ NodeSelector is the set of labels used to select
+a node for pool provisioning.
+Required field.
+ |
+
+
+
+dataRaidGroups
+
+
+[]RaidGroup
+
+
+ |
+
+ DataRaidGroups is the raid group configuration for the given pool.
+ |
+
+
+
+writeCacheRaidGroups
+
+
+[]RaidGroup
+
+
+ |
+
+ WriteCacheRaidGroups is the write cache raid group.
+ |
+
+
+
+poolConfig
+
+
+PoolConfig
+
+
+ |
+
+ PoolConfig is the default pool config that applies to the
+pool on node.
+ |
+
+
+
+PoolType
+(string alias)
+
+
+PoolType is a label for the pool type of a cStor pool.
+
+Provision
+
+
+(Appears on:
+CStorVolumePolicySpec)
+
+
+
+Provision represents the different provisioning policies for cstor volumes.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+replicaAffinity
+
+bool
+
+ |
+
+ If replicaAffinity is set to true, volume replica resources are
+distributed across the cstor pool instances based on the given topology.
+ |
+
+
+
+blockSize
+
+uint32
+
+ |
+
+ BlockSize is the logical block size in multiples of 512 bytes.
+BlockSize specifies the block size of the volume. The blocksize
+cannot be changed once the volume has been written, so it should be
+set at volume creation time. The default blocksize for volumes is 4 Kbytes.
+Any power of 2 from 512 bytes to 128 Kbytes is valid.
+ |
+
+
+
+RaidGroup
+
+
+(Appears on:
+CStorPoolInstanceSpec,
+PoolSpec)
+
+
+
+RaidGroup contains the details of a raid group for the pool.
+
+
+ReplicaID
+(string alias)
+
+
+ReplicaID is to hold replicaID information.
+
+ReplicaPoolInfo
+
+
+(Appears on:
+CStorVolumePolicySpec)
+
+
+
+ReplicaPoolInfo represents the pool information of a volume replica.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+poolName
+
+string
+
+ |
+
+ PoolName represents the pool name where volume replica exists
+ |
+
+
+
+ReplicaSpec
+
+
+(Appears on:
+CStorVolumePolicySpec)
+
+
+
+ReplicaSpec represents configuration related to replica resources.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+zvolWorkers
+
+string
+
+ |
+
+ IOWorkers represents the number of threads that execute client IOs.
+ |
+
+
+
+compression
+
+string
+
+ |
+
+ The zle compression algorithm compresses runs of zeros.
+ |
+
+
+
+ReplicaStatus
+
+
+(Appears on:
+CStorVolumeStatus,
+CVStatus)
+
+
+
+ReplicaStatus stores the status of the replicas.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+replicaId
+
+string
+
+ |
+
+ ID is the replica’s unique identifier.
+ |
+
+
+
+mode
+
+string
+
+ |
+
+ Mode represents replica status i.e. Healthy, Degraded
+ |
+
+
+
+checkpointedIOSeq
+
+string
+
+ |
+
+ Represents the IO number of the replica persisted on the disk.
+ |
+
+
+
+inflightRead
+
+string
+
+ |
+
+ Ongoing read I/O from target to replica.
+ |
+
+
+
+inflightWrite
+
+string
+
+ |
+
+ Ongoing write I/O from target to replica.
+ |
+
+
+
+inflightSync
+
+string
+
+ |
+
+ Ongoing sync I/O from target to replica
+ |
+
+
+
+upTime
+
+int
+
+ |
+
+ Time since the replica connected to the target.
+ |
+
+
+
+quorum
+
+string
+
+ |
+
+ Quorum indicates whether the data written to the replica
+is lost or exists.
+“0” means: the data has been lost (might be an ephemeral case)
+and the replica will reconstruct its data from other healthy replicas
+in a write-only mode.
+“1” means: the written data exists on the replica.
+ |
+
+
+
+TargetSpec
+
+
+(Appears on:
+CStorVolumePolicySpec)
+
+
+
+TargetSpec represents configuration related to the cstor target and its resources.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+queueDepth
+
+string
+
+ |
+
+ QueueDepth sets the queue size at the iSCSI target, which limits the
+ongoing IO count from the client.
+ |
+
+
+
+luWorkers
+
+int64
+
+ |
+
+ IOWorkers sets the number of threads that work on the above queue.
+ |
+
+
+
+monitor
+
+bool
+
+ |
+
+ Monitor enables or disables the target exporter sidecar
+ |
+
+
+
+replicationFactor
+
+int64
+
+ |
+
+ ReplicationFactor represents the maximum number of replicas
+that are allowed to connect to the target.
+ |
+
+
+
+resources
+
+
+Kubernetes core/v1.ResourceRequirements
+
+
+ |
+
+ Resources are the compute resources required by the cstor-target
+container.
+ |
+
+
+
+auxResources
+
+
+Kubernetes core/v1.ResourceRequirements
+
+
+ |
+
+ AuxResources are the compute resources required by the cstor-target pod
+sidecar containers.
+ |
+
+
+
+tolerations
+
+
+[]Kubernetes core/v1.Toleration
+
+
+ |
+
+ Tolerations, if specified, are the target pod’s tolerations
+ |
+
+
+
+affinity
+
+
+Kubernetes core/v1.PodAffinity
+
+
+ |
+
+ PodAffinity, if specified, is the target pod’s affinity.
+ |
+
+
+
+nodeSelector
+
+map[string]string
+
+ |
+
+ NodeSelector is the set of labels used to select
+a node for target pod scheduling.
+Required field.
+ |
+
+
+
+priorityClassName
+
+string
+
+ |
+
+ PriorityClassName, if specified, applies to this target pod.
+If left empty, no priority class is applied.
+ |
+
+
+
+VersionDetails
+
+
+(Appears on:
+CStorPoolCluster,
+CStorPoolInstance,
+CStorVolume,
+CStorVolumeConfig,
+CStorVolumeReplica)
+
+
+
+VersionDetails provides the details for an upgrade.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+autoUpgrade
+
+bool
+
+ |
+
+ If AutoUpgrade is set to true, then the resource is
+upgraded automatically without any manual steps.
+ |
+
+
+
+desired
+
+string
+
+ |
+
+ Desired is the version that we want to
+upgrade to, i.e. the control plane version.
+ |
+
+
+
+status
+
+
+VersionStatus
+
+
+ |
+
+ Status gives the status of the reconciliation triggered
+when the desired and current versions are not the same.
+ |
+
+
+
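+As an illustration, a versionDetails stanza on a resource mid-upgrade might
+look like the following sketch (version numbers and the state value are
+illustrative):
+
+```yaml
+versionDetails:
+  autoUpgrade: false
+  desired: 1.10.0
+  status:
+    current: 1.9.0
+    state: Reconciling
+```
+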
+VersionState
+(string alias)
+
+(Appears on:
+VersionStatus)
+
+
+
+VersionState is the state of reconciliation.
+
+VersionStatus
+
+
+(Appears on:
+VersionDetails)
+
+
+
+VersionStatus is the status of the reconciliation of versions.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+dependentsUpgraded
+
+bool
+
+ |
+
+ DependentsUpgraded gives the details of whether all children
+of a resource are upgraded to the desired version or not.
+ |
+
+
+
+current
+
+string
+
+ |
+
+ Current is the version of the resource.
+ |
+
+
+
+state
+
+
+VersionState
+
+
+ |
+
+ State is the state of reconciliation
+ |
+
+
+
+message
+
+string
+
+ |
+
+ Message is a human readable message if some error occurs
+ |
+
+
+
+reason
+
+string
+
+ |
+
+ Reason is the actual reason for the error state
+ |
+
+
+
+lastUpdateTime
+
+
+Kubernetes meta/v1.Time
+
+
+ |
+
+ LastUpdateTime is the time the status was last updated
+ |
+
+
+
+VolumeProvision
+
+
+(Appears on:
+CStorVolumeConfigSpec)
+
+
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+capacity
+
+
+Kubernetes core/v1.ResourceList
+
+
+ |
+
+ Capacity represents the initial capacity of the volume replica required during
+volume clone operations to maintain some metadata info related to child
+resources like snapshots and cloned volumes.
+ |
+
+
+
+replicaCount
+
+int
+
+ |
+
+ ReplicaCount represents the initial cstor volume replica count; it will not
+be updated later on by scale up/down operations, and is used only for read-only
+operations and validations.
+ |
+
+
+
+ZFSCapacityAttributes
+
+
+(Appears on:
+CStorPoolInstanceCapacity)
+
+
+
+ZFSCapacityAttributes stores advanced information about pool capacity related
+attributes.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+logicalUsed
+
+k8s.io/apimachinery/pkg/api/resource.Quantity
+
+ |
+
+ LogicalUsed is the amount of space that is “logically” consumed
+by this pool and all its descendents. The logical space ignores
+the effect of the compression and copies properties, giving a
+quantity closer to the amount of data that applications see.
+However, it does include space consumed by metadata.
+ |
+
+
+
+
+
+Generated with gen-crd-api-reference-docs
+on git commit 81e1720.
+
diff --git a/go.mod b/go.mod
index dffa06a8..4ad14146 100644
--- a/go.mod
+++ b/go.mod
@@ -3,6 +3,7 @@ module github.com/openebs/cstor-operators
go 1.13
require (
+ github.com/ahmetb/gen-crd-api-reference-docs v0.1.5
github.com/davecgh/go-spew v1.1.1
github.com/evanphx/json-patch v4.5.0+incompatible // indirect
github.com/ghodss/yaml v1.0.0
diff --git a/go.sum b/go.sum
index 7ab40f0f..f05cd983 100644
--- a/go.sum
+++ b/go.sum
@@ -33,6 +33,8 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko
github.com/Rican7/retry v0.1.0/go.mod h1:FgOROf8P5bebcC1DS0PdOQiqGUridaZvikzUmkFW6gg=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
+github.com/ahmetb/gen-crd-api-reference-docs v0.1.5 h1:OU+AFpBEhyclrQGx4I6zpCx5WvXiKqvFeeOASOmhKCY=
+github.com/ahmetb/gen-crd-api-reference-docs v0.1.5/go.mod h1:P/XzJ+c2+khJKNKABcm2biRwk2QAuwbLf8DlXuaL7WM=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
@@ -385,10 +387,6 @@ github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runtime-spec v1.0.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.3.1-0.20190929122143-5215b1806f52/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
-github.com/openebs/api v1.10.0-RC1.0.20200528063752-522f45d17d59 h1:9fZ9KGtlStVxI/dQUmNhCnHt6gn8cuuCBo7kpTI+ZdE=
-github.com/openebs/api v1.10.0-RC1.0.20200528063752-522f45d17d59/go.mod h1:TASujm6H1LGdx43MN7Dab1xdAqR7MVU8bsS74Ywop5w=
-github.com/openebs/api v1.10.0-RC1.0.20200602151240-2b7d2bdbe1ef h1:p66ZTG26pNr7TIxOMLmXvJcKjmIxIa+xsQ5Xw0hhJA4=
-github.com/openebs/api v1.10.0-RC1.0.20200602151240-2b7d2bdbe1ef/go.mod h1:TASujm6H1LGdx43MN7Dab1xdAqR7MVU8bsS74Ywop5w=
github.com/openebs/api v1.10.0-RC1.0.20200608150240-08b494f77b77 h1:Daq7JniS96LQOSOgneBwEIYOZDh4iuxZBOy6jbw5LHo=
github.com/openebs/api v1.10.0-RC1.0.20200608150240-08b494f77b77/go.mod h1:TASujm6H1LGdx43MN7Dab1xdAqR7MVU8bsS74Ywop5w=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
@@ -421,7 +419,10 @@ github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto=
github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
@@ -432,6 +433,8 @@ github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
+github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -625,6 +628,7 @@ golang.org/x/tools v0.0.0-20190121143147-24cd39ecf745/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20190122202912-9c309ee22fab/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190213192042-740235f6c0d8/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
@@ -718,9 +722,11 @@ k8s.io/component-base v0.17.3/go.mod h1:GeQf4BrgelWm64PXkIXiPh/XS0hnO42d9gx9BtbZ
k8s.io/cri-api v0.17.4-beta.0/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
k8s.io/csi-translation-lib v0.17.3/go.mod h1:FBya8XvGIqDm2/3evLQNxaFXqv/C2UcZa5JgJt6/qqY=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20190822140433-26a664648505 h1:ZY6yclUKVbZ+SdWnkfY+Je5vrMpKOxmGeKRbsXVmqYM=
k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
diff --git a/hack/api-docs/config.json b/hack/api-docs/config.json
new file mode 100644
index 00000000..e5fdbc70
--- /dev/null
+++ b/hack/api-docs/config.json
@@ -0,0 +1,28 @@
+{
+ "hideMemberFields": [
+ "TypeMeta"
+ ],
+ "hideTypePatterns": [
+ "ParseError$",
+ "List$"
+ ],
+ "externalPackages": [
+ {
+ "typeMatchPrefix": "^k8s\\.io/apimachinery/pkg/apis/meta/v1\\.Duration$",
+ "docsURLTemplate": "https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration"
+ },
+ {
+ "typeMatchPrefix": "^k8s\\.io/(api|apimachinery/pkg/apis)/",
+ "docsURLTemplate": "https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#{{lower .TypeIdentifier}}-{{arrIndex .PackageSegments -1}}-{{arrIndex .PackageSegments -2}}"
+ },
+ {
+ "typeMatchPrefix": "^github\\.com/knative/pkg/apis/duck/",
+ "docsURLTemplate": "https://godoc.org/github.com/knative/pkg/apis/duck/{{arrIndex .PackageSegments -1}}#{{.TypeIdentifier}}"
+ }
+ ],
+ "typeDisplayNamePrefixOverrides": {
+ "k8s.io/api/": "Kubernetes ",
+ "k8s.io/apimachinery/pkg/apis/": "Kubernetes "
+ },
+ "markdownDisabled": false
+}
diff --git a/hack/api-docs/template/members.tpl b/hack/api-docs/template/members.tpl
new file mode 100644
index 00000000..9f08d1aa
--- /dev/null
+++ b/hack/api-docs/template/members.tpl
@@ -0,0 +1,48 @@
+{{ define "members" }}
+
+{{ range .Members }}
+{{ if not (hiddenMember .)}}
+
+
+ {{ fieldName . }}
+
+ {{ if linkForType .Type }}
+
+ {{ typeDisplayName .Type }}
+
+ {{ else }}
+ {{ typeDisplayName .Type }}
+ {{ end }}
+
+ |
+
+ {{ if fieldEmbedded . }}
+
+ (Members of {{ fieldName . }} are embedded into this type.)
+
+ {{ end}}
+
+ {{ if isOptionalMember .}}
+ (Optional)
+ {{ end }}
+
+ {{ safe (renderComments .CommentLines) }}
+
+ {{ if and (eq (.Type.Name.Name) "ObjectMeta") }}
+ Refer to the Kubernetes API documentation for the fields of the
+ metadata field.
+ {{ end }}
+
+ {{ if or (eq (fieldName .) "spec") }}
+
+
+
+ {{ template "members" .Type }}
+
+ {{ end }}
+ |
+
+{{ end }}
+{{ end }}
+
+{{ end }}
diff --git a/hack/api-docs/template/pkg.tpl b/hack/api-docs/template/pkg.tpl
new file mode 100644
index 00000000..2f03a803
--- /dev/null
+++ b/hack/api-docs/template/pkg.tpl
@@ -0,0 +1,49 @@
+{{ define "packages" }}
+
+{{ with .packages}}
+Packages:
+
+{{ end}}
+
+{{ range .packages }}
+
+ {{- packageDisplayName . -}}
+
+
+ {{ with (index .GoPackages 0 )}}
+ {{ with .DocComments }}
+
+ {{ safe (renderComments .) }}
+
+ {{ end }}
+ {{ end }}
+
+ Resource Types:
+
+ {{- range (visibleTypes (sortedTypes .Types)) -}}
+ {{ if isExportedType . -}}
+ -
+ {{ typeDisplayName . }}
+
+ {{- end }}
+ {{- end -}}
+
+
+ {{ range (visibleTypes (sortedTypes .Types))}}
+ {{ template "type" . }}
+ {{ end }}
+
+{{ end }}
+
+
+ Generated with gen-crd-api-reference-docs
+ {{ with .gitCommit }} on git commit {{ . }}{{end}}.
+
+
+{{ end }}
diff --git a/hack/api-docs/template/type.tpl b/hack/api-docs/template/type.tpl
new file mode 100644
index 00000000..e28b088a
--- /dev/null
+++ b/hack/api-docs/template/type.tpl
@@ -0,0 +1,58 @@
+{{ define "type" }}
+
+
+ {{- .Name.Name }}
+ {{ if eq .Kind "Alias" }}({{.Underlying}} alias){{ end -}}
+
+{{ with (typeReferences .) }}
+
+ (Appears on:
+ {{- $prev := "" -}}
+ {{- range . -}}
+ {{- if $prev -}}, {{ end -}}
+ {{ $prev = . }}
+ {{ typeDisplayName . }}
+ {{- end -}}
+ )
+
+{{ end }}
+
+
+
+ {{ safe (renderComments .CommentLines) }}
+
+
+{{ if .Members }}
+
+
+
+ Field |
+ Description |
+
+
+
+ {{ if isExportedType . }}
+
+
+ apiVersion
+ string |
+
+
+ {{apiGroup .}}
+
+ |
+
+
+
+ kind
+ string
+ |
+ {{.Name.Name}} |
+
+ {{ end }}
+ {{ template "members" .}}
+
+
+{{ end }}
+
+{{ end }}
diff --git a/vendor/github.com/ahmetb/gen-crd-api-reference-docs/.gitignore b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/.gitignore
new file mode 100644
index 00000000..a4d184e8
--- /dev/null
+++ b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/.gitignore
@@ -0,0 +1,16 @@
+# Binaries for programs and plugins
+refdocs
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# goreleaser output
+dist
diff --git a/vendor/github.com/ahmetb/gen-crd-api-reference-docs/.goreleaser.yml b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/.goreleaser.yml
new file mode 100644
index 00000000..f1caccad
--- /dev/null
+++ b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/.goreleaser.yml
@@ -0,0 +1,24 @@
+builds:
+ - env:
+ - CGO_ENABLED=0
+ # travis ci currently sets GOPATH even with go1.11.
+ # force-setting GO111MODULE=on to use vgo
+ - GO111MODULE=on
+ goos:
+ - linux
+ - darwin
+ goarch:
+ - amd64
+archive:
+ name_template: "{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}"
+ files:
+ - LICENSE
+ - template/**
+ - example-config.json
+checksum:
+ name_template: "checksums.txt"
+changelog:
+ skip: true
+release:
+ # releases are uploaded to github by .travis.yml
+ disable: true
diff --git a/vendor/github.com/ahmetb/gen-crd-api-reference-docs/.travis.yml b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/.travis.yml
new file mode 100644
index 00000000..d6845bab
--- /dev/null
+++ b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/.travis.yml
@@ -0,0 +1,32 @@
+language: go
+go:
+ - 1.11.x
+install:
+ - echo noop
+before_script:
+ # travis ci currently sets GOPATH even with go1.11.
+ # force-setting GO111MODULE=on to use vgo
+ - env GO111MODULE=on go mod download
+script:
+ # travis ci currently sets GOPATH even with go1.11.
+ # force-setting GO111MODULE=on to use vgo
+ - env GO111MODULE=on go build -v -o /dev/null
+deploy:
+ # use goreleaser to prepare dist/
+ - provider: script
+ skip_cleanup: true
+ on:
+ tags: true
+ script: curl -sL https://git.io/goreleaser | bash
+ # use github release feature to upload dist/
+ - provider: releases
+ skip_cleanup: true
+ on:
+ tags: true
+ file_glob: true
+ file:
+ - dist/*.tar.gz
+ - dist/*.zip
+ - dist/checksums.txt
+ api_key:
+ secure: r1GMgbVDnZTUcny/PbIATW9dXGOTpm2U9iEGaWvpprMO2AGo7ju7SWEJWtjcap3pc0YasyR2/eon9LC0scWY0Xlpeb+g0pRCQ39FABk1Vo3DpmIPRUCFFkaescWmrWDj3ImzjJgZjCewwK6Fo8s8ngnqIlZnE1Hq6ls2xDp6jNVf+Pn7LyqxkK4axFFSPQM9zFX3N1PVUH5RT03bIJfojJZguqnhNfyTvKvHJidoeWU/Ie+fXc4AdPHyP85xrmGHYl68O0HziU6JCLXira8r1FjUgVeYFYC5nnNuylszO6JWqWh1nXYDxs5FGPnZd9N8bEi/2ahiqms8eV7S+/DGzhSoEdHikcBxTgJpZP2VOmvRSITyv3RleJzCeMULTGFQodoxRgA/Q8qZySvInNjstiBjV2Pyucrnn990XQbN8rIV4RmNggJvbAwJNCGjCwS2eB42EKNCODTuzHPbIV0ap4EjvfBBo0cZ2J9M2Q6VzdpNErdntpM1hZl9yymv3MNN4hOiLQKkofoo/QI3cffB8Y0PBPAL8Cs9Mx1bbx+Dr8iitTHBUAt4a5DHFen4MS8znrZ+Cr4kLDD9QPJ8G0oh4tDKq8CJ73Gt+xqkLZEuka0W1awz9essqE7MH20kRJbKa5woTIs0v9njHMpbeqd7KrNV+1e5F5aPRQyiCzaom7c=
diff --git a/vendor/github.com/ahmetb/gen-crd-api-reference-docs/LICENSE b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/ahmetb/gen-crd-api-reference-docs/README.md b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/README.md
new file mode 100644
index 00000000..8b59d8bd
--- /dev/null
+++ b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/README.md
@@ -0,0 +1,77 @@
+# Kubernetes Custom Resource API Reference Docs generator
+
+If you have a project that uses Custom Resource Definitions and want to generate
+API reference docs [like this][ar], this tool is for you.
+
+[ar]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/
+
+## Current Users
+
+- [**Knative** API reference docs](https://www.knative.dev/docs/reference/)
+- [**Kubeflow** API reference docs](https://www.kubeflow.org/docs/reference/overview/)
+- [**Agones** API reference docs](https://agones.dev/site/docs/reference/agones_crd_api_reference/)
+- _[[ADD YOUR PROJECT HERE]]_
+
+## Why
+
+Normally you would want to use the same [docs generator][dg] as [Kubernetes API
+reference][ar], but here's why I wrote a different parser/generator:
+
+1. Today, the Kubernetes API [does not][pr] provide OpenAPI specs for CRDs (e.g.
+   Knative), so the [gen-apidocs][ga] generator used by Kubernetes won't
+   work.
+
+2. Even when the Kubernetes API starts providing OpenAPI specs for CRDs, your
+   CRD must have a validation schema (e.g. the Knative API doesn't!).
+
+3. Kubernetes [gen-apidocs][ga] parser relies on running a `kube-apiserver` and
+ calling `/apis` endpoint to get OpenAPI specs to generate docs. **This tool
+ doesn't need that!**
+
+[dg]: https://github.com/kubernetes-incubator/reference-docs/
+[ga]: https://github.com/kubernetes-incubator/reference-docs/tree/master/gen-apidocs/generators
+[pr]: https://github.com/kubernetes/kubernetes/pull/71192
+
+## How
+
+This is a custom API reference docs generator that uses the
+[k8s.io/gengo](https://godoc.org/k8s.io/gengo) project to parse types and
+generate API documentation from it.
+
+Capabilities of this tool include:
+
+- Doesn't depend on OpenAPI specs, or kube-apiserver, or a running cluster.
+- Relies only on the Go source code (pkg/apis/**/*.go) to parse API types.
+- Can link to other sites for external APIs. For example, if your types have a
+ reference to Kubernetes core/v1.PodSpec, you can link to it.
+- [Configurable](./example-config.json) settings to hide certain fields or types
+ entirely from the generated output.
+- Either output to a file or start a live http-server (for rapid iteration).
+- Supports markdown rendering from godoc type, package and field comments.
+
+## Try it out
+
+1. Clone this repository.
+
+2. Make sure you have go1.11+ installed. Then run `go build`; you should get a
+   `refdocs` binary executable.
+
+3. Clone a Knative repository, set GOPATH correctly,
+ and call the compiled binary within that directory.
+
+ ```sh
+ # go into a repository root with GOPATH set. (I use my own script
+ # goclone(1) to have a separate GOPATH for each repo I clone.)
+ $ goclone knative/build
+
+ $ /path/to/refdocs \
+ -config "/path/to/example-config.json" \
+ -api-dir "github.com/knative/build/pkg/apis/build/v1alpha1" \
+ -out-file docs.html
+ ```
+
+4. Visit `docs.html` to view the results.
+
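+As an alternative to `-out-file`, the generator can serve the rendered docs over
+HTTP for rapid iteration: the `-http-addr` flag (defined in `main.go`) starts a
+server that re-renders the docs on every request. A minimal sketch (the port is
+just an example):
+
+```sh
+$ /path/to/refdocs \
+    -config "/path/to/example-config.json" \
+    -api-dir "github.com/knative/build/pkg/apis/build/v1alpha1" \
+    -http-addr ":8080"
+```
+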
+-----
+
+This is not an official Google project. See [LICENSE](./LICENSE).
diff --git a/vendor/github.com/ahmetb/gen-crd-api-reference-docs/example-config.json b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/example-config.json
new file mode 100644
index 00000000..e5fdbc70
--- /dev/null
+++ b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/example-config.json
@@ -0,0 +1,28 @@
+{
+ "hideMemberFields": [
+ "TypeMeta"
+ ],
+ "hideTypePatterns": [
+ "ParseError$",
+ "List$"
+ ],
+ "externalPackages": [
+ {
+ "typeMatchPrefix": "^k8s\\.io/apimachinery/pkg/apis/meta/v1\\.Duration$",
+ "docsURLTemplate": "https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration"
+ },
+ {
+ "typeMatchPrefix": "^k8s\\.io/(api|apimachinery/pkg/apis)/",
+ "docsURLTemplate": "https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#{{lower .TypeIdentifier}}-{{arrIndex .PackageSegments -1}}-{{arrIndex .PackageSegments -2}}"
+ },
+ {
+ "typeMatchPrefix": "^github\\.com/knative/pkg/apis/duck/",
+ "docsURLTemplate": "https://godoc.org/github.com/knative/pkg/apis/duck/{{arrIndex .PackageSegments -1}}#{{.TypeIdentifier}}"
+ }
+ ],
+ "typeDisplayNamePrefixOverrides": {
+ "k8s.io/api/": "Kubernetes ",
+ "k8s.io/apimachinery/pkg/apis/": "Kubernetes "
+ },
+ "markdownDisabled": false
+}
diff --git a/vendor/github.com/ahmetb/gen-crd-api-reference-docs/go.mod b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/go.mod
new file mode 100644
index 00000000..a42a99a9
--- /dev/null
+++ b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/go.mod
@@ -0,0 +1,12 @@
+module github.com/ahmetb/gen-crd-api-reference-docs
+
+require (
+ github.com/pkg/errors v0.8.1
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/russross/blackfriday/v2 v2.0.1
+ github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
+ github.com/spf13/pflag v1.0.3 // indirect
+ golang.org/x/tools v0.0.0-20190213192042-740235f6c0d8 // indirect
+ k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6
+ k8s.io/klog v0.2.0
+)
diff --git a/vendor/github.com/ahmetb/gen-crd-api-reference-docs/go.sum b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/go.sum
new file mode 100644
index 00000000..0378ec02
--- /dev/null
+++ b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/go.sum
@@ -0,0 +1,23 @@
+github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+golang.org/x/tools v0.0.0-20181221235234-d00ac6d27372 h1:zWPUEY/PjVHT+zO3L8OfkjrtIjf55joTxn/RQP/AjOI=
+golang.org/x/tools v0.0.0-20181221235234-d00ac6d27372/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190213192042-740235f6c0d8/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+k8s.io/gengo v0.0.0-20181113154421-fd15ee9cc2f7 h1:zjNgw2qqBQmKd0S59lGZBQqFxJqUZroVbDphfnVm5do=
+k8s.io/gengo v0.0.0-20181113154421-fd15ee9cc2f7/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6 h1:4s3/R4+OYYYUKptXPhZKjQ04WJ6EhQQVFdjOFvCazDk=
+k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/klog v0.1.0 h1:I5HMfc/DtuVaGR1KPwUrTc476K8NCqNBldC7H4dYEzk=
+k8s.io/klog v0.1.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v0.2.0 h1:0ElL0OHzF3N+OhoJTL0uca20SxtYt4X4+bzHeqrB83c=
+k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
diff --git a/vendor/github.com/ahmetb/gen-crd-api-reference-docs/main.go b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/main.go
new file mode 100644
index 00000000..e0fdf395
--- /dev/null
+++ b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/main.go
@@ -0,0 +1,615 @@
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "html/template"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "sort"
+ "strings"
+ texttemplate "text/template"
+ "time"
+ "unicode"
+
+ "github.com/pkg/errors"
+ "github.com/russross/blackfriday/v2"
+ "k8s.io/gengo/parser"
+ "k8s.io/gengo/types"
+ "k8s.io/klog"
+)
+
+var (
+ flConfig = flag.String("config", "", "path to config file")
+ flAPIDir = flag.String("api-dir", "", "api directory (or import path), point this to pkg/apis")
+ flTemplateDir = flag.String("template-dir", "template", "path to template/ dir")
+
+ flHTTPAddr = flag.String("http-addr", "", "start an HTTP server on specified addr to view the result (e.g. :8080)")
+ flOutFile = flag.String("out-file", "", "path to output file to save the result")
+)
+
+type generatorConfig struct {
+ // HiddenMemberFields hides fields with specified names on all types.
+ HiddenMemberFields []string `json:"hideMemberFields"`
+
+ // HideTypePatterns hides types matching the specified patterns from the
+ // output.
+ HideTypePatterns []string `json:"hideTypePatterns"`
+
+ // ExternalPackages lists recognized external package references and how to
+ // link to them.
+ ExternalPackages []externalPackage `json:"externalPackages"`
+
+ // TypeDisplayNamePrefixOverrides is a mapping of how to override displayed
+ // name for types with certain prefixes with what value.
+ TypeDisplayNamePrefixOverrides map[string]string `json:"typeDisplayNamePrefixOverrides"`
+
+ // MarkdownDisabled controls markdown rendering for comment lines.
+ MarkdownDisabled bool `json:"markdownDisabled"`
+}
+
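+// externalPackage pairs a regexp over type identifiers (TypeMatchPrefix) with
+// a text/template (DocsURLTemplate) that renders the documentation URL for
+// matching external types; see linkForType below.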
+type externalPackage struct {
+ TypeMatchPrefix string `json:"typeMatchPrefix"`
+ DocsURLTemplate string `json:"docsURLTemplate"`
+}
+
+type apiPackage struct {
+ apiGroup string
+ apiVersion string
+ GoPackages []*types.Package
+ Types []*types.Type // because multiple 'types.Package's can add types to an apiVersion
+}
+
+func (v *apiPackage) identifier() string { return fmt.Sprintf("%s/%s", v.apiGroup, v.apiVersion) }
+
+func init() {
+ klog.InitFlags(nil)
+ flag.Set("alsologtostderr", "true") // for klog
+ flag.Parse()
+
+ if *flConfig == "" {
+ panic("-config not specified")
+ }
+ if *flAPIDir == "" {
+ panic("-api-dir not specified")
+ }
+ if *flHTTPAddr == "" && *flOutFile == "" {
+ panic("-out-file or -http-addr must be specified")
+ }
+ if *flHTTPAddr != "" && *flOutFile != "" {
+ panic("only -out-file or -http-addr can be specified")
+ }
+ if err := resolveTemplateDir(*flTemplateDir); err != nil {
+ panic(err)
+ }
+
+}
+
+func resolveTemplateDir(dir string) error {
+ path, err := filepath.Abs(dir)
+ if err != nil {
+ return err
+ }
+ if fi, err := os.Stat(path); err != nil {
+ return errors.Wrapf(err, "cannot read the %s directory", path)
+ } else if !fi.IsDir() {
+ return errors.Errorf("%s path is not a directory", path)
+ }
+ return nil
+}
+
+func main() {
+ defer klog.Flush()
+
+ f, err := os.Open(*flConfig)
+ if err != nil {
+ klog.Fatalf("failed to open config file: %+v", err)
+ }
+ d := json.NewDecoder(f)
+ d.DisallowUnknownFields()
+ var config generatorConfig
+ if err := d.Decode(&config); err != nil {
+ klog.Fatalf("failed to parse config file: %+v", err)
+ }
+
+ klog.Infof("parsing go packages in directory %s", *flAPIDir)
+ pkgs, err := parseAPIPackages(*flAPIDir)
+ if err != nil {
+ klog.Fatal(err)
+ }
+ if len(pkgs) == 0 {
+ klog.Fatalf("no API packages found in %s", *flAPIDir)
+ }
+
+ apiPackages, err := combineAPIPackages(pkgs)
+ if err != nil {
+ klog.Fatal(err)
+ }
+
+ mkOutput := func() (string, error) {
+ var b bytes.Buffer
+ err := render(&b, apiPackages, config)
+ if err != nil {
+ return "", errors.Wrap(err, "failed to render the result")
+ }
+
+ // remove leading whitespace from each html line so markdown renderers don't treat it as code
+ s := regexp.MustCompile(`(?m)^\s+`).ReplaceAllString(b.String(), "")
+ return s, nil
+ }
+
+ if *flOutFile != "" {
+ dir := filepath.Dir(*flOutFile)
+ if err := os.MkdirAll(dir, 0755); err != nil {
+ klog.Fatalf("failed to create dir %s: %v", dir, err)
+ }
+ s, err := mkOutput()
+ if err != nil {
+ klog.Fatalf("failed: %+v", err)
+ }
+ if err := ioutil.WriteFile(*flOutFile, []byte(s), 0644); err != nil {
+ klog.Fatalf("failed to write to out file: %v", err)
+ }
+ klog.Infof("written to %s", *flOutFile)
+ }
+
+ if *flHTTPAddr != "" {
+ h := func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ defer func() { klog.Infof("request took %v", time.Since(now)) }()
+ s, err := mkOutput()
+ if err != nil {
+ fmt.Fprintf(w, "error: %+v", err)
+ klog.Warningf("failed: %+v", err)
+ }
+ if _, err := fmt.Fprint(w, s); err != nil {
+ klog.Warningf("response write error: %v", err)
+ }
+ }
+ http.HandleFunc("/", h)
+ klog.Infof("server listening at %s", *flHTTPAddr)
+ klog.Fatal(http.ListenAndServe(*flHTTPAddr, nil))
+ }
+}
+
+// groupName extracts the "//+groupName" meta-comment from the specified
+// package's godoc, or returns empty string if it cannot be found.
+func groupName(pkg *types.Package) string {
+ m := types.ExtractCommentTags("+", pkg.DocComments)
+ v := m["groupName"]
+ if len(v) == 1 {
+ return v[0]
+ }
+ return ""
+}
+
+func parseAPIPackages(dir string) ([]*types.Package, error) {
+ b := parser.New()
+ // the following will silently fail (turn on -v=4 to see logs)
+ if err := b.AddDirRecursive(*flAPIDir); err != nil {
+ return nil, err
+ }
+ scan, err := b.FindTypes()
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to parse pkgs and types")
+ }
+ var pkgNames []string
+ for p := range scan {
+ pkg := scan[p]
+ klog.V(3).Infof("trying package=%v groupName=%s", p, groupName(pkg))
+
+ // Do not pick up packages that are in vendor/ as API packages. (This
+ // happened in knative/eventing-sources/vendor/..., where a package
+ // matched the pattern, but it didn't have a compatible import path).
+ if isVendorPackage(pkg) {
+ klog.V(3).Infof("package=%v coming from vendor/, ignoring.", p)
+ continue
+ }
+
+ if groupName(pkg) != "" && len(pkg.Types) > 0 {
+ klog.V(3).Infof("package=%v has groupName and has types", p)
+ pkgNames = append(pkgNames, p)
+ }
+ }
+ sort.Strings(pkgNames)
+ var pkgs []*types.Package
+ for _, p := range pkgNames {
+ klog.Infof("using package=%s", p)
+ pkgs = append(pkgs, scan[p])
+ }
+ return pkgs, nil
+}
+
+// combineAPIPackages groups the Go packages by the apiGroup+apiVersion they
+// offer, and combines the types in them.
+func combineAPIPackages(pkgs []*types.Package) ([]*apiPackage, error) {
+ pkgMap := make(map[string]*apiPackage)
+
+ for _, pkg := range pkgs {
+ apiGroup, apiVersion, err := apiVersionForPackage(pkg)
+ if err != nil {
+ return nil, errors.Wrapf(err, "could not get apiVersion for package %s", pkg.Path)
+ }
+
+ typeList := make([]*types.Type, 0, len(pkg.Types))
+ for _, t := range pkg.Types {
+ typeList = append(typeList, t)
+ }
+
+ id := fmt.Sprintf("%s/%s", apiGroup, apiVersion)
+ v, ok := pkgMap[id]
+ if !ok {
+ pkgMap[id] = &apiPackage{
+ apiGroup: apiGroup,
+ apiVersion: apiVersion,
+ Types: typeList,
+ GoPackages: []*types.Package{pkg},
+ }
+ } else {
+ v.Types = append(v.Types, typeList...)
+ v.GoPackages = append(v.GoPackages, pkg)
+ }
+ }
+ out := make([]*apiPackage, 0, len(pkgMap))
+ for _, v := range pkgMap {
+ out = append(out, v)
+ }
+ return out, nil
+}
+
+// isVendorPackage determines if package is coming from vendor/ dir.
+func isVendorPackage(pkg *types.Package) bool {
+ vendorPattern := string(os.PathSeparator) + "vendor" + string(os.PathSeparator)
+ return strings.Contains(pkg.SourcePath, vendorPattern)
+}
+
+func findTypeReferences(pkgs []*apiPackage) map[*types.Type][]*types.Type {
+ m := make(map[*types.Type][]*types.Type)
+ for _, pkg := range pkgs {
+ for _, typ := range pkg.Types {
+ for _, member := range typ.Members {
+ t := member.Type
+ t = tryDereference(t)
+ m[t] = append(m[t], typ)
+ }
+ }
+ }
+ return m
+}
+
+func isExportedType(t *types.Type) bool {
+ // TODO(ahmetb) use types.ExtractSingleBoolCommentTag() to parse +genclient
+ // https://godoc.org/k8s.io/gengo/types#ExtractCommentTags
+ return strings.Contains(strings.Join(t.SecondClosestCommentLines, "\n"), "+genclient")
+}
+
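+// fieldName returns the field's JSON name from its struct tag (with the
+// ",omitempty" and ",inline" suffixes stripped), falling back to the Go
+// member name when no JSON tag is set.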
+func fieldName(m types.Member) string {
+ v := reflect.StructTag(m.Tags).Get("json")
+ v = strings.TrimSuffix(v, ",omitempty")
+ v = strings.TrimSuffix(v, ",inline")
+ if v != "" {
+ return v
+ }
+ return m.Name
+}
+
+func fieldEmbedded(m types.Member) bool {
+ return strings.Contains(reflect.StructTag(m.Tags).Get("json"), ",inline")
+}
+
+func isLocalType(t *types.Type, typePkgMap map[*types.Type]*apiPackage) bool {
+ t = tryDereference(t)
+ _, ok := typePkgMap[t]
+ return ok
+}
+
+func renderComments(s []string, markdown bool) string {
+ s = filterCommentTags(s)
+ doc := strings.Join(s, "\n")
+
+ if markdown {
+ // TODO(ahmetb): when a comment includes stuff like "http://"
+ // we treat this as a HTML tag with markdown renderer below. solve this.
+ return string(blackfriday.Run([]byte(doc)))
+ }
+ return nl2br(doc)
+}
+
+func safe(s string) template.HTML { return template.HTML(s) }
+
+func nl2br(s string) string {
+ return strings.Replace(s, "\n\n", string(template.HTML("<br/><br/>")), -1)
+}
+
+func hiddenMember(m types.Member, c generatorConfig) bool {
+ for _, v := range c.HiddenMemberFields {
+ if m.Name == v {
+ return true
+ }
+ }
+ return false
+}
+
+func typeIdentifier(t *types.Type) string {
+ t = tryDereference(t)
+ return t.Name.String() // {PackagePath.Name}
+}
+
+// apiGroupForType looks up apiGroup for the given type
+func apiGroupForType(t *types.Type, typePkgMap map[*types.Type]*apiPackage) string {
+ t = tryDereference(t)
+
+ v := typePkgMap[t]
+ if v == nil {
+ klog.Warningf("WARNING: cannot read apiVersion for %s from type=>pkg map", t.Name.String())
+ return ""
+ }
+
+ return v.identifier()
+}
+
+// anchorIDForLocalType returns the #anchor string for the local type
+func anchorIDForLocalType(t *types.Type, typePkgMap map[*types.Type]*apiPackage) string {
+ return fmt.Sprintf("%s.%s", apiGroupForType(t, typePkgMap), t.Name.Name)
+}
+
+// linkForType returns an anchor to the type if it can be generated. returns
+// empty string if it is not a local type or unrecognized external type.
+func linkForType(t *types.Type, c generatorConfig, typePkgMap map[*types.Type]*apiPackage) (string, error) {
+ t = tryDereference(t) // dereference kind=Pointer
+
+ if isLocalType(t, typePkgMap) {
+ return "#" + anchorIDForLocalType(t, typePkgMap), nil
+ }
+
+ var arrIndex = func(a []string, i int) string {
+ return a[(len(a)+i)%len(a)]
+ }
+
+ // types like k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta,
+ // k8s.io/api/core/v1.Container, k8s.io/api/autoscaling/v1.CrossVersionObjectReference,
+ // github.com/knative/build/pkg/apis/build/v1alpha1.BuildSpec
+ if t.Kind == types.Struct || t.Kind == types.Pointer || t.Kind == types.Interface || t.Kind == types.Alias {
+ id := typeIdentifier(t) // gives {{ImportPath.Identifier}} for type
+ segments := strings.Split(t.Name.Package, "/") // to parse [meta, v1] from "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ for _, v := range c.ExternalPackages {
+ r, err := regexp.Compile(v.TypeMatchPrefix)
+ if err != nil {
+ return "", errors.Wrapf(err, "pattern %q failed to compile", v.TypeMatchPrefix)
+ }
+ if r.MatchString(id) {
+ tpl, err := texttemplate.New("").Funcs(map[string]interface{}{
+ "lower": strings.ToLower,
+ "arrIndex": arrIndex,
+ }).Parse(v.DocsURLTemplate)
+ if err != nil {
+ return "", errors.Wrap(err, "docs URL template failed to parse")
+ }
+
+ var b bytes.Buffer
+ if err := tpl.
+ Execute(&b, map[string]interface{}{
+ "TypeIdentifier": t.Name.Name,
+ "PackagePath": t.Name.Package,
+ "PackageSegments": segments,
+ }); err != nil {
+ return "", errors.Wrap(err, "docs url template execution error")
+ }
+ return b.String(), nil
+ }
+ }
+ klog.Warningf("not found external link source for type %v", t.Name)
+ }
+ return "", nil
+}
+
+// tryDereference returns the underlying type when t is a pointer, map, or slice.
+func tryDereference(t *types.Type) *types.Type {
+ if t.Elem != nil {
+ return t.Elem
+ }
+ return t
+}
+
+func typeDisplayName(t *types.Type, c generatorConfig, typePkgMap map[*types.Type]*apiPackage) string {
+ s := typeIdentifier(t)
+ if isLocalType(t, typePkgMap) {
+ s = tryDereference(t).Name.Name
+ }
+ if t.Kind == types.Pointer {
+ s = strings.TrimLeft(s, "*")
+ }
+
+ switch t.Kind {
+ case types.Struct,
+ types.Interface,
+ types.Alias,
+ types.Pointer,
+ types.Slice,
+ types.Builtin:
+ // noop
+ case types.Map:
+ // return original name
+ return t.Name.Name
+ default:
+ klog.Fatalf("type %s has kind=%v which is unhandled", t.Name, t.Kind)
+ }
+
+ // substitute prefix, if registered
+ for prefix, replacement := range c.TypeDisplayNamePrefixOverrides {
+ if strings.HasPrefix(s, prefix) {
+ s = strings.Replace(s, prefix, replacement, 1)
+ }
+ }
+
+ if t.Kind == types.Slice {
+ s = "[]" + s
+ }
+
+ return s
+}
+
+func hideType(t *types.Type, c generatorConfig) bool {
+ for _, pattern := range c.HideTypePatterns {
+ if regexp.MustCompile(pattern).MatchString(t.Name.String()) {
+ return true
+ }
+ }
+ if !isExportedType(t) && unicode.IsLower(rune(t.Name.Name[0])) {
+ // types that start with lowercase
+ return true
+ }
+ return false
+}
+
+func typeReferences(t *types.Type, c generatorConfig, references map[*types.Type][]*types.Type) []*types.Type {
+ var out []*types.Type
+ m := make(map[*types.Type]struct{})
+ for _, ref := range references[t] {
+ if !hideType(ref, c) {
+ m[ref] = struct{}{}
+ }
+ }
+ for k := range m {
+ out = append(out, k)
+ }
+ sortTypes(out)
+ return out
+}
+
+func sortTypes(typs []*types.Type) []*types.Type {
+ sort.Slice(typs, func(i, j int) bool {
+ t1, t2 := typs[i], typs[j]
+ if isExportedType(t1) && !isExportedType(t2) {
+ return true
+ } else if !isExportedType(t1) && isExportedType(t2) {
+ return false
+ }
+ return t1.Name.Name < t2.Name.Name
+ })
+ return typs
+}
+
+func visibleTypes(in []*types.Type, c generatorConfig) []*types.Type {
+ var out []*types.Type
+ for _, t := range in {
+ if !hideType(t, c) {
+ out = append(out, t)
+ }
+ }
+ return out
+}
+
+func packageDisplayName(pkg *types.Package, apiVersions map[string]string) string {
+ apiGroupVersion, ok := apiVersions[pkg.Path]
+ if ok {
+ return apiGroupVersion
+ }
+ return pkg.Path // go import path
+}
+
+func filterCommentTags(comments []string) []string {
+ var out []string
+ for _, v := range comments {
+ if !strings.HasPrefix(strings.TrimSpace(v), "+") {
+ out = append(out, v)
+ }
+ }
+ return out
+}
+
+func isOptionalMember(m types.Member) bool {
+ tags := types.ExtractCommentTags("+", m.CommentLines)
+ _, ok := tags["optional"]
+ return ok
+}
+
+func apiVersionForPackage(pkg *types.Package) (string, string, error) {
+ group := groupName(pkg)
+ version := pkg.Name // assumes basename (i.e. "v1" in "core/v1") is apiVersion
+ r := `^v\d+((alpha|beta)\d+)?$`
+ if !regexp.MustCompile(r).MatchString(version) {
+ return "", "", errors.Errorf("cannot infer kubernetes apiVersion of go package %s (basename %q doesn't match expected pattern %s that's used to determine apiVersion)", pkg.Path, version, r)
+ }
+ return group, version, nil
+}
+
+// extractTypeToPackageMap builds a map from each *types.Type to its apiPackage.
+func extractTypeToPackageMap(pkgs []*apiPackage) map[*types.Type]*apiPackage {
+ out := make(map[*types.Type]*apiPackage)
+ for _, ap := range pkgs {
+ for _, t := range ap.Types {
+ out[t] = ap
+ }
+ }
+ return out
+}
+
+// packageMapToList flattens the map.
+func packageMapToList(pkgs map[string]*apiPackage) []*apiPackage {
+ // TODO(ahmetb): we should probably not deal with maps, this type can be
+ // a list everywhere.
+ out := make([]*apiPackage, 0, len(pkgs))
+ for _, v := range pkgs {
+ out = append(out, v)
+ }
+ return out
+}
+
+func render(w io.Writer, pkgs []*apiPackage, config generatorConfig) error {
+ references := findTypeReferences(pkgs)
+ typePkgMap := extractTypeToPackageMap(pkgs)
+
+ t, err := template.New("").Funcs(map[string]interface{}{
+ "isExportedType": isExportedType,
+ "fieldName": fieldName,
+ "fieldEmbedded": fieldEmbedded,
+ "typeIdentifier": func(t *types.Type) string { return typeIdentifier(t) },
+ "typeDisplayName": func(t *types.Type) string { return typeDisplayName(t, config, typePkgMap) },
+ "visibleTypes": func(t []*types.Type) []*types.Type { return visibleTypes(t, config) },
+ "renderComments": func(s []string) string { return renderComments(s, !config.MarkdownDisabled) },
+ "packageDisplayName": func(p *apiPackage) string { return p.identifier() },
+ "apiGroup": func(t *types.Type) string { return apiGroupForType(t, typePkgMap) },
+ "packageAnchorID": func(p *apiPackage) string {
+ // TODO(ahmetb): currently this is the same as packageDisplayName
+ // func, and it's fine since it returns valid DOM id strings like
+ // 'serving.knative.dev/v1alpha1' which is valid per HTML5, except
+ // spaces, so just trim those.
+ return strings.Replace(p.identifier(), " ", "", -1)
+ },
+ "linkForType": func(t *types.Type) string {
+ v, err := linkForType(t, config, typePkgMap)
+ if err != nil {
+ klog.Fatal(errors.Wrapf(err, "error getting link for type=%s", t.Name))
+ return ""
+ }
+ return v
+ },
+ "anchorIDForType": func(t *types.Type) string { return anchorIDForLocalType(t, typePkgMap) },
+ "safe": safe,
+ "sortedTypes": sortTypes,
+ "typeReferences": func(t *types.Type) []*types.Type { return typeReferences(t, config, references) },
+ "hiddenMember": func(m types.Member) bool { return hiddenMember(m, config) },
+ "isLocalType": isLocalType,
+ "isOptionalMember": isOptionalMember,
+ }).ParseGlob(filepath.Join(*flTemplateDir, "*.tpl"))
+ if err != nil {
+ return errors.Wrap(err, "parse error")
+ }
+
+ gitCommit, _ := exec.Command("git", "rev-parse", "--short", "HEAD").Output()
+ return errors.Wrap(t.ExecuteTemplate(w, "packages", map[string]interface{}{
+ "packages": pkgs,
+ "config": config,
+ "gitCommit": strings.TrimSpace(string(gitCommit)),
+ }), "template execution error")
+}
diff --git a/vendor/github.com/russross/blackfriday/v2/.gitignore b/vendor/github.com/russross/blackfriday/v2/.gitignore
new file mode 100644
index 00000000..75623dcc
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/.gitignore
@@ -0,0 +1,8 @@
+*.out
+*.swp
+*.8
+*.6
+_obj
+_test*
+markdown
+tags
diff --git a/vendor/github.com/russross/blackfriday/v2/.travis.yml b/vendor/github.com/russross/blackfriday/v2/.travis.yml
new file mode 100644
index 00000000..b0b525a5
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/.travis.yml
@@ -0,0 +1,17 @@
+sudo: false
+language: go
+go:
+ - "1.10.x"
+ - "1.11.x"
+ - tip
+matrix:
+ fast_finish: true
+ allow_failures:
+ - go: tip
+install:
+ - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
+script:
+ - go get -t -v ./...
+ - diff -u <(echo -n) <(gofmt -d -s .)
+ - go tool vet .
+ - go test -v ./...
diff --git a/vendor/github.com/russross/blackfriday/v2/LICENSE.txt b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt
new file mode 100644
index 00000000..2885af36
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt
@@ -0,0 +1,29 @@
+Blackfriday is distributed under the Simplified BSD License:
+
+> Copyright © 2011 Russ Ross
+> All rights reserved.
+>
+> Redistribution and use in source and binary forms, with or without
+> modification, are permitted provided that the following conditions
+> are met:
+>
+> 1. Redistributions of source code must retain the above copyright
+> notice, this list of conditions and the following disclaimer.
+>
+> 2. Redistributions in binary form must reproduce the above
+> copyright notice, this list of conditions and the following
+> disclaimer in the documentation and/or other materials provided with
+> the distribution.
+>
+> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+> POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/russross/blackfriday/v2/README.md b/vendor/github.com/russross/blackfriday/v2/README.md
new file mode 100644
index 00000000..d5a8649b
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/README.md
@@ -0,0 +1,291 @@
+Blackfriday [![Build Status](https://travis-ci.org/russross/blackfriday.svg?branch=master)](https://travis-ci.org/russross/blackfriday)
+===========
+
+Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It
+is paranoid about its input (so you can safely feed it user-supplied
+data), it is fast, it supports common extensions (tables, smart
+punctuation substitutions, etc.), and it is safe for all utf-8
+(unicode) input.
+
+HTML output is currently supported, along with Smartypants
+extensions.
+
+It started as a translation from C of [Sundown][3].
+
+
+Installation
+------------
+
+Blackfriday is compatible with any modern Go release. With Go 1.7 and git
+installed:
+
+ go get gopkg.in/russross/blackfriday.v2
+
+will download, compile, and install the package into your `$GOPATH`
+directory hierarchy. Alternatively, you can achieve the same if you
+import it into a project:
+
+ import "gopkg.in/russross/blackfriday.v2"
+
+and `go get` without parameters.
+
+
+Versions
+--------
+
+The currently maintained and recommended version of Blackfriday is `v2`. It's
+being developed on its own branch: https://github.com/russross/blackfriday/tree/v2,
+and the documentation is available at
+https://godoc.org/gopkg.in/russross/blackfriday.v2.
+
+It is `go get`-able via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`,
+but we highly recommend using a package management tool like [dep][7] or
+[Glide][8] and making use of semantic versioning. With package management you
+should import `github.com/russross/blackfriday` and specify that you're using
+version 2.0.0.
+
+Version 2 offers a number of improvements over v1:
+
+* Cleaned up API
+* A separate call to [`Parse`][4], which produces an abstract syntax tree for
+ the document
+* Latest bug fixes
+* Flexibility to easily add your own rendering extensions
+
+Potential drawbacks:
+
+* Our benchmarks show v2 to be slightly slower than v1, currently in the
+  ballpark of 15%.
+* API breakage. If you can't afford modifying your code to adhere to the new API
+ and don't care too much about the new features, v2 is probably not for you.
+* Several bug fixes are trailing behind and still need to be forward-ported to
+ v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for
+ tracking.
+
+Usage
+-----
+
+For the most sensible markdown processing, it is as simple as getting your input
+into a byte slice and calling:
+
+```go
+output := blackfriday.Run(input)
+```
+
+Your input will be parsed and the output rendered with a set of the most popular
+extensions enabled. If you want the most basic feature set, corresponding with
+the bare Markdown specification, use:
+
+```go
+output := blackfriday.Run(input, blackfriday.WithNoExtensions())
+```
+
+### Sanitize untrusted content
+
+Blackfriday itself does nothing to protect against malicious content. If you are
+dealing with user-supplied markdown, we recommend running Blackfriday's output
+through an HTML sanitizer such as [Bluemonday][5].
+
+Here's an example of simple usage of Blackfriday together with Bluemonday:
+
+```go
+import (
+ "github.com/microcosm-cc/bluemonday"
+ "github.com/russross/blackfriday"
+)
+
+// ...
+unsafe := blackfriday.Run(input)
+html := bluemonday.UGCPolicy().SanitizeBytes(unsafe)
+```
+
+### Custom options
+
+If you want to customize the set of options, use `blackfriday.WithExtensions`,
+`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`.
+
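+For instance, a minimal sketch (extension names as exported by this package)
+that layers hard line breaks on top of the common extension set:
+
+```go
+output := blackfriday.Run(input,
+ blackfriday.WithExtensions(blackfriday.CommonExtensions|blackfriday.HardLineBreak))
+```
+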
+You can also check out `blackfriday-tool` for a more complete example
+of how to use it. Download and install it using:
+
+ go get github.com/russross/blackfriday-tool
+
+This is a simple command-line tool that allows you to process a
+markdown file using a standalone program. You can also browse the
+source directly on github if you are just looking for some example
+code:
+
+* <https://github.com/russross/blackfriday-tool>
+
+Note that if you have not already done so, installing
+`blackfriday-tool` will be sufficient to download and install
+blackfriday in addition to the tool itself. The tool binary will be
+installed in `$GOPATH/bin`. This is a statically-linked binary that
+can be copied to wherever you need it without worrying about
+dependencies and library versions.
+
+
+Features
+--------
+
+All features of Sundown are supported, including:
+
+* **Compatibility**. The Markdown v1.0.3 test suite passes with
+ the `--tidy` option. Without `--tidy`, the differences are
+ mostly in whitespace and entity escaping, where blackfriday is
+ more consistent and cleaner.
+
+* **Common extensions**, including table support, fenced code
+ blocks, autolinks, strikethroughs, non-strict emphasis, etc.
+
+* **Safety**. Blackfriday is paranoid when parsing, making it safe
+ to feed untrusted user input without fear of bad things
+ happening. The test suite stress tests this and there are no
+ known inputs that make it crash. If you find one, please let me
+ know and send me the input that does it.
+
+ NOTE: "safety" in this context means *runtime safety only*. In order to
+ protect yourself against JavaScript injection in untrusted content, see
+ [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content).
+
+* **Fast processing**. It is fast enough to render on-demand in
+ most web applications without having to cache the output.
+
+* **Thread safety**. You can run multiple parsers in different
+ goroutines without ill effect. There is no dependence on global
+ shared state.
+
+* **Minimal dependencies**. Blackfriday only depends on standard
+ library packages in Go. The source code is pretty
+ self-contained, so it is easy to add to any project, including
+ Google App Engine projects.
+
+* **Standards compliant**. Output successfully validates using the
+ W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional.
+
+
+Extensions
+----------
+
+In addition to the standard markdown syntax, this package
+implements the following extensions:
+
+* **Intra-word emphasis suppression**. The `_` character is
+ commonly used inside words when discussing code, so having
+ markdown interpret it as an emphasis command is usually the
+ wrong thing. Blackfriday lets you treat all emphasis markers as
+ normal characters when they occur inside a word.
+
+* **Tables**. Tables can be created by drawing them in the input
+ using a simple syntax:
+
+ ```
+ Name | Age
+ --------|------
+ Bob | 27
+ Alice | 23
+ ```
+
+* **Fenced code blocks**. In addition to the normal 4-space
+ indentation to mark code blocks, you can explicitly mark them
+ and supply a language (to make syntax highlighting simple). Just
+ mark it like this:
+
+ ```go
+ func getTrue() bool {
+ return true
+ }
+ ```
+
+ You can use 3 or more backticks to mark the beginning of the
+ block, and the same number to mark the end of the block.
+
+* **Definition lists**. A simple definition list is made of a single-line
+ term followed by a colon and the definition for that term.
+
+ Cat
+ : Fluffy animal everyone likes
+
+ Internet
+ : Vector of transmission for pictures of cats
+
+ Terms must be separated from the previous definition by a blank line.
+
+* **Footnotes**. A marker in the text that will become a superscript number;
+ a footnote definition that will be placed in a list of footnotes at the
+ end of the document. A footnote looks like this:
+
+ This is a footnote.[^1]
+
+ [^1]: the footnote text.
+
+* **Autolinking**. Blackfriday can find URLs that have not been
+ explicitly marked as links and turn them into links.
+
+* **Strikethrough**. Use two tildes (`~~`) to mark text that
+ should be crossed out.
+
+* **Hard line breaks**. With this extension enabled newlines in the input
+ translate into line breaks in the output. This extension is off by default.
+
+* **Smart quotes**. Smartypants-style punctuation substitution is
+ supported, turning normal double- and single-quote marks into
+ curly quotes, etc.
+
+* **LaTeX-style dash parsing** is an additional option, where `--`
+  is translated into `&ndash;`, and `---` is translated into
+  `&mdash;`. This differs from most smartypants processors, which
+  turn a single hyphen into an ndash and a double hyphen into an
+  mdash.
+
+* **Smart fractions**, where anything that looks like a fraction
+  is translated into suitable HTML (instead of just a few special
+  cases like most smartypants processors). For example, `4/5`
+  becomes `<sup>4</sup>&frasl;<sub>5</sub>`, which renders as
+  4⁄5. (A short sketch enabling these Smartypants flags follows this list.)
+
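+A minimal sketch enabling the Smartypants behavior described above (these flags
+live on the HTML renderer rather than the parser; names as exported by this
+package):
+
+```go
+renderer := blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{
+ Flags: blackfriday.Smartypants | blackfriday.SmartypantsLatexDashes | blackfriday.SmartypantsFractions,
+})
+output := blackfriday.Run(input, blackfriday.WithRenderer(renderer))
+```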
+
+Other renderers
+---------------
+
+Blackfriday is structured to allow alternative rendering engines. Here
+are a few of note:
+
+* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown):
+ provides a GitHub Flavored Markdown renderer with fenced code block
+ highlighting, clickable heading anchor links.
+
+ It's not customizable, and its goal is to produce HTML output
+ equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode),
+ except the rendering is performed locally.
+
+* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt,
+ but for markdown.
+
+* [LaTeX output](https://github.com/Ambrevar/Blackfriday-LaTeX):
+ renders output as LaTeX.
+
+* [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer.
+
+
+Todo
+----
+
+* More unit testing
+* Improve unicode support. It does not understand all unicode
+ rules (about what constitutes a letter, a punctuation symbol,
+ etc.), so it may fail to detect word boundaries correctly in
+ some instances. It is safe on all utf-8 input.
+
+
+License
+-------
+
+[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt)
+
+
+ [1]: https://daringfireball.net/projects/markdown/ "Markdown"
+ [2]: https://golang.org/ "Go Language"
+ [3]: https://github.com/vmg/sundown "Sundown"
+ [4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func"
+ [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday"
+ [6]: https://labix.org/gopkg.in "gopkg.in"
+ [7]: https://github.com/golang/dep/ "dep"
+ [8]: https://github.com/Masterminds/glide "Glide"
diff --git a/vendor/github.com/russross/blackfriday/v2/block.go b/vendor/github.com/russross/blackfriday/v2/block.go
new file mode 100644
index 00000000..b8607474
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/block.go
@@ -0,0 +1,1590 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+// Functions to parse block-level elements.
+//
+
+package blackfriday
+
+import (
+ "bytes"
+ "html"
+ "regexp"
+ "strings"
+
+ "github.com/shurcooL/sanitized_anchor_name"
+)
+
+const (
+ charEntity = "&(?:#x[a-f0-9]{1,8}|#[0-9]{1,8}|[a-z][a-z0-9]{1,31});"
+ escapable = "[!\"#$%&'()*+,./:;<=>?@[\\\\\\]^_`{|}~-]"
+)
+
+var (
+ reBackslashOrAmp = regexp.MustCompile("[\\&]")
+ reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity)
+)
+
+// Parse block-level data.
+// Note: this function and many that it calls assume that
+// the input buffer ends with a newline.
+func (p *Markdown) block(data []byte) {
+ // this is called recursively: enforce a maximum depth
+ if p.nesting >= p.maxNesting {
+ return
+ }
+ p.nesting++
+
+ // parse out one block-level construct at a time
+ for len(data) > 0 {
+ // prefixed heading:
+ //
+ // # Heading 1
+ // ## Heading 2
+ // ...
+ // ###### Heading 6
+ if p.isPrefixHeading(data) {
+ data = data[p.prefixHeading(data):]
+ continue
+ }
+
+ // block of preformatted HTML:
+ //
+ //     <div>
+ //         ...
+ //     </div>
+ if data[0] == '<' {
+ if i := p.html(data, true); i > 0 {
+ data = data[i:]
+ continue
+ }
+ }
+
+ // title block
+ //
+ // % stuff
+ // % more stuff
+ // % even more stuff
+ if p.extensions&Titleblock != 0 {
+ if data[0] == '%' {
+ if i := p.titleBlock(data, true); i > 0 {
+ data = data[i:]
+ continue
+ }
+ }
+ }
+
+ // blank lines. note: returns the # of bytes to skip
+ if i := p.isEmpty(data); i > 0 {
+ data = data[i:]
+ continue
+ }
+
+ // indented code block:
+ //
+ // func max(a, b int) int {
+ // if a > b {
+ // return a
+ // }
+ // return b
+ // }
+ if p.codePrefix(data) > 0 {
+ data = data[p.code(data):]
+ continue
+ }
+
+ // fenced code block:
+ //
+ // ``` go
+ // func fact(n int) int {
+ // if n <= 1 {
+ // return n
+ // }
+ // return n * fact(n-1)
+ // }
+ // ```
+ if p.extensions&FencedCode != 0 {
+ if i := p.fencedCodeBlock(data, true); i > 0 {
+ data = data[i:]
+ continue
+ }
+ }
+
+ // horizontal rule:
+ //
+ // ------
+ // or
+ // ******
+ // or
+ // ______
+ if p.isHRule(data) {
+ p.addBlock(HorizontalRule, nil)
+ var i int
+ for i = 0; i < len(data) && data[i] != '\n'; i++ {
+ }
+ data = data[i:]
+ continue
+ }
+
+ // block quote:
+ //
+ // > A big quote I found somewhere
+ // > on the web
+ if p.quotePrefix(data) > 0 {
+ data = data[p.quote(data):]
+ continue
+ }
+
+ // table:
+ //
+ // Name | Age | Phone
+ // ------|-----|---------
+ // Bob | 31 | 555-1234
+ // Alice | 27 | 555-4321
+ if p.extensions&Tables != 0 {
+ if i := p.table(data); i > 0 {
+ data = data[i:]
+ continue
+ }
+ }
+
+ // an itemized/unordered list:
+ //
+ // * Item 1
+ // * Item 2
+ //
+ // also works with + or -
+ if p.uliPrefix(data) > 0 {
+ data = data[p.list(data, 0):]
+ continue
+ }
+
+ // a numbered/ordered list:
+ //
+ // 1. Item 1
+ // 2. Item 2
+ if p.oliPrefix(data) > 0 {
+ data = data[p.list(data, ListTypeOrdered):]
+ continue
+ }
+
+ // definition lists:
+ //
+ // Term 1
+ // : Definition a
+ // : Definition b
+ //
+ // Term 2
+ // : Definition c
+ if p.extensions&DefinitionLists != 0 {
+ if p.dliPrefix(data) > 0 {
+ data = data[p.list(data, ListTypeDefinition):]
+ continue
+ }
+ }
+
+ // anything else must look like a normal paragraph
+ // note: this finds underlined headings, too
+ data = data[p.paragraph(data):]
+ }
+
+ p.nesting--
+}
+
+func (p *Markdown) addBlock(typ NodeType, content []byte) *Node {
+ p.closeUnmatchedBlocks()
+ container := p.addChild(typ, 0)
+ container.content = content
+ return container
+}
+
+func (p *Markdown) isPrefixHeading(data []byte) bool {
+ if data[0] != '#' {
+ return false
+ }
+
+ if p.extensions&SpaceHeadings != 0 {
+ level := 0
+ for level < 6 && level < len(data) && data[level] == '#' {
+ level++
+ }
+ if level == len(data) || data[level] != ' ' {
+ return false
+ }
+ }
+ return true
+}
+
+func (p *Markdown) prefixHeading(data []byte) int {
+ level := 0
+ for level < 6 && level < len(data) && data[level] == '#' {
+ level++
+ }
+ i := skipChar(data, level, ' ')
+ end := skipUntilChar(data, i, '\n')
+ skip := end
+ id := ""
+ if p.extensions&HeadingIDs != 0 {
+ j, k := 0, 0
+ // find start/end of heading id
+ for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ {
+ }
+ for k = j + 1; k < end && data[k] != '}'; k++ {
+ }
+ // extract heading id iff found
+ if j < end && k < end {
+ id = string(data[j+2 : k])
+ end = j
+ skip = k + 1
+ for end > 0 && data[end-1] == ' ' {
+ end--
+ }
+ }
+ }
+ for end > 0 && data[end-1] == '#' {
+ if isBackslashEscaped(data, end-1) {
+ break
+ }
+ end--
+ }
+ for end > 0 && data[end-1] == ' ' {
+ end--
+ }
+ if end > i {
+ if id == "" && p.extensions&AutoHeadingIDs != 0 {
+ id = sanitized_anchor_name.Create(string(data[i:end]))
+ }
+ block := p.addBlock(Heading, data[i:end])
+ block.HeadingID = id
+ block.Level = level
+ }
+ return skip
+}
+
+func (p *Markdown) isUnderlinedHeading(data []byte) int {
+ // test of level 1 heading
+ if data[0] == '=' {
+ i := skipChar(data, 1, '=')
+ i = skipChar(data, i, ' ')
+ if i < len(data) && data[i] == '\n' {
+ return 1
+ }
+ return 0
+ }
+
+ // test of level 2 heading
+ if data[0] == '-' {
+ i := skipChar(data, 1, '-')
+ i = skipChar(data, i, ' ')
+ if i < len(data) && data[i] == '\n' {
+ return 2
+ }
+ return 0
+ }
+
+ return 0
+}
+
+func (p *Markdown) titleBlock(data []byte, doRender bool) int {
+ if data[0] != '%' {
+ return 0
+ }
+ splitData := bytes.Split(data, []byte("\n"))
+ var i int
+ for idx, b := range splitData {
+ if !bytes.HasPrefix(b, []byte("%")) {
+ i = idx // - 1
+ break
+ }
+ }
+
+ data = bytes.Join(splitData[0:i], []byte("\n"))
+ consumed := len(data)
+ data = bytes.TrimPrefix(data, []byte("% "))
+ data = bytes.Replace(data, []byte("\n% "), []byte("\n"), -1)
+ block := p.addBlock(Heading, data)
+ block.Level = 1
+ block.IsTitleblock = true
+
+ return consumed
+}
+
+func (p *Markdown) html(data []byte, doRender bool) int {
+ var i, j int
+
+ // identify the opening tag
+ if data[0] != '<' {
+ return 0
+ }
+ curtag, tagfound := p.htmlFindTag(data[1:])
+
+ // handle special cases
+ if !tagfound {
+ // check for an HTML comment
+ if size := p.htmlComment(data, doRender); size > 0 {
+ return size
+ }
+
+ // check for an <hr> tag
+ if size := p.htmlHr(data, doRender); size > 0 {
+ return size
+ }
+
+ // no special case recognized
+ return 0
+ }
+
+ // look for an unindented matching closing tag
+ // followed by a blank line
+ found := false
+ /*
+ closetag := []byte("\n" + curtag + ">")
+ j = len(curtag) + 1
+ for !found {
+ // scan for a closing tag at the beginning of a line
+ if skip := bytes.Index(data[j:], closetag); skip >= 0 {
+ j += skip + len(closetag)
+ } else {
+ break
+ }
+
+ // see if it is the only thing on the line
+ if skip := p.isEmpty(data[j:]); skip > 0 {
+ // see if it is followed by a blank line/eof
+ j += skip
+ if j >= len(data) {
+ found = true
+ i = j
+ } else {
+ if skip := p.isEmpty(data[j:]); skip > 0 {
+ j += skip
+ found = true
+ i = j
+ }
+ }
+ }
+ }
+ */
+
+ // if not found, try a second pass looking for indented match
+ // but not if tag is "ins" or "del" (following original Markdown.pl)
+ if !found && curtag != "ins" && curtag != "del" {
+ i = 1
+ for i < len(data) {
+ i++
+ for i < len(data) && !(data[i-1] == '<' && data[i] == '/') {
+ i++
+ }
+
+ if i+2+len(curtag) >= len(data) {
+ break
+ }
+
+ j = p.htmlFindEnd(curtag, data[i-1:])
+
+ if j > 0 {
+ i += j - 1
+ found = true
+ break
+ }
+ }
+ }
+
+ if !found {
+ return 0
+ }
+
+ // the end of the block has been found
+ if doRender {
+ // trim newlines
+ end := i
+ for end > 0 && data[end-1] == '\n' {
+ end--
+ }
+ finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end]))
+ }
+
+ return i
+}
+
+func finalizeHTMLBlock(block *Node) {
+ block.Literal = block.content
+ block.content = nil
+}
+
+// HTML comment, lax form
+func (p *Markdown) htmlComment(data []byte, doRender bool) int {
+ i := p.inlineHTMLComment(data)
+ // needs to end with a blank line
+ if j := p.isEmpty(data[i:]); j > 0 {
+ size := i + j
+ if doRender {
+ // trim trailing newlines
+ end := size
+ for end > 0 && data[end-1] == '\n' {
+ end--
+ }
+ block := p.addBlock(HTMLBlock, data[:end])
+ finalizeHTMLBlock(block)
+ }
+ return size
+ }
+ return 0
+}
+
+// HR, which is the only self-closing block tag considered
+func (p *Markdown) htmlHr(data []byte, doRender bool) int {
+ if len(data) < 4 {
+ return 0
+ }
+ if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') {
+ return 0
+ }
+ if data[3] != ' ' && data[3] != '/' && data[3] != '>' {
+ // not an <hr> tag after all; at least not a valid one
+ return 0
+ }
+ i := 3
+ for i < len(data) && data[i] != '>' && data[i] != '\n' {
+ i++
+ }
+ if i < len(data) && data[i] == '>' {
+ i++
+ if j := p.isEmpty(data[i:]); j > 0 {
+ size := i + j
+ if doRender {
+ // trim newlines
+ end := size
+ for end > 0 && data[end-1] == '\n' {
+ end--
+ }
+ finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end]))
+ }
+ return size
+ }
+ }
+ return 0
+}
+
+func (p *Markdown) htmlFindTag(data []byte) (string, bool) {
+ i := 0
+ for i < len(data) && isalnum(data[i]) {
+ i++
+ }
+ key := string(data[:i])
+ if _, ok := blockTags[key]; ok {
+ return key, true
+ }
+ return "", false
+}
+
+func (p *Markdown) htmlFindEnd(tag string, data []byte) int {
+ // assume data[0] == '<' && data[1] == '/' already tested
+ if tag == "hr" {
+ return 2
+ }
+ // check if tag is a match
+ closetag := []byte("" + tag + ">")
+ if !bytes.HasPrefix(data, closetag) {
+ return 0
+ }
+ i := len(closetag)
+
+ // check that the rest of the line is blank
+ skip := 0
+ if skip = p.isEmpty(data[i:]); skip == 0 {
+ return 0
+ }
+ i += skip
+ skip = 0
+
+ if i >= len(data) {
+ return i
+ }
+
+ if p.extensions&LaxHTMLBlocks != 0 {
+ return i
+ }
+ if skip = p.isEmpty(data[i:]); skip == 0 {
+ // following line must be blank
+ return 0
+ }
+
+ return i + skip
+}
+
+func (*Markdown) isEmpty(data []byte) int {
+ // it is okay to call isEmpty on an empty buffer
+ if len(data) == 0 {
+ return 0
+ }
+
+ var i int
+ for i = 0; i < len(data) && data[i] != '\n'; i++ {
+ if data[i] != ' ' && data[i] != '\t' {
+ return 0
+ }
+ }
+ if i < len(data) && data[i] == '\n' {
+ i++
+ }
+ return i
+}
+
+func (*Markdown) isHRule(data []byte) bool {
+ i := 0
+
+ // skip up to three spaces
+ for i < 3 && data[i] == ' ' {
+ i++
+ }
+
+ // look at the hrule char
+ if data[i] != '*' && data[i] != '-' && data[i] != '_' {
+ return false
+ }
+ c := data[i]
+
+ // the whole line must be the char or whitespace
+ n := 0
+ for i < len(data) && data[i] != '\n' {
+ switch {
+ case data[i] == c:
+ n++
+ case data[i] != ' ':
+ return false
+ }
+ i++
+ }
+
+ return n >= 3
+}
+
+// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data,
+// and returns the end index if so, or 0 otherwise. It also returns the marker found.
+// If info is not nil, it gets set to the syntax specified in the fence line.
+func isFenceLine(data []byte, info *string, oldmarker string) (end int, marker string) {
+ i, size := 0, 0
+
+ // skip up to three spaces
+ for i < len(data) && i < 3 && data[i] == ' ' {
+ i++
+ }
+
+ // check for the marker characters: ~ or `
+ if i >= len(data) {
+ return 0, ""
+ }
+ if data[i] != '~' && data[i] != '`' {
+ return 0, ""
+ }
+
+ c := data[i]
+
+ // the whole line must be the same char or whitespace
+ for i < len(data) && data[i] == c {
+ size++
+ i++
+ }
+
+ // the marker char must occur at least 3 times
+ if size < 3 {
+ return 0, ""
+ }
+ marker = string(data[i-size : i])
+
+ // if this is the end marker, it must match the beginning marker
+ if oldmarker != "" && marker != oldmarker {
+ return 0, ""
+ }
+
+ // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here
+ // into one, always get the info string, and discard it if the caller doesn't care.
+ if info != nil {
+ infoLength := 0
+ i = skipChar(data, i, ' ')
+
+ if i >= len(data) {
+ if i == len(data) {
+ return i, marker
+ }
+ return 0, ""
+ }
+
+ infoStart := i
+
+ if data[i] == '{' {
+ i++
+ infoStart++
+
+ for i < len(data) && data[i] != '}' && data[i] != '\n' {
+ infoLength++
+ i++
+ }
+
+ if i >= len(data) || data[i] != '}' {
+ return 0, ""
+ }
+
+ // strip all whitespace at the beginning and the end
+ // of the {} block
+ for infoLength > 0 && isspace(data[infoStart]) {
+ infoStart++
+ infoLength--
+ }
+
+ for infoLength > 0 && isspace(data[infoStart+infoLength-1]) {
+ infoLength--
+ }
+ i++
+ i = skipChar(data, i, ' ')
+ } else {
+ for i < len(data) && !isverticalspace(data[i]) {
+ infoLength++
+ i++
+ }
+ }
+
+ *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength]))
+ }
+
+ if i == len(data) {
+ return i, marker
+ }
+ if i > len(data) || data[i] != '\n' {
+ return 0, ""
+ }
+ return i + 1, marker // Take newline into account.
+}
+
+// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning,
+// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects.
+// If doRender is true, a final newline is mandatory to recognize the fenced code block.
+func (p *Markdown) fencedCodeBlock(data []byte, doRender bool) int {
+ var info string
+ beg, marker := isFenceLine(data, &info, "")
+ if beg == 0 || beg >= len(data) {
+ return 0
+ }
+
+ var work bytes.Buffer
+ work.Write([]byte(info))
+ work.WriteByte('\n')
+
+ for {
+ // safe to assume beg < len(data)
+
+ // check for the end of the code block
+ fenceEnd, _ := isFenceLine(data[beg:], nil, marker)
+ if fenceEnd != 0 {
+ beg += fenceEnd
+ break
+ }
+
+ // copy the current line
+ end := skipUntilChar(data, beg, '\n') + 1
+
+ // did we reach the end of the buffer without a closing marker?
+ if end >= len(data) {
+ return 0
+ }
+
+ // verbatim copy to the working buffer
+ if doRender {
+ work.Write(data[beg:end])
+ }
+ beg = end
+ }
+
+ if doRender {
+ block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer
+ block.IsFenced = true
+ finalizeCodeBlock(block)
+ }
+
+ return beg
+}
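
To see the fenced-code path end to end, the exported `Run` entry point (whose default `CommonExtensions` include `FencedCode`) turns a backtick fence with an info string into a code block whose language lands in a class attribute. A small sketch:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// isFenceLine captures "go" as the info string; the HTML renderer
	// later emits it as class="language-go".
	input := []byte("```go\nfmt.Println(\"hi\")\n```\n")
	fmt.Print(string(blackfriday.Run(input)))
	// Output is roughly:
	// <pre><code class="language-go">fmt.Println(&quot;hi&quot;)
	// </code></pre>
}
```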
+
+func unescapeChar(str []byte) []byte {
+ if str[0] == '\\' {
+ return []byte{str[1]}
+ }
+ return []byte(html.UnescapeString(string(str)))
+}
+
+func unescapeString(str []byte) []byte {
+ if reBackslashOrAmp.Match(str) {
+ return reEntityOrEscapedChar.ReplaceAllFunc(str, unescapeChar)
+ }
+ return str
+}
+
+func finalizeCodeBlock(block *Node) {
+ if block.IsFenced {
+ newlinePos := bytes.IndexByte(block.content, '\n')
+ firstLine := block.content[:newlinePos]
+ rest := block.content[newlinePos+1:]
+ block.Info = unescapeString(bytes.Trim(firstLine, "\n"))
+ block.Literal = rest
+ } else {
+ block.Literal = block.content
+ }
+ block.content = nil
+}
+
+func (p *Markdown) table(data []byte) int {
+ table := p.addBlock(Table, nil)
+ i, columns := p.tableHeader(data)
+ if i == 0 {
+ p.tip = table.Parent
+ table.Unlink()
+ return 0
+ }
+
+ p.addBlock(TableBody, nil)
+
+ for i < len(data) {
+ pipes, rowStart := 0, i
+ for ; i < len(data) && data[i] != '\n'; i++ {
+ if data[i] == '|' {
+ pipes++
+ }
+ }
+
+ if pipes == 0 {
+ i = rowStart
+ break
+ }
+
+ // include the newline in data sent to tableRow
+ if i < len(data) && data[i] == '\n' {
+ i++
+ }
+ p.tableRow(data[rowStart:i], columns, false)
+ }
+
+ return i
+}
+
+// check if the specified position is preceded by an odd number of backslashes
+func isBackslashEscaped(data []byte, i int) bool {
+ backslashes := 0
+ for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' {
+ backslashes++
+ }
+ return backslashes&1 == 1
+}
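
The escape check matters mostly in tables, where a backslash-escaped pipe must stay inside its cell rather than start a new one. A quick sketch through the public API (`Run` enables `Tables` by default):

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// The "\|" in the body row is preceded by an odd number of
	// backslashes, so the table parser keeps it inside the first cell.
	input := []byte("a | b\n--- | ---\nleft \\| right | 2\n")
	fmt.Print(string(blackfriday.Run(input)))
}
```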
+
+func (p *Markdown) tableHeader(data []byte) (size int, columns []CellAlignFlags) {
+ i := 0
+ colCount := 1
+ for i = 0; i < len(data) && data[i] != '\n'; i++ {
+ if data[i] == '|' && !isBackslashEscaped(data, i) {
+ colCount++
+ }
+ }
+
+ // doesn't look like a table header
+ if colCount == 1 {
+ return
+ }
+
+ // include the newline in the data sent to tableRow
+ j := i
+ if j < len(data) && data[j] == '\n' {
+ j++
+ }
+ header := data[:j]
+
+ // column count ignores pipes at beginning or end of line
+ if data[0] == '|' {
+ colCount--
+ }
+ if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) {
+ colCount--
+ }
+
+ columns = make([]CellAlignFlags, colCount)
+
+ // move on to the header underline
+ i++
+ if i >= len(data) {
+ return
+ }
+
+ if data[i] == '|' && !isBackslashEscaped(data, i) {
+ i++
+ }
+ i = skipChar(data, i, ' ')
+
+ // each column header is of form: / *:?-+:? *|/ with # dashes + # colons >= 3
+ // and trailing | optional on last column
+ col := 0
+ for i < len(data) && data[i] != '\n' {
+ dashes := 0
+
+ if data[i] == ':' {
+ i++
+ columns[col] |= TableAlignmentLeft
+ dashes++
+ }
+ for i < len(data) && data[i] == '-' {
+ i++
+ dashes++
+ }
+ if i < len(data) && data[i] == ':' {
+ i++
+ columns[col] |= TableAlignmentRight
+ dashes++
+ }
+ for i < len(data) && data[i] == ' ' {
+ i++
+ }
+ if i == len(data) {
+ return
+ }
+ // end of column test is messy
+ switch {
+ case dashes < 3:
+ // not a valid column
+ return
+
+ case data[i] == '|' && !isBackslashEscaped(data, i):
+ // marker found, now skip past trailing whitespace
+ col++
+ i++
+ for i < len(data) && data[i] == ' ' {
+ i++
+ }
+
+ // trailing junk found after last column
+ if col >= colCount && i < len(data) && data[i] != '\n' {
+ return
+ }
+
+ case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount:
+ // something else found where marker was required
+ return
+
+ case data[i] == '\n':
+ // marker is optional for the last column
+ col++
+
+ default:
+ // trailing junk found after last column
+ return
+ }
+ }
+ if col != colCount {
+ return
+ }
+
+ p.addBlock(TableHead, nil)
+ p.tableRow(header, columns, true)
+ size = i
+ if size < len(data) && data[size] == '\n' {
+ size++
+ }
+ return
+}
+
+func (p *Markdown) tableRow(data []byte, columns []CellAlignFlags, header bool) {
+ p.addBlock(TableRow, nil)
+ i, col := 0, 0
+
+ if data[i] == '|' && !isBackslashEscaped(data, i) {
+ i++
+ }
+
+ for col = 0; col < len(columns) && i < len(data); col++ {
+ for i < len(data) && data[i] == ' ' {
+ i++
+ }
+
+ cellStart := i
+
+ for i < len(data) && (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' {
+ i++
+ }
+
+ cellEnd := i
+
+ // skip the end-of-cell marker, possibly taking us past end of buffer
+ i++
+
+ for cellEnd > cellStart && cellEnd-1 < len(data) && data[cellEnd-1] == ' ' {
+ cellEnd--
+ }
+
+ cell := p.addBlock(TableCell, data[cellStart:cellEnd])
+ cell.IsHeader = header
+ cell.Align = columns[col]
+ }
+
+ // pad it out with empty columns to get the right number
+ for ; col < len(columns); col++ {
+ cell := p.addBlock(TableCell, nil)
+ cell.IsHeader = header
+ cell.Align = columns[col]
+ }
+
+ // silently ignore rows with too many cells
+}
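
Tying `tableHeader` and `tableRow` together: the colons in the delimiter row set the per-column `CellAlignFlags`, which the HTML renderer emits as `align` attributes on the cells. For example:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// :--- left-aligns, ---: right-aligns, :---: centers.
	input := []byte("h1 | h2 | h3\n:--- | ---: | :---:\na | b | c\n")
	fmt.Print(string(blackfriday.Run(input)))
	// The <th>/<td> tags carry align="left", align="right", align="center".
}
```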
+
+// returns blockquote prefix length
+func (p *Markdown) quotePrefix(data []byte) int {
+ i := 0
+ for i < 3 && i < len(data) && data[i] == ' ' {
+ i++
+ }
+ if i < len(data) && data[i] == '>' {
+ if i+1 < len(data) && data[i+1] == ' ' {
+ return i + 2
+ }
+ return i + 1
+ }
+ return 0
+}
+
+// blockquote ends with at least one blank line
+// followed by something without a blockquote prefix
+func (p *Markdown) terminateBlockquote(data []byte, beg, end int) bool {
+ if p.isEmpty(data[beg:]) <= 0 {
+ return false
+ }
+ if end >= len(data) {
+ return true
+ }
+ return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0
+}
+
+// parse a blockquote fragment
+func (p *Markdown) quote(data []byte) int {
+ block := p.addBlock(BlockQuote, nil)
+ var raw bytes.Buffer
+ beg, end := 0, 0
+ for beg < len(data) {
+ end = beg
+ // Step over whole lines, collecting them. While doing that, check for
+ // fenced code and if one's found, incorporate it altogether,
+		// regardless of any contents inside it
+ for end < len(data) && data[end] != '\n' {
+ if p.extensions&FencedCode != 0 {
+ if i := p.fencedCodeBlock(data[end:], false); i > 0 {
+ // -1 to compensate for the extra end++ after the loop:
+ end += i - 1
+ break
+ }
+ }
+ end++
+ }
+ if end < len(data) && data[end] == '\n' {
+ end++
+ }
+ if pre := p.quotePrefix(data[beg:]); pre > 0 {
+ // skip the prefix
+ beg += pre
+ } else if p.terminateBlockquote(data, beg, end) {
+ break
+ }
+ // this line is part of the blockquote
+ raw.Write(data[beg:end])
+ beg = end
+ }
+ p.block(raw.Bytes())
+ p.finalize(block)
+ return end
+}
+
+// returns prefix length for block code
+func (p *Markdown) codePrefix(data []byte) int {
+ if len(data) >= 1 && data[0] == '\t' {
+ return 1
+ }
+ if len(data) >= 4 && data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' {
+ return 4
+ }
+ return 0
+}
+
+func (p *Markdown) code(data []byte) int {
+ var work bytes.Buffer
+
+ i := 0
+ for i < len(data) {
+ beg := i
+ for i < len(data) && data[i] != '\n' {
+ i++
+ }
+ if i < len(data) && data[i] == '\n' {
+ i++
+ }
+
+ blankline := p.isEmpty(data[beg:i]) > 0
+ if pre := p.codePrefix(data[beg:i]); pre > 0 {
+ beg += pre
+ } else if !blankline {
+ // non-empty, non-prefixed line breaks the pre
+ i = beg
+ break
+ }
+
+ // verbatim copy to the working buffer
+ if blankline {
+ work.WriteByte('\n')
+ } else {
+ work.Write(data[beg:i])
+ }
+ }
+
+ // trim all the \n off the end of work
+ workbytes := work.Bytes()
+ eol := len(workbytes)
+ for eol > 0 && workbytes[eol-1] == '\n' {
+ eol--
+ }
+ if eol != len(workbytes) {
+ work.Truncate(eol)
+ }
+
+ work.WriteByte('\n')
+
+ block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer
+ block.IsFenced = false
+ finalizeCodeBlock(block)
+
+ return i
+}
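
So a line joins an indented code block when it starts with a tab or four spaces; `code` strips that prefix, preserves interior blank lines, and trims trailing newlines down to one. Through the public API:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Four leading spaces per line trigger the codePrefix path above.
	input := []byte("    x := 1\n    y := 2\n")
	fmt.Print(string(blackfriday.Run(input)))
	// Output is roughly:
	// <pre><code>x := 1
	// y := 2
	// </code></pre>
}
```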
+
+// returns unordered list item prefix
+func (p *Markdown) uliPrefix(data []byte) int {
+ i := 0
+ // start with up to 3 spaces
+ for i < len(data) && i < 3 && data[i] == ' ' {
+ i++
+ }
+ if i >= len(data)-1 {
+ return 0
+ }
+ // need one of {'*', '+', '-'} followed by a space or a tab
+ if (data[i] != '*' && data[i] != '+' && data[i] != '-') ||
+ (data[i+1] != ' ' && data[i+1] != '\t') {
+ return 0
+ }
+ return i + 2
+}
+
+// returns ordered list item prefix
+func (p *Markdown) oliPrefix(data []byte) int {
+ i := 0
+
+ // start with up to 3 spaces
+ for i < 3 && i < len(data) && data[i] == ' ' {
+ i++
+ }
+
+ // count the digits
+ start := i
+ for i < len(data) && data[i] >= '0' && data[i] <= '9' {
+ i++
+ }
+ if start == i || i >= len(data)-1 {
+ return 0
+ }
+
+ // we need >= 1 digits followed by a dot and a space or a tab
+ if data[i] != '.' || !(data[i+1] == ' ' || data[i+1] == '\t') {
+ return 0
+ }
+ return i + 2
+}
+
+// returns definition list item prefix
+func (p *Markdown) dliPrefix(data []byte) int {
+ if len(data) < 2 {
+ return 0
+ }
+ i := 0
+ // need a ':' followed by a space or a tab
+ if data[i] != ':' || !(data[i+1] == ' ' || data[i+1] == '\t') {
+ return 0
+ }
+ for i < len(data) && data[i] == ' ' {
+ i++
+ }
+ return i + 2
+}
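
These three prefix helpers decide which list flavor a line opens: `* `/`+ `/`- ` for unordered, digits plus `. ` for ordered, and a leading `: ` for a definition. Since `CommonExtensions` includes `DefinitionLists`, a sketch of the definition form:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// The ": " prefix marks the definition; the line before it is the term.
	input := []byte("Term\n: its definition\n")
	fmt.Print(string(blackfriday.Run(input)))
	// Output is roughly: <dl><dt>Term</dt><dd>its definition</dd></dl>
}
```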
+
+// parse ordered or unordered list block
+func (p *Markdown) list(data []byte, flags ListType) int {
+ i := 0
+ flags |= ListItemBeginningOfList
+ block := p.addBlock(List, nil)
+ block.ListFlags = flags
+ block.Tight = true
+
+ for i < len(data) {
+ skip := p.listItem(data[i:], &flags)
+ if flags&ListItemContainsBlock != 0 {
+ block.ListData.Tight = false
+ }
+ i += skip
+ if skip == 0 || flags&ListItemEndOfList != 0 {
+ break
+ }
+ flags &= ^ListItemBeginningOfList
+ }
+
+ above := block.Parent
+ finalizeList(block)
+ p.tip = above
+ return i
+}
+
+// Returns true if the list item is not the same type as its parent list
+func (p *Markdown) listTypeChanged(data []byte, flags *ListType) bool {
+ if p.dliPrefix(data) > 0 && *flags&ListTypeDefinition == 0 {
+ return true
+ } else if p.oliPrefix(data) > 0 && *flags&ListTypeOrdered == 0 {
+ return true
+ } else if p.uliPrefix(data) > 0 && (*flags&ListTypeOrdered != 0 || *flags&ListTypeDefinition != 0) {
+ return true
+ }
+ return false
+}
+
+// Returns true if block ends with a blank line, descending if needed
+// into lists and sublists.
+func endsWithBlankLine(block *Node) bool {
+ // TODO: figure this out. Always false now.
+ for block != nil {
+ //if block.lastLineBlank {
+ //return true
+ //}
+ t := block.Type
+ if t == List || t == Item {
+ block = block.LastChild
+ } else {
+ break
+ }
+ }
+ return false
+}
+
+func finalizeList(block *Node) {
+ block.open = false
+ item := block.FirstChild
+ for item != nil {
+ // check for non-final list item ending with blank line:
+ if endsWithBlankLine(item) && item.Next != nil {
+ block.ListData.Tight = false
+ break
+ }
+ // recurse into children of list item, to see if there are spaces
+ // between any of them:
+ subItem := item.FirstChild
+ for subItem != nil {
+ if endsWithBlankLine(subItem) && (item.Next != nil || subItem.Next != nil) {
+ block.ListData.Tight = false
+ break
+ }
+ subItem = subItem.Next
+ }
+ item = item.Next
+ }
+}
+
+// Parse a single list item.
+// Assumes initial prefix is already removed if this is a sublist.
+func (p *Markdown) listItem(data []byte, flags *ListType) int {
+ // keep track of the indentation of the first line
+ itemIndent := 0
+ if data[0] == '\t' {
+ itemIndent += 4
+ } else {
+ for itemIndent < 3 && data[itemIndent] == ' ' {
+ itemIndent++
+ }
+ }
+
+ var bulletChar byte = '*'
+ i := p.uliPrefix(data)
+ if i == 0 {
+ i = p.oliPrefix(data)
+ } else {
+ bulletChar = data[i-2]
+ }
+ if i == 0 {
+ i = p.dliPrefix(data)
+ // reset definition term flag
+ if i > 0 {
+ *flags &= ^ListTypeTerm
+ }
+ }
+ if i == 0 {
+ // if in definition list, set term flag and continue
+ if *flags&ListTypeDefinition != 0 {
+ *flags |= ListTypeTerm
+ } else {
+ return 0
+ }
+ }
+
+ // skip leading whitespace on first line
+ for i < len(data) && data[i] == ' ' {
+ i++
+ }
+
+ // find the end of the line
+ line := i
+ for i > 0 && i < len(data) && data[i-1] != '\n' {
+ i++
+ }
+
+ // get working buffer
+ var raw bytes.Buffer
+
+ // put the first line into the working buffer
+ raw.Write(data[line:i])
+ line = i
+
+ // process the following lines
+ containsBlankLine := false
+ sublist := 0
+ codeBlockMarker := ""
+
+gatherlines:
+ for line < len(data) {
+ i++
+
+ // find the end of this line
+ for i < len(data) && data[i-1] != '\n' {
+ i++
+ }
+
+ // if it is an empty line, guess that it is part of this item
+ // and move on to the next line
+ if p.isEmpty(data[line:i]) > 0 {
+ containsBlankLine = true
+ line = i
+ continue
+ }
+
+ // calculate the indentation
+ indent := 0
+ indentIndex := 0
+ if data[line] == '\t' {
+ indentIndex++
+ indent += 4
+ } else {
+ for indent < 4 && line+indent < i && data[line+indent] == ' ' {
+ indent++
+ indentIndex++
+ }
+ }
+
+ chunk := data[line+indentIndex : i]
+
+ if p.extensions&FencedCode != 0 {
+ // determine if in or out of codeblock
+ // if in codeblock, ignore normal list processing
+ _, marker := isFenceLine(chunk, nil, codeBlockMarker)
+ if marker != "" {
+ if codeBlockMarker == "" {
+ // start of codeblock
+ codeBlockMarker = marker
+ } else {
+ // end of codeblock.
+ codeBlockMarker = ""
+ }
+ }
+ // we are in a codeblock, write line, and continue
+ if codeBlockMarker != "" || marker != "" {
+ raw.Write(data[line+indentIndex : i])
+ line = i
+ continue gatherlines
+ }
+ }
+
+ // evaluate how this line fits in
+ switch {
+ // is this a nested list item?
+ case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) ||
+ p.oliPrefix(chunk) > 0 ||
+ p.dliPrefix(chunk) > 0:
+
+ // to be a nested list, it must be indented more
+ // if not, it is either a different kind of list
+ // or the next item in the same list
+ if indent <= itemIndent {
+ if p.listTypeChanged(chunk, flags) {
+ *flags |= ListItemEndOfList
+ } else if containsBlankLine {
+ *flags |= ListItemContainsBlock
+ }
+
+ break gatherlines
+ }
+
+ if containsBlankLine {
+ *flags |= ListItemContainsBlock
+ }
+
+ // is this the first item in the nested list?
+ if sublist == 0 {
+ sublist = raw.Len()
+ }
+
+ // is this a nested prefix heading?
+ case p.isPrefixHeading(chunk):
+ // if the heading is not indented, it is not nested in the list
+ // and thus ends the list
+ if containsBlankLine && indent < 4 {
+ *flags |= ListItemEndOfList
+ break gatherlines
+ }
+ *flags |= ListItemContainsBlock
+
+ // anything following an empty line is only part
+ // of this item if it is indented 4 spaces
+ // (regardless of the indentation of the beginning of the item)
+ case containsBlankLine && indent < 4:
+ if *flags&ListTypeDefinition != 0 && i < len(data)-1 {
+ // is the next item still a part of this list?
+ next := i
+ for next < len(data) && data[next] != '\n' {
+ next++
+ }
+ for next < len(data)-1 && data[next] == '\n' {
+ next++
+ }
+ if i < len(data)-1 && data[i] != ':' && data[next] != ':' {
+ *flags |= ListItemEndOfList
+ }
+ } else {
+ *flags |= ListItemEndOfList
+ }
+ break gatherlines
+
+ // a blank line means this should be parsed as a block
+ case containsBlankLine:
+ raw.WriteByte('\n')
+ *flags |= ListItemContainsBlock
+ }
+
+ // if this line was preceded by one or more blanks,
+ // re-introduce the blank into the buffer
+ if containsBlankLine {
+ containsBlankLine = false
+ raw.WriteByte('\n')
+ }
+
+ // add the line into the working buffer without prefix
+ raw.Write(data[line+indentIndex : i])
+
+ line = i
+ }
+
+ rawBytes := raw.Bytes()
+
+ block := p.addBlock(Item, nil)
+ block.ListFlags = *flags
+ block.Tight = false
+ block.BulletChar = bulletChar
+ block.Delimiter = '.' // Only '.' is possible in Markdown, but ')' will also be possible in CommonMark
+
+ // render the contents of the list item
+ if *flags&ListItemContainsBlock != 0 && *flags&ListTypeTerm == 0 {
+ // intermediate render of block item, except for definition term
+ if sublist > 0 {
+ p.block(rawBytes[:sublist])
+ p.block(rawBytes[sublist:])
+ } else {
+ p.block(rawBytes)
+ }
+ } else {
+ // intermediate render of inline item
+ if sublist > 0 {
+ child := p.addChild(Paragraph, 0)
+ child.content = rawBytes[:sublist]
+ p.block(rawBytes[sublist:])
+ } else {
+ child := p.addChild(Paragraph, 0)
+ child.content = rawBytes
+ }
+ }
+ return line
+}
+
+// render a single paragraph that has already been parsed out
+func (p *Markdown) renderParagraph(data []byte) {
+ if len(data) == 0 {
+ return
+ }
+
+ // trim leading spaces
+ beg := 0
+ for data[beg] == ' ' {
+ beg++
+ }
+
+ end := len(data)
+ // trim trailing newline
+ if data[len(data)-1] == '\n' {
+ end--
+ }
+
+ // trim trailing spaces
+ for end > beg && data[end-1] == ' ' {
+ end--
+ }
+
+ p.addBlock(Paragraph, data[beg:end])
+}
+
+func (p *Markdown) paragraph(data []byte) int {
+ // prev: index of 1st char of previous line
+ // line: index of 1st char of current line
+ // i: index of cursor/end of current line
+ var prev, line, i int
+ tabSize := TabSizeDefault
+ if p.extensions&TabSizeEight != 0 {
+ tabSize = TabSizeDouble
+ }
+ // keep going until we find something to mark the end of the paragraph
+ for i < len(data) {
+ // mark the beginning of the current line
+ prev = line
+ current := data[i:]
+ line = i
+
+ // did we find a reference or a footnote? If so, end a paragraph
+ // preceding it and report that we have consumed up to the end of that
+ // reference:
+ if refEnd := isReference(p, current, tabSize); refEnd > 0 {
+ p.renderParagraph(data[:i])
+ return i + refEnd
+ }
+
+ // did we find a blank line marking the end of the paragraph?
+ if n := p.isEmpty(current); n > 0 {
+			// is this blank line followed by a definition list item?
+ if p.extensions&DefinitionLists != 0 {
+ if i < len(data)-1 && data[i+1] == ':' {
+ return p.list(data[prev:], ListTypeDefinition)
+ }
+ }
+
+ p.renderParagraph(data[:i])
+ return i + n
+ }
+
+ // an underline under some text marks a heading, so our paragraph ended on prev line
+ if i > 0 {
+ if level := p.isUnderlinedHeading(current); level > 0 {
+ // render the paragraph
+ p.renderParagraph(data[:prev])
+
+ // ignore leading and trailing whitespace
+ eol := i - 1
+ for prev < eol && data[prev] == ' ' {
+ prev++
+ }
+ for eol > prev && data[eol-1] == ' ' {
+ eol--
+ }
+
+ id := ""
+ if p.extensions&AutoHeadingIDs != 0 {
+ id = sanitized_anchor_name.Create(string(data[prev:eol]))
+ }
+
+ block := p.addBlock(Heading, data[prev:eol])
+ block.Level = level
+ block.HeadingID = id
+
+ // find the end of the underline
+ for i < len(data) && data[i] != '\n' {
+ i++
+ }
+ return i
+ }
+ }
+
+ // if the next line starts a block of HTML, then the paragraph ends here
+ if p.extensions&LaxHTMLBlocks != 0 {
+ if data[i] == '<' && p.html(current, false) > 0 {
+ // rewind to before the HTML block
+ p.renderParagraph(data[:i])
+ return i
+ }
+ }
+
+ // if there's a prefixed heading or a horizontal rule after this, paragraph is over
+ if p.isPrefixHeading(current) || p.isHRule(current) {
+ p.renderParagraph(data[:i])
+ return i
+ }
+
+ // if there's a fenced code block, paragraph is over
+ if p.extensions&FencedCode != 0 {
+ if p.fencedCodeBlock(current, false) > 0 {
+ p.renderParagraph(data[:i])
+ return i
+ }
+ }
+
+ // if there's a definition list item, prev line is a definition term
+ if p.extensions&DefinitionLists != 0 {
+ if p.dliPrefix(current) != 0 {
+ ret := p.list(data[prev:], ListTypeDefinition)
+ return ret
+ }
+ }
+
+ // if there's a list after this, paragraph is over
+ if p.extensions&NoEmptyLineBeforeBlock != 0 {
+ if p.uliPrefix(current) != 0 ||
+ p.oliPrefix(current) != 0 ||
+ p.quotePrefix(current) != 0 ||
+ p.codePrefix(current) != 0 {
+ p.renderParagraph(data[:i])
+ return i
+ }
+ }
+
+ // otherwise, scan to the beginning of the next line
+ nl := bytes.IndexByte(data[i:], '\n')
+ if nl >= 0 {
+ i += nl + 1
+ } else {
+ i += len(data[i:])
+ }
+ }
+
+ p.renderParagraph(data[:i])
+ return i
+}
+
+func skipChar(data []byte, start int, char byte) int {
+ i := start
+ for i < len(data) && data[i] == char {
+ i++
+ }
+ return i
+}
+
+func skipUntilChar(text []byte, start int, char byte) int {
+ i := start
+ for i < len(text) && text[i] != char {
+ i++
+ }
+ return i
+}
diff --git a/vendor/github.com/russross/blackfriday/v2/doc.go b/vendor/github.com/russross/blackfriday/v2/doc.go
new file mode 100644
index 00000000..5b3fa987
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/doc.go
@@ -0,0 +1,18 @@
+// Package blackfriday is a markdown processor.
+//
+// It translates plain text with simple formatting rules into an AST, which can
+// then be further processed to HTML (provided by Blackfriday itself) or other
+// formats (provided by the community).
+//
+// The simplest way to invoke Blackfriday is to call the Run function. It will
+// take a text input and produce a text output in HTML (or other format).
+//
+// A slightly more sophisticated way to use Blackfriday is to create a Markdown
+// processor and to call Parse, which returns a syntax tree for the input
+// document. You can leverage Blackfriday's parsing for content extraction from
+// markdown documents. You can assign a custom renderer and set various options
+// to the Markdown processor.
+//
+// If you're interested in calling Blackfriday from command line, see
+// https://github.com/russross/blackfriday-tool.
+package blackfriday
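
A minimal sketch of the two entry points the package comment describes, the one-shot `Run` and the parse-then-walk flow:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// One-shot conversion with the default extensions and HTML renderer.
	html := blackfriday.Run([]byte("# Title\n\nSome *markdown*.\n"))
	fmt.Print(string(html))

	// Lower-level: build a processor, parse to an AST, and inspect it.
	md := blackfriday.New(blackfriday.WithExtensions(blackfriday.CommonExtensions))
	ast := md.Parse([]byte("# Title\n"))
	ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		if entering && node.Type == blackfriday.Heading {
			fmt.Println("heading, level", node.Level)
		}
		return blackfriday.GoToNext
	})
}
```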
diff --git a/vendor/github.com/russross/blackfriday/v2/esc.go b/vendor/github.com/russross/blackfriday/v2/esc.go
new file mode 100644
index 00000000..6385f27c
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/esc.go
@@ -0,0 +1,34 @@
+package blackfriday
+
+import (
+ "html"
+ "io"
+)
+
+var htmlEscaper = [256][]byte{
+	'&': []byte("&amp;"),
+	'<': []byte("&lt;"),
+	'>': []byte("&gt;"),
+	'"': []byte("&quot;"),
+}
+
+func escapeHTML(w io.Writer, s []byte) {
+ var start, end int
+ for end < len(s) {
+ escSeq := htmlEscaper[s[end]]
+ if escSeq != nil {
+ w.Write(s[start:end])
+ w.Write(escSeq)
+ start = end + 1
+ }
+ end++
+ }
+ if start < len(s) && end <= len(s) {
+ w.Write(s[start:end])
+ }
+}
+
+func escLink(w io.Writer, text []byte) {
+ unesc := html.UnescapeString(string(text))
+ escapeHTML(w, []byte(unesc))
+}
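
Both helpers are unexported, so here is a standalone sketch of the same table-driven escaping: a 256-entry array keyed by byte, where `nil` means "copy through", letting the loop emit unescaped runs in single `Write` calls:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// escaper has the same shape as the vendored table: only the four
// HTML-significant bytes map to entities, everything else is nil.
var escaper = [256][]byte{
	'&': []byte("&amp;"),
	'<': []byte("&lt;"),
	'>': []byte("&gt;"),
	'"': []byte("&quot;"),
}

func escapeHTML(w io.Writer, s []byte) {
	var start, end int
	for end < len(s) {
		if esc := escaper[s[end]]; esc != nil {
			w.Write(s[start:end]) // flush the unescaped run
			w.Write(esc)
			start = end + 1
		}
		end++
	}
	if start < len(s) {
		w.Write(s[start:])
	}
}

func main() {
	var buf bytes.Buffer
	escapeHTML(&buf, []byte(`<a href="x">&co`))
	fmt.Println(buf.String()) // &lt;a href=&quot;x&quot;&gt;&amp;co
}
```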
diff --git a/vendor/github.com/russross/blackfriday/v2/go.mod b/vendor/github.com/russross/blackfriday/v2/go.mod
new file mode 100644
index 00000000..620b74e0
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/go.mod
@@ -0,0 +1 @@
+module github.com/russross/blackfriday/v2
diff --git a/vendor/github.com/russross/blackfriday/v2/html.go b/vendor/github.com/russross/blackfriday/v2/html.go
new file mode 100644
index 00000000..284c8718
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/html.go
@@ -0,0 +1,949 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+//
+// HTML rendering backend
+//
+//
+
+package blackfriday
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "regexp"
+ "strings"
+)
+
+// HTMLFlags control optional behavior of HTML renderer.
+type HTMLFlags int
+
+// HTML renderer configuration options.
+const (
+ HTMLFlagsNone HTMLFlags = 0
+ SkipHTML HTMLFlags = 1 << iota // Skip preformatted HTML blocks
+ SkipImages // Skip embedded images
+ SkipLinks // Skip all links
+ Safelink // Only link to trusted protocols
+ NofollowLinks // Only link with rel="nofollow"
+ NoreferrerLinks // Only link with rel="noreferrer"
+ NoopenerLinks // Only link with rel="noopener"
+ HrefTargetBlank // Add a blank target
+ CompletePage // Generate a complete HTML page
+ UseXHTML // Generate XHTML output instead of HTML
+ FootnoteReturnLinks // Generate a link at the end of a footnote to return to the source
+ Smartypants // Enable smart punctuation substitutions
+ SmartypantsFractions // Enable smart fractions (with Smartypants)
+ SmartypantsDashes // Enable smart dashes (with Smartypants)
+ SmartypantsLatexDashes // Enable LaTeX-style dashes (with Smartypants)
+ SmartypantsAngledQuotes // Enable angled double quotes (with Smartypants) for double quotes rendering
+ SmartypantsQuotesNBSP // Enable « French guillemets » (with Smartypants)
+ TOC // Generate a table of contents
+)
+
+var (
+ htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag)
+)
+
+const (
+ htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" +
+ processingInstruction + "|" + declaration + "|" + cdata + ")"
+	closeTag              = "</" + tagName + "\\s*[>]"
+ openTag = "<" + tagName + attribute + "*" + "\\s*/?>"
+ attribute = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)"
+ attributeValue = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")"
+ attributeValueSpec = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")"
+ attributeName = "[a-zA-Z_:][a-zA-Z0-9:._-]*"
+	cdata                 = "<!\\[CDATA\\[[\\s\\S]*?\\]\\]>"
+	declaration           = "<![A-Z]+" + "\\s+[^>]*>"
+	doubleQuotedValue     = "\"[^\"]*\""
+	htmlComment           = "<!---->|<!--(?:-?[^>-])(?:-?[^-])*-->"
+ processingInstruction = "[<][?].*?[?][>]"
+ singleQuotedValue = "'[^']*'"
+ tagName = "[A-Za-z][A-Za-z0-9-]*"
+ unquotedValue = "[^\"'=<>`\\x00-\\x20]+"
+)
+
+// HTMLRendererParameters is a collection of supplementary parameters tweaking
+// the behavior of various parts of HTML renderer.
+type HTMLRendererParameters struct {
+ // Prepend this text to each relative URL.
+ AbsolutePrefix string
+ // Add this text to each footnote anchor, to ensure uniqueness.
+ FootnoteAnchorPrefix string
+	// Show this text inside the <a> tag for a footnote return link, if the
+	// HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string
+	// <sup>[return]</sup> is used.
+ FootnoteReturnLinkContents string
+ // If set, add this text to the front of each Heading ID, to ensure
+ // uniqueness.
+ HeadingIDPrefix string
+ // If set, add this text to the back of each Heading ID, to ensure uniqueness.
+ HeadingIDSuffix string
+	// Increase heading levels: if the offset is 1, <h1> becomes <h2> etc.
+ // Negative offset is also valid.
+ // Resulting levels are clipped between 1 and 6.
+ HeadingLevelOffset int
+
+ Title string // Document title (used if CompletePage is set)
+ CSS string // Optional CSS file URL (used if CompletePage is set)
+ Icon string // Optional icon file URL (used if CompletePage is set)
+
+ Flags HTMLFlags // Flags allow customizing this renderer's behavior
+}
+
+// HTMLRenderer is a type that implements the Renderer interface for HTML output.
+//
+// Do not create this directly, instead use the NewHTMLRenderer function.
+type HTMLRenderer struct {
+ HTMLRendererParameters
+
+ closeTag string // how to end singleton tags: either " />" or ">"
+
+ // Track heading IDs to prevent ID collision in a single generation.
+ headingIDs map[string]int
+
+ lastOutputLen int
+ disableTags int
+
+ sr *SPRenderer
+}
+
+const (
+ xhtmlClose = " />"
+ htmlClose = ">"
+)
+
+// NewHTMLRenderer creates and configures an HTMLRenderer object, which
+// satisfies the Renderer interface.
+func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer {
+ // configure the rendering engine
+ closeTag := htmlClose
+ if params.Flags&UseXHTML != 0 {
+ closeTag = xhtmlClose
+ }
+
+ if params.FootnoteReturnLinkContents == "" {
+		params.FootnoteReturnLinkContents = `<sup>[return]</sup>`
+ }
+
+ return &HTMLRenderer{
+ HTMLRendererParameters: params,
+
+ closeTag: closeTag,
+ headingIDs: make(map[string]int),
+
+ sr: NewSmartypantsRenderer(params.Flags),
+ }
+}
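
A usage sketch: build the renderer with explicit parameters and hand it to `Run` via `WithRenderer`. Under `UseXHTML` the singleton tags switch to their ` />` form:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	r := blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{
		Flags: blackfriday.UseXHTML,
	})
	// The two trailing spaces force a hard break, rendered as <br /> here.
	out := blackfriday.Run([]byte("line one  \nline two\n"),
		blackfriday.WithRenderer(r))
	fmt.Print(string(out))
}
```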
+
+func isHTMLTag(tag []byte, tagname string) bool {
+ found, _ := findHTMLTagPos(tag, tagname)
+ return found
+}
+
+// Look for a character, but ignore it when it's in any kind of quotes, it
+// might be JavaScript
+func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int {
+ inSingleQuote := false
+ inDoubleQuote := false
+ inGraveQuote := false
+ i := start
+ for i < len(html) {
+ switch {
+ case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote:
+ return i
+ case html[i] == '\'':
+ inSingleQuote = !inSingleQuote
+ case html[i] == '"':
+ inDoubleQuote = !inDoubleQuote
+ case html[i] == '`':
+ inGraveQuote = !inGraveQuote
+ }
+ i++
+ }
+ return start
+}
+
+func findHTMLTagPos(tag []byte, tagname string) (bool, int) {
+ i := 0
+ if i < len(tag) && tag[0] != '<' {
+ return false, -1
+ }
+ i++
+ i = skipSpace(tag, i)
+
+ if i < len(tag) && tag[i] == '/' {
+ i++
+ }
+
+ i = skipSpace(tag, i)
+ j := 0
+ for ; i < len(tag); i, j = i+1, j+1 {
+ if j >= len(tagname) {
+ break
+ }
+
+ if strings.ToLower(string(tag[i]))[0] != tagname[j] {
+ return false, -1
+ }
+ }
+
+ if i == len(tag) {
+ return false, -1
+ }
+
+ rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>')
+ if rightAngle >= i {
+ return true, rightAngle
+ }
+
+ return false, -1
+}
+
+func skipSpace(tag []byte, i int) int {
+ for i < len(tag) && isspace(tag[i]) {
+ i++
+ }
+ return i
+}
+
+func isRelativeLink(link []byte) (yes bool) {
+	// an anchor fragment link begins with '#'
+ if link[0] == '#' {
+ return true
+ }
+
+ // link begin with '/' but not '//', the second maybe a protocol relative link
+ if len(link) >= 2 && link[0] == '/' && link[1] != '/' {
+ return true
+ }
+
+ // only the root '/'
+ if len(link) == 1 && link[0] == '/' {
+ return true
+ }
+
+ // current directory : begin with "./"
+ if bytes.HasPrefix(link, []byte("./")) {
+ return true
+ }
+
+ // parent directory : begin with "../"
+ if bytes.HasPrefix(link, []byte("../")) {
+ return true
+ }
+
+ return false
+}
+
+func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string {
+ for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] {
+ tmp := fmt.Sprintf("%s-%d", id, count+1)
+
+ if _, tmpFound := r.headingIDs[tmp]; !tmpFound {
+ r.headingIDs[id] = count + 1
+ id = tmp
+ } else {
+ id = id + "-1"
+ }
+ }
+
+ if _, found := r.headingIDs[id]; !found {
+ r.headingIDs[id] = 0
+ }
+
+ return id
+}
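
The collision handling is easiest to see with the `AutoHeadingIDs` extension (not part of `CommonExtensions`, so enabled explicitly below): two headings that slugify to the same ID get distinct anchors, the second with a `-1` suffix:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	input := []byte("# Setup\n\n# Setup\n")
	out := blackfriday.Run(input, blackfriday.WithExtensions(
		blackfriday.CommonExtensions|blackfriday.AutoHeadingIDs))
	fmt.Print(string(out))
	// Output is roughly:
	// <h1 id="setup">Setup</h1>
	// <h1 id="setup-1">Setup</h1>
}
```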
+
+func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte {
+ if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
+ newDest := r.AbsolutePrefix
+ if link[0] != '/' {
+ newDest += "/"
+ }
+ newDest += string(link)
+ return []byte(newDest)
+ }
+ return link
+}
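
Together with `isRelativeLink`, this means only root- and fragment-style destinations receive the configured prefix; `./` and `../` destinations are deliberately left alone (note the `link[0] != '.'` guard). A sketch with a hypothetical prefix:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	r := blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{
		AbsolutePrefix: "https://docs.example.com", // hypothetical prefix
	})
	input := []byte("[abs](/guide) and [rel](./guide)\n")
	fmt.Print(string(blackfriday.Run(input, blackfriday.WithRenderer(r))))
	// First href becomes https://docs.example.com/guide; second stays ./guide.
}
```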
+
+func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string {
+ if isRelativeLink(link) {
+ return attrs
+ }
+ val := []string{}
+ if flags&NofollowLinks != 0 {
+ val = append(val, "nofollow")
+ }
+ if flags&NoreferrerLinks != 0 {
+ val = append(val, "noreferrer")
+ }
+ if flags&NoopenerLinks != 0 {
+ val = append(val, "noopener")
+ }
+ if flags&HrefTargetBlank != 0 {
+ attrs = append(attrs, "target=\"_blank\"")
+ }
+ if len(val) == 0 {
+ return attrs
+ }
+ attr := fmt.Sprintf("rel=%q", strings.Join(val, " "))
+ return append(attrs, attr)
+}
+
+func isMailto(link []byte) bool {
+ return bytes.HasPrefix(link, []byte("mailto:"))
+}
+
+func needSkipLink(flags HTMLFlags, dest []byte) bool {
+ if flags&SkipLinks != 0 {
+ return true
+ }
+ return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest)
+}
+
+func isSmartypantable(node *Node) bool {
+ pt := node.Parent.Type
+ return pt != Link && pt != CodeBlock && pt != Code
+}
+
+func appendLanguageAttr(attrs []string, info []byte) []string {
+ if len(info) == 0 {
+ return attrs
+ }
+ endOfLang := bytes.IndexAny(info, "\t ")
+ if endOfLang < 0 {
+ endOfLang = len(info)
+ }
+ return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang]))
+}
+
+func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) {
+ w.Write(name)
+ if len(attrs) > 0 {
+ w.Write(spaceBytes)
+ w.Write([]byte(strings.Join(attrs, " ")))
+ }
+ w.Write(gtBytes)
+ r.lastOutputLen = 1
+}
+
+func footnoteRef(prefix string, node *Node) []byte {
+ urlFrag := prefix + string(slugify(node.Destination))
+	anchor := fmt.Sprintf(`<a href="#fn:%s">%d</a>`, urlFrag, node.NoteID)
+	return []byte(fmt.Sprintf(`<sup class="footnote-ref" id="fnref:%s">%s</sup>`, urlFrag, anchor))
+}
+
+func footnoteItem(prefix string, slug []byte) []byte {
+	return []byte(fmt.Sprintf(`<li id="fn:%s%s">`, prefix, slug))
+}
+
+func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte {
+	const format = ` <a class="footnote-return" href="#fnref:%s%s">%s</a>`
+ return []byte(fmt.Sprintf(format, prefix, slug, returnLink))
+}
+
+func itemOpenCR(node *Node) bool {
+ if node.Prev == nil {
+ return false
+ }
+ ld := node.Parent.ListData
+ return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0
+}
+
+func skipParagraphTags(node *Node) bool {
+ grandparent := node.Parent.Parent
+ if grandparent == nil || grandparent.Type != List {
+ return false
+ }
+ tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0
+ return grandparent.Type == List && tightOrTerm
+}
+
+func cellAlignment(align CellAlignFlags) string {
+ switch align {
+ case TableAlignmentLeft:
+ return "left"
+ case TableAlignmentRight:
+ return "right"
+ case TableAlignmentCenter:
+ return "center"
+ default:
+ return ""
+ }
+}
+
+func (r *HTMLRenderer) out(w io.Writer, text []byte) {
+ if r.disableTags > 0 {
+ w.Write(htmlTagRe.ReplaceAll(text, []byte{}))
+ } else {
+ w.Write(text)
+ }
+ r.lastOutputLen = len(text)
+}
+
+func (r *HTMLRenderer) cr(w io.Writer) {
+ if r.lastOutputLen > 0 {
+ r.out(w, nlBytes)
+ }
+}
+
+var (
+ nlBytes = []byte{'\n'}
+ gtBytes = []byte{'>'}
+ spaceBytes = []byte{' '}
+)
+
+var (
+	brTag              = []byte("<br>")
+	brXHTMLTag         = []byte("<br />")
+	emTag              = []byte("<em>")
+	emCloseTag         = []byte("</em>")
+	strongTag          = []byte("<strong>")
+	strongCloseTag     = []byte("</strong>")
+	delTag             = []byte("<del>")
+	delCloseTag        = []byte("</del>")
+	ttTag              = []byte("<tt>")
+	ttCloseTag         = []byte("</tt>")
+	aTag               = []byte("<a")
+	aCloseTag          = []byte("</a>")
+	preTag             = []byte("<pre>")
+	preCloseTag        = []byte("</pre>")
+	codeTag            = []byte("<code>")
+	codeCloseTag       = []byte("</code>")
+	pTag               = []byte("<p>")
+	pCloseTag          = []byte("</p>")
+	blockquoteTag      = []byte("<blockquote>")
+	blockquoteCloseTag = []byte("</blockquote>")
+	hrTag              = []byte("<hr>")
+	hrXHTMLTag         = []byte("<hr />")
+	ulTag              = []byte("<ul>")
+	ulCloseTag         = []byte("</ul>")
+	olTag              = []byte("<ol>")
+	olCloseTag         = []byte("</ol>")
+	dlTag              = []byte("<dl>")
+	dlCloseTag         = []byte("</dl>")
+	liTag              = []byte("<li>")
+	liCloseTag         = []byte("</li>")
+	ddTag              = []byte("<dd>")
+	ddCloseTag         = []byte("</dd>")
+	dtTag              = []byte("<dt>")
+	dtCloseTag         = []byte("</dt>")
+	tableTag           = []byte("<table>")
+	tableCloseTag      = []byte("</table>")
+	tdTag              = []byte("<td")
+	tdCloseTag         = []byte("</td>")
+	thTag              = []byte("<th")
+	thCloseTag         = []byte("</th>")
+	theadTag           = []byte("<thead>")
+	theadCloseTag      = []byte("</thead>")
+	tbodyTag           = []byte("<tbody>")
+	tbodyCloseTag      = []byte("</tbody>")
+	trTag              = []byte("<tr>")
+	trCloseTag         = []byte("</tr>")
+	h1Tag              = []byte("<h1")
+	h1CloseTag         = []byte("</h1>")
+	h2Tag              = []byte("<h2")
+	h2CloseTag         = []byte("</h2>")
+	h3Tag              = []byte("<h3")
+	h3CloseTag         = []byte("</h3>")
+	h4Tag              = []byte("<h4")
+	h4CloseTag         = []byte("</h4>")
+	h5Tag              = []byte("<h5")
+	h5CloseTag         = []byte("</h5>")
+	h6Tag              = []byte("<h6")
+	h6CloseTag         = []byte("</h6>")
+
+	footnotesDivBytes      = []byte("\n<div class=\"footnotes\">\n\n")
+	footnotesCloseDivBytes = []byte("\n</div>\n")
+)
+
+func headingTagsFromLevel(level int) ([]byte, []byte) {
+ if level <= 1 {
+ return h1Tag, h1CloseTag
+ }
+ switch level {
+ case 2:
+ return h2Tag, h2CloseTag
+ case 3:
+ return h3Tag, h3CloseTag
+ case 4:
+ return h4Tag, h4CloseTag
+ case 5:
+ return h5Tag, h5CloseTag
+ }
+ return h6Tag, h6CloseTag
+}
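
`HeadingLevelOffset` feeds into this clipping: the rendered level is `offset + node.Level`, pinned to the `h1`…`h6` range. A sketch that demotes every heading by one level:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	r := blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{
		HeadingLevelOffset: 1, // "# Top" renders as <h2> rather than <h1>
	})
	out := blackfriday.Run([]byte("# Top\n\n###### Deep\n"),
		blackfriday.WithRenderer(r))
	fmt.Print(string(out))
	// <h2>Top</h2> ... <h6>Deep</h6> (level 7 clips to h6)
}
```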
+
+func (r *HTMLRenderer) outHRTag(w io.Writer) {
+ if r.Flags&UseXHTML == 0 {
+ r.out(w, hrTag)
+ } else {
+ r.out(w, hrXHTMLTag)
+ }
+}
+
+// RenderNode is a default renderer of a single node of a syntax tree. For
+// block nodes it will be called twice: first time with entering=true, second
+// time with entering=false, so that it could know when it's working on an open
+// tag and when on close. It writes the result to w.
+//
+// The return value is a way to tell the calling walker to adjust its walk
+// pattern: e.g. it can terminate the traversal by returning Terminate. Or it
+// can ask the walker to skip a subtree of this node by returning SkipChildren.
+// The typical behavior is to return GoToNext, which asks for the usual
+// traversal to the next node.
+func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus {
+ attrs := []string{}
+ switch node.Type {
+ case Text:
+ if r.Flags&Smartypants != 0 {
+ var tmp bytes.Buffer
+ escapeHTML(&tmp, node.Literal)
+ r.sr.Process(w, tmp.Bytes())
+ } else {
+ if node.Parent.Type == Link {
+ escLink(w, node.Literal)
+ } else {
+ escapeHTML(w, node.Literal)
+ }
+ }
+ case Softbreak:
+ r.cr(w)
+ // TODO: make it configurable via out(renderer.softbreak)
+ case Hardbreak:
+ if r.Flags&UseXHTML == 0 {
+ r.out(w, brTag)
+ } else {
+ r.out(w, brXHTMLTag)
+ }
+ r.cr(w)
+ case Emph:
+ if entering {
+ r.out(w, emTag)
+ } else {
+ r.out(w, emCloseTag)
+ }
+ case Strong:
+ if entering {
+ r.out(w, strongTag)
+ } else {
+ r.out(w, strongCloseTag)
+ }
+ case Del:
+ if entering {
+ r.out(w, delTag)
+ } else {
+ r.out(w, delCloseTag)
+ }
+ case HTMLSpan:
+ if r.Flags&SkipHTML != 0 {
+ break
+ }
+ r.out(w, node.Literal)
+ case Link:
+ // mark it but don't link it if it is not a safe link: no smartypants
+ dest := node.LinkData.Destination
+ if needSkipLink(r.Flags, dest) {
+ if entering {
+ r.out(w, ttTag)
+ } else {
+ r.out(w, ttCloseTag)
+ }
+ } else {
+ if entering {
+ dest = r.addAbsPrefix(dest)
+ var hrefBuf bytes.Buffer
+ hrefBuf.WriteString("href=\"")
+ escLink(&hrefBuf, dest)
+ hrefBuf.WriteByte('"')
+ attrs = append(attrs, hrefBuf.String())
+ if node.NoteID != 0 {
+ r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node))
+ break
+ }
+ attrs = appendLinkAttrs(attrs, r.Flags, dest)
+ if len(node.LinkData.Title) > 0 {
+ var titleBuff bytes.Buffer
+ titleBuff.WriteString("title=\"")
+ escapeHTML(&titleBuff, node.LinkData.Title)
+ titleBuff.WriteByte('"')
+ attrs = append(attrs, titleBuff.String())
+ }
+ r.tag(w, aTag, attrs)
+ } else {
+ if node.NoteID != 0 {
+ break
+ }
+ r.out(w, aCloseTag)
+ }
+ }
+ case Image:
+ if r.Flags&SkipImages != 0 {
+ return SkipChildren
+ }
+ if entering {
+ dest := node.LinkData.Destination
+ dest = r.addAbsPrefix(dest)
+ if r.disableTags == 0 {
+ //if options.safe && potentiallyUnsafe(dest) {
+				//out(w, []byte(`<img src="" alt="`))
+				//} else {
+				r.out(w, []byte(`<img src="`))
+				escLink(w, dest)
+				r.out(w, []byte(`" alt="`))
+				//}
+			}
+			r.disableTags++
+		} else {
+			r.disableTags--
+			if r.disableTags == 0 {
+				if node.LinkData.Title != nil {
+					r.out(w, []byte(`" title="`))
+					escapeHTML(w, node.LinkData.Title)
+				}
+				r.out(w, []byte(`" />`))
+			}
+		}
+ case Code:
+ r.out(w, codeTag)
+ escapeHTML(w, node.Literal)
+ r.out(w, codeCloseTag)
+ case Document:
+ break
+ case Paragraph:
+ if skipParagraphTags(node) {
+ break
+ }
+ if entering {
+			// TODO: untangle the rules for when newlines need to be added and when not.
+ if node.Prev != nil {
+ switch node.Prev.Type {
+ case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule:
+ r.cr(w)
+ }
+ }
+ if node.Parent.Type == BlockQuote && node.Prev == nil {
+ r.cr(w)
+ }
+ r.out(w, pTag)
+ } else {
+ r.out(w, pCloseTag)
+ if !(node.Parent.Type == Item && node.Next == nil) {
+ r.cr(w)
+ }
+ }
+ case BlockQuote:
+ if entering {
+ r.cr(w)
+ r.out(w, blockquoteTag)
+ } else {
+ r.out(w, blockquoteCloseTag)
+ r.cr(w)
+ }
+ case HTMLBlock:
+ if r.Flags&SkipHTML != 0 {
+ break
+ }
+ r.cr(w)
+ r.out(w, node.Literal)
+ r.cr(w)
+ case Heading:
+ headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level
+ openTag, closeTag := headingTagsFromLevel(headingLevel)
+ if entering {
+ if node.IsTitleblock {
+ attrs = append(attrs, `class="title"`)
+ }
+ if node.HeadingID != "" {
+ id := r.ensureUniqueHeadingID(node.HeadingID)
+ if r.HeadingIDPrefix != "" {
+ id = r.HeadingIDPrefix + id
+ }
+ if r.HeadingIDSuffix != "" {
+ id = id + r.HeadingIDSuffix
+ }
+ attrs = append(attrs, fmt.Sprintf(`id="%s"`, id))
+ }
+ r.cr(w)
+ r.tag(w, openTag, attrs)
+ } else {
+ r.out(w, closeTag)
+ if !(node.Parent.Type == Item && node.Next == nil) {
+ r.cr(w)
+ }
+ }
+ case HorizontalRule:
+ r.cr(w)
+ r.outHRTag(w)
+ r.cr(w)
+ case List:
+ openTag := ulTag
+ closeTag := ulCloseTag
+ if node.ListFlags&ListTypeOrdered != 0 {
+ openTag = olTag
+ closeTag = olCloseTag
+ }
+ if node.ListFlags&ListTypeDefinition != 0 {
+ openTag = dlTag
+ closeTag = dlCloseTag
+ }
+ if entering {
+ if node.IsFootnotesList {
+ r.out(w, footnotesDivBytes)
+ r.outHRTag(w)
+ r.cr(w)
+ }
+ r.cr(w)
+ if node.Parent.Type == Item && node.Parent.Parent.Tight {
+ r.cr(w)
+ }
+ r.tag(w, openTag[:len(openTag)-1], attrs)
+ r.cr(w)
+ } else {
+ r.out(w, closeTag)
+ //cr(w)
+ //if node.parent.Type != Item {
+ // cr(w)
+ //}
+ if node.Parent.Type == Item && node.Next != nil {
+ r.cr(w)
+ }
+ if node.Parent.Type == Document || node.Parent.Type == BlockQuote {
+ r.cr(w)
+ }
+ if node.IsFootnotesList {
+ r.out(w, footnotesCloseDivBytes)
+ }
+ }
+ case Item:
+ openTag := liTag
+ closeTag := liCloseTag
+ if node.ListFlags&ListTypeDefinition != 0 {
+ openTag = ddTag
+ closeTag = ddCloseTag
+ }
+ if node.ListFlags&ListTypeTerm != 0 {
+ openTag = dtTag
+ closeTag = dtCloseTag
+ }
+ if entering {
+ if itemOpenCR(node) {
+ r.cr(w)
+ }
+ if node.ListData.RefLink != nil {
+ slug := slugify(node.ListData.RefLink)
+ r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug))
+ break
+ }
+ r.out(w, openTag)
+ } else {
+ if node.ListData.RefLink != nil {
+ slug := slugify(node.ListData.RefLink)
+ if r.Flags&FootnoteReturnLinks != 0 {
+ r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug))
+ }
+ }
+ r.out(w, closeTag)
+ r.cr(w)
+ }
+ case CodeBlock:
+ attrs = appendLanguageAttr(attrs, node.Info)
+ r.cr(w)
+ r.out(w, preTag)
+ r.tag(w, codeTag[:len(codeTag)-1], attrs)
+ escapeHTML(w, node.Literal)
+ r.out(w, codeCloseTag)
+ r.out(w, preCloseTag)
+ if node.Parent.Type != Item {
+ r.cr(w)
+ }
+ case Table:
+ if entering {
+ r.cr(w)
+ r.out(w, tableTag)
+ } else {
+ r.out(w, tableCloseTag)
+ r.cr(w)
+ }
+ case TableCell:
+ openTag := tdTag
+ closeTag := tdCloseTag
+ if node.IsHeader {
+ openTag = thTag
+ closeTag = thCloseTag
+ }
+ if entering {
+ align := cellAlignment(node.Align)
+ if align != "" {
+ attrs = append(attrs, fmt.Sprintf(`align="%s"`, align))
+ }
+ if node.Prev == nil {
+ r.cr(w)
+ }
+ r.tag(w, openTag, attrs)
+ } else {
+ r.out(w, closeTag)
+ r.cr(w)
+ }
+ case TableHead:
+ if entering {
+ r.cr(w)
+ r.out(w, theadTag)
+ } else {
+ r.out(w, theadCloseTag)
+ r.cr(w)
+ }
+ case TableBody:
+ if entering {
+ r.cr(w)
+ r.out(w, tbodyTag)
+ // XXX: this is to adhere to a rather silly test. Should fix test.
+ if node.FirstChild == nil {
+ r.cr(w)
+ }
+ } else {
+ r.out(w, tbodyCloseTag)
+ r.cr(w)
+ }
+ case TableRow:
+ if entering {
+ r.cr(w)
+ r.out(w, trTag)
+ } else {
+ r.out(w, trCloseTag)
+ r.cr(w)
+ }
+ default:
+ panic("Unknown node type " + node.Type.String())
+ }
+ return GoToNext
+}
+
+// RenderHeader writes HTML document preamble and TOC if requested.
+func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) {
+ r.writeDocumentHeader(w)
+ if r.Flags&TOC != 0 {
+ r.writeTOC(w, ast)
+ }
+}
+
+// RenderFooter writes HTML document footer.
+func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) {
+ if r.Flags&CompletePage == 0 {
+ return
+ }
+	io.WriteString(w, "\n</body>\n</html>\n")