Please use the following instructions to update the virtual machine hostDevices section, and save the changes to the virtual machine YAML file.
isEncryption: Encryption
encryptionSecret: Encryption Secret
virtualSize: Virtual Size
urlTip: 'Supports the raw and qcow2 image formats which are supported by qemu. Bootable ISO images can also be used and are treated like raw images.'
fileName: File Name
uploadFile: Upload File
source: Source Type
sourceType:
download: URL
upload: File
clone: Clone
encrypt: Encrypt
decrypt: Decrypt
sourceImage: Source Image
cryptoOperation: Crypto Operation
warning:
uploading: |-
{count, plural,
@@ -717,10 +1125,17 @@ harvester:
tips:
notExistImage:
title: Image {name} does not exist!
message: Please select a new image.
notExistNode:
title: Node {name} does not exist!
message: Please select a new node.
upgradePage:
upgradeApp: Upgrade Software
@@ -737,8 +1152,13 @@ harvester:
selectExisting: Select Existing Image
createRepository: Creating Upgrade Repository
succeeded: Succeeded
releaseTip: Please read the upgrade documentation carefully. You can view details in the Harvester Release Notes.
checkReady: I have read and understood the upgrade instructions related to this Harvester version.
pending: Pending
repoInfo:
upgradeStatus: Upgrade Status
@@ -748,34 +1168,91 @@ harvester:
harvesterChart: Harvester Chart
success: Success
fail: Fail
ongoing: Ongoing
downloadLog: Download Log
logStatus: Log Download Status
dismissMessage: Dismiss it
upgradeInfo:
warning: WARNING
doc: Read the documentation before starting the upgrade process. Ensure that you complete procedures that are relevant to your environment and the version you are upgrading to.
tip: Unmet system requirements and incorrectly performed procedures may cause complete upgrade failure and other issues that require manual workarounds.
moreNotes: For more details about the release notes, please visit -

schedule:
label: Virtual Machine Schedules
createTitle: Create Schedule
createButtonText: Create Schedule
scheduleType: Virtual Machine Schedule Type
cron: Cron Schedule
detail:
namespace: Namespace
sourceVM: Source Virtual Machine
tabs:
basic: Basic
backups: Backups
snapshots: Snapshots
message:
noSetting:
suffix: before creating a backup schedule
retain:
label: Retain
count: Count
tooltip: Number of up-to-date virtual machine backups to retain. The maximum is 250 and the minimum is 2.
maxFailure:
label: Max Failure
count: Count
tooltip: Maximum number of consecutive failed backups that can be tolerated. If this threshold is reached, the Harvester controller suspends the schedule job. This value should be less than the retain count.
virtualMachine:
title: Virtual Machine Name
placeholder: Select a virtual machine
type:
snapshot: Snapshot
backup: Backup

backup:
label: Virtual Machine Backups
createText: Restore Backup
title: Restore Virtual Machine
backupTargetTip: The endpoint used to access the backupstore. NFS and S3 are supported.
message:
noSetting:
prefix: You must configure the backup target
middle: 'setting'
suffix: before creating a new backup.
errorTip:
prefix: Backup target value in
middle: setting
suffix: "is invalid, error: "
viewSetting:
prefix: Click
middle: here
suffix: to view the backup configuration.
testConnect:
actionLabel: Test connection
waitingLabel: Testing connection...
@@ -789,13 +1266,20 @@ harvester:
virtualMachineName: Virtual Machine Name
keepMacAddress: Keep MAC Address
matchTarget: The current backup target does not match the existing one.
progress:
details: Volume details
tooltip:
starting: Backup initiating
progress: Backup in progress
complete: Backup completed
restore:
progress:
details: Volume details
@@ -805,12 +1289,17 @@ harvester:
complete: Restore completed
network:
label: Virtual Machine Networks
tabs:
basics: Basics
layer3Network: Route
clusterNetwork:
label: Cluster Network
create: Create a new cluster network
toolTip: Define your custom cluster scope network name
createPlaceholder: Input a new cluster network name
selectOrCreatePlaceholder: Select or create a new cluster network
selectPlaceholder: Select a cluster network
layer3Network:
mode:
label: Mode
auto: Auto (DHCP)
manual: Manual
serverIPAddr:
label: DHCP Server IP
@@ -843,11 +1343,20 @@ harvester:
validation:
physicalNIC: DefaultPhysicalNIC
placeholder:
accessKeyId: Specify your access key ID
secretAccessKey: Specify your secret access key
cert: Upload a self-signed SSL certificate
vlanChangeTip: The newly modified default network interface only applies to newly added nodes, not existing ones.
defaultPhysicalNIC: Default Network Interface
modifiedMessage: Settings that have been customized from default settings are tagged with 'Modified'.
percentTip: The value in parentheses represents the distribution percentage of the network interface across all hosts. If an interface with less than 100% coverage is selected, you must manually specify the network interface on any host where the VLAN network configuration fails.
message:
ca:
@@ -870,10 +1379,18 @@ harvester:
placeholder: e.g. 172.16.0.1/32
invalid: '"Exclude list" is invalid.'
addIp: Add Exclude IP
warning: 'WARNING: Number of IPs Required = Number of Nodes * 4 + Number of Disks * 2 + Number of Images to Download/Upload. For more information about storage network settings, see the documentation.'
vmForceDeletionPolicy:
period: Period
ratio: Ratio
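# Illustrative arithmetic for the IP-requirement formula in the warning above (the numbers are
# hypothetical and not part of the shipped locale strings): a cluster with 3 nodes, 6 disks, and
# 2 concurrent image downloads/uploads would need 3*4 + 6*2 + 2 = 26 IPs in the storage network range.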
autoRotateRKE2Certs:
expiringInHours: Expiring in
httpProxy:
@@ -896,7 +1413,11 @@ harvester:
addRewrite: Add Rewrite
addMirror: Add Mirror
configs:
configs: Configurations
registryEDQNorIP: Registry FQDN or IP
registryPlaceholder: myregistry.local:5000
username: Username
@@ -904,11 +1425,19 @@ harvester:
auth: Auth
identityToken: Identity Token
insecureSkipVerify: InsecureSkipVerify
addConfig: Add Configuration

upgrade:
selectExitImage: Please select the OS image to upgrade.
imageUrl: Please input a valid image URL.
chooseFile: Please select an image to upload.
checksum: Checksum
harvesterMonitoring:
@@ -924,16 +1453,28 @@ harvester:
retention: How long to retain metrics
retentionSize: Maximum size of metrics
clusterRegistrationUrl:
message: To completely unset the imported Harvester cluster, please also remove it in the Rancher Dashboard UI via the Virtualization Management page.
ntpServers:
isNotIPV4: The address you entered is not an IPv4 address or a host address. Please enter a valid IPv4 address or host address.
isDuplicate: There are duplicate NTP server configurations.
cloudTemplate:
label: Cloud Configuration Templates
templateType: Template Type
userData: User Data
networkData: Network Data

support:
title: Harvester Support
kubeconfig:
@@ -942,7 +1483,11 @@ harvester:
internal:
rancher:
title: Access Embedded Rancher UI
titleDescription: You can only use the embedded Rancher UI for debugging and validation purposes. For more information about how Harvester integrates with Rancher, see the documentation.
longhorn:
title: Access Embedded Longhorn UI
titleDescription: You can only use the embedded Longhorn UI for debugging and validation purposes.
@@ -962,7 +1507,11 @@ harvester:
cidr:
label: CIDR/IP Range
invalid: '"CIDR/IP Range" is invalid.'
toolTip: "We can apply multiple pools or ranges by separating them with commas. For example: 192.168.0.200/30,192.168.0.200/29 or 192.168.0.10-192.168.0.11"
add:
label: Add IP Pools
@@ -971,11 +1520,16 @@ harvester:
label: Protocols
ciphers:
label: Ciphers
monitoring:
configuration:
label: Configuration
alertmanagerConfig:
label: Alertmanager Configurations
diabledMonitoringTips:
prefix: 'Enable the'
middle: 'monitoring'
suffix: 'add-on first.'
diabledAlertingTips:
prefix: 'Enable'
middle: 'Alertmanager'
suffix: 'for configuration to take effect.'
disabledAddon:
prefix: 'The monitoring add-on is disabled, click'
middle: 'here'
suffix: 'to enable it.'
@@ -996,6 +1563,7 @@ harvester:
fluentbit: Fluent Bit
fluentd: Fluentd
clusterFlow:
label: Cluster Flows
clusterOutput:
label: Cluster Outputs
flow:
label: Flows
output:
label: Outputs
diabledTips:
prefix: 'Enable'
middle: 'logging'
suffix: 'for configuration to take effect.'

snapshot:
totalSnapshotSize: Total Snapshot Size
label: Volume Snapshots
targetVolume: Original Volume
size: Size
image: Image
vmSnapshot:
label: Virtual Machine Snapshots
createText: Restore Snapshot
snapshot: Snapshot
storage:
label: Storage
useDefault: Use the default storage
volumeEncryption: Volume Encryption
secret: Secret
migratable:
label: Migratable
numberOfReplicas:
@@ -1034,10 +1627,25 @@ harvester:
label: Disk Selector
storageClass:
label: Storage Class
longhorn:
v1:
label: Longhorn V1 (CSI)
v2:
label: Longhorn V2 (CSI)
versionTooltip: Longhorn V2 is disabled for this node.
lvm:
label: LVM
title: Storage Classes
customize:
volumeBindingMode:
later: Bind and provision a persistent volume once a virtual machine using the PersistentVolumeClaim is created
parameters:
numberOfReplicas:
label: Number Of Replicas
@@ -1051,6 +1659,7 @@ harvester:
no-options: No available tags, please add them on the `Host > Storage` page
migratable:
label: Migratable
lvmVolumeGroupType:
label: Volume Group Type
lvmVolumeGroup:
label: Volume Group Name
no-options: No available volume groups, please add them on the `Host > Storage` page
node:
label: Node
allowedTopologies:
title: Allowed Topologies
tooltip: Allowed Topologies helps schedule virtual machines on hosts that match all of the expressions below.

vlanConfig:
title: Network Configuration
createNetworkConfig: Create Network Configuration
action:
migrate: Migrate
titles:
@@ -1079,14 +1704,22 @@ harvester:
validate:
available: NIC "{nic}" is not available on the selected nodes
linkAttributes:
mtu:
label: MTU
bondOptions:
mode:
label: Mode
miimon:
label: Miimon
tooltip: Miimon specifies the MII link monitoring frequency in milliseconds. -1 means to keep the original value.
nodeSelector:
matchingNodes:
matchesSome: |-
@@ -1098,6 +1731,7 @@ harvester:
vlanStatus:
vlanConfig:
label: Network Configuration

clusterNetwork:
title: Cluster Network Configuration
create:
button:
label: Create a Cluster Network
clusterNetwork: There are no network configurations defined.
mgmt: mgmt is a built-in cluster management network and does not support any additional network configurations.
notExist: 'Cluster Network "{ clusterNetwork }" does not exist'
notReady: 'Cluster Network "{ clusterNetwork }" is not ready'

addons:
descriptions:
'harvester-system/vm-import-controller': vm-import-controller is an add-on to help migrate virtual machine workloads from other source clusters to an existing Harvester cluster.
'harvester-system/pcidevices-controller': pcidevices-controller is an add-on to help discover PCI devices for nodes in your cluster and allow users to prepare devices for PCI Passthrough, for use with Harvester virtual machines and guest clusters.
'cattle-logging-system/rancher-logging': rancher-logging is an add-on to collect versatile logs, events, and audits from the Harvester cluster and route them to many kinds of servers based on flows.
'harvester-system/rancher-vcluster': rancher-vcluster deploys a virtual cluster (vcluster) with Rancher installed.
'cattle-monitoring-system/rancher-monitoring': rancher-monitoring is an add-on that collects Harvester cluster and virtual machine metrics and allows you to view the metrics on an embedded dashboard and send alert(s) to remote servers.
'vm-import-controller': vm-import-controller is an add-on to help migrate virtual machine workloads from other source clusters to an existing Harvester cluster.
'pcidevices-controller': pcidevices-controller is an add-on to help discover PCI devices for nodes in your cluster and allow users to prepare devices for PCI Passthrough, for use with Harvester virtual machines and guest clusters.
'nvidia-driver-toolkit': 'nvidia-driver-toolkit is an add-on to enable vGPU devices and assign them to Harvester virtual machines.'
'rancher-logging': rancher-logging is an add-on to collect versatile logs, events, and audits from the Harvester cluster and route them to many kinds of servers based on flows.
'rancher-monitoring': rancher-monitoring is an add-on to collect Harvester cluster and virtual machine metrics, view them on the embedded dashboard, and send alert(s) to remote servers.
'rancher-vcluster': rancher-vcluster deploys a virtual cluster (vcluster) with Rancher installed.
'harvester-seeder': harvester-seeder is an add-on that uses IPMI and Redfish to discover hardware information and perform out-of-band operations.
'harvester-system/harvester-seeder': harvester-seeder is an add-on that uses IPMI and Redfish to discover hardware information and perform out-of-band operations.
'harvester-csi-driver-lvm': harvester-csi-driver-lvm is an add-on that allows users to create PVCs through LVM with local devices.
vmImport:
titles:
basic: Basic
pvc: Volume
rancherVcluster:
accessRancher: Access the Rancher Dashboard
hostname: Hostname
rancherVersion: Rancher Version
password: Bootstrap Password
@@ -1147,6 +1815,12 @@ harvester:
location: Driver Location
parsingSpecError:
The field 'spec.valuesContent' has an invalid format.
usbController:
titles:
basic: Basic
loadBalancer:
label: Load Balancers
@@ -1191,12 +1865,20 @@ harvester:
label: Backend Servers
healthCheck:
warning:
portInUse: Warning, the Backend Port {port} is in use in Health Check settings. If you need to update the port, update the Health Check settings accordingly.
ipPool:
label: IP Pools
network:
label: Virtual Machine Network
tabs:
range: Range
scope: Scope
@@ -1226,13 +1908,21 @@ harvester:
addLabel: Add CIDR
range:
addLabel: Add Range
service:
healthCheckPort:
label: Health Check Port
healthCheckSuccessThreshold:
label: Health Check Success Threshold
description: If the number of consecutive successful probes of an address reaches the success threshold, the backend server can start to forward traffic.
healthCheckFailureThreshold:
label: Health Check Failure Threshold
description: The backend server will stop forwarding traffic if the number of health check failures reaches the failure threshold.
@@ -1257,22 +1947,38 @@ harvester:
sriovgpu:
label: SR-IOV GPU Devices
nodeName: Node
numVFs: Number of Virtual Functions
vfAddresses: Virtual Function Addresses
vGpuDevices: vGPU Devices
showMore: Show More
parentSriov: Filter By Parent SR-IOV GPU
noPermission: Please contact your system administrator to add Harvester add-ons first.
goSetting:
prefix: The nvidia-driver-toolkit add-on is not enabled, click
middle: here
suffix: to enable it to manage your SR-IOV GPU devices.
vgpu:
label: vGPU Devices
noPermission: Please contact your system administrator to add Harvester add-ons first.
goSetting:
prefix: The nvidia-driver-toolkit add-on is not enabled, click
middle: here
suffix: to enable it to manage your vGPU devices.
enableGroup: Enable Group
@@ -1283,22 +1989,59 @@ harvester:
available: Available Devices
compatibleNodes: Compatible Nodes
impossibleSelection: 'There are no hosts with all of the selected devices.'
howToUseDevice: 'Use the table below to enable vGPU devices you want to use in this virtual machine.'
deviceInTheSameHost: 'You can only select devices on the same host.'

usb:
label: USB Devices
noPermission: Please contact your system administrator to add Harvester add-ons first.
goSetting:
prefix: The pcidevices-controller add-on is not enabled, click
middle: here
suffix: to enable it to manage your USB devices.
enableGroup: Enable Group
disableGroup: Disable Group
available: Available USB Devices
compatibleNodes: Compatible Nodes
impossibleSelection: 'There are no hosts with all of the selected devices.'
howToUseDevice: 'Use the table below to enable USB passthrough on each device you want to use in this virtual machine.'
deviceInTheSameHost: 'You can only select devices on the same host.'
showCompatibility: Show device compatibility matrix
hideCompatibility: Hide device compatibility matrix
claimError: Error enabling passthrough on {name}
unclaimError: Error disabling passthrough on {name}
cantUnclaim: You cannot disable passthrough on a device claimed by another user.
enablePassthroughWarning: 'Please re-enable the USB device if the device path changes in the following situations:
upgrade-checker-enabled is equal to True.

harvesterVlanConfigMigrateDialog:
targetClusterNetwork:
'harv-ui-source': Configure how to load the UI source.
'harv-ui-index': 'HTML index location for the UI.'
'harv-ui-plugin-index': 'JS index location for the Harvester plugin UI.'
'harv-cluster-registration-url': Registration URL for multi-cluster management.
'harv-http-proxy': 'HTTP proxy for Harvester to access external services.'
'harv-additional-ca': 'Custom CA root certificates for TLS validation.'
'harv-overcommit-config': 'Resource overcommit configuration.'
'harv-support-bundle-timeout': 'Support bundle timeout configuration in minutes, use 0 to disable the timeout.'
'harv-support-bundle-expiration': 'Support bundle expiration configuration in minutes.'
'harv-support-bundle-node-collection-timeout': 'Support bundle node collection timeout configuration in minutes.'
'harv-vm-force-reset-policy': Configuration for the force-reset action when a virtual machine is stuck on a node that is down.
'harv-ssl-parameters': Custom SSL Parameters for TLS validation.
'harv-storage-network': 'Longhorn storage-network setting.'
'harv-support-bundle-namespaces': Specify resources in other namespaces to be collected by the support bundle.
'harv-auto-disk-provision-paths': Specify the disks (using a glob pattern) that Harvester will automatically add as virtual machine storage.
'harv-support-bundle-image': Support bundle image configuration. Find different versions in rancher/support-bundle-kit.
'harv-release-download-url': This setting allows you to configure the upgrade release download URL address. Harvester will get the ISO URL and checksum value from the ($URL/$VERSION/version.yaml) file hosted by the configured URL.
'harv-default-vm-termination-grace-period-seconds': Configure the virtual machine termination grace period for virtual machine stop.
'harv-ntp-servers': Configure NTP servers. You can configure multiple IPv4 addresses or host addresses.
'harv-auto-rotate-rke2-certs': The certificate rotation mechanism relies on Rancher. Harvester will automatically update the certificate generation to trigger rotation.
'harv-kubeconfig-default-token-ttl-minutes': 'TTL (in minutes) applied on Harvester administration kubeconfig files. Default is 0, which means to never expire.'
'harv-longhorn-v2-data-engine-enabled': 'Enable the Longhorn V2 data engine. Default is false.'
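# Illustrative example for the release-download-url description above (hypothetical host, not a real default):
# with the URL set to https://releases.example.com and an upgrade to version v1.3.0, Harvester would read
# https://releases.example.com/v1.3.0/version.yaml to obtain the ISO URL and checksum.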
{username}; all the global permissions, project, and cluster role bindings of this {vendor} user will also apply to the {provider} user.'
github:
clientId:
3: cattle-resource-system namespace that has an encryption-provider-config.yaml key.
{namespace}, already exists and cannot be added to a different project."
project: Install Into Project
section:
chartOptions: Edit Options
valuesYaml: Edit YAML
@@ -931,8 +1026,13 @@ catalog:
} the {existing, select,
true { app}
false { chart}
}. Start by setting some basic information used by {vendor} to manage the application.
nsCreationDescription: "To install the application into a new namespace, enter the name in the Namespace field and select it."
createNamespace: "Namespace {namespace}
will be created."
clusterTplVersion:
label: Version
@@ -940,6 +1040,7 @@ catalog:
description: Select a version of the Cluster Template
clusterTplValues:
label: Values
subtext: Change how the cluster is defined
description: Configure Values used by Helm that help define the cluster.
helmValues:
label: Values
subtext: Change how the application works
description: Configure values used by Helm that help define the application.
chartInfo:
button: View Chart Information
label: Chart Information
helmCli:
checkbox: Customize Helm options before installation
label: Helm Options
subtext: Change how the application is deployed
description: Supply additional deployment options
version: Version
versions:
@@ -971,7 +1087,11 @@ catalog:
gitBranch:
label: Git Branch
placeholder: e.g. master
defaultMessage: 'The branch will default to "master" if left blank'
gitRepo:
label: Git Repo URL
placeholder: 'e.g. https://github.com/your-company/charts.git'
@@ -1103,7 +1223,11 @@ cluster:
rke2-multus: Multus Configuration
agentEnvVars:
label: Agent Environment
detail: Add additional environment variables to the agent container. This is most commonly useful for configuring an HTTP proxy.
keyLabel: Variable Name
cloudProvider:
aws:
@@ -1116,7 +1240,11 @@ cluster:
label: Google
rancher-vsphere:
label: vSphere
note: 'Important: Configure the vSphere Cloud Provider and Storage Provider options in the Add-on Configuration tab.'
harvester:
label: Harvester
copyConfig: Copy KubeConfig to Clipboard
@@ -1124,10 +1252,17 @@ cluster:
custom:
nodeRole:
label: Node Role
detail: Choose what roles the node will have in the cluster. The cluster needs to have at least one node with each role.
advanced:
label: Advanced
detail: Additional control over how the node will be registered. These values will often need to be different for each node registered.
nodeName: Node Name
publicIp: Node Public IP
privateIp: Node Private IP
@@ -1140,14 +1275,23 @@ cluster:
windowsDetail: Run this command in PowerShell on each of the existing Windows machines you want to register. Windows nodes can only be workers.
windowsNotReady: The cluster must be up and running with Linux etcd, control plane, and worker nodes before the registration command for adding Windows workers will display.
windowsWarning: Workload pods, including some deployed by Rancher charts, will be scheduled on both Linux and Windows nodes by default. Edit NodeSelector in the chart to direct them to be placed onto a compatible node.
windowsDeprecatedForRKE1: Windows support is being deprecated for RKE1, and RKE1 itself will soon be deprecated. Please migrate to RKE2.
insecure: "Insecure: Select this to skip TLS verification if your server has a self-signed certificate."
credential:
banner:
createCredential: |-
{length, plural,
=0 {First, you will need to create a credential to talk to the cloud provider}
other {OK, let's create a new credential}
}
selectExisting:
label: Select Existing
@@ -1160,7 +1304,11 @@ cluster:
label: Access Key
placeholder: Your AWS Access Key
defaultRegion:
help: The default region to use when creating clusters. Also contacted to verify that this credential works.
label: Default Region
secretKey:
label: Secret Key
@@ -1265,7 +1413,11 @@ cluster:
volume: Volume
imageVolume: Image Volume
addVolume: Add Volume
addVMImage: Add Virtual Machine Image
storageClass: Storage Class
sshUser: SSH User
userData:
@@ -1281,9 +1433,15 @@ cluster:
installGuestAgent: Install guest agent
description:
label: Cluster Description
placeholder: Any text to describe this cluster
harvester:
importNotice: Import Harvester Clusters via
warning:
label: This is a Harvester Cluster - enable the Harvester feature flag to manage it
state: Warning
@@ -1316,11 +1474,19 @@ cluster:
sshUser:
placeholder: e.g. ubuntu
toolTip: SSH user to login with the selected OS image.
haveOneOwner: There must be at least one member with the owner role.
import:
warningBanner: 'You should not import a cluster which has already been connected to another instance of Rancher as it will lead to data corruption.'
commandInstructions: 'Run the kubectl command below on an existing Kubernetes cluster running a supported Kubernetes version to import it into {vendor}:'
commandInstructionsInsecure: 'If you get a "certificate signed by unknown authority" error, your {vendor} installation has a self-signed or untrusted SSL certificate. Run the command below instead to bypass the certificate verification:'
clusterRoleBindingInstructions: 'If you get permission errors creating some of the resources, your user may not have the cluster-admin role. Use this command to apply it:'
clusterRoleBindingCommand: 'kubectl create clusterrolebinding cluster-admin-binding --clusterrole cluster-admin --user /etc/alertmanager/secrets/label: Additional Secrets +<<<<<<< HEAD existing: Choose an existing config secret info: | Create default config: A Secret containing your Alertmanager Config will be created in the
cattle-monitoring-systemnamespace on deploying this chart under the name
alertmanager-rancher-monitoring-alertmanager. By default, this Secret will never be modified on an uninstall or upgrade of this chart.
cattle-monitoring-systemnamespace on deploying this chart under the name
alertmanager-rancher-monitoring-alertmanager. By default, this secret will never be modified on an uninstall or upgrade of this chart.
cattle-monitoring-systemnamespace. If the namespace does not exist, you will not be able to select an existing secret. + label: Alertmanager Secret + new: Create default configuration + radio: + label: Configuration Secret +>>>>>>> b5455bcb (fix: separate used/allocated units) validation: duplicatedReceiverName: A receiver with the name {name} already exists. templates: @@ -3170,7 +3556,11 @@ monitoring: adminApi: Admin API evaluation: Evaluation Interval ignoreNamespaceSelectors: +<<<<<<< HEAD help: 'Ignoring Namespace Selectors allows Cluster Admins to limit teams from monitoring resources outside of namespaces they have permissions to but can break the functionality of Apps that rely on setting up Monitors that scrape targets across multiple namespaces, such as Istio.' +======= + help: 'Ignoring Namespace Selectors allows cluster admins to limit teams from monitoring resources outside of namespaces they have permissions to but can break the functionality of applications that rely on setting up monitors that scrape targets across multiple namespaces, such as Istio.' +>>>>>>> b5455bcb (fix: separate used/allocated units) label: Namespace Selectors radio: enforced: 'Use: Monitors can access resources based on namespaces that match the namespace selector field' @@ -3190,13 +3580,21 @@ monitoring: label: Persistent Storage for Prometheus mode: Access Mode selector: Selector +<<<<<<< HEAD selectorWarning: 'If you are using a dynamic provisioner (e.g. Longhorn), no Selectors should be specified since a PVC with a non-empty selector can''t have a PV dynamically provisioned for it.' +======= + selectorWarning: 'If you are using a dynamic provisioner (e.g. Longhorn), no selectors should be specified since a PVC with a non-empty selector cannot have a PV dynamically provisioned for it.' +>>>>>>> b5455bcb (fix: separate used/allocated units) size: Size volumeName: Volume Name title: Configure Prometheus warningInstalled: | Warning: Prometheus Operators are currently deployed. Deploying multiple Prometheus Operators onto one cluster is not currently supported. Please remove all other Prometheus Operator deployments from this cluster before trying to install this chart. +<<<<<<< HEAD If you are migrating from an older version of {vendor} with Monitoring enabled, please disable Monitoring on this cluster completely before attempting to install this chart. +======= + If you are migrating from an older version of {vendor} with monitoring enabled, please disable monitoring on this cluster completely before attempting to install this chart. +>>>>>>> b5455bcb (fix: separate used/allocated units) receiver: addReceiver: Add Receiver fields: @@ -3212,6 +3610,7 @@ monitoring: keyFilePath: label: Key File Path placeholder: e.g. ./key-file.pfx +<<<<<<< HEAD secretsBanner: The file paths below must be referenced in
alertmanager.alertmanagerSpec.secretswhen deploying the Monitoring chart. For more information see our documentation. projectMonitoring: detail: @@ -3222,6 +3621,18 @@ monitoring: message: Project Monitoring has not been configured for any projects canCreate: Get started by clicking Create to add monitoring to a project cannotCreate: Contact the admin to add project monitoring +======= + secretsBanner: The file paths below must be referenced in
alertmanager.alertmanagerSpec.secretswhen deploying the monitoring chart. For more information see our documentation. + projectMonitoring: + detail: + error: "Unable to fetch dashboard values with status: " + list: + banner: Project monitoring configuration is stored in ProjectHelmChart resources + empty: + message: Project monitoring has not been configured for any projects + canCreate: Get started by clicking create to add monitoring to a project + cannotCreate: Contact the administrator to add project monitoring +>>>>>>> b5455bcb (fix: separate used/allocated units) route: label: Route fields: @@ -3236,9 +3647,15 @@ monitoring: alertmanagerConfig: description: Routes and receivers for project alerting and cluster alerting are configured within AlertmanagerConfig resources. empty: Alerts have not been configured for any accessible namespaces. +<<<<<<< HEAD getStarted: Get started by clicking Create to configure an alert. receiverTooltip: This route will direct alerts to the selected receiver, which must be defined in the same AlertmanagerConfig. deprecationWarning: The Route and Receiver resources are deprecated. Going forward, routes and receivers should not be managed as separate Kubernetes resources on this page. They should be configured as YAML fields in an AlertmanagerConfig resource. +======= + getStarted: Get started by clicking create to configure an alert. + receiverTooltip: This route will direct alerts to the selected receiver, which must be defined in the same AlertmanagerConfig. + deprecationWarning: The route and receiver resources are deprecated. Going forward, routes and receivers should not be managed as separate Kubernetes resources on this page. They should be configured as YAML fields in an AlertmanagerConfig resource. +>>>>>>> b5455bcb (fix: separate used/allocated units) routeInfo: This form supports configuring one route that directs traffic to a receiver. Alerts can be directed to more receiver(s) by configuring child routes in YAML. receiverFormNames: create: Create Receiver in AlertmanagerConfig @@ -3273,19 +3690,28 @@ monitoring: warning2: Learn more about the migration steps to V2 Monitoring. promptDescription:
rancher-alerting-driversdefault values were changed, please update the url below in the format http://<new_service_name>.<new_namespace>.svc.<port>/<path> banner: To use MS Teams or SMS you will need to have at least one instance of
rancher-alerting-driversinstalled first. +======= + title: Custom Configuration + info: The YAML provided here will be directly appended to your receiver within the Alertmanager configuration secret. + email: + label: Email + title: Email Configuration + opsgenie: + label: Opsgenie + title: Opsgenie Configuration + pagerduty: + label: PagerDuty + title: PagerDuty Configuration + info: "You can find additional info on creating an Integration Key for PagerDuty here." + slack: + label: Slack + title: Slack Configuration + info: "You can find additional info on creating Incoming Webhooks for Slack here ." + webhook: + label: Webhook + title: Webhook Configuration + urlTooltip: For some webhooks this a URL that point to the service DNS + modifyNamespace: If
rancher-alerting-driversdefault values were changed, please update the URL below in the format http://<new_service_name>.<new_namespace>.svc.<port>/<path> + banner: To use MS Teams or SMS, you will need to have at least one instance of
rancher-alerting-driversinstalled first. +>>>>>>> b5455bcb (fix: separate used/allocated units) add: selectWebhookType: Select Webhook Type generic: Generic @@ -3342,7 +3792,11 @@ monitoringReceiver: label: Enable send resolved alerts alertmanagerConfigReceiver: +<<<<<<< HEAD secretKeyId: Key Id from Secret +======= + secretKeyId: Key ID from Secret +>>>>>>> b5455bcb (fix: separate used/allocated units) name: Receiver Name addButton: Add Receiver receivers: Receivers @@ -3356,7 +3810,11 @@ monitoringRoute: label: Group By addGroupByLabel: Labels to Group Alerts By groupByTooltip: Add each label as a string in the format key:value. The special label ... will aggregate by all possible labels. If provided, the ... must be the only element in the list. +<<<<<<< HEAD info: This is the top-level Route used by Alertmanager as the default destination for any Alerts that do not match any other Routes. This Route must exist and cannot be deleted. +======= + info: This is the top-level route used by Alertmanager as the default destination for any alerts that do not match any other routes. This route must exist and cannot be deleted. +>>>>>>> b5455bcb (fix: separate used/allocated units) interval: label: Group Interval matching: @@ -3417,6 +3875,12 @@ namespace: selectNamespace: Select Namespace createNamespace: Create a New Namespace selectOrCreate: Select or Create a Namespace +<<<<<<< HEAD +======= + snapshots: + label: Snapshots + totalSnapshotSize: Total Snapshot Size +>>>>>>> b5455bcb (fix: separate used/allocated units) resourceStates: success: 'Active' info: 'Transitioning' @@ -3505,7 +3969,11 @@ networkpolicy: ruleHint: Incoming traffic is only allowed from the configured sources portHint: Incoming traffic is only allowed to connect to the configured ports labelsAnnotations: +<<<<<<< HEAD label: Labels & Annotations +======= + label: Labels and Annotations +>>>>>>> b5455bcb (fix: separate used/allocated units) rules: pod: Pod namespace: Namespace @@ -3536,12 +4004,20 @@ networkpolicy: namespaceSelector: label: Namespace Selector namespaceAndPodSelector: +<<<<<<< HEAD label: Namespace/Pod Selector +======= + label: Namespace and Pod Selector +>>>>>>> b5455bcb (fix: separate used/allocated units) config: label: Configuration selectors: label: Selectors +<<<<<<< HEAD hint: The NetworkPolicy is applied to the selected Pods +======= + hint: The NetworkPolicy is applied to the selected pods +>>>>>>> b5455bcb (fix: separate used/allocated units) matchingPods: matchesSome: |- {matched, plural, @@ -3593,8 +4069,13 @@ node: used: Used amount: "{used} of {total} {unit}" cpu: CPU +<<<<<<< HEAD memory: MEMORY pods: PODS +======= + memory: Memory + pods: Pods +>>>>>>> b5455bcb (fix: separate used/allocated units) diskPressure: Disk Pressure kubelet: kubelet memoryPressure: Memory Pressure @@ -3716,6 +4197,10 @@ persistentVolume: file-csi-azure-com: Azure File (CSI) driver-longhorn-io: Longhorn (CSI) driver-harvesterhci-io: Harvester (CSI) +<<<<<<< HEAD +======= + lvm-driver-harvesterhci-io: LVM +>>>>>>> b5455bcb (fix: separate used/allocated units) nfs-csi-k8s-io: NFS (CSI) ebs-csi-aws-com: AWS Elastic Block Store (CSI) rbd-csi-ceph-com: Ceph RBD (CSI) @@ -3807,7 +4292,11 @@ persistentVolume: portals: add: Add Portal cinder: +<<<<<<< HEAD label: Openstack Cinder Volume (Unsupported) +======= + label: OpenStack Cinder Volume (Unsupported) +>>>>>>> b5455bcb (fix: separate used/allocated units) volumeId: label: Volume ID placeholder: e.g. 
vol @@ -3892,7 +4381,11 @@ persistentVolume: label: Path on the Node placeholder: /mnt/disks/ssd1 mustBe: +<<<<<<< HEAD label: The Path on the Node must be +======= + label: The path on the node must be +>>>>>>> b5455bcb (fix: separate used/allocated units) anything: 'Anything: do not check the target path' directory: A directory, or create if it does not exist file: A file, or create if it does not exist @@ -3955,8 +4448,13 @@ persistentVolumeClaim: source: label: Source options: +<<<<<<< HEAD new: Use a Storage Class to provision a new Persistent Volume existing: Use an existing Persistent Volume +======= + new: Use a storage class to provision a new persistent volume + existing: Use an existing persistent volume +>>>>>>> b5455bcb (fix: separate used/allocated units) expand: label: Expand notSupported: Storage class does not support volume expansion @@ -3967,8 +4465,13 @@ persistentVolumeClaim: requestStorage: Request Storage persistentVolume: Persistent Volume tooltips: +<<<<<<< HEAD noStorageClass: You don't have permission to list Storage Classes, enter a name manually noPersistentVolume: You don't have permission to list Persistent Volumes, enter a name manually +======= + noStorageClass: You do not have permission to list storage classes, enter a name manually + noPersistentVolume: You do not have permission to list persistent volumes, enter a name manually +>>>>>>> b5455bcb (fix: separate used/allocated units) customize: label: Customize accessModes: @@ -4007,18 +4510,32 @@ plugins: installing: Installing ... uninstalling: Uninstalling ... descriptions: +<<<<<<< HEAD experimental: This Extension is marked as experimental third-party: This Extension is provided by a Third-Party built-in: This Extension is built-in image: This Extension Image has been loaded manually +======= + experimental: This extension is marked as experimental + third-party: This extension is provided by a third-party + built-in: This extension is built-in + image: This extension image has been loaded manually +>>>>>>> b5455bcb (fix: separate used/allocated units) error: title: Error loading extension message: Could not load extension code generic: Extension error +<<<<<<< HEAD api: This Extension is not compatible with the Extensions API host: This Extension is not compatible with this application version: This Extension is not compatible with this version of Rancher load: An error occurred loading the code for this Extension +======= + api: This extension is not compatible with the extension API + host: This extension is not compatible with this application + version: This extension is not compatible with this version of Rancher + load: An error occurred loading the code for this extension +>>>>>>> b5455bcb (fix: separate used/allocated units) success: title: Loaded extension {name} message: Extension was loaded successfully @@ -4037,10 +4554,17 @@ plugins: requiresVersion: "Requires Rancher {version}" empty: all: Extensions are neither installed nor available +<<<<<<< HEAD available: No Extensions available installed: No Extensions installed updates: No updates available for installed Extensions images: No Extension Images installed +======= + available: No extension available + installed: No extension installed + updates: No updates available for installed extension + images: No extension images installed +>>>>>>> b5455bcb (fix: separate used/allocated units) loadError: An error occurred loading the code for this extension helmError: "An error occurred installing the extension via Helm" manageRepos: Manage 
Repositories @@ -4051,14 +4575,23 @@ plugins: subtitle: Catalogs imageLoad: load: Import Extension Catalog +<<<<<<< HEAD prompt: An Extension Catalog contains extension assets bundled into an image, importing will take the image and host a Helm repository to act as a catalog for custom built Extensions. +======= + prompt: An extension catalog contains extension assets bundled into an image, importing will take the image and host a Helm repository to act as a catalog for custom built extensions. +>>>>>>> b5455bcb (fix: separate used/allocated units) fields: image: label: Catalog Image Reference placeholder: "e.g. hub.docker.io/example-org/my-image:latest" secrets: +<<<<<<< HEAD banner: "If the registry that hosts the Catalog Image requires Pull Secrets, they must be created in the following namespace:
cattle-ui-plugin-system" banner: This will create an Deployment, Service, and Helm repository to serve the extension charts. +======= + banner: "If the registry that hosts the catalog image requires pull secrets, they must be created in the following namespace:
cattle-ui-plugin-system"
+ banner: This will create a deployment, service, and Helm repository to serve the extension charts.
+>>>>>>> b5455bcb (fix: separate used/allocated units)
imageVersion:
title: Image Version Not Found
message: Unable to determine image version from {image}, defaulting to latest
@@ -4075,7 +4608,11 @@ plugins:
message: A repository with the name {repo} already exists
success:
title: "Imported Extension Catalog from: {name}"
+<<<<<<< HEAD
message: Extension Catalog image was imported successfully
+=======
+ message: Extension catalog image was imported successfully
+>>>>>>> b5455bcb (fix: separate used/allocated units)
headers:
image:
name: images
@@ -4094,6 +4631,7 @@ plugins:
install:
label: Install
title: Install Extension {name}
+<<<<<<< HEAD
prompt: "Are you sure that you want to install this Extension?"
version: Version
warnNotCertified: Please ensure that you are aware of the risks of installing Extensions from untrusted authors
@@ -4111,6 +4649,25 @@ plugins:
prompt: "Are you sure that you want to uninstall this Extension?"
custom: "Are you sure that you want to uninstall this Extension Image? This will also remove any Extensions provided by this image."
upgradeAvailable: A newer version of this Extension is available
+=======
+ prompt: "Are you sure that you want to install this extension?"
+ version: Version
+ warnNotCertified: Please ensure that you are aware of the risks of installing extensions from untrusted authors
+ update:
+ label: Update
+ title: Update Extension {name}
+ prompt: "Are you sure that you want to update this extension?"
+ rollback:
+ label: Rollback
+ title: Rollback Extension {name}
+ prompt: "Are you sure that you want to rollback this extension?"
+ uninstall:
+ label: Uninstall
+ title: "Uninstall Extension: {name}"
+ prompt: "Are you sure that you want to uninstall this extension?"
+ custom: "Are you sure that you want to uninstall this extension image? This will also remove any extensions provided by this image."
+ upgradeAvailable: A newer version of this extension is available
+>>>>>>> b5455bcb (fix: separate used/allocated units)
reload: Extensions changed - reload required
safeMode:
title: Extensions Safe Mode
@@ -4119,19 +4676,34 @@ plugins:
title: Extension support is not enabled
prompt:
cant: Automatic installation is not available - required Helm Charts could not be found
+<<<<<<< HEAD
can: You need to install the Extension Operator
install:
title: Enable Extension Support?
prompt: This will install the Helm charts to enable Extension support
airgap: The Rancher Extensions Repository provides extensions published by Rancher. Un-check if your Rancher installation is air-gapped
+=======
+ can: You need to install the extension operator
+ install:
+ title: Enable Extension Support?
+ prompt: This will install the Helm charts to enable extension support
+ airgap: The Rancher extensions repository provides extensions published by Rancher. De-select if your Rancher installation is air-gapped
+>>>>>>> b5455bcb (fix: separate used/allocated units)
addRancherRepo: Add the Rancher Extension Repository
remove:
label: Disable Extension Support
title: Disable Extension Support?
+<<<<<<< HEAD
prompt: This will un-install the Helm charts that enable Extension support
registry:
title: Remove the Rancher Extensions Repository
prompt: The Rancher Extensions Repository provides extensions published by Rancher
+=======
+ prompt: This will un-install the Helm charts that enable extension support
+ registry:
+ title: Remove the Rancher Extensions Repository
+ prompt: The Rancher extension repository provides extensions published by Rancher
+>>>>>>> b5455bcb (fix: separate used/allocated units)
crd:
title: Remove the Rancher Extensions Custom Resource Definition
prompt: There are one or more extensions installed - removing the CRD will require you to manually reinstall these extensions if you subsequently re-enable extensions support.
@@ -4154,7 +4726,11 @@ podSecurityAdmission:
placeholder: 'Version (default: latest)'
exemptions:
title: Exemptions
+<<<<<<< HEAD
description: Allow the creation of pods for specific Usernames, RuntimeClassNames, and Namespaces that would otherwise be prohibited due to the policies set above.
+=======
+ description: Allow the creation of pods for specific usernames, RuntimeClassNames, and namespaces that would otherwise be prohibited due to the policies set above.
+>>>>>>> b5455bcb (fix: separate used/allocated units)
placeholder: Enter a comma separated list of {psaExemptionsControl}
prefs:
title: Preferences
@@ -4192,9 +4768,15 @@ prefs:
advFeatures:
title: Advanced Features
viewInApi: Enable "View in API"
+<<<<<<< HEAD
allNamespaces: Show system Namespaces managed by Rancher (not intended for editing or deletion)
themeShortcut: Enable Dark/Light Theme keyboard shortcut toggle (shift+T)
pluginDeveloper: Enable Extension developer features
+=======
+ allNamespaces: Show system namespaces managed by Rancher (not intended for editing or deletion)
+ themeShortcut: Enable Dark/Light theme keyboard shortcut toggle (shift+T)
+ pluginDeveloper: Enable extension developer features
+>>>>>>> b5455bcb (fix: separate used/allocated units)
hideDesc:
label: Hide All Type Descriptions
helm:
@@ -4250,9 +4832,15 @@ project:
members:
label: Members
containerDefaultResourceLimit: Container Default Resource Limit
+<<<<<<< HEAD
vmDefaultResourceLimit: VM Default Resource Limit
resourceQuotas: Resource Quotas
haveOneOwner: There must be at least one member with the Owner role.
+=======
+ vmDefaultResourceLimit: Virtual Machine Default Resource Limit
+ resourceQuotas: Resource Quotas
+ haveOneOwner: There must be at least one member with the owner role.
+>>>>>>> b5455bcb (fix: separate used/allocated units)
psp:
default: Cluster Default
label: Pod Security Policy
@@ -4264,11 +4852,16 @@ projectMembers:
label: Project
projectPermissions:
label: Project Permissions
+<<<<<<< HEAD
description: Controls what access users have to the Project
+=======
+ description: Controls what access users have to the project
+>>>>>>> b5455bcb (fix: separate used/allocated units)
noDescription: User created - no description
searchForMember: Search for a member to provide project access
owner:
label: Owner
+<<<<<<< HEAD
description: Owners have full control over the Project and all resources inside it.
member:
label: Member
@@ -4276,11 +4869,24 @@ projectMembers:
readOnly:
label: Read Only
description: Members can only view the resources inside the Project but not change the resources.
+=======
+ description: Owners have full control over the project and all resources inside it.
+ member:
+ label: Member
+ description: Members can manage the resources inside the project but not change the project itself.
+ readOnly:
+ label: Read Only
+ description: Members can only view the resources inside the project but not change the resources.
+>>>>>>> b5455bcb (fix: separate used/allocated units)
custom:
label: Custom
description: Choose individual roles for this user.
createNs: Create Namespaces
+<<<<<<< HEAD
configmapsManage: Manage Config Maps
+=======
+ configmapsManage: Manage Configuration Maps
+>>>>>>> b5455bcb (fix: separate used/allocated units)
ingressManage: Manage Ingress
projectcatalogsManage: Manage Project Catalogs
projectroletemplatebindingsManage: Manage Project Members
@@ -4289,7 +4895,11 @@ projectMembers:
servicesManage: Manage Services
persistentvolumeclaimsManage: Manage Volumes
workloadsManage: Manage Workloads
+<<<<<<< HEAD
configmapsView: View Config Maps
+=======
+ configmapsView: View Configuration Maps
+>>>>>>> b5455bcb (fix: separate used/allocated units)
ingressView: View Ingress
monitoringUiView: View Monitoring
projectcatalogsView: View Project Catalogs
@@ -4325,7 +4935,11 @@ prometheusRule:
summary:
input: Summary Annotation Value
label: Summary
+<<<<<<< HEAD
bannerText: 'When firing alerts, the annotations and labels will be passed to the configured AlertManagers to allow them to construct the notification that will be sent to any configured Receivers.'
+=======
+ bannerText: 'When firing alerts, the annotations and labels will be passed to the configured AlertManagers to allow them to construct the notification that will be sent to any configured receivers.'
+>>>>>>> b5455bcb (fix: separate used/allocated units)
for:
label: Wait to fire for
placeholder: '60'
@@ -4364,14 +4978,22 @@ prometheusRule:
promptForceRemove:
modalTitle: Are you sure?
+<<<<<<< HEAD
removeWarning: "There was an issue with deleting underlying infrastructure. If you proceed with this action, the Machine {nameToMatch} will be deleted from Rancher only. It's highly recommended to manually delete any referenced infrastructure."
+=======
+ removeWarning: "There was an issue with deleting underlying infrastructure. If you proceed with this action, the Machine {nameToMatch} will be deleted from Rancher only. We recommend to manually delete any referenced infrastructure."
+>>>>>>> b5455bcb (fix: separate used/allocated units)
forceDelete: Force Delete
confirmName: "Enter in the pool name below to confirm:"
podRemoveWarning: "Force deleting pods does not wait for confirmation that the pod's processes have been terminated. This may result in data corruption or inconsistencies"
promptScaleMachineDown:
attemptingToRemove: "You are attempting to delete {count} {type}"
+<<<<<<< HEAD
retainedMachine1: At least one Machine must exist for roles Control Plane and Etcd.
+=======
+ retainedMachine1: At least one machine must exist for roles control plane and Etcd.
+>>>>>>> b5455bcb (fix: separate used/allocated units)
retainedMachine2: { name } will remain
promptRemove:
@@ -4383,7 +5005,11 @@ promptRemove:
other { and {count} others.}
}
attemptingToRemove: "You are attempting to delete the {type}"
+<<<<<<< HEAD
attemptingToRemoveAuthConfig: "You are attempting to disable this Auth Provider.
docker ps, then run:'
dockerSuffix: ""
@@ -5278,7 +5985,11 @@ tableHeaders:
apiGroup: API Groups
apikey: API Key
available: Available
+<<<<<<< HEAD
attachedVM: Attached VM
+=======
+ attachedVM: Attached Virtual Machine
+>>>>>>> b5455bcb (fix: separate used/allocated units)
authRoles:
globalDefault: New User Default
@@ -5381,7 +6092,11 @@ tableHeaders:
namespaceName: Name
namespaceNameUnlinked: Name
networkType: Type
+<<<<<<< HEAD
networkVlan: Vlan ID
+=======
+ networkVlan: VLAN ID
+>>>>>>> b5455bcb (fix: separate used/allocated units)
node: Node
nodeName: Node Name
nodesReady: Nodes Ready
@@ -5471,6 +6186,10 @@ tableHeaders:
targetKind: Target Type
targetPort: Target
template: Template
+<<<<<<< HEAD
+=======
+ totalSnapshotQuota: Total Snapshot Quota
+>>>>>>> b5455bcb (fix: separate used/allocated units)
type: Type
updated: Updated
up-to-date: Up To Date
@@ -5549,7 +6268,11 @@ validation:
name: Cluster name cannot be 'local' or take the form 'c-xxxxx'
conflict: |-
This resource has been modified since you started editing it, and some of those modifications conflict with your changes.
+<<<<<<< HEAD
This screen has been updated to reflect the current values on the cluster. Review and reapply the changes you wanted to make, then Save again.
+=======
+ This screen has been updated to reflect the current values on the cluster. Review and reapply the changes you wanted to make, then save again.
+>>>>>>> b5455bcb (fix: separate used/allocated units)
Conflicting {fieldCount, plural, =1 {field} other {fields}}: {fields}
custom:
missing: 'No validator exists for { validatorName }! Does the validator exist in custom-validators? Is the name spelled correctly?'
@@ -5578,7 +6301,11 @@ validation:
global: Requires "Cluster Output" to be selected.
output:
logdna:
+<<<<<<< HEAD
apiKey: Requires an "Api Key" to be set.
+=======
+ apiKey: Requires an "API Key" to be set.
+>>>>>>> b5455bcb (fix: separate used/allocated units)
invalidCron: Invalid cron schedule
k8s:
name: Must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character (e.g. 'my-name', or '123-abc').
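# Illustrative note, assuming the message above describes the standard Kubernetes RFC 1123
# label convention; it is equivalent to the regular expression
#   ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
# so 'my-name' and '123-abc' are accepted while 'My_Name' is not.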
@@ -5618,17 +6345,28 @@ validation:
port: A port must be a number between 1 and 65535.
path: '"{key}" must be an absolute path'
prometheusRule:
+<<<<<<< HEAD
noEdit: This Prometheus Rule may not be edited due to invalid characters in name.
+=======
+ noEdit: This Prometheus rule may not be edited due to invalid characters in name.
+>>>>>>> b5455bcb (fix: separate used/allocated units)
groups:
required: At least one rule group is required.
singleAlert: A rule may contain alert rules or recording rules but not both.
valid:
name: 'Name is required for rule group {index}.'
rule:
+<<<<<<< HEAD
alertName: 'Rule group {groupIndex} rule {ruleIndex} requires an Alert Name.'
expr: 'Rule group {groupIndex} rule {ruleIndex} requires a PromQL Expression.'
labels: 'Rule group {groupIndex} rule {ruleIndex} requires at least one label. Severity is recommended.'
recordName: 'Rule group {groupIndex} rule {ruleIndex} requires a Time Series Name.'
+=======
+ alertName: 'Rule group {groupIndex} rule {ruleIndex} requires an alert name.'
+ expr: 'Rule group {groupIndex} rule {ruleIndex} requires a PromQL expression.'
+ labels: 'Rule group {groupIndex} rule {ruleIndex} requires at least one label. Severity is recommended.'
+ recordName: 'Rule group {groupIndex} rule {ruleIndex} requires a time series name.'
+>>>>>>> b5455bcb (fix: separate used/allocated units)
singleEntry: 'At least one alert rule or one recording rule is required in rule group {index}.'
required: '"{key}" is required'
invalid: '"{key}" is invalid'
@@ -5637,12 +6375,21 @@ validation:
roleTemplate:
roleTemplateRules:
missingVerb: You must specify at least one verb for each resource grant
+<<<<<<< HEAD
missingResource: You must specify a Resource for each resource grant
missingApiGroup: You must specify an API Group for each resource grant
missingOneResource: You must specify at least one Resource, Non-Resource URL or API Group for each resource grant
service:
externalName:
none: External Name is required on an ExternalName Service.
+=======
+ missingResource: You must specify a resource for each resource grant
+ missingApiGroup: You must specify an API group for each resource grant
+ missingOneResource: You must specify at least one resource, non-resource URL or API group for each resource grant
+ service:
+ externalName:
+ none: External name is required on an ExternalName service.
+>>>>>>> b5455bcb (fix: separate used/allocated units)
ports:
name:
required: 'Port Rule [{position}] - Name is required.'
@@ -5670,7 +6417,11 @@ validation:
missingProjectId: A target must have a project selected.
monitoring:
route:
+<<<<<<< HEAD
match: At least one Match or Match Regex must be selected
+=======
+ match: At least one match or match regex must be selected
+>>>>>>> b5455bcb (fix: separate used/allocated units)
interval: '"{key}" must be of a format with digits followed by a unit i.e. 1h, 2m, 30s'
tab: "One or more fields in this tab contain a form validation error"
@@ -5762,9 +6513,15 @@ workload:
initialDelay: Initial Delay
livenessProbe: Liveness Check
livenessTip: Containers will be restarted when this check is failing. Not recommended for most uses.
+<<<<<<< HEAD
noHealthCheck: "There is no Readiness Check, Liveness Check or Startup Check configured."
readinessProbe: Readiness Checks
readinessTip: Containers will be removed from service endpoints when this check is failing. Recommended.
+=======
+ noHealthCheck: "There is no readiness check, liveness check or startup check configured."
+ readinessProbe: Readiness Checks
+ readinessTip: Containers will be removed from service endpoints when this check is failing. Recommended.
+>>>>>>> b5455bcb (fix: separate used/allocated units)
startupProbe: Startup Check
startupTip: Containers will wait until this check succeeds before attempting other health checks.
successThreshold: Success Threshold
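# A minimal sketch for illustration, assuming standard Kubernetes container probes as
# described by the strings above; the pod name, image and paths are example values only.
apiVersion: v1
kind: Pod
metadata:
  name: probe-example
spec:
  containers:
    - name: web
      image: nginx
      startupProbe:                      # other checks wait until this succeeds
        httpGet: { path: /healthz, port: 80 }
        failureThreshold: 30
        periodSeconds: 10
      readinessProbe:                    # failing removes the pod from service endpoints
        httpGet: { path: /ready, port: 80 }
        initialDelaySeconds: 5
        periodSeconds: 10
        successThreshold: 1
      livenessProbe:                     # failing restarts the container
        httpGet: { path: /healthz, port: 80 }
        initialDelaySeconds: 15
        periodSeconds: 20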
@@ -5830,9 +6587,15 @@ workload:
noServiceAccess: You do not have permission to create or manage services
ports:
expose: Networking
+<<<<<<< HEAD
description: 'Define a Service to expose the container, or define a non-functional, named port so that humans will know where the app within the container is expected to run.'
detailedDescription: If ClusterIP, LoadBalancer, or NodePort is selected, a Service is automatically created that will select the Pods in this workload using labels.
toolTip: 'For help exposing workloads on Kubernetes, see the official Kubernetes documentation on Services. You can also manually create a Service to expose Pods by selecting their labels, and you can use an Ingress to map HTTP routes to Services.'
+=======
+ description: 'Define a service to expose the container, or define a non-functional, named port so that other users will know where the application within the container is expected to run.'
+ detailedDescription: If ClusterIP, LoadBalancer, or NodePort is selected, a service is automatically created that will select the pods in this workload using labels.
+ toolTip: 'For help exposing workloads on Kubernetes, see the official Kubernetes documentation on services. You can also manually create a service to expose pods by selecting their labels, and you can use an ingress to map HTTP routes to services.'
+>>>>>>> b5455bcb (fix: separate used/allocated units)
createService: Service Type
noCreateService: Do not create a service
containerPort: Private Container Port
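# A minimal sketch for illustration, assuming a standard Kubernetes Service that selects a
# workload's pods by label, as the strings above describe; names, labels and ports are
# example values only.
apiVersion: v1
kind: Service
metadata:
  name: web
spec:
  type: ClusterIP            # ClusterIP, NodePort or LoadBalancer, as described above
  selector:
    app: web                 # matches the labels on the workload's pod template
  ports:
    - port: 80               # listening (service) port
      targetPort: 8080       # private container port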
@@ -5906,6 +6669,7 @@ workload:
detail:
services: Services
ingresses: Ingresses
+<<<<<<< HEAD
cannotViewServices: Could not list Services due to lack of permission.
cannotFindServices: Could not find any Services that select Pods from this workload.
serviceListCaption: "The following Services select Pods from this workload:"
@@ -5913,6 +6677,15 @@ workload:
cannotFindIngresses: Could not find any Ingresses that forward traffic to Services that select Pods in this workload.
ingressListCaption: "The following Ingresses forward traffic to Services that select Pods from this workload:"
cannotViewIngressesBecauseCannotViewServices: Could not find relevant Ingresses due to lack of permission to view Services.
+=======
+ cannotViewServices: Could not list services due to lack of permission.
+ cannotFindServices: Could not find any services that select pods from this workload.
+ serviceListCaption: "The following services select pods from this workload:"
+ cannotViewIngresses: Could not list ingresses due to lack of permission.
+ cannotFindIngresses: Could not find any ingresses that forward traffic to services that select pods in this workload.
+ ingressListCaption: "The following ingresses forward traffic to services that select pods from this workload:"
+ cannotViewIngressesBecauseCannotViewServices: Could not find relevant ingresses due to lack of permission to view services.
+>>>>>>> b5455bcb (fix: separate used/allocated units)
pods:
title: Pods
detailTop:
@@ -6101,7 +6874,11 @@ workload:
addMount: Add Mount
addVolume: Add Volume
selectVolume: Select Volume
+<<<<<<< HEAD
noVolumes: Volumes will appear here after they are added in the Pod tab
+=======
+ noVolumes: Volumes will appear here after they are added in the pod tab
+>>>>>>> b5455bcb (fix: separate used/allocated units)
certificate: Certificate
csi:
diskName: Disk Name
@@ -6132,12 +6909,21 @@ workload:
defaultMode: Default Mode
driver: driver
hostPath:
+<<<<<<< HEAD
label: The Path on the Node must be
options:
default: 'Anything: do not check the target path'
directoryOrCreate: A directory, or create if it doesn't exist
directory: An existing directory
fileOrCreate: A file, or create if it doesn't exist
+=======
+ label: The Path on the node must be
+ options:
+ default: 'Anything: do not check the target path'
+ directoryOrCreate: A directory, or create if it does not exist
+ directory: An existing directory
+ fileOrCreate: A file, or create if it does not exist
+>>>>>>> b5455bcb (fix: separate used/allocated units)
file: An existing file
socket: An existing socket
charDevice: An existing character device
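# A minimal sketch for illustration, assuming a standard Kubernetes hostPath volume whose
# "type" field corresponds to the options above; the pod name, image and path are example
# values only.
apiVersion: v1
kind: Pod
metadata:
  name: hostpath-example
spec:
  containers:
    - name: app
      image: busybox
      command: ["sleep", "3600"]
      volumeMounts:
        - name: data
          mountPath: /data
  volumes:
    - name: data
      hostPath:
        path: /var/lib/example      # the "Path on the Node"
        type: DirectoryOrCreate     # or Directory, File, FileOrCreate, Socket, CharDevice;
                                    # leaving type empty skips the target path check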
@@ -6166,11 +6952,19 @@ workload:
placeholder: "e.g. 300"
typeDescriptions:
apps.daemonset: DaemonSets run exactly one pod on every eligible node. When new nodes are added to the cluster, DaemonSets automatically deploy to them. Recommended for system-wide or vertically-scalable workloads that never need more than one pod per node.
+<<<<<<< HEAD
apps.deployment: Deployments run a scalable number of replicas of a pod distributed among the eligible nodes. Changes are rolled out incrementally and can be rolled back to the previous revision when needed. Recommended for stateless & horizontally-scalable workloads.
apps.statefulset: StatefulSets manage stateful applications and provide guarantees about the ordering and uniqueness of the pods created. Recommended for workloads with persistent storage or strict identity, quorum, or upgrade order requirements.
batch.cronjob: CronJobs create Jobs, which then run Pods, on a repeating schedule. The schedule is expressed in standard Unix cron format, and uses the timezone of the Kubernetes control plane (typically UTC).
batch.job: Jobs create one or more pods to reliably perform a one-time task by running a pod until it exits successfully. Failed pods are automatically replaced until the specified number of completed runs has been reached. Jobs can also run multiple pods in parallel or function as a batch work queue.
pod: Pods are the smallest deployable units of computing that you can create and manage in Kubernetes. A Pod is a group of one or more containers, with shared storage and network resources, and a specification for how to run the containers.
+=======
+ apps.deployment: Deployments run a scalable number of replicas of a pod distributed among the eligible nodes. Changes are rolled out incrementally and can be rolled back to the previous revision when needed. Recommended for stateless and horizontally-scalable workloads.
+ apps.statefulset: StatefulSets manage stateful applications and provide guarantees about the ordering and uniqueness of the pods created. Recommended for workloads with persistent storage or strict identity, quorum, or upgrade order requirements.
+ batch.cronjob: CronJobs create jobs, which then run pods, on a repeating schedule. The schedule is expressed in standard Unix cron format, and uses the timezone of the Kubernetes control plane (typically UTC).
+ batch.job: Jobs create one or more pods to reliably perform a one-time task by running a pod until it exits successfully. Failed pods are automatically replaced until the specified number of completed runs has been reached. Jobs can also run multiple pods in parallel or function as a batch work queue.
+ pod: Pods are the smallest deployable units of computing that you can create and manage in Kubernetes. A pod is a group of one or more containers, with shared storage and network resources, and a specification for how to run the containers.
+>>>>>>> b5455bcb (fix: separate used/allocated units)
upgrading:
activeDeadlineSeconds:
label: Pod Active Deadline
@@ -6179,8 +6973,13 @@ workload:
label: Concurrency
options:
allow: Allow CronJobs to run concurrently
+<<<<<<< HEAD
forbid: Skip next run if current run hasn't finished
replace: Replace run if current run hasn't finished
+=======
+ forbid: Skip next run if current run has not finished
+ replace: Replace run if current run has not finished
+>>>>>>> b5455bcb (fix: separate used/allocated units)
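# A minimal sketch for illustration, assuming a standard Kubernetes CronJob; the name,
# schedule and command are example values only. The schedule uses standard Unix cron syntax
# in the control plane's timezone, and concurrencyPolicy maps onto the Allow/Forbid/Replace
# options above.
apiVersion: batch/v1
kind: CronJob
metadata:
  name: nightly-report
spec:
  schedule: "0 2 * * *"          # 02:00 every day
  concurrencyPolicy: Forbid      # skip the next run if the current run hasn't finished
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: OnFailure
          containers:
            - name: report
              image: busybox
              command: ["sh", "-c", "echo generating report"]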
maxSurge:
label: Max Surge
tip: The maximum number of pods allowed beyond the desired scale at any given time.
@@ -6202,7 +7001,11 @@ workload:
labels:
delete: "On Delete: New pods are only created when old pods are manually deleted."
recreate: "Recreate: Kill ALL pods, then start new pods."
+<<<<<<< HEAD
rollingUpdate: "Rolling Update: Create new pods, until max surge is reached, before deleting old pods. Don't stop more pods than max unavailable."
+=======
+ rollingUpdate: "Rolling Update: Create new pods, until max surge is reached, before deleting old pods. Do not stop more pods than max unavailable."
+>>>>>>> b5455bcb (fix: separate used/allocated units)
terminationGracePeriodSeconds:
label: Termination Grace Period
tip: The duration the pod needs to terminate successfully.
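# A minimal sketch for illustration, assuming a standard Kubernetes Deployment; the names,
# labels and image are example values only. It shows where max surge, max unavailable, the
# update strategy and the termination grace period described above live in the spec.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web
spec:
  replicas: 3
  selector:
    matchLabels: { app: web }
  strategy:
    type: RollingUpdate          # or Recreate (kill all pods, then start new ones)
    rollingUpdate:
      maxSurge: 1                # pods allowed beyond the desired scale during a rollout
      maxUnavailable: 0          # never stop more pods than this during a rollout
  template:
    metadata:
      labels: { app: web }
    spec:
      terminationGracePeriodSeconds: 30
      containers:
        - name: web
          image: nginx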
@@ -6299,14 +7102,22 @@ typeDescription:
cis.cattle.io.clusterscanprofile: A profile is the configuration for the CIS scan, which is the benchmark versions to use and any specific tests to skip in that benchmark.
cis.cattle.io.clusterscan: A scan is created to trigger a CIS scan on the cluster based on the defined profile. A report is created after the scan is completed.
cis.cattle.io.clusterscanreport: A report is the result of a CIS scan of the cluster.
+<<<<<<< HEAD
management.cattle.io.feature: Feature Flags allow certain {vendor} features to be toggled on and off. Features that are off by default should be considered experimental functionality.
cluster.x-k8s.io.machine: A Machine encapsulates the configuration of a Kubernetes Node. Use this view to see what happens after updating a cluster.
cluster.x-k8s.io.machinedeployment: A Machine Deployment orchestrates deployments via templates over a collection of Machine Sets (similar to a Deployment). Use this view to see what happens after updating a cluster.
cluster.x-k8s.io.machineset: A Machine Set ensures the desired number of Machine resources are up and running at all times (similar to a ReplicaSet). Use this view to see what happens after updating a cluster.
+=======
+ management.cattle.io.feature: Feature flags allow certain {vendor} features to be toggled on and off. Features that are off by default should be considered experimental functionality.
+ cluster.x-k8s.io.machine: A machine encapsulates the configuration of a Kubernetes node. Use this view to see what happens after updating a cluster.
+ cluster.x-k8s.io.machinedeployment: A machine deployment orchestrates deployments via templates over a collection of machine sets (similar to a deployment). Use this view to see what happens after updating a cluster.
+ cluster.x-k8s.io.machineset: A machine set ensures the desired number of machine resources are up and running at all times (similar to a ReplicaSet). Use this view to see what happens after updating a cluster.
+>>>>>>> b5455bcb (fix: separate used/allocated units)
resources.cattle.io.backup: A backup is created to perform one-time backups or schedule recurring backups based on a ResourceSet.
resources.cattle.io.restore: A restore is created to trigger a restore to the cluster based on a backup file.
resources.cattle.io.resourceset: A resource set defines which CRDs and resources to store in the backup.
monitoring.coreos.com.servicemonitor: A service monitor defines the group of services and the endpoints that Prometheus will scrape for metrics. This is the most common way to define metrics collection.
+<<<<<<< HEAD
monitoring.coreos.com.podmonitor: A pod monitor defines the group of pods that Prometheus will scrape for metrics. The common way is to use service monitors, but pod monitors allow you to handle any situation where a service monitor wouldn't work.
monitoring.coreos.com.prometheusrule: A Prometheus Rule resource defines both recording and/or alert rules. A recording rule can pre-compute values and save the results. Alerting rules allow you to define conditions on when to send notifications to AlertManager.
monitoring.coreos.com.prometheus: A Prometheus server is a Prometheus deployment whose scrape configuration and rules are determined by selected ServiceMonitors, PodMonitors, and PrometheusRules and whose alerts will be sent to all selected Alertmanagers with the custom resource's configuration.
@@ -6317,6 +7128,18 @@ typeDescription:
catalog.cattle.io.operation: An operation is the list of recent Helm operations that have been applied to the cluster.
catalog.cattle.io.app: An installed application is a Helm 3 chart that was installed either via our charts or through the Helm CLI.
logging.banzaicloud.io.clusterflow: Logs from the cluster will be collected and logged to the selected Cluster Output.
+=======
+ monitoring.coreos.com.podmonitor: A pod monitor defines the group of pods that Prometheus will scrape for metrics. The common way is to use service monitors, but pod monitors allow you to handle any situation where a service monitor would not work.
+ monitoring.coreos.com.prometheusrule: A Prometheus rule resource defines recording rules, alert rules, or both. A recording rule can pre-compute values and save the results. Alerting rules allow you to define conditions on when to send notifications to AlertManager.
+ monitoring.coreos.com.prometheus: A Prometheus server is a Prometheus deployment whose scrape configuration and rules are determined by selected ServiceMonitors, PodMonitors, and PrometheusRules and whose alerts will be sent to all selected Alertmanagers with the custom resource's configuration.
+ monitoring.coreos.com.alertmanager: An alert manager is a deployment whose configuration will be specified by a secret in the same namespace, which determines which alerts should go to which receiver.
+ node: The base Kubernetes node resource represents a virtual or physical machine which hosts deployments. To manage the machine lifecycle, if available, go to Cluster Management.
+ catalog.cattle.io.clusterrepo: 'A chart repository is a Helm repository or {vendor} git based application catalog. It provides the list of available charts in the cluster.'
+ catalog.cattle.io.clusterrepo.local: 'A chart repository is a Helm repository or {vendor} git based application catalog. It provides the list of available charts in the cluster. Cluster Templates are deployed via Helm charts.'
+ catalog.cattle.io.operation: An operation is the list of recent Helm operations that have been applied to the cluster.
+ catalog.cattle.io.app: An installed application is a Helm 3 chart that was installed either via our charts or through the Helm CLI.
+ logging.banzaicloud.io.clusterflow: Logs from the cluster will be collected and logged to the selected cluster output.
+>>>>>>> b5455bcb (fix: separate used/allocated units)
logging.banzaicloud.io.clusteroutput: A cluster output defines which logging providers that logs can be sent to and is only effective when deployed in the namespace that the logging operator is in.
logging.banzaicloud.io.flow: A flow defines which logs to collect and filter as well as which output to send the logs. The flow is a namespaced resource, which means logs will only be collected from the namespace that the flow is deployed in.
logging.banzaicloud.io.output: An output defines which logging providers that logs can be sent to. The output needs to be in the same namespace as the flow that is using it.
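# A minimal sketch for illustration, assuming the monitoring.coreos.com/v1 PrometheusRule
# CRD described above; the namespace, rule names and expressions are example values only.
# The recording and alerting rules are kept in separate groups, in line with the
# singleAlert validation earlier in this file.
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: example-rules
  namespace: cattle-monitoring-system    # assumed namespace for the example
spec:
  groups:
    - name: example.recording
      rules:
        - record: node:cpu_utilisation:avg1m      # pre-computed value saved as a new series
          expr: 1 - avg by (node) (rate(node_cpu_seconds_total{mode="idle"}[1m]))
    - name: example.alerting
      rules:
        - alert: HighCPU                          # condition for notifying AlertManager
          expr: node:cpu_utilisation:avg1m > 0.9
          for: 5m
          labels:
            severity: warning                     # at least one label; severity recommended
          annotations:
            summary: CPU usage on {{ $labels.node }} is above 90%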
@@ -6350,8 +7173,13 @@ typeLabel:
}
catalog.cattle.io.app: |-
{count, plural,
+<<<<<<< HEAD
one { Installed App }
other { Installed Apps }
+=======
+ one { Installed Application }
+ other { Installed Applications }
+>>>>>>> b5455bcb (fix: separate used/allocated units)
}
catalog.cattle.io.clusterrepo: |-
{count, plural,
@@ -6360,6 +7188,7 @@ typeLabel:
}
catalog.cattle.io.repo: |-
{count, plural,
+<<<<<<< HEAD
one { Namespaced Repo }
other { Namespaced Repos }
}
@@ -6372,6 +7201,20 @@ typeLabel:
{count, plural,
one { App }
other { Apps }
+=======
+ one { Namespaced Repository }
+ other { Namespaced Repositories }
+ }
+ chartinstallaction: |-
+ {count, plural,
+ one { Application }
+ other { Applications }
+ }
+ chartupgradeaction: |-
+ {count, plural,
+ one { Application }
+ other { Applications }
+>>>>>>> b5455bcb (fix: separate used/allocated units)
}
cloudcredential: |-
{count, plural,
@@ -6395,8 +7238,13 @@ typeLabel:
}
fleet.cattle.io.gitrepo: |-
{count, plural,
+<<<<<<< HEAD
one { Git Repo }
other {Git Repos }
+=======
+ one { Git Repository }
+ other {Git Repositories }
+>>>>>>> b5455bcb (fix: separate used/allocated units)
}
management.cattle.io.authconfig: |-
{count, plural,
@@ -6501,8 +7349,13 @@ typeLabel:
}
'management.cattle.io.cluster': |-
{count, plural,
+<<<<<<< HEAD
one { Mgmt Cluster }
other { Mgmt Clusters }
+=======
+ one { Management Cluster }
+ other { Management Clusters }
+>>>>>>> b5455bcb (fix: separate used/allocated units)
}
'cluster.x-k8s.io.cluster': |-
{count, plural,
@@ -6681,8 +7534,13 @@ typeLabel:
}
harvesterhci.io.cloudtemplate: |-
{count, plural,
+<<<<<<< HEAD
one { Cloud Config Template }
other { Cloud Config Templates }
+=======
+ one { Cloud Configuration Template }
+ other { Cloud Configuration Templates }
+>>>>>>> b5455bcb (fix: separate used/allocated units)
}
fleet.cattle.io.content: |-
{count, plural,
@@ -6701,8 +7559,13 @@ typeLabel:
}
k3s.cattle.io.addon: |-
{count, plural,
+<<<<<<< HEAD
one { Addon }
other { Addons }
+=======
+ one { Add-on }
+ other { Add-ons }
+>>>>>>> b5455bcb (fix: separate used/allocated units)
}
management.cattle.io.apiservice: |-
{count, plural,
@@ -6801,8 +7664,13 @@ typeLabel:
}
management.cattle.io.rkeaddon: |-
{count, plural,
+<<<<<<< HEAD
one { RkeAddon }
other { RkeAddons }
+=======
+ one { RKEAddon }
+ other { RKEAddons }
+>>>>>>> b5455bcb (fix: separate used/allocated units)
}
management.cattle.io.rkek8sserviceoption: |-
{count, plural,
@@ -6811,8 +7679,13 @@ typeLabel:
}
management.cattle.io.rkek8ssystemimage: |-
{count, plural,
+<<<<<<< HEAD
one { RkeK8sSystemImage }
other { RkeK8sSystemImages }
+=======
+ one { RKEK8sSystemImage }
+ other { RKEK8sSystemImages }
+>>>>>>> b5455bcb (fix: separate used/allocated units)
}
rbac.authorization.k8s.io.clusterrolebinding: |-
{count, plural,
@@ -6880,6 +7753,10 @@ action:
viewYaml: View YAML
activate: Activate
deactivate: Deactivate
+<<<<<<< HEAD
+=======
+ editQuota: Edit Quota
+>>>>>>> b5455bcb (fix: separate used/allocated units)
show: Show
hide: Hide
copy: Copy
@@ -6917,11 +7794,19 @@ podAffinity:
keyValue:
keyPlaceholder: e.g. foo
valuePlaceholder: e.g. bar
+<<<<<<< HEAD
protip: 'Paste lines of key=value or key: value into any key field for easy bulk entry'
registryMirror:
header: Mirrors
toolTip: 'Mirrors can be used to redirect requests for images from one registry to come from a list of endpoints you specify instead. For example docker.io could redirect to your internal registry instead of ever going to DockerHub.'
+=======
+ protip: 'Paste lines of key=value or key:value into any key field for easy bulk entry'
+
+registryMirror:
+ header: Mirrors
+ toolTip: 'Mirrors can be used to redirect requests for images from one registry to come from a list of endpoints you specify instead. For example docker.io could redirect to your internal registry instead of ever going to DockerHub.'
+>>>>>>> b5455bcb (fix: separate used/allocated units)
addLabel: Add Mirror
description: Mirrors define the names and endpoints for private registries. The endpoints are tried one by one, and the first working one is used.
hostnameLabel: Registry Hostname
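# A minimal sketch for illustration, assuming an RKE2/K3s-style registries.yaml of the kind
# these mirror settings produce; the endpoint URL is an example value only.
mirrors:
  docker.io:                                  # registry hostname being mirrored
    endpoint:
      - "https://registry.internal.example"   # endpoints are tried in order; the first working one is used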
@@ -6950,7 +7835,11 @@ registryConfig:
advancedSettings:
label: Settings
+<<<<<<< HEAD
subtext: Typical users will not need to change these. Proceed with caution, incorrect values can break your {appName} installation. Settings which have been customized from default settings are tagged 'Modified'.
+=======
+ subtext: Users will not need to change these settings. Proceed with caution as incorrect values can break your {appName} installation. Settings that have been customized from default settings are tagged with 'Modified'.
+>>>>>>> b5455bcb (fix: separate used/allocated units)
show: Show
hide: Hide
none: None
@@ -6969,12 +7858,21 @@ advancedSettings:
'cluster-defaults': 'Override RKE Defaults when creating new clusters.'
'engine-install-url': 'Default Docker engine installation URL (for most node drivers).'
'engine-iso-url': 'Default OS installation URL (for vSphere driver).'
+<<<<<<< HEAD
'engine-newest-version': 'The newest supported version of Docker at the time of this release. A Docker version that does not satisfy supported docker range but is newer than this will be marked as untested.'
'engine-supported-range': 'Semver range for supported Docker engine versions. Versions which do not satisfy this range will be marked unsupported in the UI.'
'ingress-ip-domain': 'Wildcard DNS domain to use for automatically generated Ingress hostnames. auth-user-session-ttl-minutes and auth-token-max-ttl-minutes) in the Settings page.
+=======
+ information: To change the automatic log out behaviour, edit the authorisation and session token timeout values (auth-user-session-ttl-minutes and auth-token-max-ttl-minutes) in the settings page.
+>>>>>>> b5455bcb (fix: separate used/allocated units)
description: When enabled and the user is inactive past the specified timeout, the UI will no longer refresh page content and the user must reload the page to continue.
authUserTTL: This timeout cannot be higher than the user session timeout auth-user-session-ttl-minutes, which is currently {current} minutes.
@@ -7178,7 +8111,11 @@ resourceQuota:
helpText: Configure how much of the resources the namespace as a whole can consume.
helpTextDetail: The amount of resources the namespace as a whole can consume.
helpTextHarvester: VMs need to reserve additional memory overhead.
+<<<<<<< HEAD
configMaps: Config Maps
+=======
+ configMaps: Configuration Maps
+>>>>>>> b5455bcb (fix: separate used/allocated units)
limitsCpu: CPU Limit
limitsMemory: Memory Limit
persistentVolumeClaims: Persistent Volume Claims
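# A minimal sketch for illustration, assuming a standard Kubernetes ResourceQuota matching
# the quota fields above; the namespace and limits are example values only.
apiVersion: v1
kind: ResourceQuota
metadata:
  name: default-quota
  namespace: example
spec:
  hard:
    limits.cpu: "4"                 # CPU Limit
    limits.memory: 8Gi              # Memory Limit
    configmaps: "20"                # Config Maps
    persistentvolumeclaims: "10"    # Persistent Volume Claims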
@@ -7262,8 +8199,13 @@ support:
text: Login to SUSE Customer Center to access support for your subscription
action: SUSE Customer Center
aws:
+<<<<<<< HEAD
generateConfig: Generate Support Config
text: 'Login to SUSE Customer Center to access support for your subscription. Need to open a new support case? Download a support config file below.'
+=======
+ generateConfig: Generate Support Configuration
+ text: 'Login to SUSE Customer Center to access support for your subscription. Need to open a new support case? Download a support configuration file below.'
+>>>>>>> b5455bcb (fix: separate used/allocated units)
promos:
one:
title: 24x7 Support
@@ -7297,7 +8239,11 @@ legacy:
alerts: Alerts
apps: Apps
catalogs: Catalogs
+<<<<<<< HEAD
configMaps: Config Maps
+=======
+ configMaps: Configuration Maps
+>>>>>>> b5455bcb (fix: separate used/allocated units)
configuration: Configuration
globalDnsEntries: Global DNS Entries
globalDnsProviders: Global DNS Providers
@@ -7308,7 +8254,11 @@ legacy:
project:
label: Project
+<<<<<<< HEAD
select: "Use the Project/Namespace filter at the top of the page to select a Project in order to see legacy Project features."
+=======
+ select: "Use the namespace or project filter at the top of the page to select a project in order to see legacy project features."
+>>>>>>> b5455bcb (fix: separate used/allocated units)
serverUpgrade:
title: "{vendor} Server Changed"
diff --git a/shell/cloud-credential/generic.vue b/shell/cloud-credential/generic.vue
index 17ec5a1f8b0..4e6343d65d1 100644
--- a/shell/cloud-credential/generic.vue
+++ b/shell/cloud-credential/generic.vue
@@ -3,10 +3,20 @@ import CreateEditView from '@shell/mixins/create-edit-view';
import KeyValue from '@shell/components/form/KeyValue';
import { Banner } from '@components/Banner';
import { simplify, iffyFields, likelyFields } from '@shell/store/plugins';
+<<<<<<< HEAD
export default {
components: { KeyValue, Banner },
mixins: [CreateEditView],
+=======
+import Loading from '@shell/components/Loading';
+
+export default {
+ components: {
+ KeyValue, Banner, Loading
+ },
+ mixins: [CreateEditView],
+>>>>>>> b5455bcb (fix: separate used/allocated units)
props: {
driverName: {
@@ -15,19 +25,34 @@ export default {
}
},
+<<<<<<< HEAD
data() {
let keyOptions = [];
const normanType = this.$store.getters['plugins/credentialFieldForDriver'](this.driverName);
const normanSchema = this.$store.getters['rancher/schemaFor'](`${ normanType }credentialconfig`);
+=======
+ async fetch() {
+ let keyOptions = [];
+
+ const { normanSchema } = this;
+>>>>>>> b5455bcb (fix: separate used/allocated units)
if ( normanSchema?.resourceFields ) {
keyOptions = Object.keys(normanSchema.resourceFields);
} else {
+<<<<<<< HEAD
keyOptions = this.$store.getters['plugins/fieldNamesForDriver'](this.driverName);
}
// Prepopulate empty values for keys that sound like they're cloud-credential-ey
+=======
+ keyOptions = await this.$store.getters['plugins/fieldNamesForDriver'](this.driverName);
+ }
+
+ this.keyOptions = keyOptions;
+
+>>>>>>> b5455bcb (fix: separate used/allocated units)
const keys = [];
for ( const k of keyOptions ) {
@@ -43,11 +68,24 @@ export default {
this.value.setData(k, '');
}
}
+<<<<<<< HEAD
return {
hasSupport: !!normanSchema,
keyOptions,
errors: null,
+=======
+ },
+
+ data() {
+ const normanType = this.$store.getters['plugins/credentialFieldForDriver'](this.driverName);
+ const normanSchema = this.$store.getters['rancher/schemaFor'](`${ normanType }credentialconfig`);
+
+ return {
+ hasSupport: !!normanSchema,
+ errors: null,
+ normanSchema,
+>>>>>>> b5455bcb (fix: separate used/allocated units)
};
},
@@ -67,7 +105,12 @@ export default {
+<<<<<<< HEAD