diff --git a/api/src/main/java/com/cloud/storage/Storage.java b/api/src/main/java/com/cloud/storage/Storage.java
index 407935919ca2..d44628a198a4 100644
--- a/api/src/main/java/com/cloud/storage/Storage.java
+++ b/api/src/main/java/com/cloud/storage/Storage.java
@@ -147,6 +147,7 @@ public static enum StoragePoolType {
Gluster(true, false),
PowerFlex(true, true), // Dell EMC PowerFlex/ScaleIO (formerly VxFlexOS)
ManagedNFS(true, false),
+ Linstor(true, true),
DatastoreCluster(true, true); // for VMware, to abstract pool of clusters
private final boolean shared;
diff --git a/api/src/test/java/com/cloud/storage/StorageTest.java b/api/src/test/java/com/cloud/storage/StorageTest.java
index bf451696260b..951ee9fe76a1 100644
--- a/api/src/test/java/com/cloud/storage/StorageTest.java
+++ b/api/src/test/java/com/cloud/storage/StorageTest.java
@@ -48,6 +48,7 @@ public void isSharedStoragePool() {
Assert.assertTrue(StoragePoolType.Gluster.isShared());
Assert.assertTrue(StoragePoolType.ManagedNFS.isShared());
Assert.assertTrue(StoragePoolType.DatastoreCluster.isShared());
+ Assert.assertTrue(StoragePoolType.Linstor.isShared());
}
@Test
@@ -71,5 +72,6 @@ public void supportsOverprovisioningStoragePool() {
Assert.assertFalse(StoragePoolType.Gluster.supportsOverProvisioning());
Assert.assertFalse(StoragePoolType.ManagedNFS.supportsOverProvisioning());
Assert.assertTrue(StoragePoolType.DatastoreCluster.supportsOverProvisioning());
+ Assert.assertTrue(StoragePoolType.Linstor.supportsOverProvisioning());
}
}
diff --git a/client/pom.xml b/client/pom.xml
index 904b98b762d4..67585f67ece2 100644
--- a/client/pom.xml
+++ b/client/pom.xml
@@ -92,6 +92,11 @@
cloud-plugin-storage-volume-scaleio
${project.version}
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-plugin-storage-volume-linstor</artifactId>
+      <version>${project.version}</version>
+    </dependency>
org.apache.cloudstack
cloud-server
diff --git a/engine/storage/integration-test/pom.xml b/engine/storage/integration-test/pom.xml
index 4cae6ef2feed..22c4584611dd 100644
--- a/engine/storage/integration-test/pom.xml
+++ b/engine/storage/integration-test/pom.xml
@@ -99,6 +99,12 @@
${project.version}
test
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-plugin-storage-volume-linstor</artifactId>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
org.apache.cloudstack
cloud-secondary-storage
diff --git a/plugins/hypervisors/kvm/pom.xml b/plugins/hypervisors/kvm/pom.xml
index 394acdc9b590..a34bfa19001e 100644
--- a/plugins/hypervisors/kvm/pom.xml
+++ b/plugins/hypervisors/kvm/pom.xml
@@ -62,6 +62,11 @@
rados
${cs.rados-java.version}
+    <dependency>
+      <groupId>com.linbit.linstor.api</groupId>
+      <artifactId>java-linstor</artifactId>
+      <version>${cs.java-linstor.version}</version>
+    </dependency>
net.java.dev.jna
jna
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
index 239430757737..e3f536a4ba84 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
@@ -278,6 +278,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
*/
private static final String AARCH64 = "aarch64";
+ public static final String RESIZE_NOTIFY_ONLY = "NOTIFYONLY";
+
private String _modifyVlanPath;
private String _versionstringpath;
private String _patchScriptPath;
@@ -1894,6 +1896,8 @@ public String getResizeScriptType(final KVMStoragePool pool, final KVMPhysicalDi
|| poolType == StoragePoolType.Gluster)
&& volFormat == PhysicalDiskFormat.QCOW2 ) {
return "QCOW2";
+ } else if (poolType == StoragePoolType.Linstor) {
+ return RESIZE_NOTIFY_ONLY;
}
throw new CloudRuntimeException("Cannot determine resize type from pool type " + pool.getType());
}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java
index 52ed64a32f2b..685fea9f1bcf 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java
@@ -57,7 +57,7 @@ public Answer execute(final ResizeVolumeCommand command, final LibvirtComputingR
final String vmInstanceName = command.getInstanceName();
final boolean shrinkOk = command.getShrinkOk();
final StorageFilerTO spool = command.getPool();
- final String notifyOnlyType = "NOTIFYONLY";
+ final String notifyOnlyType = LibvirtComputingResource.RESIZE_NOTIFY_ONLY;
if ( currentSize == newSize) {
// nothing to do
@@ -73,19 +73,20 @@ public Answer execute(final ResizeVolumeCommand command, final LibvirtComputingR
final String path = vol.getPath();
String type = notifyOnlyType;
- if (pool.getType() != StoragePoolType.RBD) {
+ if (pool.getType() != StoragePoolType.RBD && pool.getType() != StoragePoolType.Linstor) {
type = libvirtComputingResource.getResizeScriptType(pool, vol);
if (type.equals("QCOW2") && shrinkOk) {
return new ResizeVolumeAnswer(command, false, "Unable to shrink volumes of type " + type);
}
} else {
- s_logger.debug("Volume " + path + " is on a RBD storage pool. No need to query for additional information.");
+ s_logger.debug("Volume " + path + " is on a RBD/Linstor storage pool. No need to query for additional information.");
}
s_logger.debug("Resizing volume: " + path + ", from: " + toHumanReadableSize(currentSize) + ", to: " + toHumanReadableSize(newSize) + ", type: " + type + ", name: " + vmInstanceName + ", shrinkOk: " + shrinkOk);
/* libvirt doesn't support resizing (C)LVM devices, and corrupts QCOW2 in some scenarios, so we have to do these via Bash script */
- if (pool.getType() != StoragePoolType.CLVM && vol.getFormat() != PhysicalDiskFormat.QCOW2) {
+ if (pool.getType() != StoragePoolType.CLVM && pool.getType() != StoragePoolType.Linstor &&
+ vol.getFormat() != PhysicalDiskFormat.QCOW2) {
s_logger.debug("Volume " + path + " can be resized by libvirt. Asking libvirt to resize the volume.");
try {
final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper();
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java
index 4383e3f6efed..77579c4eca6b 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java
@@ -378,7 +378,7 @@ public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String n
return adaptor.createDiskFromTemplate(template, name,
PhysicalDiskFormat.DIR, provisioningType,
size, destPool, timeout);
- } else if (destPool.getType() == StoragePoolType.PowerFlex) {
+ } else if (destPool.getType() == StoragePoolType.PowerFlex || destPool.getType() == StoragePoolType.Linstor) {
return adaptor.createDiskFromTemplate(template, name,
PhysicalDiskFormat.RAW, provisioningType,
size, destPool, timeout);
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
index 46f2c2ab17ee..ca8026c9a0cb 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
@@ -282,7 +282,9 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) {
final TemplateObjectTO newTemplate = new TemplateObjectTO();
newTemplate.setPath(primaryVol.getName());
newTemplate.setSize(primaryVol.getSize());
- if (primaryPool.getType() == StoragePoolType.RBD || primaryPool.getType() == StoragePoolType.PowerFlex) {
+ if (primaryPool.getType() == StoragePoolType.RBD ||
+ primaryPool.getType() == StoragePoolType.PowerFlex ||
+ primaryPool.getType() == StoragePoolType.Linstor) {
newTemplate.setFormat(ImageFormat.RAW);
} else {
newTemplate.setFormat(ImageFormat.QCOW2);
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStorageAdaptor.java
new file mode 100644
index 000000000000..c4afb1888954
--- /dev/null
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStorageAdaptor.java
@@ -0,0 +1,586 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.hypervisor.kvm.storage;
+
+import com.linbit.linstor.api.ApiClient;
+import com.linbit.linstor.api.ApiException;
+import com.linbit.linstor.api.Configuration;
+import com.linbit.linstor.api.DevelopersApi;
+import com.linbit.linstor.api.model.ApiCallRc;
+import com.linbit.linstor.api.model.ApiCallRcList;
+import com.linbit.linstor.api.model.Properties;
+import com.linbit.linstor.api.model.ProviderKind;
+import com.linbit.linstor.api.model.ResourceDefinition;
+import com.linbit.linstor.api.model.ResourceDefinitionModify;
+import com.linbit.linstor.api.model.ResourceGroup;
+import com.linbit.linstor.api.model.ResourceGroupSpawn;
+import com.linbit.linstor.api.model.ResourceMakeAvailable;
+import com.linbit.linstor.api.model.ResourceWithVolumes;
+import com.linbit.linstor.api.model.StoragePool;
+import com.linbit.linstor.api.model.VolumeDefinition;
+
+import javax.annotation.Nonnull;
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.StringJoiner;
+
+import com.cloud.storage.Storage;
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.cloudstack.utils.qemu.QemuImg;
+import org.apache.cloudstack.utils.qemu.QemuImgException;
+import org.apache.cloudstack.utils.qemu.QemuImgFile;
+import org.apache.log4j.Logger;
+import org.libvirt.LibvirtException;
+
+@StorageAdaptorInfo(storagePoolType=Storage.StoragePoolType.Linstor)
+public class LinstorStorageAdaptor implements StorageAdaptor {
+ private static final Logger s_logger = Logger.getLogger(LinstorStorageAdaptor.class);
+    private static final Map<String, KVMStoragePool> MapStorageUuidToStoragePool = new HashMap<>();
+ private final String localNodeName;
+
+ private DevelopersApi getLinstorAPI(KVMStoragePool pool) {
+ ApiClient client = Configuration.getDefaultApiClient();
+ client.setBasePath(pool.getSourceHost());
+ return new DevelopersApi(client);
+ }
+
+ private String getLinstorRscName(String name) {
+ return "cs-" + name;
+ }
+
+ private String getHostname() {
+        // TODO: check whether the agent already provides a hostname helper, or find a better way than shelling out.
+ ProcessBuilder pb = new ProcessBuilder("hostname");
+ try
+ {
+ String result;
+ Process p = pb.start();
+ final BufferedReader reader = new BufferedReader(new InputStreamReader(p.getInputStream()));
+
+ StringJoiner sj = new StringJoiner(System.getProperty("line.separator"));
+ reader.lines().iterator().forEachRemaining(sj::add);
+ result = sj.toString();
+
+ p.waitFor();
+ p.destroy();
+ return result.trim();
+ } catch (IOException | InterruptedException exc) {
+ throw new CloudRuntimeException("Unable to run 'hostname' command.");
+ }
+ }
+
+ private void logLinstorAnswer(@Nonnull ApiCallRc answer) {
+ if (answer.isError()) {
+ s_logger.error(answer.getMessage());
+ } else if (answer.isWarning()) {
+ s_logger.warn(answer.getMessage());
+ } else if (answer.isInfo()) {
+ s_logger.info(answer.getMessage());
+ }
+ }
+
+ private void checkLinstorAnswersThrow(@Nonnull ApiCallRcList answers) {
+ answers.forEach(this::logLinstorAnswer);
+ if (answers.hasError())
+ {
+ String errMsg = answers.stream()
+ .filter(ApiCallRc::isError)
+ .findFirst()
+ .map(ApiCallRc::getMessage).orElse("Unknown linstor error");
+ throw new CloudRuntimeException(errMsg);
+ }
+ }
+
+ private void handleLinstorApiAnswers(ApiCallRcList answers, String excMessage) {
+ answers.forEach(this::logLinstorAnswer);
+ if (answers.hasError()) {
+ throw new CloudRuntimeException(excMessage);
+ }
+ }
+
+ public LinstorStorageAdaptor() {
+ localNodeName = getHostname();
+ }
+
+ @Override
+ public KVMStoragePool getStoragePool(String uuid) {
+ return MapStorageUuidToStoragePool.get(uuid);
+ }
+
+ @Override
+ public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) {
+ s_logger.debug("Linstor getStoragePool: " + uuid + " -> " + refreshInfo);
+ return MapStorageUuidToStoragePool.get(uuid);
+ }
+
+ @Override
+ public KVMPhysicalDisk getPhysicalDisk(String name, KVMStoragePool pool)
+ {
+ s_logger.debug("Linstor: getPhysicalDisk for " + name);
+ if (name == null) {
+ return null;
+ }
+
+ final DevelopersApi api = getLinstorAPI(pool);
+ try {
+ final String rscName = getLinstorRscName(name);
+
+            List<VolumeDefinition> volumeDefs = api.volumeDefinitionList(rscName, null, null);
+ final long size = volumeDefs.isEmpty() ? 0 : volumeDefs.get(0).getSizeKib() * 1024;
+
+            List<ResourceWithVolumes> resources = api.viewResources(
+ Collections.emptyList(),
+ Collections.singletonList(rscName),
+ Collections.emptyList(),
+ null,
+ null,
+ null);
+ if (!resources.isEmpty() && !resources.get(0).getVolumes().isEmpty()) {
+ final String devPath = resources.get(0).getVolumes().get(0).getDevicePath();
+ final KVMPhysicalDisk kvmDisk = new KVMPhysicalDisk(devPath, name, pool);
+ kvmDisk.setFormat(QemuImg.PhysicalDiskFormat.RAW);
+ kvmDisk.setSize(size);
+ kvmDisk.setVirtualSize(size);
+ return kvmDisk;
+ } else {
+ s_logger.error("Linstor: viewResources didn't return resources or volumes for " + rscName);
+ throw new CloudRuntimeException("Linstor: viewResources didn't return resources or volumes.");
+ }
+ } catch (ApiException apiEx) {
+ s_logger.error(apiEx);
+ throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
+ }
+ }
+
+ @Override
+ public KVMStoragePool createStoragePool(String name, String host, int port, String path, String userInfo,
+ Storage.StoragePoolType type)
+ {
+ s_logger.debug(String.format(
+ "Linstor createStoragePool: name: '%s', host: '%s', path: %s, userinfo: %s", name, host, path, userInfo));
+ LinstorStoragePool storagePool = new LinstorStoragePool(name, host, port, userInfo, type, this);
+
+ MapStorageUuidToStoragePool.put(name, storagePool);
+
+ return storagePool;
+ }
+
+ @Override
+ public boolean deleteStoragePool(String uuid) {
+ return MapStorageUuidToStoragePool.remove(uuid) != null;
+ }
+
+ @Override
+ public boolean deleteStoragePool(KVMStoragePool pool) {
+ return deleteStoragePool(pool.getUuid());
+ }
+
+ @Override
+ public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, QemuImg.PhysicalDiskFormat format,
+ Storage.ProvisioningType provisioningType, long size)
+ {
+ final String rscName = getLinstorRscName(name);
+ LinstorStoragePool lpool = (LinstorStoragePool) pool;
+ final DevelopersApi api = getLinstorAPI(pool);
+
+ try {
+            List<ResourceDefinition> definitionList = api.resourceDefinitionList(
+ Collections.singletonList(rscName), null, null, null);
+
+ if (definitionList.isEmpty()) {
+ ResourceGroupSpawn rgSpawn = new ResourceGroupSpawn();
+ rgSpawn.setResourceDefinitionName(rscName);
+ rgSpawn.addVolumeSizesItem(size / 1024); // linstor uses KiB
+
+ s_logger.debug("Linstor: Spawn resource " + rscName);
+ ApiCallRcList answers = api.resourceGroupSpawn(lpool.getResourceGroup(), rgSpawn);
+ handleLinstorApiAnswers(answers, "Linstor: Unable to spawn resource.");
+ }
+
+ // query linstor for the device path
+            List<ResourceWithVolumes> resources = api.viewResources(
+ Collections.emptyList(),
+ Collections.singletonList(rscName),
+ Collections.emptyList(),
+ null,
+ null,
+ null);
+
+            // TODO make available on node
+
+ if (!resources.isEmpty() && !resources.get(0).getVolumes().isEmpty()) {
+ final String devPath = resources.get(0).getVolumes().get(0).getDevicePath();
+ s_logger.info("Linstor: Created drbd device: " + devPath);
+ final KVMPhysicalDisk kvmDisk = new KVMPhysicalDisk(devPath, name, pool);
+ kvmDisk.setFormat(QemuImg.PhysicalDiskFormat.RAW);
+ return kvmDisk;
+ } else {
+ s_logger.error("Linstor: viewResources didn't return resources or volumes.");
+ throw new CloudRuntimeException("Linstor: viewResources didn't return resources or volumes.");
+ }
+ } catch (ApiException apiEx) {
+ throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
+ }
+ }
+
+ @Override
+    public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map<String, String> details)
+ {
+ s_logger.debug(String.format("Linstor: connectPhysicalDisk %s:%s -> %s", pool.getUuid(), volumePath, details));
+ if (volumePath == null) {
+ s_logger.warn("volumePath is null, ignoring");
+ return false;
+ }
+
+ final DevelopersApi api = getLinstorAPI(pool);
+ try
+ {
+ final String rscName = getLinstorRscName(volumePath);
+
+ ResourceMakeAvailable rma = new ResourceMakeAvailable();
+ rma.setDiskful(true);
+ ApiCallRcList answers = api.resourceMakeAvailableOnNode(rscName, localNodeName, rma);
+ checkLinstorAnswersThrow(answers);
+
+ // allow 2 primaries for live migration, should be removed by disconnect on the other end
+ ResourceDefinitionModify rdm = new ResourceDefinitionModify();
+ Properties props = new Properties();
+ props.put("DrbdOptions/Net/allow-two-primaries", "yes");
+ rdm.setOverrideProps(props);
+ answers = api.resourceDefinitionModify(rscName, rdm);
+ if (answers.hasError()) {
+ s_logger.error("Unable to set 'allow-two-primaries' on " + rscName);
+ throw new CloudRuntimeException(answers.get(0).getMessage());
+ }
+ } catch (ApiException apiEx) {
+ s_logger.error(apiEx);
+ throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
+ }
+ return true;
+ }
+
+ @Override
+ public boolean disconnectPhysicalDisk(String volumePath, KVMStoragePool pool)
+ {
+ s_logger.debug("Linstor: disconnectPhysicalDisk " + pool.getUuid() + ":" + volumePath);
+ return true;
+ }
+
+ @Override
+    public boolean disconnectPhysicalDisk(Map<String, String> volumeToDisconnect)
+ {
+ s_logger.debug("Linstor: disconnectPhysicalDisk map");
+ return true;
+ }
+
+    private Optional<ResourceWithVolumes> getResourceByPath(final List<ResourceWithVolumes> resources, String path) {
+ return resources.stream()
+ .filter(rsc -> rsc.getVolumes().stream()
+ .anyMatch(v -> v.getDevicePath().equals(path)))
+ .findFirst();
+ }
+
+    /**
+     * disconnectPhysicalDiskByPath is called after e.g. a live migration.
+     * From the path alone we cannot tell which linstor-controller this
+     * resource belongs to. Since it is highly unlikely that more than one
+     * linstor-controller manages resources on the same KVM host,
+     * we simply use the first stored storage pool.
+     */
+ @Override
+ public boolean disconnectPhysicalDiskByPath(String localPath)
+ {
+ s_logger.debug("Linstor: disconnectPhysicalDiskByPath " + localPath);
+ // get first storage pool from the map, as we don't know any better:
+ if (!MapStorageUuidToStoragePool.isEmpty())
+ {
+ String firstKey = MapStorageUuidToStoragePool.keySet().stream().findFirst().get();
+ final KVMStoragePool pool = MapStorageUuidToStoragePool.get(firstKey);
+
+ s_logger.debug("Linstor: Using storpool: " + pool.getUuid());
+ final DevelopersApi api = getLinstorAPI(pool);
+
+ try
+ {
+                List<ResourceWithVolumes> resources = api.viewResources(
+ Collections.singletonList(localNodeName),
+ null,
+ null,
+ null,
+ null,
+ null);
+
+                Optional<ResourceWithVolumes> rsc = getResourceByPath(resources, localPath);
+
+ if (rsc.isPresent())
+ {
+ ResourceDefinitionModify rdm = new ResourceDefinitionModify();
+ rdm.deleteProps(Collections.singletonList("DrbdOptions/Net/allow-two-primaries"));
+ ApiCallRcList answers = api.resourceDefinitionModify(rsc.get().getName(), rdm);
+ if (answers.hasError())
+ {
+ s_logger.error("Failed to remove 'allow-two-primaries' on " + rsc.get().getName());
+ throw new CloudRuntimeException(answers.get(0).getMessage());
+ }
+
+ return true;
+ }
+ s_logger.warn("Linstor: Couldn't find resource for this path: " + localPath);
+ } catch (ApiException apiEx) {
+ s_logger.error(apiEx);
+ throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public boolean deletePhysicalDisk(String name, KVMStoragePool pool, Storage.ImageFormat format)
+ {
+ s_logger.debug("Linstor: deletePhysicalDisk " + name);
+ final DevelopersApi api = getLinstorAPI(pool);
+
+ try {
+ final String rscName = getLinstorRscName(name);
+ s_logger.debug("Linstor: delete resource definition " + rscName);
+ ApiCallRcList answers = api.resourceDefinitionDelete(rscName);
+ handleLinstorApiAnswers(answers, "Linstor: Unable to delete resource definition " + rscName);
+ } catch (ApiException apiEx) {
+ throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
+ }
+ return true;
+ }
+
+ @Override
+ public KVMPhysicalDisk createDiskFromTemplate(
+ KVMPhysicalDisk template,
+ String name,
+ QemuImg.PhysicalDiskFormat format,
+ Storage.ProvisioningType provisioningType,
+ long size,
+ KVMStoragePool destPool,
+ int timeout)
+ {
+ s_logger.info("Linstor: createDiskFromTemplate");
+ return copyPhysicalDisk(template, name, destPool, timeout);
+ }
+
+ @Override
+    public List<KVMPhysicalDisk> listPhysicalDisks(String storagePoolUuid, KVMStoragePool pool)
+ {
+ throw new UnsupportedOperationException("Listing disks is not supported for this configuration.");
+ }
+
+ @Override
+ public KVMPhysicalDisk createTemplateFromDisk(
+ KVMPhysicalDisk disk,
+ String name,
+ QemuImg.PhysicalDiskFormat format,
+ long size,
+ KVMStoragePool destPool)
+ {
+ throw new UnsupportedOperationException("Copying a template from disk is not supported in this configuration.");
+ }
+
+ @Override
+ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPools, int timeout)
+ {
+ s_logger.debug("Linstor: copyPhysicalDisk");
+ final QemuImg.PhysicalDiskFormat sourceFormat = disk.getFormat();
+ final String sourcePath = disk.getPath();
+ final QemuImg qemu = new QemuImg(timeout);
+
+ final QemuImgFile srcFile = new QemuImgFile(sourcePath, sourceFormat);
+
+ final KVMPhysicalDisk dstDisk = destPools.createPhysicalDisk(
+ name, QemuImg.PhysicalDiskFormat.RAW, Storage.ProvisioningType.FAT, disk.getVirtualSize());
+
+ final QemuImgFile destFile = new QemuImgFile(dstDisk.getPath());
+ destFile.setFormat(dstDisk.getFormat());
+ destFile.setSize(disk.getVirtualSize());
+
+ try {
+ qemu.convert(srcFile, destFile);
+ } catch (QemuImgException | LibvirtException e) {
+ s_logger.error(e);
+ destPools.deletePhysicalDisk(name, Storage.ImageFormat.RAW);
+ throw new CloudRuntimeException("Failed to copy " + disk.getPath() + " to " + name);
+ }
+
+ return dstDisk;
+ }
+
+ @Override
+ public KVMPhysicalDisk createDiskFromSnapshot(
+ KVMPhysicalDisk snapshot,
+ String snapshotName,
+ String name,
+ KVMStoragePool destPool,
+ int timeout)
+ {
+ s_logger.debug("Linstor: createDiskFromSnapshot");
+ return null;
+ }
+
+ @Override
+ public boolean refresh(KVMStoragePool pool)
+ {
+ s_logger.debug("Linstor: refresh");
+ return true;
+ }
+
+ @Override
+ public boolean createFolder(String uuid, String path)
+ {
+ throw new UnsupportedOperationException("A folder cannot be created in this configuration.");
+ }
+
+ @Override
+ public KVMPhysicalDisk createDiskFromTemplateBacking(
+ KVMPhysicalDisk template,
+ String name,
+ QemuImg.PhysicalDiskFormat format,
+ long size,
+ KVMStoragePool destPool,
+ int timeout)
+ {
+ s_logger.debug("Linstor: createDiskFromTemplateBacking");
+ return null;
+ }
+
+ @Override
+ public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath,
+ KVMStoragePool destPool, Storage.ImageFormat format,
+ int timeout)
+ {
+ s_logger.debug("Linstor: createTemplateFromDirectDownloadFile");
+ return null;
+ }
+
+ public long getCapacity(LinstorStoragePool pool) {
+ DevelopersApi linstorApi = getLinstorAPI(pool);
+ final String rscGroupName = pool.getResourceGroup();
+ try {
+            List<ResourceGroup> rscGrps = linstorApi.resourceGroupList(
+ Collections.singletonList(rscGroupName),
+ null,
+ null,
+ null);
+
+ if (rscGrps.isEmpty()) {
+ final String errMsg = String.format("Linstor: Resource group '%s' not found", rscGroupName);
+ s_logger.error(errMsg);
+ throw new CloudRuntimeException(errMsg);
+ }
+
+            List<StoragePool> storagePools = linstorApi.viewStoragePools(
+ Collections.emptyList(),
+ rscGrps.get(0).getSelectFilter().getStoragePoolList(),
+ null,
+ null,
+ null
+ );
+
+ final long capacity = storagePools.stream()
+ .filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS)
+ .mapToLong(sp -> sp.getTotalCapacity() != null ? sp.getTotalCapacity() : 0)
+ .sum() * 1024; // linstor uses kiB
+ s_logger.debug("Linstor: GetCapacity() -> " + capacity);
+ return capacity;
+ } catch (ApiException apiEx) {
+ s_logger.error(apiEx.getMessage());
+ throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
+ }
+ }
+
+ public long getAvailable(LinstorStoragePool pool) {
+ DevelopersApi linstorApi = getLinstorAPI(pool);
+ final String rscGroupName = pool.getResourceGroup();
+ try {
+            List<ResourceGroup> rscGrps = linstorApi.resourceGroupList(
+ Collections.singletonList(rscGroupName),
+ null,
+ null,
+ null);
+
+ if (rscGrps.isEmpty()) {
+ final String errMsg = String.format("Linstor: Resource group '%s' not found", rscGroupName);
+ s_logger.error(errMsg);
+ throw new CloudRuntimeException(errMsg);
+ }
+
+            List<StoragePool> storagePools = linstorApi.viewStoragePools(
+ Collections.emptyList(),
+ rscGrps.get(0).getSelectFilter().getStoragePoolList(),
+ null,
+ null,
+ null
+ );
+
+ final long free = storagePools.stream()
+ .filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS)
+ .mapToLong(StoragePool::getFreeCapacity).sum() * 1024; // linstor uses KiB
+
+ s_logger.debug("Linstor: getAvailable() -> " + free);
+ return free;
+ } catch (ApiException apiEx) {
+ s_logger.error(apiEx.getMessage());
+ throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
+ }
+ }
+
+ public long getUsed(LinstorStoragePool pool) {
+ DevelopersApi linstorApi = getLinstorAPI(pool);
+ final String rscGroupName = pool.getResourceGroup();
+ try {
+            List<ResourceGroup> rscGrps = linstorApi.resourceGroupList(
+ Collections.singletonList(rscGroupName),
+ null,
+ null,
+ null);
+
+ if (rscGrps.isEmpty()) {
+ final String errMsg = String.format("Linstor: Resource group '%s' not found", rscGroupName);
+ s_logger.error(errMsg);
+ throw new CloudRuntimeException(errMsg);
+ }
+
+            List<StoragePool> storagePools = linstorApi.viewStoragePools(
+ Collections.emptyList(),
+ rscGrps.get(0).getSelectFilter().getStoragePoolList(),
+ null,
+ null,
+ null
+ );
+
+ final long used = storagePools.stream()
+ .filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS)
+                .mapToLong(sp -> sp.getTotalCapacity() - sp.getFreeCapacity()).sum() * 1024; // linstor uses KiB
+ s_logger.debug("Linstor: getUsed() -> " + used);
+ return used;
+ } catch (ApiException apiEx) {
+ s_logger.error(apiEx.getMessage());
+ throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
+ }
+ }
+}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStoragePool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStoragePool.java
new file mode 100644
index 000000000000..0e8a4ed5bedb
--- /dev/null
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStoragePool.java
@@ -0,0 +1,191 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.hypervisor.kvm.storage;
+
+import java.util.List;
+import java.util.Map;
+
+import com.cloud.storage.Storage;
+import org.apache.cloudstack.utils.qemu.QemuImg;
+
+public class LinstorStoragePool implements KVMStoragePool {
+ private final String _uuid;
+ private final String _sourceHost;
+ private final int _sourcePort;
+ private final Storage.StoragePoolType _storagePoolType;
+ private final StorageAdaptor _storageAdaptor;
+ private final String _resourceGroup;
+
+ public LinstorStoragePool(String uuid, String host, int port, String resourceGroup,
+ Storage.StoragePoolType storagePoolType, StorageAdaptor storageAdaptor) {
+ _uuid = uuid;
+ _sourceHost = host;
+ _sourcePort = port;
+ _storagePoolType = storagePoolType;
+ _storageAdaptor = storageAdaptor;
+ _resourceGroup = resourceGroup;
+ }
+
+ @Override
+ public KVMPhysicalDisk createPhysicalDisk(String name, QemuImg.PhysicalDiskFormat format,
+ Storage.ProvisioningType provisioningType, long size)
+ {
+ return _storageAdaptor.createPhysicalDisk(name, this, format, provisioningType, size);
+ }
+
+ @Override
+ public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, Storage.ProvisioningType provisioningType, long size)
+ {
+ return _storageAdaptor.createPhysicalDisk(volumeUuid,this, getDefaultFormat(), provisioningType, size);
+ }
+
+ @Override
+    public boolean connectPhysicalDisk(String volumeUuid, Map<String, String> details)
+ {
+ return _storageAdaptor.connectPhysicalDisk(volumeUuid, this, details);
+ }
+
+ @Override
+ public KVMPhysicalDisk getPhysicalDisk(String volumeUuid)
+ {
+ return _storageAdaptor.getPhysicalDisk(volumeUuid, this);
+ }
+
+ @Override
+ public boolean disconnectPhysicalDisk(String volumeUuid)
+ {
+ return _storageAdaptor.disconnectPhysicalDisk(volumeUuid, this);
+ }
+
+ @Override
+ public boolean deletePhysicalDisk(String volumeUuid, Storage.ImageFormat format)
+ {
+ return _storageAdaptor.deletePhysicalDisk(volumeUuid, this, format);
+ }
+
+ @Override
+    public List<KVMPhysicalDisk> listPhysicalDisks()
+ {
+ return _storageAdaptor.listPhysicalDisks(_uuid, this);
+ }
+
+ @Override
+ public String getUuid()
+ {
+ return _uuid;
+ }
+
+ @Override
+ public long getCapacity()
+ {
+ return ((LinstorStorageAdaptor)_storageAdaptor).getCapacity(this);
+ }
+
+ @Override
+ public long getUsed()
+ {
+ return ((LinstorStorageAdaptor)_storageAdaptor).getUsed(this);
+ }
+
+ @Override
+ public long getAvailable()
+ {
+ return ((LinstorStorageAdaptor)_storageAdaptor).getAvailable(this);
+ }
+
+ @Override
+ public boolean refresh()
+ {
+ return _storageAdaptor.refresh(this);
+ }
+
+ @Override
+ public boolean isExternalSnapshot()
+ {
+ return true;
+ }
+
+ @Override
+ public String getLocalPath()
+ {
+ return null;
+ }
+
+ @Override
+ public String getSourceHost()
+ {
+ return _sourceHost;
+ }
+
+ @Override
+ public String getSourceDir()
+ {
+ return null;
+ }
+
+ @Override
+ public int getSourcePort()
+ {
+ return _sourcePort;
+ }
+
+ @Override
+ public String getAuthUserName()
+ {
+ return null;
+ }
+
+ @Override
+ public String getAuthSecret()
+ {
+ return null;
+ }
+
+ @Override
+ public Storage.StoragePoolType getType()
+ {
+ return _storagePoolType;
+ }
+
+ @Override
+ public boolean delete()
+ {
+ return _storageAdaptor.deleteStoragePool(this);
+ }
+
+ @Override
+ public QemuImg.PhysicalDiskFormat getDefaultFormat()
+ {
+ return QemuImg.PhysicalDiskFormat.RAW;
+ }
+
+ @Override
+ public boolean createFolder(String path)
+ {
+ return _storageAdaptor.createFolder(_uuid, path);
+ }
+
+ @Override
+ public boolean supportsConfigDriveIso()
+ {
+ return false;
+ }
+
+ public String getResourceGroup() {
+ return _resourceGroup;
+ }
+}
diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java
index fbf1ec614ada..27399eadf8e0 100644
--- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java
+++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java
@@ -4880,6 +4880,46 @@ public void testResizeVolumeCommand() {
}
}
+ @Test
+ public void testResizeVolumeCommandLinstorNotifyOnly() {
+ final String path = "/dev/drbd1000";
+ final StorageFilerTO pool = Mockito.mock(StorageFilerTO.class);
+ final Long currentSize = 100l;
+ final Long newSize = 200l;
+ final boolean shrinkOk = false;
+ final String vmInstance = "Test";
+
+ final ResizeVolumeCommand command = new ResizeVolumeCommand(path, pool, currentSize, newSize, shrinkOk, vmInstance);
+
+ final KVMStoragePoolManager storagePoolMgr = Mockito.mock(KVMStoragePoolManager.class);
+ final KVMStoragePool storagePool = Mockito.mock(KVMStoragePool.class);
+ final KVMPhysicalDisk vol = Mockito.mock(KVMPhysicalDisk.class);
+ final LibvirtUtilitiesHelper libvirtUtilitiesHelper = Mockito.mock(LibvirtUtilitiesHelper.class);
+
+ when(libvirtComputingResource.getStoragePoolMgr()).thenReturn(storagePoolMgr);
+ when(storagePoolMgr.getStoragePool(pool.getType(), pool.getUuid())).thenReturn(storagePool);
+ when(storagePool.getPhysicalDisk(path)).thenReturn(vol);
+ when(vol.getPath()).thenReturn(path);
+ when(storagePool.getType()).thenReturn(StoragePoolType.Linstor);
+ when(vol.getFormat()).thenReturn(PhysicalDiskFormat.RAW);
+
+ final LibvirtRequestWrapper wrapper = LibvirtRequestWrapper.getInstance();
+ assertNotNull(wrapper);
+
+ final Answer answer = wrapper.execute(command, libvirtComputingResource);
+ assertTrue(answer.getResult());
+
+ verify(libvirtComputingResource, times(1)).getStoragePoolMgr();
+ verify(libvirtComputingResource, times(0)).getResizeScriptType(storagePool, vol);
+
+ verify(libvirtComputingResource, times(0)).getLibvirtUtilitiesHelper();
+ try {
+ verify(libvirtUtilitiesHelper, times(0)).getConnection();
+ } catch (final LibvirtException e) {
+ fail(e.getMessage());
+ }
+ }
+
@Test
public void testResizeVolumeCommandSameSize() {
final String path = "nfs:/127.0.0.1/storage/secondary";
diff --git a/plugins/pom.xml b/plugins/pom.xml
index 29cfbc18503b..e43f9b1976a9 100755
--- a/plugins/pom.xml
+++ b/plugins/pom.xml
@@ -122,6 +122,7 @@
storage/volume/sample
storage/volume/solidfire
storage/volume/scaleio
+ storage/volume/linstor
storage-allocators/random
diff --git a/plugins/storage/volume/cloudbyte/pom.xml b/plugins/storage/volume/cloudbyte/pom.xml
index 739fcecd7c6b..82c8d6233c5d 100644
--- a/plugins/storage/volume/cloudbyte/pom.xml
+++ b/plugins/storage/volume/cloudbyte/pom.xml
@@ -48,8 +48,9 @@
gson
- com.sun.jersey
- jersey-bundle
+ org.glassfish.jersey.core
+ jersey-client
+ ${cs.jersey-client.version}
diff --git a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ElastistorUtil.java b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ElastistorUtil.java
index 58ee9f020d07..47b89b16b6af 100644
--- a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ElastistorUtil.java
+++ b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ElastistorUtil.java
@@ -20,20 +20,17 @@
package org.apache.cloudstack.storage.datastore.util;
import com.cloud.agent.api.Answer;
+import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import com.google.gson.Gson;
import com.google.gson.annotations.SerializedName;
-import com.sun.jersey.api.client.Client;
-import com.sun.jersey.api.client.ClientResponse;
-import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.api.client.config.ClientConfig;
-import com.sun.jersey.api.client.config.DefaultClientConfig;
-import com.sun.jersey.core.util.MultivaluedMapImpl;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.utils.security.SSLUtils;
import org.apache.cloudstack.utils.security.SecureSSLSocketFactory;
import org.apache.http.auth.InvalidCredentialsException;
import org.apache.log4j.Logger;
+import org.glassfish.jersey.client.ClientConfig;
+import org.glassfish.jersey.client.ClientResponse;
import javax.naming.ServiceUnavailableException;
import javax.net.ssl.HostnameVerifier;
@@ -43,14 +40,18 @@
import javax.net.ssl.SSLSession;
import javax.net.ssl.TrustManager;
import javax.net.ssl.X509TrustManager;
+import javax.ws.rs.client.Client;
+import javax.ws.rs.client.ClientBuilder;
+import javax.ws.rs.client.WebTarget;
import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.core.UriBuilder;
import java.net.ConnectException;
import java.security.InvalidParameterException;
import java.security.SecureRandom;
import java.security.cert.X509Certificate;
+import java.util.ArrayList;
import java.util.HashMap;
+import java.util.List;
public class ElastistorUtil {
@@ -960,7 +961,7 @@ static interface ElastiCenterCommand {
* Returns null if there are query parameters associated with the
* command
*/
- public MultivaluedMap getCommandParameters();
+ public List> getCommandParameters();
/*
* Adds new key-value pair to the query paramters lists.
@@ -978,7 +979,7 @@ static interface ElastiCenterCommand {
private static class BaseCommand implements ElastiCenterCommand {
private String commandName = null;
- private MultivaluedMap commandParameters = null;
+ private List> commandParameters = null;
private Object responseObject = null;
/*
@@ -1003,16 +1004,16 @@ public boolean validate() {
}
@Override
- public MultivaluedMap getCommandParameters() {
+ public List> getCommandParameters() {
return commandParameters;
}
@Override
public void putCommandParameter(String key, String value) {
if (null == commandParameters) {
- commandParameters = new MultivaluedMapImpl();
+ commandParameters = new ArrayList<>();
}
- commandParameters.add(key, value);
+ commandParameters.add(new Pair<>(key, value));
}
@Override
@@ -1134,7 +1135,7 @@ public Object executeCommand(ElastiCenterCommand cmd) throws Throwable {
return executeCommand(cmd.getCommandName(), cmd.getCommandParameters(), cmd.getResponseObject());
}
- public Object executeCommand(String command, MultivaluedMap params, Object responeObj) throws Throwable {
+ public Object executeCommand(String command, List> params, Object responeObj) throws Throwable {
if (!initialized) {
throw new IllegalStateException("Error : ElastiCenterClient is not initialized.");
@@ -1145,25 +1146,27 @@ public Object executeCommand(String command, MultivaluedMap para
}
try {
- ClientConfig config = new DefaultClientConfig();
- Client client = Client.create(config);
- WebResource webResource = client.resource(UriBuilder.fromUri(restprotocol + elastiCenterAddress + restpath).build());
-
- MultivaluedMap queryParams = new MultivaluedMapImpl();
- queryParams.add(queryparamapikey, apiKey);
- queryParams.add(queryparamresponse, responseType);
-
- queryParams.add(queryparamcommand, command);
+ ClientConfig config = new ClientConfig();
+ Client client = ClientBuilder.newClient(config);
+ WebTarget webResource = client.target(UriBuilder.fromUri(restprotocol + elastiCenterAddress + restpath).build())
+ .queryParam(queryparamapikey, apiKey)
+ .queryParam(queryparamresponse, responseType)
+ .queryParam(queryparamcommand, command);
if (null != params) {
- for (String key : params.keySet()) {
- queryParams.add(key, params.getFirst(key));
+ for (Pair pair : params) {
+ webResource = webResource.queryParam(pair.first(), pair.second());
}
}
if (debug) {
- System.out.println("Command Sent " + command + " : " + queryParams);
+ List> qryParams = new ArrayList<>();
+ qryParams.add(new Pair<>(queryparamapikey, apiKey));
+ qryParams.add(new Pair<>(queryparamresponse, responseType));
+ qryParams.add(new Pair<>(queryparamcommand, command));
+ if (null != params) { qryParams.addAll(params); }
+ System.out.println("Command Sent " + command + " : " + qryParams);
}
- ClientResponse response = webResource.queryParams(queryParams).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+ ClientResponse response = webResource.request(MediaType.APPLICATION_JSON).get(ClientResponse.class);
if (response.getStatus() >= 300) {
if (debug)
@@ -1178,7 +1181,7 @@ public Object executeCommand(String command, MultivaluedMap para
throw new ServiceUnavailableException("Internal Error. Please contact your ElastiCenter Administrator.");
}
} else if (null != responeObj) {
- String jsonResponse = response.getEntity(String.class);
+ String jsonResponse = response.readEntity(String.class);
if (debug) {
System.out.println("Command Response : " + jsonResponse);
}
diff --git a/plugins/storage/volume/linstor/pom.xml b/plugins/storage/volume/linstor/pom.xml
new file mode 100644
index 000000000000..895d3482e935
--- /dev/null
+++ b/plugins/storage/volume/linstor/pom.xml
@@ -0,0 +1,60 @@
+
+
+ 4.0.0
+ cloud-plugin-storage-volume-linstor
+ Apache CloudStack Plugin - Storage Volume Linstor provider
+
+ org.apache.cloudstack
+ cloudstack-plugins
+ 4.16.0.0-SNAPSHOT
+ ../../../pom.xml
+
+
+
+ org.apache.cloudstack
+ cloud-engine-storage-volume
+ ${project.version}
+
+
+ com.linbit.linstor.api
+ java-linstor
+ ${cs.java-linstor.version}
+
+
+
+
+
+ maven-surefire-plugin
+
+ true
+
+
+
+ integration-test
+
+ test
+
+
+
+
+
+
+
diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java
new file mode 100644
index 000000000000..19cf297b5ad4
--- /dev/null
+++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java
@@ -0,0 +1,768 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.datastore.driver;
+
+import com.linbit.linstor.api.ApiException;
+import com.linbit.linstor.api.CloneWaiter;
+import com.linbit.linstor.api.DevelopersApi;
+import com.linbit.linstor.api.model.ApiCallRc;
+import com.linbit.linstor.api.model.ApiCallRcList;
+import com.linbit.linstor.api.model.ResourceDefinition;
+import com.linbit.linstor.api.model.ResourceDefinitionCloneRequest;
+import com.linbit.linstor.api.model.ResourceDefinitionCloneStarted;
+import com.linbit.linstor.api.model.ResourceDefinitionCreate;
+import com.linbit.linstor.api.model.ResourceGroupSpawn;
+import com.linbit.linstor.api.model.ResourceWithVolumes;
+import com.linbit.linstor.api.model.Snapshot;
+import com.linbit.linstor.api.model.SnapshotRestore;
+import com.linbit.linstor.api.model.VolumeDefinitionModify;
+
+import javax.annotation.Nonnull;
+import javax.inject.Inject;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.storage.ResizeVolumeAnswer;
+import com.cloud.agent.api.storage.ResizeVolumeCommand;
+import com.cloud.agent.api.to.DataStoreTO;
+import com.cloud.agent.api.to.DataTO;
+import com.cloud.agent.api.to.DiskTO;
+import com.cloud.agent.api.to.StorageFilerTO;
+import com.cloud.host.Host;
+import com.cloud.storage.ResizeVolumePayload;
+import com.cloud.storage.SnapshotVO;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.VMTemplateStoragePoolVO;
+import com.cloud.storage.VolumeDetailVO;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.SnapshotDao;
+import com.cloud.storage.dao.SnapshotDetailsDao;
+import com.cloud.storage.dao.SnapshotDetailsVO;
+import com.cloud.storage.dao.VMTemplatePoolDao;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.storage.dao.VolumeDetailsDao;
+import com.cloud.utils.Pair;
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
+import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
+import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
+import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
+import org.apache.cloudstack.storage.command.CommandResult;
+import org.apache.cloudstack.storage.command.CreateObjectAnswer;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.datastore.util.LinstorUtil;
+import org.apache.cloudstack.storage.to.SnapshotObjectTO;
+import org.apache.cloudstack.storage.volume.VolumeObject;
+import org.apache.log4j.Logger;
+
+public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver {
+ private static final Logger s_logger = Logger.getLogger(LinstorPrimaryDataStoreDriverImpl.class);
+ @Inject private PrimaryDataStoreDao _storagePoolDao;
+ @Inject private VolumeDao _volumeDao;
+ @Inject private VolumeDetailsDao _volumeDetailsDao;
+ @Inject private VMTemplatePoolDao _vmTemplatePoolDao;
+ @Inject private SnapshotDao _snapshotDao;
+ @Inject private SnapshotDetailsDao _snapshotDetailsDao;
+ @Inject private StorageManager _storageMgr;
+
+ public LinstorPrimaryDataStoreDriverImpl()
+ {
+ }
+
+ @Override
+ public Map getCapabilities()
+ {
+ Map mapCapabilities = new HashMap<>();
+
+ // Linstor will be restricted to only run on LVM-THIN and ZFS storage pools with ACS
+ mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString());
+
+ // fetch if lvm-thin or ZFS
+ mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.TRUE.toString());
+
+ // CAN_CREATE_VOLUME_FROM_SNAPSHOT see note from CAN_CREATE_VOLUME_FROM_VOLUME
+ mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString());
+ mapCapabilities.put(DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString(), Boolean.TRUE.toString());
+
+ return mapCapabilities;
+ }
+
+ @Override
+ public DataTO getTO(DataObject data)
+ {
+ return null;
+ }
+
+ @Override
+ public DataStoreTO getStoreTO(DataStore store)
+ {
+ return null;
+ }
+
+ @Override
+ public ChapInfo getChapInfo(DataObject dataObject)
+ {
+ return null;
+ }
+
+ @Override
+ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore)
+ {
+ return false;
+ }
+
+ @Override
+ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore)
+ {
+ }
+
+ @Override
+ public long getUsedBytes(StoragePool storagePool)
+ {
+ return 0;
+ }
+
+ @Override
+ public long getUsedIops(StoragePool storagePool)
+ {
+ return 0;
+ }
+
+ @Override
+ public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataObject, StoragePool pool)
+ {
+ return dataObject.getSize();
+ }
+
+ @Override
+ public long getBytesRequiredForTemplate(TemplateInfo templateInfo, StoragePool storagePool)
+ {
+ return 0;
+ }
+
+ private String getSnapshotName(String snapshotUuid) {
+ return LinstorUtil.RSC_PREFIX + snapshotUuid;
+ }
+
+ private void deleteResourceDefinition(StoragePoolVO storagePoolVO, String rscDefName)
+ {
+ DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress());
+
+ try
+ {
+ ApiCallRcList answers = linstorApi.resourceDefinitionDelete(rscDefName);
+ if (answers.hasError())
+ {
+ for (ApiCallRc answer : answers)
+ {
+ s_logger.error(answer.getMessage());
+ }
+ throw new CloudRuntimeException("Linstor: Unable to delete resource definition: " + rscDefName);
+ }
+ } catch (ApiException apiEx)
+ {
+ s_logger.error("Linstor: ApiEx - " + apiEx.getMessage());
+ throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
+ }
+ }
+
+ private void deleteSnapshot(@Nonnull DataStore dataStore, @Nonnull String rscDefName, @Nonnull String snapshotName)
+ {
+ StoragePoolVO storagePool = _storagePoolDao.findById(dataStore.getId());
+ DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePool.getHostAddress());
+
+ try
+ {
+ ApiCallRcList answers = linstorApi.resourceSnapshotDelete(rscDefName, snapshotName);
+ if (answers.hasError())
+ {
+ for (ApiCallRc answer : answers)
+ {
+ s_logger.error(answer.getMessage());
+ }
+ throw new CloudRuntimeException("Linstor: Unable to delete snapshot: " + snapshotName + " of resource: " + rscDefName);
+ }
+ } catch (ApiException apiEx)
+ {
+ s_logger.error("Linstor: ApiEx - " + apiEx.getMessage());
+ throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
+ }
+ }
+
+ private long getCsIdForCloning(long volumeId, String cloneOf) {
+ VolumeDetailVO volumeDetail = _volumeDetailsDao.findDetail(volumeId, cloneOf);
+
+ if (volumeDetail != null && volumeDetail.getValue() != null) {
+ return Long.parseLong(volumeDetail.getValue());
+ }
+
+ return Long.MIN_VALUE;
+ }
+
+ @Override
+ public void deleteAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback callback)
+ {
+ s_logger.debug("deleteAsync: " + dataObject.getType() + ";" + dataObject.getUuid());
+ String errMsg = null;
+
+ final long storagePoolId = dataStore.getId();
+ final StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId);
+
+ switch (dataObject.getType()) {
+ case VOLUME:
+ {
+ final VolumeInfo volumeInfo = (VolumeInfo) dataObject;
+ final String rscName = LinstorUtil.RSC_PREFIX + volumeInfo.getPath();
+ deleteResourceDefinition(storagePool, rscName);
+
+ long usedBytes = storagePool.getUsedBytes();
+ long capacityIops = storagePool.getCapacityIops();
+
+ usedBytes -= volumeInfo.getSize();
+ if (volumeInfo.getMaxIops() != null)
+ capacityIops += volumeInfo.getMaxIops();
+
+ storagePool.setUsedBytes(Math.max(0, usedBytes));
+ storagePool.setCapacityIops(Math.max(0, capacityIops));
+
+ _storagePoolDao.update(storagePoolId, storagePool);
+ }
+ break;
+ case SNAPSHOT:
+ final SnapshotInfo snapshotInfo = (SnapshotInfo) dataObject;
+ final String rscName = LinstorUtil.RSC_PREFIX + snapshotInfo.getBaseVolume().getPath();
+ deleteSnapshot(dataStore, rscName, getSnapshotName(snapshotInfo.getUuid()));
+ long usedBytes = storagePool.getUsedBytes() - snapshotInfo.getSize();
+ storagePool.setUsedBytes(Math.max(0, usedBytes));
+ _storagePoolDao.update(storagePoolId, storagePool);
+ break;
+ default:
+ errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to deleteAsync";
+ s_logger.error(errMsg);
+ }
+
+ if (callback != null) {
+ CommandResult result = new CommandResult();
+ result.setResult(errMsg);
+
+ callback.complete(result);
+ }
+ }
+
+ private void logLinstorAnswer(@Nonnull ApiCallRc answer) {
+ if (answer.isError()) {
+ s_logger.error(answer.getMessage());
+ } else if (answer.isWarning()) {
+ s_logger.warn(answer.getMessage());
+ } else if (answer.isInfo()) {
+ s_logger.info(answer.getMessage());
+ }
+ }
+
+ private void logLinstorAnswers(@Nonnull ApiCallRcList answers) {
+ answers.forEach(this::logLinstorAnswer);
+ }
+
+ private void checkLinstorAnswersThrow(@Nonnull ApiCallRcList answers) {
+ logLinstorAnswers(answers);
+ if (answers.hasError())
+ {
+ String errMsg = answers.stream()
+ .filter(ApiCallRc::isError)
+ .findFirst()
+ .map(ApiCallRc::getMessage).orElse("Unknown linstor error");
+ throw new CloudRuntimeException(errMsg);
+ }
+ }
+
+ private String checkLinstorAnswers(@Nonnull ApiCallRcList answers) {
+ logLinstorAnswers(answers);
+ return answers.stream().filter(ApiCallRc::isError).findFirst().map(ApiCallRc::getMessage).orElse(null);
+ }
+
+ private String getDeviceName(DevelopersApi linstorApi, String rscName) throws ApiException {
+ List resources = linstorApi.viewResources(
+ Collections.emptyList(),
+ Collections.singletonList(rscName),
+ Collections.emptyList(),
+ null,
+ null,
+ null);
+ if (!resources.isEmpty() && !resources.get(0).getVolumes().isEmpty())
+ {
+ s_logger.info("Linstor: Created drbd device: " + resources.get(0).getVolumes().get(0).getDevicePath());
+ return resources.get(0).getVolumes().get(0).getDevicePath();
+ } else
+ {
+ s_logger.error("Linstor: viewResources didn't return resources or volumes.");
+ throw new CloudRuntimeException("Linstor: viewResources didn't return resources or volumes.");
+ }
+ }
+
+ private String createResource(VolumeInfo vol, StoragePoolVO storagePoolVO)
+ {
+ DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress());
+ final String rscGrp = storagePoolVO.getUserInfo() != null && !storagePoolVO.getUserInfo().isEmpty() ?
+ storagePoolVO.getUserInfo() : "DfltRscGrp";
+
+ ResourceGroupSpawn rscGrpSpawn = new ResourceGroupSpawn();
+ final String rscName = LinstorUtil.RSC_PREFIX + vol.getUuid();
+ rscGrpSpawn.setResourceDefinitionName(rscName);
+ rscGrpSpawn.addVolumeSizesItem(vol.getSize() / 1024);
+
+ try
+ {
+ s_logger.debug("Linstor: Spawn resource " + rscName);
+ ApiCallRcList answers = linstorApi.resourceGroupSpawn(rscGrp, rscGrpSpawn);
+ checkLinstorAnswersThrow(answers);
+
+ return getDeviceName(linstorApi, rscName);
+ } catch (ApiException apiEx)
+ {
+ s_logger.error("Linstor: ApiEx - " + apiEx.getMessage());
+ throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
+ }
+ }
+
+ private String cloneResource(long csCloneId, VolumeInfo volumeInfo, StoragePoolVO storagePoolVO) {
+ // get the cached template on this storage
+ VMTemplateStoragePoolVO tmplPoolRef = _vmTemplatePoolDao.findByPoolTemplate(
+ storagePoolVO.getId(), csCloneId, null);
+
+ if (tmplPoolRef != null) {
+ final String cloneRes = LinstorUtil.RSC_PREFIX + tmplPoolRef.getLocalDownloadPath();
+ final String rscName = LinstorUtil.RSC_PREFIX + volumeInfo.getUuid();
+ final DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress());
+
+ try {
+ s_logger.debug("Clone resource definition " + cloneRes + " to " + rscName);
+ ResourceDefinitionCloneRequest cloneRequest = new ResourceDefinitionCloneRequest();
+ cloneRequest.setName(rscName);
+ ResourceDefinitionCloneStarted cloneStarted = linstorApi.resourceDefinitionClone(
+ cloneRes, cloneRequest);
+
+ checkLinstorAnswersThrow(cloneStarted.getMessages());
+
+ if (!CloneWaiter.waitFor(linstorApi, cloneStarted)) {
+ throw new CloudRuntimeException("Clone for resource " + rscName + " failed.");
+ }
+
+ return getDeviceName(linstorApi, rscName);
+ } catch (ApiException apiEx) {
+ s_logger.error("Linstor: ApiEx - " + apiEx.getMessage());
+ throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
+ }
+ } else {
+ throw new CloudRuntimeException(
+ "Unable to find Linstor resource for the following template data-object ID: " + csCloneId);
+ }
+ }
+
+ private String createResourceFromSnapshot(long csSnapshotId, String rscName, StoragePoolVO storagePoolVO) {
+ final String rscGrp = storagePoolVO.getUserInfo() != null && !storagePoolVO.getUserInfo().isEmpty() ?
+ storagePoolVO.getUserInfo() : "DfltRscGrp";
+ final DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress());
+
+ SnapshotVO snapshotVO = _snapshotDao.findById(csSnapshotId);
+ String snapName = LinstorUtil.RSC_PREFIX + snapshotVO.getUuid();
+ VolumeVO volumeVO = _volumeDao.findById(snapshotVO.getVolumeId());
+ String cloneRes = LinstorUtil.RSC_PREFIX + volumeVO.getPath();
+
+ try
+ {
+ s_logger.debug("Create new resource definition: " + rscName);
+ ResourceDefinitionCreate rdCreate = new ResourceDefinitionCreate();
+ ResourceDefinition rd = new ResourceDefinition();
+ rd.setName(rscName);
+ rd.setResourceGroupName(rscGrp);
+ rdCreate.setResourceDefinition(rd);
+ ApiCallRcList answers = linstorApi.resourceDefinitionCreate(rdCreate);
+ checkLinstorAnswersThrow(answers);
+
+ SnapshotRestore snapshotRestore = new SnapshotRestore();
+ snapshotRestore.toResource(rscName);
+
+ s_logger.debug("Create new volume definition for snapshot: " + cloneRes + ":" + snapName);
+ answers = linstorApi.resourceSnapshotsRestoreVolumeDefinition(cloneRes, snapName, snapshotRestore);
+ checkLinstorAnswersThrow(answers);
+
+ // restore snapshot to new resource
+ s_logger.debug("Restore resource from snapshot: " + cloneRes + ":" + snapName);
+ answers = linstorApi.resourceSnapshotRestore(cloneRes, snapName, snapshotRestore);
+ checkLinstorAnswersThrow(answers);
+
+ return getDeviceName(linstorApi, rscName);
+ } catch (ApiException apiEx) {
+ s_logger.error("Linstor: ApiEx - " + apiEx.getMessage());
+ throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
+ }
+ }
+
+ private String createVolume(VolumeInfo volumeInfo, StoragePoolVO storagePoolVO) {
+ long csSnapshotId = getCsIdForCloning(volumeInfo.getId(), "cloneOfSnapshot");
+ long csTemplateId = getCsIdForCloning(volumeInfo.getId(), "cloneOfTemplate");
+
+ if (csSnapshotId > 0) {
+ return createResourceFromSnapshot(csSnapshotId, LinstorUtil.RSC_PREFIX + volumeInfo.getUuid(), storagePoolVO);
+ } else if (csTemplateId > 0) {
+ return cloneResource(csTemplateId, volumeInfo, storagePoolVO);
+ } else {
+ return createResource(volumeInfo, storagePoolVO);
+ }
+ }
+
+ private void handleSnapshotDetails(long csSnapshotId, String name, String value) {
+ _snapshotDetailsDao.removeDetail(csSnapshotId, name);
+ SnapshotDetailsVO snapshotDetails = new SnapshotDetailsVO(csSnapshotId, name, value, false);
+ _snapshotDetailsDao.persist(snapshotDetails);
+ }
+
+ private void addTempVolumeToDb(long csSnapshotId, String tempVolumeName) {
+ // TEMP_VOLUME_ID is needed to find which temporary resource should be deleted after copying it on the agent side
+ handleSnapshotDetails(csSnapshotId, LinstorUtil.TEMP_VOLUME_ID, LinstorUtil.RSC_PREFIX + tempVolumeName);
+ // the iqn will be used on the agent side to copy from, even though linstor doesn't have anything to do with IQN
+ handleSnapshotDetails(csSnapshotId, DiskTO.IQN, tempVolumeName);
+ }
+
+ private void removeTempVolumeFromDb(long csSnapshotId) {
+ SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(csSnapshotId, LinstorUtil.TEMP_VOLUME_ID);
+
+ if (snapshotDetails == null || snapshotDetails.getValue() == null) {
+ throw new CloudRuntimeException(
+ "'removeTempVolumeId' should not be invoked unless " + LinstorUtil.TEMP_VOLUME_ID + " exists.");
+ }
+
+ String originalVolumeId = snapshotDetails.getValue();
+
+ handleSnapshotDetails(csSnapshotId, LinstorUtil.TEMP_VOLUME_ID, originalVolumeId);
+
+ _snapshotDetailsDao.remove(snapshotDetails.getId());
+ }
+
+ private void createVolumeFromSnapshot(SnapshotInfo snapshotInfo, StoragePoolVO storagePoolVO) {
+ long csSnapshotId = snapshotInfo.getId();
+
+ SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(csSnapshotId, "tempVolume");
+
+ if (snapshotDetails != null && snapshotDetails.getValue() != null &&
+ snapshotDetails.getValue().equalsIgnoreCase("create"))
+ {
+ final String csName = "Temp-" + snapshotInfo.getUuid();
+ final String tempRscName = LinstorUtil.RSC_PREFIX + csName;
+ createResourceFromSnapshot(csSnapshotId, tempRscName, storagePoolVO);
+
+ s_logger.debug("Temp resource created: " + tempRscName);
+ addTempVolumeToDb(csSnapshotId, csName);
+ }
+ else if (snapshotDetails != null && snapshotDetails.getValue() != null &&
+ snapshotDetails.getValue().equalsIgnoreCase("delete"))
+ {
+ snapshotDetails = _snapshotDetailsDao.findDetail(csSnapshotId, LinstorUtil.TEMP_VOLUME_ID);
+
+ deleteResourceDefinition(storagePoolVO, snapshotDetails.getValue());
+
+ s_logger.debug("Temp resource deleted: " + snapshotDetails.getValue());
+ removeTempVolumeFromDb(csSnapshotId);
+ }
+ else {
+ throw new CloudRuntimeException("Invalid state in 'createVolumeFromSnapshot(SnapshotInfo, StoragePoolVO)'");
+ }
+ }
+
+ @Override
+ public void createAsync(DataStore dataStore, DataObject vol, AsyncCompletionCallback callback)
+ {
+ String devPath = null;
+ String errMsg = null;
+ StoragePoolVO storagePool = _storagePoolDao.findById(dataStore.getId());
+
+ try
+ {
+ switch (vol.getType())
+ {
+ case VOLUME:
+ VolumeInfo volumeInfo = (VolumeInfo) vol;
+ VolumeVO volume = _volumeDao.findById(volumeInfo.getId());
+ s_logger.debug("createAsync - creating volume");
+ devPath = createVolume(volumeInfo, storagePool);
+ volume.setFolder("/dev/");
+ volume.setPoolId(storagePool.getId());
+ volume.setUuid(vol.getUuid());
+ volume.setPath(vol.getUuid());
+
+ _volumeDao.update(volume.getId(), volume);
+ break;
+ case SNAPSHOT:
+ s_logger.debug("createAsync - SNAPSHOT");
+ createVolumeFromSnapshot((SnapshotInfo) vol, storagePool);
+ break;
+ case TEMPLATE:
+ errMsg = "creating template - not supported";
+ s_logger.error("createAsync - " + errMsg);
+ break;
+ default:
+ errMsg = "Invalid DataObjectType (" + vol.getType() + ") passed to createAsync";
+ s_logger.error(errMsg);
+ }
+ } catch (Exception ex)
+ {
+ errMsg = ex.getMessage();
+
+ s_logger.error("createAsync: " + errMsg);
+ if (callback == null)
+ {
+ throw ex;
+ }
+ }
+
+ if (callback != null)
+ {
+ CreateCmdResult result = new CreateCmdResult(devPath, new Answer(null, errMsg == null, errMsg));
+ result.setResult(errMsg);
+ callback.complete(result);
+ }
+ }
+
+ @Override
+ public void revertSnapshot(
+ SnapshotInfo snapshot,
+ SnapshotInfo snapshotOnPrimaryStore,
+ AsyncCompletionCallback callback)
+ {
+ s_logger.debug("Linstor: revertSnapshot");
+ final VolumeInfo volumeInfo = snapshot.getBaseVolume();
+ VolumeVO volumeVO = _volumeDao.findById(volumeInfo.getId());
+ if (volumeVO == null || volumeVO.getRemoved() != null) {
+ CommandResult commandResult = new CommandResult();
+ commandResult.setResult("The volume that the snapshot belongs to no longer exists.");
+ if (callback != null) { callback.complete(commandResult); }
+ return;
+ }
+
+ String resultMsg;
+ try {
+ final StoragePool pool = (StoragePool) snapshot.getDataStore();
+ final String rscName = LinstorUtil.RSC_PREFIX + volumeInfo.getUuid();
+ final String snapName = LinstorUtil.RSC_PREFIX + snapshot.getUuid();
+ final DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(pool.getHostAddress());
+
+ ApiCallRcList answers = linstorApi.resourceSnapshotRollback(rscName, snapName);
+ resultMsg = checkLinstorAnswers(answers);
+ } catch (ApiException apiEx) {
+ s_logger.error("Linstor: ApiEx - " + apiEx.getMessage());
+ resultMsg = apiEx.getBestMessage();
+ }
+
+ if (callback != null)
+ {
+ CommandResult result = new CommandResult();
+ result.setResult(resultMsg);
+ callback.complete(result);
+ }
+ }
+
+ @Override
+ public boolean canCopy(DataObject srcData, DataObject destData)
+ {
+ return false;
+ }
+
+ @Override
+ public void copyAsync(DataObject srcData, DataObject destData, AsyncCompletionCallback callback)
+ {
+ // as long as canCopy is false, this isn't called
+ s_logger.debug("Linstor: copyAsync with srcdata: " + srcData.getUuid());
+ }
+
+ @Override
+ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback callback)
+ {
+ // as long as canCopy is false, this isn't called
+ s_logger.debug("Linstor: copyAsync with srcdata: " + srcData.getUuid());
+ }
+
+ private CreateCmdResult notifyResize(
+ DataObject data,
+ long oldSize,
+ ResizeVolumePayload resizeParameter)
+ {
+ VolumeObject vol = (VolumeObject) data;
+ StoragePool pool = (StoragePool) data.getDataStore();
+
+ ResizeVolumeCommand resizeCmd =
+ new ResizeVolumeCommand(vol.getPath(), new StorageFilerTO(pool), oldSize, resizeParameter.newSize, resizeParameter.shrinkOk,
+ resizeParameter.instanceName);
+ CreateCmdResult result = new CreateCmdResult(null, null);
+ try {
+ ResizeVolumeAnswer answer = (ResizeVolumeAnswer) _storageMgr.sendToPool(pool, resizeParameter.hosts, resizeCmd);
+ if (answer != null && answer.getResult()) {
+ s_logger.debug("Resize: notified hosts");
+ } else if (answer != null) {
+ result.setResult(answer.getDetails());
+ } else {
+ s_logger.debug("return a null answer, mark it as failed for unknown reason");
+ result.setResult("return a null answer, mark it as failed for unknown reason");
+ }
+
+ } catch (Exception e) {
+ s_logger.debug("sending resize command failed", e);
+ result.setResult(e.toString());
+ }
+
+ return result;
+ }
+
+    /**
+     * Resizes the Linstor volume definition to the requested size and, on
+     * success, persists the new size on the CloudStack volume and notifies the
+     * hosts (via notifyResize) so the guest sees the change.
+     */
+    @Override
+    public void resize(DataObject data, AsyncCompletionCallback callback)
+    {
+        final VolumeObject vol = (VolumeObject) data;
+        final StoragePool pool = (StoragePool) data.getDataStore();
+        final DevelopersApi api = LinstorUtil.getLinstorAPI(pool.getHostAddress());
+        final ResizeVolumePayload resizeParameter = (ResizeVolumePayload) vol.getpayload();
+
+        final String rscName = LinstorUtil.RSC_PREFIX + vol.getPath();
+        final long oldSize = vol.getSize();
+
+        String errMsg = null;
+        VolumeDefinitionModify dfm = new VolumeDefinitionModify();
+        // BUGFIX: round up to the next KiB; plain division truncated and could
+        // request a volume slightly smaller than the size the caller asked for.
+        dfm.setSizeKib((resizeParameter.newSize + 1023) / 1024);
+        try
+        {
+            // volume number 0: resources created by this driver have a single volume
+            ApiCallRcList answers = api.volumeDefinitionModify(rscName, 0, dfm);
+            if (answers.hasError())
+            {
+                s_logger.error("Resize error: " + answers.get(0).getMessage());
+                errMsg = answers.get(0).getMessage();
+            } else
+            {
+                s_logger.info(String.format("Successfully resized %s to %d kib", rscName, dfm.getSizeKib()));
+                vol.setSize(resizeParameter.newSize);
+                vol.update();
+            }
+
+        } catch (ApiException apiExc)
+        {
+            s_logger.error(apiExc);
+            errMsg = apiExc.getBestMessage();
+        }
+
+        CreateCmdResult result;
+        if (errMsg != null)
+        {
+            result = new CreateCmdResult(null, new Answer(null, false, errMsg));
+            result.setResult(errMsg);
+        } else
+        {
+            // notify guests
+            result = notifyResize(data, oldSize, resizeParameter);
+        }
+
+        callback.complete(result);
+    }
+
+    /** No-op for Linstor; the call is only logged for tracing. */
+    @Override
+    public void handleQualityOfServiceForVolumeMigration(
+        VolumeInfo volumeInfo,
+        QualityOfServiceState qualityOfServiceState)
+    {
+        s_logger.debug("Linstor: handleQualityOfServiceForVolumeMigration");
+    }
+
+    /**
+     * Creates a Linstor snapshot of the resource backing the snapshot's base
+     * volume and completes the callback with the outcome.
+     *
+     * The Linstor snapshot is created under getSnapshotName(snapshotInfo.getUuid()),
+     * while the path stored on the SnapshotObjectTO is rscName + "-" + getName().
+     * NOTE(review): confirm getSnapshotName(uuid) and snapshotInfo.getName()
+     * yield matching values, otherwise the stored path will not correspond to
+     * the actual Linstor snapshot.
+     */
+    @Override
+    public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback callback)
+    {
+        s_logger.debug("Linstor: takeSnapshot with snapshot: " + snapshotInfo.getUuid());
+
+        final VolumeInfo volumeInfo = snapshotInfo.getBaseVolume();
+        final VolumeVO volumeVO = _volumeDao.findById(volumeInfo.getId());
+
+        long storagePoolId = volumeVO.getPoolId();
+        final StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId);
+        final DevelopersApi api = LinstorUtil.getLinstorAPI(storagePool.getHostAddress());
+        final String rscName = LinstorUtil.RSC_PREFIX + volumeVO.getPath();
+
+        Snapshot snapshot = new Snapshot();
+        snapshot.setName(getSnapshotName(snapshotInfo.getUuid()));
+
+        CreateCmdResult result;
+        try
+        {
+            ApiCallRcList answers = api.resourceSnapshotCreate(rscName, snapshot);
+
+            if (answers.hasError())
+            {
+                final String errMsg = answers.get(0).getMessage();
+                s_logger.error("Snapshot error: " + errMsg);
+                result = new CreateCmdResult(null, new Answer(null, false, errMsg));
+                result.setResult(errMsg);
+            } else
+            {
+                s_logger.info(String.format("Successfully took snapshot from %s", rscName));
+
+                SnapshotObjectTO snapshotObjectTo = (SnapshotObjectTO)snapshotInfo.getTO();
+                snapshotObjectTo.setPath(rscName + "-" + snapshotInfo.getName());
+
+                result = new CreateCmdResult(null, new CreateObjectAnswer(snapshotObjectTo));
+                // explicit null result marks success for the callback consumer
+                result.setResult(null);
+            }
+        } catch (ApiException apiExc)
+        {
+            s_logger.error(apiExc);
+            result = new CreateCmdResult(null, new Answer(null, false, apiExc.getBestMessage()));
+            result.setResult(apiExc.getBestMessage());
+        }
+
+        callback.complete(result);
+    }
+
+    /** Always false: this driver supplies no pool stats; getStorageStats() below returns null. */
+    @Override
+    public boolean canProvideStorageStats() {
+        return false;
+    }
+
+    /** Not supported (canProvideStorageStats() is false); always returns null. */
+    @Override
+    public Pair getStorageStats(StoragePool storagePool) {
+        return null;
+    }
+
+    /** Always false: this driver supplies no per-volume stats; getVolumeStats() below returns null. */
+    @Override
+    public boolean canProvideVolumeStats() {
+        return false;
+    }
+
+    /** Not supported (canProvideVolumeStats() is false); always returns null. */
+    @Override
+    public Pair getVolumeStats(StoragePool storagePool, String volumeId) {
+        return null;
+    }
+
+    /** Always true: no per-host reachability check is performed for Linstor pools. */
+    @Override
+    public boolean canHostAccessStoragePool(Host host, StoragePool pool) {
+        return true;
+    }
+}
diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/LinstorPrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/LinstorPrimaryDataStoreLifeCycleImpl.java
new file mode 100644
index 000000000000..b9cdae2ea606
--- /dev/null
+++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/LinstorPrimaryDataStoreLifeCycleImpl.java
@@ -0,0 +1,335 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.datastore.lifecycle;
+
+import javax.inject.Inject;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.CreateStoragePoolCommand;
+import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.capacity.CapacityManager;
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.host.Host;
+import com.cloud.host.HostVO;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.resource.ResourceManager;
+import com.cloud.storage.Storage;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolAutomation;
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
+import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.datastore.util.LinstorUtil;
+import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
+import org.apache.log4j.Logger;
+
+public class LinstorPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
+    private static final Logger s_logger = Logger.getLogger(LinstorPrimaryDataStoreLifeCycleImpl.class);
+
+    @Inject
+    private ClusterDao clusterDao;
+    @Inject
+    PrimaryDataStoreDao _primaryDataStoreDao;
+    @Inject
+    private ResourceManager resourceMgr;
+    @Inject
+    private StorageManager _storageMgr;
+    @Inject
+    PrimaryDataStoreHelper dataStoreHelper;
+    @Inject
+    private StoragePoolAutomation storagePoolAutomation;
+    @Inject
+    private CapacityManager _capacityMgr;
+    @Inject
+    AgentManager _agentMgr;
+
+    public LinstorPrimaryDataStoreLifeCycleImpl()
+    {
+    }
+
+    /** Linstor primary storage is only implemented for KVM. */
+    private static boolean isSupportedHypervisorType(HypervisorType hypervisorType) {
+        return HypervisorType.KVM.equals(hypervisorType);
+    }
+
+    /**
+     * Validates the supplied parameters and creates the primary data store DB
+     * entry. "url" is the Linstor controller REST endpoint (scheme/port are
+     * normalized here); the "resourceGroup" detail selects the Linstor resource
+     * group backing the pool and is stored as both path and user info.
+     */
+    @Override
+    public DataStore initialize(Map<String, Object> dsInfos) {
+        String url = (String) dsInfos.get("url");
+        Long zoneId = (Long) dsInfos.get("zoneId");
+        Long podId = (Long) dsInfos.get("podId");
+        Long clusterId = (Long) dsInfos.get("clusterId");
+        String storagePoolName = (String) dsInfos.get("name");
+        String providerName = (String) dsInfos.get("providerName");
+        String tags = (String) dsInfos.get("tags");
+        @SuppressWarnings("unchecked")
+        Map<String, String> details = (Map<String, String>) dsInfos.get("details");
+
+        final String resourceGroup = details.get(LinstorUtil.RSC_GROUP);
+
+        final String uuid = UUID.randomUUID().toString();
+
+        PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
+
+        // checks if primary datastore is clusterwide. If so, uses the clusterId to set
+        // the uuid and then sets the podId and clusterId parameters
+        if (clusterId != null) {
+            if (podId == null) {
+                throw new CloudRuntimeException("The Pod ID must be specified.");
+            }
+            if (zoneId == null) {
+                throw new CloudRuntimeException("The Zone ID must be specified.");
+            }
+            ClusterVO cluster = clusterDao.findById(clusterId);
+            if (cluster == null) {
+                // BUGFIX: a missing cluster previously caused a NullPointerException below
+                throw new CloudRuntimeException("Unable to find cluster with ID: " + clusterId);
+            }
+            s_logger.info("Linstor: Setting Linstor cluster-wide primary storage uuid to " + uuid);
+            parameters.setPodId(podId);
+            parameters.setClusterId(clusterId);
+
+            HypervisorType hypervisorType = cluster.getHypervisorType();
+
+            if (!isSupportedHypervisorType(hypervisorType)) {
+                throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type.");
+            }
+        }
+
+        // accept bare "host[:port]" input by defaulting the scheme to http
+        if (!url.contains("://")) {
+            url = "http://" + url;
+        }
+
+        URL controllerURL;
+        int port;
+        try
+        {
+            controllerURL = new URL(url);
+            if (!controllerURL.getProtocol().startsWith("http")) {
+                throw new IllegalArgumentException("Linstor controller URL wrong protocol: " + url);
+            }
+            if (!controllerURL.getPath().isEmpty()) {
+                throw new IllegalArgumentException("Linstor controller URL shouldn't have a path: " + url);
+            }
+            if (controllerURL.getPort() == -1) {
+                // no explicit port given: fall back to the Linstor REST API
+                // defaults (3370 for http, 3371 for https) and append it
+                port = controllerURL.getProtocol().equals("https") ? 3371 : 3370;
+                url += ":" + port;
+            } else {
+                // BUGFIX: an explicitly specified port used to be ignored and
+                // 3370 stored in the pool entry instead
+                port = controllerURL.getPort();
+            }
+        } catch (MalformedURLException e)
+        {
+            throw new IllegalArgumentException("Linstor controller URL is not valid: " + e);
+        }
+
+        long capacityBytes = LinstorUtil.getCapacityBytes(url, resourceGroup);
+
+        if (capacityBytes <= 0) {
+            throw new IllegalArgumentException("'capacityBytes' must be present and greater than 0.");
+        }
+
+        parameters.setHost(url);
+        parameters.setPort(port);
+        parameters.setPath(resourceGroup);
+        parameters.setType(Storage.StoragePoolType.Linstor);
+        parameters.setUuid(uuid);
+        parameters.setZoneId(zoneId);
+        parameters.setName(storagePoolName);
+        parameters.setProviderName(providerName);
+        parameters.setManaged(false);
+        parameters.setCapacityBytes(capacityBytes);
+        parameters.setUsedBytes(0);
+        parameters.setCapacityIops(0L);
+        parameters.setHypervisorType(HypervisorType.KVM);
+        parameters.setTags(tags);
+        parameters.setDetails(details);
+        parameters.setUserInfo(resourceGroup);
+
+        return dataStoreHelper.createPrimaryDataStore(parameters);
+    }
+
+    /**
+     * Sends a CreateStoragePoolCommand to the given host. On failure the pool
+     * row is expunged and a CloudRuntimeException is thrown.
+     */
+    protected boolean createStoragePool(long hostId, StoragePool pool) {
+        s_logger.debug("creating pool " + pool.getName() + " on host " + hostId);
+
+        if (pool.getPoolType() != Storage.StoragePoolType.Linstor) {
+            s_logger.warn(" Doesn't support storage pool type " + pool.getPoolType());
+            return false;
+        }
+        CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool);
+        final Answer answer = _agentMgr.easySend(hostId, cmd);
+        if (answer != null && answer.getResult()) {
+            return true;
+        } else {
+            _primaryDataStoreDao.expunge(pool.getId());
+            String msg = answer != null ?
+                "Can not create storage pool through host " + hostId + " due to " + answer.getDetails() :
+                "Can not create storage pool through host " + hostId + " due to CreateStoragePoolCommand returns null";
+            s_logger.warn(msg);
+            throw new CloudRuntimeException(msg);
+        }
+    }
+
+    /**
+     * Attaches the pool to a cluster: requires at least one host up, creates
+     * the pool on every reachable host and connects it to the shared pool.
+     * The pool entry is expunged if no host could be attached.
+     */
+    @Override
+    public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
+        final PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) dataStore;
+
+        final ClusterVO cluster = clusterDao.findById(primaryDataStoreInfo.getClusterId());
+        final HypervisorType hypervisorType = cluster.getHypervisorType();
+        if (!isSupportedHypervisorType(hypervisorType)) {
+            throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type.");
+        }
+
+        // check if there is at least one host up in this cluster
+        List<HostVO> allHosts = resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing,
+            primaryDataStoreInfo.getClusterId(), primaryDataStoreInfo.getPodId(),
+            primaryDataStoreInfo.getDataCenterId());
+
+        if (allHosts.isEmpty()) {
+            _primaryDataStoreDao.expunge(primaryDataStoreInfo.getId());
+
+            throw new CloudRuntimeException(
+                "No host up to associate a storage pool with in cluster " + primaryDataStoreInfo.getClusterId());
+        }
+
+        List<HostVO> poolHosts = new ArrayList<>();
+        for (HostVO host : allHosts) {
+            try {
+                createStoragePool(host.getId(), primaryDataStoreInfo);
+
+                _storageMgr.connectHostToSharedPool(host.getId(), primaryDataStoreInfo.getId());
+
+                poolHosts.add(host);
+            } catch (Exception e) {
+                s_logger.warn("Unable to establish a connection between " + host + " and " + primaryDataStoreInfo, e);
+            }
+        }
+
+        if (poolHosts.isEmpty()) {
+            s_logger.warn("No host can access storage pool '" + primaryDataStoreInfo + "' on cluster '"
+                + primaryDataStoreInfo.getClusterId() + "'.");
+
+            _primaryDataStoreDao.expunge(primaryDataStoreInfo.getId());
+
+            throw new CloudRuntimeException("Failed to access storage pool");
+        }
+
+        dataStoreHelper.attachCluster(dataStore);
+
+        return true;
+    }
+
+    /**
+     * Attaches the pool zone-wide: connects every up-and-enabled host of the
+     * supported hypervisor in the zone to the shared pool (best effort per host).
+     */
+    @Override
+    public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
+        if (!isSupportedHypervisorType(hypervisorType)) {
+            throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type.");
+        }
+
+        List<HostVO> hosts = resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType,
+            scope.getScopeId());
+
+        for (HostVO host : hosts) {
+            try {
+                _storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId());
+            } catch (Exception e) {
+                s_logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
+            }
+        }
+
+        dataStoreHelper.attachZone(dataStore, hypervisorType);
+        return true;
+    }
+
+    /** Host scope needs no extra work for Linstor pools. */
+    @Override
+    public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
+        return true;
+    }
+
+    /** Puts the pool into maintenance (automation first, then the DB state). */
+    @Override
+    public boolean maintain(DataStore dataStore) {
+        storagePoolAutomation.maintain(dataStore);
+        dataStoreHelper.maintain(dataStore);
+        return true;
+    }
+
+    /** Leaves maintenance mode (DB state first, then automation — reverse of maintain()). */
+    @Override
+    public boolean cancelMaintain(DataStore store) {
+        dataStoreHelper.cancelMaintain(store);
+        storagePoolAutomation.cancelMaintain(store);
+
+        return true;
+    }
+
+    /** Removes the primary data store DB entry. */
+    @Override
+    public boolean deleteDataStore(DataStore store) {
+        return dataStoreHelper.deletePrimaryDataStore(store);
+    }
+
+    /* (non-Javadoc)
+     * @see org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle#migrateToObjectStore(org.apache.cloudstack.engine.subsystem.api.storage.DataStore)
+     */
+    @Override
+    public boolean migrateToObjectStore(DataStore store) {
+        return false;
+    }
+
+    /**
+     * Validates requested capacity changes: neither bytes nor IOPS may be
+     * reduced below current usage. Note that this only validates; persisting
+     * the new values is handled elsewhere.
+     */
+    @Override
+    public void updateStoragePool(StoragePool storagePool, Map<String, String> details) {
+        StoragePoolVO storagePoolVo = _primaryDataStoreDao.findById(storagePool.getId());
+
+        String strCapacityBytes = details.get(PrimaryDataStoreLifeCycle.CAPACITY_BYTES);
+        Long capacityBytes = strCapacityBytes != null ? Long.parseLong(strCapacityBytes) : null;
+
+        if (capacityBytes != null) {
+            long usedBytes = _capacityMgr.getUsedBytes(storagePoolVo);
+
+            if (capacityBytes < usedBytes) {
+                throw new CloudRuntimeException(
+                    "Cannot reduce the number of bytes for this storage pool as it would lead to an insufficient number of bytes");
+            }
+        }
+
+        String strCapacityIops = details.get(PrimaryDataStoreLifeCycle.CAPACITY_IOPS);
+        Long capacityIops = strCapacityIops != null ? Long.parseLong(strCapacityIops) : null;
+
+        if (capacityIops != null) {
+            long usedIops = _capacityMgr.getUsedIops(storagePoolVo);
+
+            if (capacityIops < usedIops) {
+                throw new CloudRuntimeException(
+                    "Cannot reduce the number of IOPS for this storage pool as it would lead to an insufficient number of IOPS");
+            }
+        }
+    }
+
+    /** Marks the pool enabled. */
+    @Override
+    public void enableStoragePool(DataStore store) {
+        dataStoreHelper.enable(store);
+    }
+
+    /** Marks the pool disabled. */
+    @Override
+    public void disableStoragePool(DataStore store) {
+        dataStoreHelper.disable(store);
+    }
+}
diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/provider/LinstorPrimaryDatastoreProviderImpl.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/provider/LinstorPrimaryDatastoreProviderImpl.java
new file mode 100644
index 000000000000..de0a16986c57
--- /dev/null
+++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/provider/LinstorPrimaryDatastoreProviderImpl.java
@@ -0,0 +1,73 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.datastore.provider;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import com.cloud.utils.component.ComponentContext;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
+import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
+import org.apache.cloudstack.storage.datastore.driver.LinstorPrimaryDataStoreDriverImpl;
+import org.apache.cloudstack.storage.datastore.lifecycle.LinstorPrimaryDataStoreLifeCycleImpl;
+
+public class LinstorPrimaryDatastoreProviderImpl implements PrimaryDataStoreProvider {
+    private final static String PROVIDER_NAME = "Linstor";
+
+    protected PrimaryDataStoreDriver driver;
+    protected HypervisorHostListener listener;
+    protected DataStoreLifeCycle lifecycle;
+
+    /** Provider name used to register and look up this plugin. */
+    @Override
+    public String getName() {
+        return PROVIDER_NAME;
+    }
+
+    @Override
+    public DataStoreLifeCycle getDataStoreLifeCycle() {
+        return this.lifecycle;
+    }
+
+    /**
+     * Wires up the lifecycle, driver and host-listener beans;
+     * ComponentContext.inject resolves their @Inject dependencies.
+     */
+    @Override
+    public boolean configure(Map<String, Object> params) {
+        lifecycle = ComponentContext.inject(LinstorPrimaryDataStoreLifeCycleImpl.class);
+        driver = ComponentContext.inject(LinstorPrimaryDataStoreDriverImpl.class);
+        listener = ComponentContext.inject(DefaultHostListener.class);
+        return true;
+    }
+
+    @Override
+    public PrimaryDataStoreDriver getDataStoreDriver() {
+        return this.driver;
+    }
+
+    @Override
+    public HypervisorHostListener getHostListener() {
+        return this.listener;
+    }
+
+    /** This plugin only provides primary storage. */
+    @Override
+    public Set<DataStoreProviderType> getTypes() {
+        Set<DataStoreProviderType> types = new HashSet<>();
+        types.add(DataStoreProviderType.PRIMARY);
+        return types;
+    }
+}
diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/util/LinstorUtil.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/util/LinstorUtil.java
new file mode 100644
index 000000000000..f1760a003ab5
--- /dev/null
+++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/util/LinstorUtil.java
@@ -0,0 +1,81 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.datastore.util;
+
+import com.linbit.linstor.api.ApiClient;
+import com.linbit.linstor.api.ApiException;
+import com.linbit.linstor.api.Configuration;
+import com.linbit.linstor.api.DevelopersApi;
+import com.linbit.linstor.api.model.ProviderKind;
+import com.linbit.linstor.api.model.ResourceGroup;
+import com.linbit.linstor.api.model.StoragePool;
+
+import java.util.Collections;
+import java.util.List;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.log4j.Logger;
+
+public class LinstorUtil {
+    private static final Logger s_logger = Logger.getLogger(LinstorUtil.class);
+
+    // prefix for all Linstor resource/snapshot names created by this plugin
+    public static final String RSC_PREFIX = "cs-";
+    // pool-detail key that selects the backing Linstor resource group
+    public static final String RSC_GROUP = "resourceGroup";
+
+    public static final String TEMP_VOLUME_ID = "tempVolumeId";
+
+    public static final String CLUSTER_DEFAULT_MIN_IOPS = "clusterDefaultMinIops";
+    public static final String CLUSTER_DEFAULT_MAX_IOPS = "clusterDefaultMaxIops";
+
+    private LinstorUtil() {
+        // static utility class, not meant to be instantiated
+    }
+
+    /** Creates a Linstor REST API client bound to the given controller URL. */
+    public static DevelopersApi getLinstorAPI(String linstorUrl) {
+        ApiClient client = Configuration.getDefaultApiClient();
+        client.setBasePath(linstorUrl);
+        return new DevelopersApi(client);
+    }
+
+    /**
+     * Returns the summed total capacity, in bytes, of all non-DISKLESS storage
+     * pools selected by the given Linstor resource group.
+     *
+     * @throws CloudRuntimeException if the resource group does not exist or the API call fails
+     */
+    public static long getCapacityBytes(String linstorUrl, String rscGroupName) {
+        DevelopersApi linstorApi = getLinstorAPI(linstorUrl);
+        try {
+            List<ResourceGroup> rscGrps = linstorApi.resourceGroupList(
+                Collections.singletonList(rscGroupName),
+                null,
+                null,
+                null);
+
+            if (rscGrps.isEmpty()) {
+                final String errMsg = String.format("Linstor: Resource group '%s' not found", rscGroupName);
+                s_logger.error(errMsg);
+                throw new CloudRuntimeException(errMsg);
+            }
+
+            List<StoragePool> storagePools = linstorApi.viewStoragePools(
+                Collections.emptyList(),
+                rscGrps.get(0).getSelectFilter().getStoragePoolList(),
+                null,
+                null,
+                null
+            );
+
+            return storagePools.stream()
+                .filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS)
+                .mapToLong(StoragePool::getTotalCapacity).sum() * 1024; // linstor uses kiB
+        } catch (ApiException apiEx) {
+            s_logger.error(apiEx.getMessage());
+            throw new CloudRuntimeException(apiEx);
+        }
+    }
+}
diff --git a/plugins/storage/volume/linstor/src/main/resources/META-INF/cloudstack/storage-volume-linstor/module.properties b/plugins/storage/volume/linstor/src/main/resources/META-INF/cloudstack/storage-volume-linstor/module.properties
new file mode 100644
index 000000000000..5bcae17d1d50
--- /dev/null
+++ b/plugins/storage/volume/linstor/src/main/resources/META-INF/cloudstack/storage-volume-linstor/module.properties
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+name=storage-volume-linstor
+parent=storage
diff --git a/plugins/storage/volume/linstor/src/main/resources/META-INF/cloudstack/storage-volume-linstor/spring-storage-volume-linstor-context.xml b/plugins/storage/volume/linstor/src/main/resources/META-INF/cloudstack/storage-volume-linstor/spring-storage-volume-linstor-context.xml
new file mode 100644
index 000000000000..e4fcb4562636
--- /dev/null
+++ b/plugins/storage/volume/linstor/src/main/resources/META-INF/cloudstack/storage-volume-linstor/spring-storage-volume-linstor-context.xml
@@ -0,0 +1,32 @@
+
+
+
+
+
diff --git a/pom.xml b/pom.xml
index 291005217798..8b9839272c30 100644
--- a/pom.xml
+++ b/pom.xml
@@ -144,7 +144,7 @@
1.3.2
2.3.0
2.3.2-1
- 1.19.4
+ 2.26
9.4.36.v20210114
9.4.27.v20200227
5.5.0
@@ -161,6 +161,7 @@
10.1
2.6.4
0.6.0
+ 0.3.0
0.9.12
3.4.4_1
4.0.1
@@ -323,11 +324,6 @@
amqp-client
${cs.amqp-client.version}
-
- com.sun.jersey
- jersey-bundle
- ${cs.jersey-bundle.version}
-
com.thoughtworks.xstream
xstream
diff --git a/server/src/main/java/com/cloud/api/ApiDBUtils.java b/server/src/main/java/com/cloud/api/ApiDBUtils.java
index d688c276a338..d8f6a98b6826 100644
--- a/server/src/main/java/com/cloud/api/ApiDBUtils.java
+++ b/server/src/main/java/com/cloud/api/ApiDBUtils.java
@@ -1231,7 +1231,10 @@ public static HypervisorType getHypervisorTypeFromFormat(long dcId, ImageFormat
ListIterator itr = pools.listIterator();
while(itr.hasNext()) {
StoragePoolVO pool = itr.next();
- if(pool.getPoolType() == StoragePoolType.RBD || pool.getPoolType() == StoragePoolType.PowerFlex || pool.getPoolType() == StoragePoolType.CLVM) {
+ if(pool.getPoolType() == StoragePoolType.RBD ||
+ pool.getPoolType() == StoragePoolType.PowerFlex ||
+ pool.getPoolType() == StoragePoolType.CLVM ||
+ pool.getPoolType() == StoragePoolType.Linstor) {
// This case will note the presence of non-qcow2 primary stores, suggesting KVM without NFS. Otherwse,
// If this check is not passed, the hypervisor type will remain OVM.
type = HypervisorType.KVM;
diff --git a/test/integration/plugins/linstor/README.md b/test/integration/plugins/linstor/README.md
new file mode 100644
index 000000000000..a0a559c9da41
--- /dev/null
+++ b/test/integration/plugins/linstor/README.md
@@ -0,0 +1,31 @@
+# Linstor storage plugin
+
+This directory contains the basic VM, Volume life cycle tests for Linstor storage pool (in KVM hypervisor).
+
+# Running tests
+===============
+To run the basic volume tests, first update the below test data of the CloudStack environment
+
+````
+TestData.zoneId: <zone-id>
+TestData.clusterId: <cluster-id>
+TestData.domainId: <domain-id>
+TestData.url: <management-server-ip>
+TestData.primaryStorage "url": <linstor-controller-url>
+````
+
+and to enable and run volume migration tests, update the below test data
+
+````
+TestData.migrationTests: True
+TestData.primaryStorageSameInstance "url":
+TestData.primaryStorageDistinctInstance "url":
+````
+
+Then run the tests using python unittest runner: nosetests
+
+````
+nosetests --with-marvin --marvin-config=<marvin-cfg-file> <cloudstack-source-dir>/test/integration/plugins/linstor/test_linstor_volumes.py --zone=<zone-name> --hypervisor=kvm
+````
+
+You can also run these tests out of the box with PyDev or PyCharm or whatever.
diff --git a/test/integration/plugins/linstor/test_linstor_volumes.py b/test/integration/plugins/linstor/test_linstor_volumes.py
new file mode 100644
index 000000000000..60dd84a13b15
--- /dev/null
+++ b/test/integration/plugins/linstor/test_linstor_volumes.py
@@ -0,0 +1,1218 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import logging
+import random
+import time
+
+# All tests inherit from cloudstackTestCase
+from marvin.cloudstackTestCase import cloudstackTestCase
+
+# Import Integration Libraries
+# base - contains all resources as entities and defines create, delete, list operations on them
+from marvin.lib.base import Account, DiskOffering, ServiceOffering, Snapshot, StoragePool, Template, User, \
+ VirtualMachine, Volume
+
+# common - commonly used methods for all tests are listed here
+from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_hosts, list_virtual_machines, \
+ list_volumes
+
+# utils - utility classes for common cleanup, external library wrappers, etc.
+from marvin.lib.utils import cleanup_resources, validateList
+from marvin.codes import PASS
+from nose.plugins.attrib import attr
+
+# Prerequisites:
+# Only one zone
+# Only one pod
+# Only one cluster
+#
+# One linstor storage pool for basic tests
+# Only KVM hypervisor is supported for linstor storage pool
+# KVM host(s) have linstor-satellite installed
+#
+# For volume migration tests, additional resource-groups are required
+# One Linstor resource group on the same Linstor storage cluster/instance (name: acs-test-same)
+# One Linstor resource group on different Linstor storage cluster/instance (name: acs-test-diff)
+#
+
+
+class TestData:
+ # constants
+ account = "account"
+ clusterId = "clusterId"
+ computeOffering = "computeoffering"
+ diskName = "diskname"
+ diskOffering = "diskoffering"
+ diskOfferingSameInstance = "diskOfferingSameInstance"
+ diskOfferingDistinctInstance = "diskOfferingDistinctInstance"
+ domainId = "domainId"
+ hypervisor = "hypervisor"
+ kvm = "kvm"
+ login = "login"
+ gatewayip = "gatewayip"
+ one_GB_in_bytes = 1073741824
+ password = "password"
+ port = "port"
+ primaryStorage = "primarystorage"
+ primaryStorageSameInstance = "primaryStorageSameInstance"
+ primaryStorageDistinctInstance = "primaryStorageDistinctInstance"
+ provider = "provider"
+ scope = "scope"
+ powerFlex = "powerflex"
+ storageTag = "linstor"
+ storageTagSameInstance = "linstorsame"
+ storageTagDistinctInstance = "linstordiff"
+ tags = "tags"
+ templateCacheNameKvm = "centos55-x86-64"
+ testAccount = "testaccount"
+ url = "url"
+ user = "user"
+ username = "username"
+ virtualMachine = "virtualmachine"
+ virtualMachine2 = "virtualmachine2"
+ virtualMachine3 = "virtualmachine3"
+ virtualMachine4 = "virtualmachine4"
+ volume_1 = "volume_1"
+ volume_2 = "volume_2"
+ volume_3 = "volume_3"
+ volume_4 = "volume_4"
+ zoneId = "zoneId"
+ migrationTests = "migrationTests"
+
+ # hypervisor type to test
+ hypervisor_type = kvm
+
+ def __init__(self):
+ linstor_controller_url = "http://10.43.224.8"
+ self.testdata = {
+ TestData.kvm: {
+ TestData.username: "admin",
+ TestData.password: "P@ssword123"
+ },
+ TestData.account: {
+ "email": "test1@test1.com",
+ "firstname": "John",
+ "lastname": "Doe",
+ "username": "test1",
+ "password": "test"
+ },
+ TestData.testAccount: {
+ "email": "test2@test2.com",
+ "firstname": "Jane",
+ "lastname": "Doe",
+ "username": "test2",
+ "password": "test"
+ },
+ TestData.user: {
+ "email": "user@test1.com",
+ "firstname": "Jane",
+ "lastname": "Doe",
+ "username": "test1user",
+ "password": "password"
+ },
+ TestData.primaryStorage: {
+ "name": "LinstorPool-%d" % random.randint(0, 100),
+ TestData.scope: "ZONE",
+ "url": linstor_controller_url,
+ TestData.provider: "Linstor",
+ TestData.tags: TestData.storageTag + "," + TestData.storageTagSameInstance + "," + TestData.storageTagDistinctInstance,
+ TestData.hypervisor: "KVM",
+ "details": {
+ "resourceGroup": "acs-basic"
+ }
+ },
+ TestData.virtualMachine: {
+ "name": "TestVM1",
+ "displayname": "Test VM 1"
+ },
+ TestData.virtualMachine2: {
+ "name": "TestVM2",
+ "displayname": "Test VM 2"
+ },
+ TestData.virtualMachine3: {
+ "name": "TestVM3",
+ "displayname": "Test VM 3"
+ },
+ TestData.virtualMachine4: {
+ "name": "TestVM4",
+ "displayname": "Test VM 4"
+ },
+ TestData.computeOffering: {
+ "name": "Linstor_Compute",
+ "displaytext": "Linstor_Compute",
+ "cpunumber": 1,
+ "cpuspeed": 500,
+ "memory": 512,
+ "storagetype": "shared",
+ TestData.tags: TestData.storageTag
+ },
+ TestData.diskOffering: {
+ "name": "Linstor_Disk",
+ "displaytext": "Linstor_Disk",
+ "disksize": 8,
+ TestData.tags: TestData.storageTag,
+ "storagetype": "shared"
+ },
+ TestData.volume_1: {
+ TestData.diskName: "test-volume-1",
+ },
+ TestData.volume_2: {
+ TestData.diskName: "test-volume-2",
+ },
+ TestData.volume_3: {
+ TestData.diskName: "test-volume-3",
+ },
+ TestData.volume_4: {
+ TestData.diskName: "test-volume-4",
+ },
+ TestData.zoneId: 1,
+ TestData.clusterId: 1,
+ TestData.domainId: 1,
+ # for volume migration tests
+ TestData.migrationTests: True,
+ # Linstor storage pool on the same Linstor storage instance
+ TestData.primaryStorageSameInstance: {
+ "name": "Linstor-%d" % random.randint(0, 100),
+ TestData.scope: "ZONE",
+ "url": linstor_controller_url,
+ TestData.provider: "Linstor",
+ TestData.tags: TestData.storageTag + "," + TestData.storageTagSameInstance,
+ TestData.hypervisor: "KVM",
+ "details": {
+ "resourceGroup": "acs-test-same"
+ }
+ },
+ # Linstor storage pool on a different Linstor storage cluster/instance
+ TestData.primaryStorageDistinctInstance: {
+ "name": "Linstor-%d" % random.randint(0, 100),
+ TestData.scope: "ZONE",
+ "url": linstor_controller_url,
+ TestData.provider: "Linstor",
+ TestData.tags: TestData.storageTag + "," + TestData.storageTagDistinctInstance,
+ TestData.hypervisor: "KVM",
+ "details": {
+ "resourceGroup": "acs-test-diff"
+ }
+ },
+ TestData.diskOfferingSameInstance: {
+ "name": "Linstor_Disk_Same_Inst",
+ "displaytext": "Linstor_Disk_Same_Inst",
+ "disksize": 8,
+ TestData.tags: TestData.storageTagSameInstance,
+ "storagetype": "shared"
+ },
+ TestData.diskOfferingDistinctInstance: {
+ "name": "Linstor_Disk_Diff_Inst",
+ "displaytext": "Linstor_Disk_Diff_Inst",
+ "disksize": 8,
+ TestData.tags: TestData.storageTagDistinctInstance,
+ "storagetype": "shared"
+ },
+ }
+
+
+class TestLinstorVolumes(cloudstackTestCase):
+ _volume_vm_id_and_vm_id_do_not_match_err_msg = "The volume's VM ID and the VM's ID do not match."
+ _vm_not_in_running_state_err_msg = "The VM is not in the 'Running' state."
+ _vm_not_in_stopped_state_err_msg = "The VM is not in the 'Stopped' state."
+
+ @classmethod
+ def setUpClass(cls):
+ # Set up API client
+ testclient = super(TestLinstorVolumes, cls).getClsTestClient()
+
+ cls.apiClient = testclient.getApiClient()
+ cls.configData = testclient.getParsedTestDataConfig()
+ cls.dbConnection = testclient.getDbConnection()
+ cls.testdata = TestData().testdata
+
+ # Get Resources from Cloud Infrastructure
+ cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
+ cls.cluster = list_clusters(cls.apiClient)[0]
+ cls.template = get_template(cls.apiClient, cls.zone.id, hypervisor=TestData.hypervisor_type)
+ cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])
+
+ # Create test account
+ cls.account = Account.create(
+ cls.apiClient,
+ cls.testdata["account"],
+ admin=1
+ )
+
+ # Set up connection to make customized API calls
+ cls.user = User.create(
+ cls.apiClient,
+ cls.testdata["user"],
+ account=cls.account.name,
+ domainid=cls.domain.id
+ )
+
+ primarystorage = cls.testdata[TestData.primaryStorage]
+
+ cls.primary_storage = StoragePool.create(
+ cls.apiClient,
+ primarystorage,
+ scope=primarystorage[TestData.scope],
+ zoneid=cls.zone.id,
+ provider=primarystorage[TestData.provider],
+ tags=primarystorage[TestData.tags],
+ hypervisor=primarystorage[TestData.hypervisor]
+ )
+
+ cls.compute_offering = ServiceOffering.create(
+ cls.apiClient,
+ cls.testdata[TestData.computeOffering]
+ )
+
+ cls.disk_offering = DiskOffering.create(
+ cls.apiClient,
+ cls.testdata[TestData.diskOffering]
+ )
+
+ if cls.testdata[TestData.migrationTests]:
+ primarystorage_sameinst = cls.testdata[TestData.primaryStorageSameInstance]
+ cls.primary_storage_same_inst = StoragePool.create(
+ cls.apiClient,
+ primarystorage_sameinst,
+ scope=primarystorage_sameinst[TestData.scope],
+ zoneid=cls.zone.id,
+ provider=primarystorage_sameinst[TestData.provider],
+ tags=primarystorage_sameinst[TestData.tags],
+ hypervisor=primarystorage_sameinst[TestData.hypervisor]
+ )
+
+ primarystorage_distinctinst = cls.testdata[TestData.primaryStorageDistinctInstance]
+ cls.primary_storage_distinct_inst = StoragePool.create(
+ cls.apiClient,
+ primarystorage_distinctinst,
+ scope=primarystorage_distinctinst[TestData.scope],
+ zoneid=cls.zone.id,
+ provider=primarystorage_distinctinst[TestData.provider],
+ tags=primarystorage_distinctinst[TestData.tags],
+ hypervisor=primarystorage_distinctinst[TestData.hypervisor]
+ )
+
+ cls.disk_offering_same_inst = DiskOffering.create(
+ cls.apiClient,
+ cls.testdata[TestData.diskOfferingSameInstance]
+ )
+
+ cls.disk_offering_distinct_inst = DiskOffering.create(
+ cls.apiClient,
+ cls.testdata[TestData.diskOfferingDistinctInstance]
+ )
+
+ # Create VM and volume for tests
+ cls.virtual_machine = VirtualMachine.create(
+ cls.apiClient,
+ cls.testdata[TestData.virtualMachine],
+ accountid=cls.account.name,
+ zoneid=cls.zone.id,
+ serviceofferingid=cls.compute_offering.id,
+ templateid=cls.template.id,
+ domainid=cls.domain.id,
+ startvm=False
+ )
+
+ TestLinstorVolumes._start_vm(cls.virtual_machine)
+
+ cls.volume = Volume.create(
+ cls.apiClient,
+ cls.testdata[TestData.volume_1],
+ account=cls.account.name,
+ domainid=cls.domain.id,
+ zoneid=cls.zone.id,
+ diskofferingid=cls.disk_offering.id
+ )
+
+ # Resources that are to be destroyed
+ cls._cleanup = [
+ cls.volume,
+ cls.virtual_machine,
+ cls.compute_offering,
+ cls.disk_offering,
+ cls.user,
+ cls.account
+ ]
+
+ @classmethod
+ def tearDownClass(cls):
+ try:
+ if cls.testdata[TestData.migrationTests]:
+ cls._cleanup.append(cls.disk_offering_same_inst)
+ cls._cleanup.append(cls.disk_offering_distinct_inst)
+
+ cleanup_resources(cls.apiClient, cls._cleanup)
+
+ cls.primary_storage.delete(cls.apiClient)
+
+ if cls.testdata[TestData.migrationTests]:
+ cls.primary_storage_same_inst.delete(cls.apiClient)
+ cls.primary_storage_distinct_inst.delete(cls.apiClient)
+
+ except Exception as e:
+ logging.debug("Exception in tearDownClass(cls): %s" % e)
+
+ def setUp(self):
+ self.attached = False
+ self.cleanup = []
+
+ def tearDown(self):
+ if self.attached:
+ self.virtual_machine.detach_volume(self.apiClient, self.volume)
+
+ cleanup_resources(self.apiClient, self.cleanup)
+
+ @attr(tags=['basic'], required_hardware=False)
+ def test_01_create_vm_with_volume(self):
+ """Create VM with attached volume and expunge VM"""
+
+ #######################################
+ # STEP 1: Create VM and attach volume #
+ #######################################
+
+ test_virtual_machine = VirtualMachine.create(
+ self.apiClient,
+ self.testdata[TestData.virtualMachine2],
+ accountid=self.account.name,
+ zoneid=self.zone.id,
+ serviceofferingid=self.compute_offering.id,
+ templateid=self.template.id,
+ domainid=self.domain.id,
+ startvm=False
+ )
+
+ TestLinstorVolumes._start_vm(test_virtual_machine)
+
+ self.volume = test_virtual_machine.attach_volume(
+ self.apiClient,
+ self.volume
+ )
+
+ self.attached = True
+
+ vm = self._get_vm(test_virtual_machine.id)
+
+ self.assertEqual(
+ self.volume.virtualmachineid,
+ vm.id,
+ TestLinstorVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg
+ )
+
+ self.assertEqual(
+ vm.state.lower(),
+ 'running',
+ TestLinstorVolumes._vm_not_in_running_state_err_msg
+ )
+
+ #######################################
+ # STEP 2: Destroy and Expunge VM #
+ #######################################
+
+ test_virtual_machine.delete(self.apiClient, True)
+
+ self.attached = False
+
+ vol = self._get_volume(self.volume.id)
+
+ self.assertEqual(
+ vol.virtualmachineid,
+ None,
+ "Check if attached to virtual machine"
+ )
+
+ self.assertEqual(
+ vol.vmname,
+ None,
+ "Check if VM was expunged"
+ )
+
+ list_virtual_machine_response = list_virtual_machines(
+ self.apiClient,
+ id=test_virtual_machine.id
+ )
+
+ self.assertEqual(
+ list_virtual_machine_response,
+ None,
+ "Check if VM was actually expunged"
+ )
+
+ @attr(tags=['basic'], required_hardware=False)
+ def test_02_attach_new_volume_to_stopped_vm(self):
+ """Attach a volume to a stopped virtual machine, then start VM"""
+
+ self.virtual_machine.stop(self.apiClient)
+
+ new_volume = Volume.create(
+ self.apiClient,
+ self.testdata[TestData.volume_2],
+ account=self.account.name,
+ domainid=self.domain.id,
+ zoneid=self.zone.id,
+ diskofferingid=self.disk_offering.id
+ )
+
+ self.cleanup.append(new_volume)
+
+ new_volume = self.virtual_machine.attach_volume(
+ self.apiClient,
+ new_volume
+ )
+
+ TestLinstorVolumes._start_vm(self.virtual_machine)
+
+ vm = self._get_vm(self.virtual_machine.id)
+
+ self.assertEqual(
+ vm.state.lower(),
+ "running",
+ TestLinstorVolumes._vm_not_in_running_state_err_msg
+ )
+
+ # Detach volume
+ new_volume = self.virtual_machine.detach_volume(
+ self.apiClient,
+ new_volume
+ )
+
+ self.assertEqual(
+ new_volume.virtualmachineid,
+ None,
+ "The volume should not be attached to a VM."
+ )
+
+ @attr(tags=['basic'], required_hardware=False)
+ def test_03_attach_detach_attach_volume_to_vm(self):
+ """Attach, detach, and attach volume to a running VM"""
+
+ TestLinstorVolumes._start_vm(self.virtual_machine)
+
+ #######################################
+ # STEP 1: Attach volume to running VM #
+ #######################################
+
+ self.volume = self.virtual_machine.attach_volume(
+ self.apiClient,
+ self.volume
+ )
+
+ self.attached = True
+
+ vm = self._get_vm(self.virtual_machine.id)
+
+ self.assertEqual(
+ self.volume.virtualmachineid,
+ vm.id,
+ TestLinstorVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg
+ )
+
+ self.assertEqual(
+ vm.state.lower(),
+ 'running',
+ TestLinstorVolumes._vm_not_in_running_state_err_msg
+ )
+
+ #########################################
+ # STEP 2: Detach volume from running VM #
+ #########################################
+
+ self.volume = self.virtual_machine.detach_volume(
+ self.apiClient,
+ self.volume
+ )
+
+ self.attached = False
+
+ vm = self._get_vm(self.virtual_machine.id)
+
+ self.assertEqual(
+ self.volume.virtualmachineid,
+ None,
+ "The volume should not be attached to a VM."
+ )
+
+ self.assertEqual(
+ vm.state.lower(),
+ 'running',
+ str(vm.state)
+ )
+
+ #######################################
+ # STEP 3: Attach volume to running VM #
+ #######################################
+
+ self.volume = self.virtual_machine.attach_volume(
+ self.apiClient,
+ self.volume
+ )
+
+ self.attached = True
+
+ vm = self._get_vm(self.virtual_machine.id)
+
+ self.assertEqual(
+ self.volume.virtualmachineid,
+ vm.id,
+ TestLinstorVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg
+ )
+
+ self.assertEqual(
+ vm.state.lower(),
+ 'running',
+ TestLinstorVolumes._vm_not_in_running_state_err_msg
+ )
+
+ @attr(tags=['basic'], required_hardware=False)
+ def test_04_detach_vol_stopped_vm_start(self):
+ """Detach volume from a stopped VM, then start."""
+
+ TestLinstorVolumes._start_vm(self.virtual_machine)
+
+ #######################################
+ # STEP 1: Attach volume to running VM #
+ #######################################
+
+ self.volume = self.virtual_machine.attach_volume(
+ self.apiClient,
+ self.volume
+ )
+
+ self.attached = True
+
+ vm = self._get_vm(self.virtual_machine.id)
+
+ self.assertEqual(
+ self.volume.virtualmachineid,
+ vm.id,
+ TestLinstorVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg
+ )
+
+ self.assertEqual(
+ vm.state.lower(),
+ 'running',
+ TestLinstorVolumes._vm_not_in_running_state_err_msg
+ )
+
+ #########################################
+ # STEP 2: Detach volume from stopped VM #
+ #########################################
+
+ self.virtual_machine.stop(self.apiClient)
+
+ self.volume = self.virtual_machine.detach_volume(
+ self.apiClient,
+ self.volume
+ )
+
+ self.attached = False
+
+ vm = self._get_vm(self.virtual_machine.id)
+
+ self.assertEqual(
+ self.volume.virtualmachineid,
+ None,
+ "The volume should not be attached to a VM."
+ )
+
+ self.assertEqual(
+ vm.state.lower(),
+ 'stopped',
+ TestLinstorVolumes._vm_not_in_stopped_state_err_msg
+ )
+
+ #######################################
+ # STEP 3: Start VM with detached vol #
+ #######################################
+
+ TestLinstorVolumes._start_vm(self.virtual_machine)
+
+ vm = self._get_vm(self.virtual_machine.id)
+
+ self.assertEqual(
+ vm.state.lower(),
+ 'running',
+ TestLinstorVolumes._vm_not_in_running_state_err_msg
+ )
+
+ @attr(tags=['basic'], required_hardware=False)
+ def test_05_attach_volume_to_stopped_vm(self):
+ """Attach a volume to a stopped virtual machine, then start VM"""
+
+ self.virtual_machine.stop(self.apiClient)
+
+ #######################################
+ # STEP 1: Attach volume to stopped VM #
+ #######################################
+
+ self.volume = self.virtual_machine.attach_volume(
+ self.apiClient,
+ self.volume
+ )
+
+ self.attached = True
+
+ vm = self._get_vm(self.virtual_machine.id)
+
+ self.assertEqual(
+ self.volume.virtualmachineid,
+ vm.id,
+ TestLinstorVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg
+ )
+
+ self.assertEqual(
+ vm.state.lower(),
+ 'stopped',
+ TestLinstorVolumes._vm_not_in_stopped_state_err_msg
+ )
+
+ TestLinstorVolumes._start_vm(self.virtual_machine)
+
+ vm = self._get_vm(self.virtual_machine.id)
+
+ self.assertEqual(
+ self.volume.virtualmachineid,
+ vm.id,
+ TestLinstorVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg
+ )
+
+ self.assertEqual(
+ vm.state.lower(),
+ 'running',
+ TestLinstorVolumes._vm_not_in_running_state_err_msg
+ )
+
+ @attr(tags=['basic'], required_hardware=False)
+ def test_06_attached_volume_reboot_vm(self):
+ """Attach volume to running VM, then reboot."""
+
+ TestLinstorVolumes._start_vm(self.virtual_machine)
+
+ #######################################
+ # STEP 1: Attach volume to running VM #
+ #######################################
+
+ self.volume = self.virtual_machine.attach_volume(
+ self.apiClient,
+ self.volume
+ )
+
+ self.attached = True
+
+ vm = self._get_vm(self.virtual_machine.id)
+
+ self.assertEqual(
+ self.volume.virtualmachineid,
+ vm.id,
+ TestLinstorVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg
+ )
+
+ self.assertEqual(
+ vm.state.lower(),
+ 'running',
+ TestLinstorVolumes._vm_not_in_running_state_err_msg
+ )
+
+ #######################################
+ # STEP 2: Reboot VM with attached vol #
+ #######################################
+ TestLinstorVolumes._reboot_vm(self.virtual_machine)
+
+ vm = self._get_vm(self.virtual_machine.id)
+
+ self.assertEqual(
+ vm.state.lower(),
+ 'running',
+ TestLinstorVolumes._vm_not_in_running_state_err_msg
+ )
+
+ @attr(tags=['basic'], required_hardware=False)
+ def test_07_detach_volume_reboot_vm(self):
+ """Detach volume from a running VM, then reboot."""
+
+ TestLinstorVolumes._start_vm(self.virtual_machine)
+
+ #######################################
+ # STEP 1: Attach volume to running VM #
+ #######################################
+
+ self.volume = self.virtual_machine.attach_volume(
+ self.apiClient,
+ self.volume
+ )
+
+ self.attached = True
+
+ vm = self._get_vm(self.virtual_machine.id)
+
+ self.assertEqual(
+ self.volume.virtualmachineid,
+ vm.id,
+ TestLinstorVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg
+ )
+
+ self.assertEqual(
+ vm.state.lower(),
+ 'running',
+ TestLinstorVolumes._vm_not_in_running_state_err_msg
+ )
+
+ #########################################
+ # STEP 2: Detach volume from running VM #
+ #########################################
+
+ self.volume = self.virtual_machine.detach_volume(
+ self.apiClient,
+ self.volume
+ )
+
+ self.attached = False
+
+ vm = self._get_vm(self.virtual_machine.id)
+
+ self.assertEqual(
+ self.volume.virtualmachineid,
+ None,
+ "The volume should not be attached to a VM."
+ )
+
+ self.assertEqual(
+ vm.state.lower(),
+ 'running',
+ TestLinstorVolumes._vm_not_in_running_state_err_msg
+ )
+
+ #######################################
+ # STEP 3: Reboot VM with detached vol #
+ #######################################
+
+ self.virtual_machine.reboot(self.apiClient)
+
+ vm = self._get_vm(self.virtual_machine.id)
+
+ self.assertEqual(
+ vm.state.lower(),
+ 'running',
+ TestLinstorVolumes._vm_not_in_running_state_err_msg
+ )
+
+ @attr(tags=['basic'], required_hardware=False)
+ def test_08_delete_volume_was_attached(self):
+ """Delete volume that was attached to a VM and is detached now"""
+
+ TestLinstorVolumes._start_vm(self.virtual_machine)
+
+ #######################################
+ # STEP 1: Create vol and attach to VM #
+ #######################################
+
+ new_volume = Volume.create(
+ self.apiClient,
+ self.testdata[TestData.volume_2],
+ account=self.account.name,
+ domainid=self.domain.id,
+ zoneid=self.zone.id,
+ diskofferingid=self.disk_offering.id
+ )
+
+ volume_to_delete_later = new_volume
+
+ new_volume = self.virtual_machine.attach_volume(
+ self.apiClient,
+ new_volume
+ )
+
+ vm = self._get_vm(self.virtual_machine.id)
+
+ self.assertEqual(
+ new_volume.virtualmachineid,
+ vm.id,
+ "Check if attached to virtual machine"
+ )
+
+ self.assertEqual(
+ vm.state.lower(),
+ 'running',
+ str(vm.state)
+ )
+
+ #######################################
+ # STEP 2: Detach and delete volume #
+ #######################################
+
+ new_volume = self.virtual_machine.detach_volume(
+ self.apiClient,
+ new_volume
+ )
+
+ vm = self._get_vm(self.virtual_machine.id)
+
+ self.assertEqual(
+ new_volume.virtualmachineid,
+ None,
+ "Check if attached to virtual machine"
+ )
+
+ self.assertEqual(
+ vm.state.lower(),
+ 'running',
+ str(vm.state)
+ )
+
+ volume_to_delete_later.delete(self.apiClient)
+
+ list_volumes_response = list_volumes(
+ self.apiClient,
+ id=new_volume.id
+ )
+
+ self.assertEqual(
+ list_volumes_response,
+ None,
+ "Check volume was deleted"
+ )
+
+ @attr(tags=['advanced', 'migration'], required_hardware=False)
+ def test_09_migrate_volume_to_same_instance_pool(self):
+ """Migrate volume to the same instance pool"""
+
+ if not self.testdata[TestData.migrationTests]:
+ self.skipTest("Volume migration tests not enabled, skipping test")
+
+ #######################################
+ # STEP 1: Create VM and Start VM #
+ #######################################
+
+ test_virtual_machine = VirtualMachine.create(
+ self.apiClient,
+ self.testdata[TestData.virtualMachine3],
+ accountid=self.account.name,
+ zoneid=self.zone.id,
+ serviceofferingid=self.compute_offering.id,
+ templateid=self.template.id,
+ domainid=self.domain.id,
+ startvm=False
+ )
+
+ TestLinstorVolumes._start_vm(test_virtual_machine)
+
+ #######################################
+ # STEP 2: Create vol and attach to VM #
+ #######################################
+
+ new_volume = Volume.create(
+ self.apiClient,
+ self.testdata[TestData.volume_3],
+ account=self.account.name,
+ domainid=self.domain.id,
+ zoneid=self.zone.id,
+ diskofferingid=self.disk_offering_same_inst.id
+ )
+
+ volume_to_delete_later = new_volume
+
+ new_volume = test_virtual_machine.attach_volume(
+ self.apiClient,
+ new_volume
+ )
+
+ vm = self._get_vm(test_virtual_machine.id)
+
+ self.assertEqual(
+ new_volume.virtualmachineid,
+ vm.id,
+ "Check if attached to virtual machine"
+ )
+
+ self.assertEqual(
+ vm.state.lower(),
+ 'running',
+ str(vm.state)
+ )
+
+ #######################################
+ # STEP 3: Stop VM and Migrate volume #
+ #######################################
+
+ test_virtual_machine.stop(self.apiClient)
+
+ vm = self._get_vm(test_virtual_machine.id)
+
+ self.assertEqual(
+ vm.state.lower(),
+ 'stopped',
+ str(vm.state)
+ )
+
+ pools = StoragePool.listForMigration(
+ self.apiClient,
+ id=new_volume.id
+ )
+
+ if not pools:
+ self.skipTest("No suitable storage pools found for volume migration, skipping test")
+
+ self.assertEqual(
+ validateList(pools)[0],
+ PASS,
+ "Invalid pool response from findStoragePoolsForMigration API"
+ )
+
+ pool = pools[0]
+ self.debug("Migrating Volume-ID: {} to Same Instance Pool: {}".format(new_volume.id, pool.id))
+
+ try:
+ Volume.migrate(
+ self.apiClient,
+ volumeid=new_volume.id,
+ storageid=pool.id
+ )
+ except Exception as e:
+ self.fail("Volume migration failed with error %s" % e)
+
+ #######################################
+ # STEP 4: Detach and delete volume #
+ #######################################
+
+ new_volume = test_virtual_machine.detach_volume(
+ self.apiClient,
+ new_volume
+ )
+
+ self.assertEqual(
+ new_volume.virtualmachineid,
+ None,
+ "Check if attached to virtual machine"
+ )
+
+ volume_to_delete_later.delete(self.apiClient)
+
+ list_volumes_response = list_volumes(
+ self.apiClient,
+ id=new_volume.id
+ )
+
+ self.assertEqual(
+ list_volumes_response,
+ None,
+ "Check volume was deleted"
+ )
+
+ #######################################
+ # STEP 4: Delete VM #
+ #######################################
+
+ test_virtual_machine.delete(self.apiClient, True)
+
+ @attr(tags=['advanced', 'migration'], required_hardware=False)
+ def test_10_migrate_volume_to_distinct_instance_pool(self):
+ """Migrate volume to distinct instance pool"""
+
+ if not self.testdata[TestData.migrationTests]:
+ self.skipTest("Volume migration tests not enabled, skipping test")
+
+ #######################################
+ # STEP 1: Create VM and Start VM #
+ #######################################
+
+ test_virtual_machine = VirtualMachine.create(
+ self.apiClient,
+ self.testdata[TestData.virtualMachine4],
+ accountid=self.account.name,
+ zoneid=self.zone.id,
+ serviceofferingid=self.compute_offering.id,
+ templateid=self.template.id,
+ domainid=self.domain.id,
+ startvm=False
+ )
+
+ TestLinstorVolumes._start_vm(test_virtual_machine)
+
+ #######################################
+ # STEP 2: Create vol and attach to VM #
+ #######################################
+
+ new_volume = Volume.create(
+ self.apiClient,
+ self.testdata[TestData.volume_4],
+ account=self.account.name,
+ domainid=self.domain.id,
+ zoneid=self.zone.id,
+ diskofferingid=self.disk_offering_distinct_inst.id
+ )
+
+ volume_to_delete_later = new_volume
+
+ new_volume = test_virtual_machine.attach_volume(
+ self.apiClient,
+ new_volume
+ )
+
+ vm = self._get_vm(test_virtual_machine.id)
+
+ self.assertEqual(
+ new_volume.virtualmachineid,
+ vm.id,
+ "Check if attached to virtual machine"
+ )
+
+ self.assertEqual(
+ vm.state.lower(),
+ 'running',
+ str(vm.state)
+ )
+
+ #######################################
+ # STEP 3: Stop VM and Migrate volume #
+ #######################################
+
+ test_virtual_machine.stop(self.apiClient)
+
+ vm = self._get_vm(test_virtual_machine.id)
+
+ self.assertEqual(
+ vm.state.lower(),
+ 'stopped',
+ str(vm.state)
+ )
+
+ pools = StoragePool.listForMigration(
+ self.apiClient,
+ id=new_volume.id
+ )
+
+ if not pools:
+ self.skipTest("No suitable storage pools found for volume migration, skipping test")
+
+ self.assertEqual(
+ validateList(pools)[0],
+ PASS,
+ "Invalid pool response from findStoragePoolsForMigration API"
+ )
+
+ pool = pools[0]
+ self.debug("Migrating Volume-ID: {} to Distinct Instance Pool: {}".format(new_volume.id, pool.id))
+
+ try:
+ Volume.migrate(
+ self.apiClient,
+ volumeid=new_volume.id,
+ storageid=pool.id
+ )
+ except Exception as e:
+ self.fail("Volume migration failed with error %s" % e)
+
+ #######################################
+ # STEP 4: Detach and delete volume #
+ #######################################
+
+ new_volume = test_virtual_machine.detach_volume(
+ self.apiClient,
+ new_volume
+ )
+
+ self.assertEqual(
+ new_volume.virtualmachineid,
+ None,
+ "Check if attached to virtual machine"
+ )
+
+ volume_to_delete_later.delete(self.apiClient)
+
+ list_volumes_response = list_volumes(
+ self.apiClient,
+ id=new_volume.id
+ )
+
+ self.assertEqual(
+ list_volumes_response,
+ None,
+ "Check volume was deleted"
+ )
+
+ #######################################
+ # STEP 4: Delete VM #
+ #######################################
+
+ test_virtual_machine.delete(self.apiClient, True)
+
+ def _create_vm_using_template_and_destroy_vm(self, template):
+ vm_name = "VM-%d" % random.randint(0, 100)
+
+ virtual_machine_dict = {"name": vm_name, "displayname": vm_name}
+
+ virtual_machine = VirtualMachine.create(
+ self.apiClient,
+ virtual_machine_dict,
+ accountid=self.account.name,
+ zoneid=self.zone.id,
+ serviceofferingid=self.compute_offering.id,
+ templateid=template.id,
+ domainid=self.domain.id,
+ startvm=True
+ )
+
+ list_volumes_response = list_volumes(
+ self.apiClient,
+ virtualmachineid=virtual_machine.id,
+ listall=True
+ )
+
+ vm_root_volume = list_volumes_response[0]
+
+ virtual_machine.delete(self.apiClient, True)
+
+ @staticmethod
+ def _get_bytes_from_gb(number_in_gb):
+ return number_in_gb * 1024 * 1024 * 1024
+
+ def _get_volume(self, volume_id):
+ list_vols_response = list_volumes(self.apiClient, id=volume_id)
+ return list_vols_response[0]
+
+ def _get_vm(self, vm_id):
+ list_vms_response = list_virtual_machines(self.apiClient, id=vm_id)
+ return list_vms_response[0]
+
+ def _get_template_cache_name(self):
+ if TestData.hypervisor_type == TestData.kvm:
+ return TestData.templateCacheNameKvm
+
+ self.assert_(False, "Invalid hypervisor type")
+
+ @classmethod
+ def _start_vm(cls, vm):
+ vm_for_check = list_virtual_machines(
+ cls.apiClient,
+ id=vm.id
+ )[0]
+
+ if vm_for_check.state == VirtualMachine.STOPPED:
+ vm.start(cls.apiClient)
+
+ # For KVM, just give it 90 seconds to boot up.
+ if TestData.hypervisor_type == TestData.kvm:
+ time.sleep(90)
+
+ @classmethod
+ def _reboot_vm(cls, vm):
+ vm.reboot(cls.apiClient)
+
+ # For KVM, just give it 90 seconds to boot up.
+ if TestData.hypervisor_type == TestData.kvm:
+ time.sleep(90)
diff --git a/tools/marvin/marvin/lib/base.py b/tools/marvin/marvin/lib/base.py
index 45f51f796dde..aeb8bfb9d101 100755
--- a/tools/marvin/marvin/lib/base.py
+++ b/tools/marvin/marvin/lib/base.py
@@ -2978,7 +2978,8 @@ def __init__(self, items):
@classmethod
def create(cls, apiclient, services, scope=None, clusterid=None,
zoneid=None, podid=None, provider=None, tags=None,
- capacityiops=None, capacitybytes=None, hypervisor=None):
+ capacityiops=None, capacitybytes=None, hypervisor=None,
+ details=None):
"""Create Storage pool (Primary Storage)"""
cmd = createStoragePool.createStoragePoolCmd()
@@ -3030,6 +3031,13 @@ def create(cls, apiclient, services, scope=None, clusterid=None,
elif "hypervisor" in services:
cmd.hypervisor = services["hypervisor"]
+ d = services.get("details", details)
+ if d:
+ count = 1
+ for key, value in d.items():
+ setattr(cmd, "details[{}].{}".format(count, key), value)
+ count = count + 1
+
return StoragePool(apiclient.createStoragePool(cmd).__dict__)
def delete(self, apiclient):
diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json
index 90fcad857c15..9adffd9aacd1 100644
--- a/ui/public/locales/en.json
+++ b/ui/public/locales/en.json
@@ -1894,6 +1894,7 @@
"label.routerrequiresupgrade": "Upgrade is required",
"label.routertype": "Type",
"label.routing.host": "Routing Host",
+"label.resourcegroup": "Resource Group",
"label.rule": "Rule",
"label.rule.number": "Rule Number",
"label.rules": "Rules",
@@ -2965,6 +2966,7 @@
"message.error.zone.for.cluster": "Please select zone for Kubernetes cluster",
"message.error.zone.name": "Please enter zone name",
"message.error.zone.type": "Please select zone type",
+"message.error.linstor.resourcegroup": "Please enter the Linstor Resource-Group",
"message.fail.to.delete": "Failed to delete.",
"message.failed.to.add": "Failed to add",
"message.failed.to.assign.vms": "Failed to assign VMs",
@@ -3165,6 +3167,7 @@
"message.reset.password.warning.notpasswordenabled": "The template of this instance was created without password enabled",
"message.reset.password.warning.notstopped": "Your instance must be stopped before attempting to change its current password",
"message.reset.vpn.connection": "Please confirm that you want to reset VPN connection",
+"message.linstor.resourcegroup.description": "Linstor resource group to use for primary storage",
"message.resize.volume.failed": "Failed to resize volume",
"message.resource.not.found": "Resource not found",
"message.restart.mgmt.server": "Please restart your management server(s) for your new settings to take effect.",
@@ -3191,7 +3194,7 @@
"message.select.tier": "Please select a tier",
"message.select.zone.description": "Select type of zone basic/advanced",
"message.select.zone.hint": "This is the type of zone deployement that you want to use. Basic zone: provides a single network where each VM instance is assigned an IP directly from the network. Guest isolation can be provided through layer-3 means such as security groups (IP address source filtering). Advanced zone: For more sophisticated network topologies. This network model provides the most flexibility in defining guest networks and providing custom network offerings such as firewall, VPN, or load balancer support.",
-"message.server.description": "NFS, iSCSI, or PreSetup: IP address or DNS name of the storage device. VMWare PreSetup: IP address or DNS name of the vCenter server.",
+"message.server.description": "NFS, iSCSI, or PreSetup: IP address or DNS name of the storage device. VMWare PreSetup: IP address or DNS name of the vCenter server. Linstor: HTTP(s) URL of the linstor-controller.",
"message.set.default.nic": "Please confirm that you would like to make this NIC the default for this VM.",
"message.set.default.nic.manual": "Please manually update the default NIC on the VM now.",
"message.setting.updated": "Setting Updated:",
diff --git a/ui/src/views/infra/AddPrimaryStorage.vue b/ui/src/views/infra/AddPrimaryStorage.vue
index aee89d420a10..54d12996aa0a 100644
--- a/ui/src/views/infra/AddPrimaryStorage.vue
+++ b/ui/src/views/infra/AddPrimaryStorage.vue
@@ -134,7 +134,7 @@
@@ -191,7 +191,19 @@
-
+
+
+
+
+
+ {{ provider }}
+
+
+
+
+
+
+
+
+ {{ $t('label.resourcegroup') }}
+
+
+
+
+
+
+
cluster.id === this.clusterSelected)
this.hypervisorType = cluster.hypervisortype
if (this.hypervisorType === 'KVM') {
- this.protocols = ['nfs', 'SharedMountPoint', 'RBD', 'CLVM', 'Gluster', 'custom']
+ this.protocols = ['nfs', 'SharedMountPoint', 'RBD', 'CLVM', 'Gluster', 'Linstor', 'custom']
} else if (this.hypervisorType === 'XenServer') {
this.protocols = ['nfs', 'PreSetup', 'iscsi', 'custom']
} else if (this.hypervisorType === 'VMware') {
@@ -558,6 +581,16 @@ export default {
closeModal () {
this.$parent.$parent.close()
},
+ linstorURL (server) {
+ var url
+ if (server.indexOf('://') === -1) {
+ url = 'http://' + server
+ } else {
+ url = server
+ }
+
+ return url
+ },
handleSubmit (e) {
e.preventDefault()
if (this.loading) return
@@ -646,6 +679,11 @@ export default {
}
var lun = values.lun
url = this.iscsiURL(server, iqn, lun)
+ } else if (values.protocol === 'Linstor') {
+ url = this.linstorURL(server)
+ params.provider = 'Linstor'
+ values.managed = false
+ params['details[0].resourceGroup'] = values.resourcegroup
}
params.url = url
if (values.provider !== 'DefaultPrimary' && values.provider !== 'PowerFlex') {
diff --git a/ui/src/views/infra/zone/ZoneWizardAddResources.vue b/ui/src/views/infra/zone/ZoneWizardAddResources.vue
index f1a82dafde5f..04d09536d821 100644
--- a/ui/src/views/infra/zone/ZoneWizardAddResources.vue
+++ b/ui/src/views/infra/zone/ZoneWizardAddResources.vue
@@ -350,7 +350,7 @@ export default {
placeHolder: 'message.error.server',
required: true,
display: {
- primaryStorageProtocol: ['nfs', 'iscsi', 'gluster', 'SMB']
+ primaryStorageProtocol: ['nfs', 'iscsi', 'gluster', 'SMB', 'Linstor']
}
},
{
@@ -489,6 +489,15 @@ export default {
primaryStorageProtocol: 'vmfs'
}
},
+ {
+ title: 'label.resourcegroup',
+ key: 'primaryStorageLinstorResourceGroup',
+ placeHolder: 'message.error.linstor.resourcegroup',
+ required: true,
+ display: {
+ primaryStorageProtocol: 'Linstor'
+ }
+ },
{
title: 'label.storage.tags',
key: 'primaryStorageTags',
@@ -826,6 +835,10 @@ export default {
id: 'gluster',
description: 'Gluster'
})
+ protocols.push({
+ id: 'Linstor',
+ description: 'Linstor'
+ })
} else if (hypervisor === 'XenServer') {
protocols.push({
id: 'nfs',
diff --git a/ui/src/views/infra/zone/ZoneWizardLaunchZone.vue b/ui/src/views/infra/zone/ZoneWizardLaunchZone.vue
index 3a086d12381b..a245d3dc03bb 100644
--- a/ui/src/views/infra/zone/ZoneWizardLaunchZone.vue
+++ b/ui/src/views/infra/zone/ZoneWizardLaunchZone.vue
@@ -1345,6 +1345,9 @@ export default {
}
path += '/' + this.prefillContent.primaryStorageVmfsDatastore?.value || ''
url = this.vmfsURL('dummy', path)
+ } else if (protocol === 'Linstor') {
+ url = this.linstorURL(server)
+ params['details[0].resourceGroup'] = this.prefillContent.primaryStorageLinstorResourceGroup.value
} else {
let iqn = this.prefillContent.primaryStorageTargetIQN?.value || ''
if (iqn.substring(0, 1) !== '/') {
@@ -2125,6 +2128,15 @@ export default {
}
return url
},
+ linstorURL (server) {
+ var url
+ if (server.indexOf('://') === -1) {
+ url = 'http://' + server
+ } else {
+ url = server
+ }
+ return url
+ },
iscsiURL (server, iqn, lun) {
let url = ''
if (server.indexOf('://') === -1) {