diff --git a/Build-Instructions b/Build-Instructions new file mode 100644 index 00000000..9a35dd9e --- /dev/null +++ b/Build-Instructions @@ -0,0 +1,7 @@ +To build the kadalu containers you can run inside the kadalu directory the command: + +make build-containers KADALU_VERSION=0.9.1 BUILD_BASE=no + +You need docker already installed; once the make command completes the docker images are available, you can view them by running the command: + +docker image ls \ No newline at end of file diff --git a/build.sh b/build.sh index cc347622..dcd540ab 100644 --- a/build.sh +++ b/build.sh @@ -79,14 +79,24 @@ if [[ "$CONTAINERS_FOR" == "TESTING" ]]; then fi echo "Building base builder image - This may take a while" - +# In Unified we install Gluster 10.5 which has OpVersion=100000, Kadalu is a Gluster client and should use the same OpVersion +# Kadalu releases greater than 0.9.1 use OpVersion=110000, so we have to use the Gluster client that comes with release 0.9.1 +# It seems that the Gluster client version used by Kadalu is determined by the builder release, so we don't use the latest +#if [ ${BUILD_BASE} == "yes" ]; then +# $RUNTIME_CMD $build \ +# -t "${DOCKER_USER}/builder:latest" "${build_args[@]}" \ +# --network host -f extras/Dockerfile.builder . +#else + # pull the base image if we don't want to build it +# $RUNTIME_CMD pull "${DOCKER_USER}/builder:latest" +#fi if [ ${BUILD_BASE} == "yes" ]; then $RUNTIME_CMD $build \ - -t "${DOCKER_USER}/builder:latest" "${build_args[@]}" \ + -t "${DOCKER_USER}/builder:0.9.1" "${build_args[@]}" \ --network host -f extras/Dockerfile.builder . 
else # pull the base image if we don't want to build it - $RUNTIME_CMD pull "${DOCKER_USER}/builder:latest" + $RUNTIME_CMD pull "${DOCKER_USER}/builder:0.9.1" fi echo "Building kadalu-server with version tag as ${VERSION}"; diff --git a/csi/Dockerfile b/csi/Dockerfile index 7b65a41f..ce2286ac 100644 --- a/csi/Dockerfile +++ b/csi/Dockerfile @@ -1,4 +1,8 @@ -ARG builder_version="latest" +# ARG builder_version="latest" +# In Unified we install Gluster 10.5 which has OpVersion=100000, Kadalu is a Gluster client and should use the same OpVersion +# Kadalu releases greater than 0.9.1 use OpVersion=110000, so we have to use the Gluster client that comes with release 0.9.1 +# It seems that the Gluster client version used by Kadalu is determined by the builder release, so we don't use the latest +ARG builder_version="0.9.1" FROM kadalu/builder:${builder_version} as builder diff --git a/csi/controllerserver.py b/csi/controllerserver.py index 52572de0..4d5ab6fc 100644 --- a/csi/controllerserver.py +++ b/csi/controllerserver.py @@ -259,14 +259,10 @@ def CreateVolume(self, request, context): if not is_hosting_volume_free(ext_volume['name'], pvsize): - logging.error(logf( - "Hosting volume is full. 
Add more storage", + logging.info(logf( + "Hosting volume is full...Over-provisioning!", volume=ext_volume['name'] )) - errmsg = "External resource is exhausted" - context.set_details(errmsg) - context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED) - return csi_pb2.CreateVolumeResponse() if pvtype in [PV_TYPE_VIRTBLOCK, PV_TYPE_RAWBLOCK]: vol = create_block_volume( @@ -305,6 +301,8 @@ def CreateVolume(self, request, context): duration_seconds=time.time() - start_time )) + update_free_size(ext_volume['name'], request.name, -pvsize) + send_analytics_tracker("pvc-external-kadalu", uid) # Pass required argument to get mount working on # nodeplugin through volume_context @@ -648,19 +646,25 @@ def ControllerExpandVolume(self, request, context): mntdir = os.path.join(HOSTVOL_MOUNTDIR, hostvol) use_gluster_quota = False + hostvoltype = existing_volume.extra['hostvoltype'] + # Check free-size in storage-pool before expansion + # For external gluster volume (the only one we currently use for HyperFile) we need the over-provisioning if not is_hosting_volume_free(hostvol, additional_pvsize_required): - - logging.error(logf( - "Hosting volume is full. Add more storage", - volume=hostvol - )) - errmsg = "Host volume resource is exhausted" - context.set_details(errmsg) - context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED) - return csi_pb2.CreateVolumeResponse() - - hostvoltype = existing_volume.extra['hostvoltype'] + if hostvoltype != 'External': + logging.error(logf( + "Hosting volume is full. Add more storage", + volume=hostvol + )) + errmsg = "Host volume resource is exhausted" + context.set_details(errmsg) + context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED) + return csi_pb2.CreateVolumeResponse() + else: + logging.info(logf( + "Hosting volume is full...Over-provisioning!", + volume=hostvol + )) if pvtype == PV_TYPE_SUBVOL: update_subdir_volume(