feat: update article files async upload
This commit is contained in:
parent
c95c6f0b2e
commit
08dbe4b190
|
|
@ -5,6 +5,7 @@ import "time"
|
|||
type ArticleFiles struct {
|
||||
ID uint `json:"id" gorm:"primaryKey;type:int4;autoIncrement"`
|
||||
ArticleId uint `json:"article_id" gorm:"type:int4"`
|
||||
UploadID *string `json:"upload_id" gorm:"type:varchar"`
|
||||
FilePath *string `json:"file_path" gorm:"type:varchar"`
|
||||
FileUrl *string `json:"file_url" gorm:"type:varchar"`
|
||||
FileName *string `json:"file_name" gorm:"type:varchar"`
|
||||
|
|
|
|||
|
|
@ -21,6 +21,8 @@ var NewArticleFilesModule = fx.Options(
|
|||
|
||||
// register service of ArticleFiles module
|
||||
fx.Provide(service.NewArticleFilesService),
|
||||
fx.Provide(service.NewUploadService),
|
||||
fx.Provide(service.NewUploadManager),
|
||||
|
||||
// register controller of ArticleFiles module
|
||||
fx.Provide(controller.NewController),
|
||||
|
|
@ -50,5 +52,6 @@ func (_i *ArticleFilesRouter) RegisterArticleFilesRoutes() {
|
|||
router.Put("/:id", articleFilesController.Update)
|
||||
router.Delete("/:id", articleFilesController.Delete)
|
||||
router.Get("/viewer/:filename", articleFilesController.Viewer)
|
||||
router.Get("/upload-status/:uploadId", articleFilesController.GetUploadStatus)
|
||||
})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
package controller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"go-humas-be/app/module/article_files/request"
|
||||
"go-humas-be/app/module/article_files/service"
|
||||
|
|
@ -21,6 +22,7 @@ type ArticleFilesController interface {
|
|||
Update(c *fiber.Ctx) error
|
||||
Delete(c *fiber.Ctx) error
|
||||
Viewer(c *fiber.Ctx) error
|
||||
GetUploadStatus(c *fiber.Ctx) error
|
||||
}
|
||||
|
||||
func NewArticleFilesController(articleFilesService service.ArticleFilesService) ArticleFilesController {
|
||||
|
|
@ -104,7 +106,7 @@ func (_i *articleFilesController) Show(c *fiber.Ctx) error {
|
|||
// @Tags Article Files
|
||||
// @Security Bearer
|
||||
// @Produce json
|
||||
// @Param files formData file true "Upload file"
|
||||
// @Param files formData file true "Upload file" multiple true
|
||||
// @Param articleId path int true "Article ID"
|
||||
// @Success 200 {object} response.Response
|
||||
// @Failure 400 {object} response.BadRequestError
|
||||
|
|
@ -204,3 +206,25 @@ func (_i *articleFilesController) Delete(c *fiber.Ctx) error {
|
|||
// Viewer delegates to the service layer, which serves the file for the
// :filename route parameter (route registered as /viewer/:filename).
func (_i *articleFilesController) Viewer(c *fiber.Ctx) error {
	return _i.articleFilesService.Viewer(c)
}
|
||||
|
||||
// GetUploadStatus ArticleFiles
|
||||
// @Summary GetUploadStatus ArticleFiles
|
||||
// @Description API for GetUploadStatus ArticleFiles
|
||||
// @Tags Article Files
|
||||
// @Security Bearer
|
||||
// @Param uploadId path string true "Upload ID of ArticleFiles"
|
||||
// @Success 200 {object} response.Response
|
||||
// @Failure 400 {object} response.BadRequestError
|
||||
// @Failure 401 {object} response.UnauthorizedError
|
||||
// @Failure 500 {object} response.InternalServerError
|
||||
// @Router /article-files/upload-status/{uploadId} [get]
|
||||
func (_i *articleFilesController) GetUploadStatus(c *fiber.Ctx) error {
|
||||
progress, _ := _i.articleFilesService.GetUploadStatus(c)
|
||||
progressMessage := fmt.Sprintf("Upload Progress: %d%%", progress)
|
||||
|
||||
return utilRes.Resp(c, utilRes.Response{
|
||||
Success: true,
|
||||
Messages: utilRes.Messages{"Upload Status Retrieve"},
|
||||
Data: progressMessage,
|
||||
})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -22,6 +22,7 @@ type ArticleFilesQueryRequest struct {
|
|||
type ArticleFilesCreateRequest struct {
|
||||
ArticleId uint `json:"articleId" validate:"required"`
|
||||
StatusId int `json:"statusId" validate:"required"`
|
||||
UploadId *string `json:"uploadId"`
|
||||
FilePath *string `json:"filePath"`
|
||||
FileUrl *string `json:"fileUrl"`
|
||||
FileName *string `json:"fileName"`
|
||||
|
|
@ -35,6 +36,7 @@ type ArticleFilesCreateRequest struct {
|
|||
func (req ArticleFilesCreateRequest) ToEntity() *entity.ArticleFiles {
|
||||
return &entity.ArticleFiles{
|
||||
ArticleId: req.ArticleId,
|
||||
UploadID: req.UploadId,
|
||||
FilePath: req.FilePath,
|
||||
FileUrl: req.FileUrl,
|
||||
FileName: req.FileName,
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@ package service
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/minio/minio-go/v7"
|
||||
"github.com/rs/zerolog"
|
||||
|
|
@ -15,9 +16,12 @@ import (
|
|||
"log"
|
||||
"math/rand"
|
||||
"mime"
|
||||
"mime/multipart"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
|
|
@ -34,6 +38,7 @@ type ArticleFilesService interface {
|
|||
Show(id uint) (articleFiles *response.ArticleFilesResponse, err error)
|
||||
Save(c *fiber.Ctx, id uint) error
|
||||
Update(id uint, req request.ArticleFilesUpdateRequest) (err error)
|
||||
GetUploadStatus(c *fiber.Ctx) (progress int, err error)
|
||||
Delete(id uint) error
|
||||
Viewer(c *fiber.Ctx) error
|
||||
}
|
||||
|
|
@ -48,6 +53,17 @@ func NewArticleFilesService(repo repository.ArticleFilesRepository, log zerolog.
|
|||
}
|
||||
}
|
||||
|
||||
// progressMap and progressLock hold the percent-complete value for each
// in-flight upload, keyed by upload ID; shared between the uploader
// goroutines (writers) and GetUploadStatus (reader).
var (
	progressMap  = make(map[string]int) // stores upload progress per UploadID
	progressLock = sync.Mutex{}
)

// progressWriter counts bytes flowing through an io.TeeReader so the upload
// percentage (uploadedSize/totalSize) can be published to progressMap.
type progressWriter struct {
	uploadID     string
	totalSize    int64
	uploadedSize *int64 // pointer so the caller's running counter is shared
}
|
||||
|
||||
// All implement interface of ArticleFilesService
|
||||
func (_i *articleFilesService) All(req request.ArticleFilesQueryRequest) (articleFiless []*response.ArticleFilesResponse, paging paginator.Pagination, err error) {
|
||||
results, paging, err := _i.Repo.GetAll(req)
|
||||
|
|
@ -74,6 +90,8 @@ func (_i *articleFilesService) Show(id uint) (articleFiles *response.ArticleFile
|
|||
func (_i *articleFilesService) Save(c *fiber.Ctx, id uint) (err error) {
|
||||
bucketName := _i.MinioStorage.Cfg.ObjectStorage.MinioStorage.BucketName
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
form, err := c.MultipartForm()
|
||||
|
||||
if err != nil {
|
||||
|
|
@ -91,6 +109,103 @@ func (_i *articleFilesService) Save(c *fiber.Ctx, id uint) (err error) {
|
|||
})
|
||||
}
|
||||
|
||||
for _, files := range form.File {
|
||||
|
||||
_i.Log.Info().Str("timestamp", time.Now().
|
||||
Format(time.RFC3339)).Str("Service:Resource", "Uploader:: top").
|
||||
Interface("files", files).Msg("")
|
||||
|
||||
for _, fileHeader := range files {
|
||||
_i.Log.Info().Str("timestamp", time.Now().
|
||||
Format(time.RFC3339)).Str("Service:Resource", "Uploader:: loop").
|
||||
Interface("data", fileHeader).Msg("")
|
||||
|
||||
filename := filepath.Base(fileHeader.Filename)
|
||||
filenameAlt := filepath.Clean(filename[:len(filename)-len(filepath.Ext(filename))])
|
||||
filename = strings.ReplaceAll(filename, " ", "")
|
||||
filenameWithoutExt := filepath.Clean(filename[:len(filename)-len(filepath.Ext(filename))])
|
||||
extension := filepath.Ext(fileHeader.Filename)[1:]
|
||||
|
||||
rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
randUniqueId := rand.Intn(1000000)
|
||||
uploadID := strconv.Itoa(randUniqueId)
|
||||
|
||||
newFilenameWithoutExt := filenameWithoutExt + "_" + strconv.Itoa(randUniqueId)
|
||||
newFilename := newFilenameWithoutExt + "." + extension
|
||||
|
||||
objectName := "articles/upload/" + newFilename
|
||||
fileSize := strconv.FormatInt(fileHeader.Size, 10)
|
||||
fileSizeInt := fileHeader.Size
|
||||
|
||||
_i.Log.Info().Str("timestamp", time.Now().
|
||||
Format(time.RFC3339)).Str("Service:Resource", "Uploader:: top").
|
||||
Interface("Start upload", uploadID).Msg("")
|
||||
|
||||
req := request.ArticleFilesCreateRequest{
|
||||
ArticleId: id,
|
||||
UploadId: &uploadID,
|
||||
FilePath: &objectName,
|
||||
FileName: &newFilename,
|
||||
FileAlt: &filenameAlt,
|
||||
Size: &fileSize,
|
||||
}
|
||||
|
||||
err = _i.Repo.Create(req.ToEntity())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
src, err := fileHeader.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer src.Close()
|
||||
|
||||
tempFilePath := fmt.Sprintf("/tmp/%s", newFilename)
|
||||
tempFile, err := os.Create(tempFilePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tempFile.Close()
|
||||
|
||||
// Copy file ke direktori sementara
|
||||
_, err = io.Copy(tempFile, src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
go uploadToMinIO(ctx, _i.Log, minioClient, uploadID, tempFilePath, bucketName, objectName, fileSizeInt)
|
||||
}
|
||||
}
|
||||
|
||||
_i.Log.Info().Str("timestamp", time.Now().
|
||||
Format(time.RFC3339)).Str("Service:Resource", "User:All").
|
||||
Interface("data", "Successfully uploaded").Msg("")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (_i *articleFilesService) SaveSync(c *fiber.Ctx, id uint) (err error) {
|
||||
bucketName := _i.MinioStorage.Cfg.ObjectStorage.MinioStorage.BucketName
|
||||
|
||||
form, err := c.MultipartForm()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//filess := form.File["files"]
|
||||
|
||||
// Create minio connection.
|
||||
minioClient, err := _i.MinioStorage.ConnectMinio()
|
||||
|
||||
if err != nil {
|
||||
// Return status 500 and minio connection error.
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": true,
|
||||
"msg": err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
for _, files := range form.File {
|
||||
|
||||
_i.Log.Info().Str("timestamp", time.Now().
|
||||
|
|
@ -223,3 +338,98 @@ func getFileExtension(filename string) string {
|
|||
// ambil ekstensi terakhir
|
||||
return parts[len(parts)-1]
|
||||
}
|
||||
|
||||
func uploadTempFile(log zerolog.Logger, fileHeader *multipart.FileHeader, filePath string) {
|
||||
src, err := fileHeader.Open()
|
||||
if err != nil {
|
||||
log.Info().Str("timestamp", time.Now().
|
||||
Format(time.RFC3339)).Str("Service:Resource", "Article:uploadToMinIO-0").
|
||||
Interface("err", err).Msg("")
|
||||
}
|
||||
defer src.Close()
|
||||
|
||||
tempFile, err := os.Create(filePath)
|
||||
if err != nil {
|
||||
log.Info().Str("timestamp", time.Now().
|
||||
Format(time.RFC3339)).Str("Service:Resource", "Article:uploadToMinIO-1").
|
||||
Interface("err", err).Msg("")
|
||||
}
|
||||
defer tempFile.Close()
|
||||
|
||||
// Copy file ke direktori sementara
|
||||
_, err = io.Copy(tempFile, src)
|
||||
if err != nil {
|
||||
log.Info().Str("timestamp", time.Now().
|
||||
Format(time.RFC3339)).Str("Service:Resource", "Article:uploadToMinIO-2").
|
||||
Interface("err", err).Msg("")
|
||||
}
|
||||
}
|
||||
|
||||
func uploadToMinIO(ctx context.Context, log zerolog.Logger, minioClient *minio.Client, uploadID, filePath, bucketName string, objectName string, fileSize int64) {
|
||||
file, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
log.Info().Str("timestamp", time.Now().
|
||||
Format(time.RFC3339)).Str("Service:Resource", "Article:uploadToMinIO-3").
|
||||
Interface("err", err).Msg("")
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
// Upload file ke MinIO dengan progress tracking
|
||||
uploadProgress := int64(0)
|
||||
reader := io.TeeReader(file, &progressWriter{uploadID: uploadID, totalSize: fileSize, uploadedSize: &uploadProgress})
|
||||
|
||||
_, err = minioClient.PutObject(ctx, bucketName, objectName, reader, fileSize, minio.PutObjectOptions{})
|
||||
if err != nil {
|
||||
|
||||
log.Info().Str("timestamp", time.Now().
|
||||
Format(time.RFC3339)).Str("Service:Resource", "Article:uploadToMinIO-4").
|
||||
Interface("err", err).Msg("")
|
||||
return
|
||||
}
|
||||
|
||||
// Upload selesai, update progress menjadi 100
|
||||
progressLock.Lock()
|
||||
progressMap[uploadID] = 100
|
||||
progressLock.Unlock()
|
||||
|
||||
go removeFileTemp(log, filePath)
|
||||
}
|
||||
|
||||
func removeFileTemp(log zerolog.Logger, filePath string) {
|
||||
err := os.Remove(filePath)
|
||||
if err != nil {
|
||||
log.Info().Str("timestamp", time.Now().
|
||||
Format(time.RFC3339)).Str("Service:Resource", "Article:uploadToMinIO-5").
|
||||
Interface("Failed to remove temporary file", err).Msg("")
|
||||
} else {
|
||||
log.Info().Str("timestamp", time.Now().
|
||||
Format(time.RFC3339)).Str("Service:Resource", "Article:uploadToMinIO-6").
|
||||
Interface("err", "Temporary file removed").Msg("")
|
||||
}
|
||||
}
|
||||
|
||||
func (p *progressWriter) Write(data []byte) (int, error) {
|
||||
n := len(data)
|
||||
progressLock.Lock()
|
||||
defer progressLock.Unlock()
|
||||
|
||||
*p.uploadedSize += int64(n)
|
||||
progress := int(float64(*p.uploadedSize) / float64(p.totalSize) * 100)
|
||||
|
||||
// Update progress di map
|
||||
progressMap[p.uploadID] = progress
|
||||
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (_i *articleFilesService) GetUploadStatus(c *fiber.Ctx) (progress int, err error) {
|
||||
uploadID := c.Params("uploadId")
|
||||
|
||||
// Ambil progress dari map
|
||||
progressLock.Lock()
|
||||
progress, _ = progressMap[uploadID]
|
||||
progressLock.Unlock()
|
||||
|
||||
return progress, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,139 @@
|
|||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/minio/minio-go/v7"
|
||||
"github.com/rs/zerolog"
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
|
||||
// UploadService handles the file-upload process to MinIO, recording progress
// and status through an UploadManager. (Original comment named this
// "AsyncUploader"; the asynchrony is driven by the caller's goroutine —
// TODO confirm intended usage with callers.)
type UploadService interface {
	UploadFile(ctx context.Context, minioClient *minio.Client, uploadID string, reader io.Reader, bucketName string, objectName string, size int64, contentType string) error
}

// uploadService is the default UploadService implementation.
type uploadService struct {
	uploadManager UploadManager // tracks per-upload status/progress
	Log           zerolog.Logger
}

// NewUploadService wires an UploadService with its upload manager and logger.
func NewUploadService(uploadManager UploadManager, log zerolog.Logger) UploadService {
	return &uploadService{
		uploadManager: uploadManager,
		Log:           log,
	}
}
|
||||
|
||||
func (u *uploadService) UploadFile(ctx context.Context, minioClient *minio.Client, uploadID string, reader io.Reader, bucketName string, objectName string, size int64, contentType string) error {
|
||||
status := &UploadStatus{
|
||||
FileName: objectName,
|
||||
Size: size,
|
||||
Progress: 0,
|
||||
Status: "uploading",
|
||||
ObjectName: objectName,
|
||||
BucketName: bucketName,
|
||||
StartTime: time.Now(),
|
||||
}
|
||||
|
||||
u.uploadManager.Add(uploadID, status)
|
||||
|
||||
u.Log.Info().Str("timestamp", time.Now().
|
||||
Format(time.RFC3339)).Str("Service:Resource", "UploadService::UploadFile").
|
||||
Interface("add status", status).Msg("")
|
||||
|
||||
// Upload ke Minio
|
||||
_, err := minioClient.PutObject(
|
||||
ctx,
|
||||
bucketName,
|
||||
objectName,
|
||||
reader,
|
||||
size,
|
||||
minio.PutObjectOptions{
|
||||
ContentType: contentType,
|
||||
PartSize: 10 * 1024 * 1024, // 10MB part size
|
||||
},
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
u.uploadManager.UpdateStatus(uploadID, "error", err)
|
||||
|
||||
u.Log.Info().Str("timestamp", time.Now().
|
||||
Format(time.RFC3339)).Str("Service:Resource", "UploadService::UploadFile").
|
||||
Interface("error when upload", err).Msg("")
|
||||
}
|
||||
|
||||
u.uploadManager.UpdateStatus(uploadID, "completed", nil)
|
||||
return nil
|
||||
}
|
||||
|
||||
//func (au *UploadService) UploadFile() {
|
||||
// // Buat context dengan timeout
|
||||
// ctx, cancel := context.WithTimeout(au.ctx, 30*time.Minute)
|
||||
// defer cancel()
|
||||
//
|
||||
// // Buat reader dari byte slice
|
||||
// reader := bytes.NewReader(au.fileData)
|
||||
// pipeReader, pipeWriter := io.Pipe()
|
||||
//
|
||||
// au.progressMap.Store(au.uploadID, 0.0)
|
||||
//
|
||||
// // Start goroutine to read from reader and write to pipe
|
||||
// go func() {
|
||||
// defer pipeWriter.Close()
|
||||
// buf := make([]byte, au.partSize)
|
||||
//
|
||||
// totalParts := int(reader.Size() / au.partSize)
|
||||
// if reader.Size()%au.partSize != 0 {
|
||||
// totalParts++
|
||||
// }
|
||||
//
|
||||
// for i := 0; i < totalParts; i++ {
|
||||
// n, err := reader.Read(buf)
|
||||
// if err != nil && err != io.EOF {
|
||||
// log.Println("Error reading file:", err)
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// if _, err := pipeWriter.Write(buf[:n]); err != nil {
|
||||
// log.Println("Error writing to pipe:", err)
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// progress := float64(i+1) / float64(totalParts) * 100
|
||||
// au.progressMap.Store(au.uploadID, progress)
|
||||
// au.uploadManager.UpdateProgress(au.uploadID, int(progress))
|
||||
// }
|
||||
// }()
|
||||
//
|
||||
// // Upload ke Minio
|
||||
// _, err := au.minioClient.PutObject(
|
||||
// ctx,
|
||||
// au.bucketName,
|
||||
// au.objectName,
|
||||
// pipeReader,
|
||||
// reader.Size(),
|
||||
// minio.PutObjectOptions{
|
||||
// ContentType: au.contentType,
|
||||
// PartSize: 10 * 1024 * 1024, // 10MB part size
|
||||
// },
|
||||
// )
|
||||
//
|
||||
// if err != nil {
|
||||
// log.Println("Error uploading file:", err)
|
||||
// au.progressMap.Store(au.uploadID, "error")
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// fmt.Printf("Uploading process for %s", au.objectName)
|
||||
//
|
||||
// if err != nil {
|
||||
// uploadManager.UpdateStatus(au.uploadID, "error", err)
|
||||
// fmt.Printf("Upload error for %s: %v\n", au.objectName, err)
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// au.progressMap.Store(au.uploadID, 100)
|
||||
// au.uploadManager.UpdateProgress(au.uploadID, 100)
|
||||
// au.uploadManager.UpdateStatus(au.uploadID, "completed", nil)
|
||||
//}
|
||||
|
|
@ -0,0 +1,71 @@
|
|||
package service
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type UploadStatus struct {
|
||||
FileName string `json:"fileName"`
|
||||
Size int64 `json:"size"`
|
||||
Progress int `json:"progress"`
|
||||
Status string `json:"status"`
|
||||
ObjectName string `json:"objectName"`
|
||||
BucketName string `json:"bucketName"`
|
||||
StartTime time.Time `json:"startTime"`
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
type UploadManager interface {
|
||||
Add(uploadID string, status *UploadStatus)
|
||||
UpdateProgress(uploadID string, progress int)
|
||||
UpdateStatus(uploadID string, status string, err error)
|
||||
Get(uploadID string) (*UploadStatus, bool)
|
||||
}
|
||||
|
||||
type uploadManager struct {
|
||||
uploads map[string]*UploadStatus
|
||||
mutex sync.RWMutex
|
||||
}
|
||||
|
||||
func NewUploadManager() UploadManager {
|
||||
return &uploadManager{
|
||||
uploads: make(map[string]*UploadStatus),
|
||||
}
|
||||
}
|
||||
|
||||
// Add menambahkan status upload baru
|
||||
func (um *uploadManager) Add(uploadID string, status *UploadStatus) {
|
||||
um.mutex.Lock()
|
||||
defer um.mutex.Unlock()
|
||||
um.uploads[uploadID] = status
|
||||
}
|
||||
|
||||
// UpdateProgress memperbarui progress upload
|
||||
func (um *uploadManager) UpdateProgress(uploadID string, progress int) {
|
||||
um.mutex.Lock()
|
||||
defer um.mutex.Unlock()
|
||||
if status, exists := um.uploads[uploadID]; exists {
|
||||
status.Progress = progress
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateStatus memperbarui status upload
|
||||
func (um *uploadManager) UpdateStatus(uploadID string, status string, err error) {
|
||||
um.mutex.Lock()
|
||||
defer um.mutex.Unlock()
|
||||
if upload, exists := um.uploads[uploadID]; exists {
|
||||
upload.Status = status
|
||||
if err != nil {
|
||||
upload.Error = err.Error()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get mendapatkan status upload berdasarkan ID
|
||||
func (um *uploadManager) Get(uploadID string) (*UploadStatus, bool) {
|
||||
um.mutex.RLock()
|
||||
defer um.mutex.RUnlock()
|
||||
status, exists := um.uploads[uploadID]
|
||||
return status, exists
|
||||
}
|
||||
|
|
@ -9,7 +9,7 @@ idle-timeout = 5 # As seconds
|
|||
print-routes = false
|
||||
prefork = true
|
||||
production = false
|
||||
body-limit = 104857600 # "100 * 1024 * 1024"
|
||||
body-limit = 1048576000 # "1000 * 1024 * 1024" — NOTE: value is 1000 MiB; the old comment claiming 100 MiB no longer matched
|
||||
|
||||
[db.postgres]
|
||||
dsn = "postgresql://humas_user:HumasDB%402024@38.47.180.165:5432/humas_db" # <driver>://<username>:<password>@<host>:<port>/<database> — '@' inside the password must be percent-encoded (%40) or URI parsing breaks; NOTE(review): avoid committing real credentials, load them from the environment
|
||||
|
|
|
|||
|
|
@ -792,6 +792,55 @@ const docTemplate = `{
|
|||
}
|
||||
}
|
||||
},
|
||||
"/article-files/upload-status/{uploadId}": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"Bearer": []
|
||||
}
|
||||
],
|
||||
"description": "API for GetUploadStatus ArticleFiles",
|
||||
"tags": [
|
||||
"Article Files"
|
||||
],
|
||||
"summary": "GetUploadStatus ArticleFiles",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Upload ID of ArticleFiles",
|
||||
"name": "uploadId",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/response.Response"
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"description": "Bad Request",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/response.BadRequestError"
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"description": "Unauthorized",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/response.UnauthorizedError"
|
||||
}
|
||||
},
|
||||
"500": {
|
||||
"description": "Internal Server Error",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/response.InternalServerError"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/article-files/viewer/{filename}": {
|
||||
"get": {
|
||||
"security": [
|
||||
|
|
|
|||
|
|
@ -781,6 +781,55 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"/article-files/upload-status/{uploadId}": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"Bearer": []
|
||||
}
|
||||
],
|
||||
"description": "API for GetUploadStatus ArticleFiles",
|
||||
"tags": [
|
||||
"Article Files"
|
||||
],
|
||||
"summary": "GetUploadStatus ArticleFiles",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Upload ID of ArticleFiles",
|
||||
"name": "uploadId",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/response.Response"
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"description": "Bad Request",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/response.BadRequestError"
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"description": "Unauthorized",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/response.UnauthorizedError"
|
||||
}
|
||||
},
|
||||
"500": {
|
||||
"description": "Internal Server Error",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/response.InternalServerError"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/article-files/viewer/{filename}": {
|
||||
"get": {
|
||||
"security": [
|
||||
|
|
|
|||
|
|
@ -1242,6 +1242,37 @@ paths:
|
|||
summary: Update ArticleFiles
|
||||
tags:
|
||||
- Article Files
|
||||
/article-files/upload-status/{uploadId}:
|
||||
get:
|
||||
description: API for GetUploadStatus ArticleFiles
|
||||
parameters:
|
||||
- description: Upload ID of ArticleFiles
|
||||
in: path
|
||||
name: uploadId
|
||||
required: true
|
||||
type: string
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
schema:
|
||||
$ref: '#/definitions/response.Response'
|
||||
"400":
|
||||
description: Bad Request
|
||||
schema:
|
||||
$ref: '#/definitions/response.BadRequestError'
|
||||
"401":
|
||||
description: Unauthorized
|
||||
schema:
|
||||
$ref: '#/definitions/response.UnauthorizedError'
|
||||
"500":
|
||||
description: Internal Server Error
|
||||
schema:
|
||||
$ref: '#/definitions/response.InternalServerError'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: GetUploadStatus ArticleFiles
|
||||
tags:
|
||||
- Article Files
|
||||
/article-files/viewer/{filename}:
|
||||
get:
|
||||
description: API for create ArticleFiles
|
||||
|
|
|
|||
Loading…
Reference in New Issue