My Google App Engine Go project creates a zip of multiple files that live in a "folder" in Google Cloud Storage. It used to be very fast when it was implemented in the BlobStore with the now-deprecated-and-removed Files API. I recently converted the code to use Google Cloud Storage, and now the performance is really bad and sometimes it times out. The files being zipped are between 1K and 2M in size.

I am looking for any advice to improve zipping up the file contents. The code below is what I wrote for packing multiple files in the cloud into a new zip file in the cloud. It can take a long time to execute, and it requires loading the entire contents of each file into memory before writing it to the zip (see the performance issue below). There must be a better way.

```go
// Pack a folder into zip file
func (cloud *Cloud) Pack(srcFolder string, fileName string, contentType string, metaData *map[string]string) {

	log.Infof(cloud.c, "Packing bucket %v folder %v to file %v", cloud.bucket, srcFolder, fileName)

	srcFolder = fmt.Sprintf("%v/", srcFolder)
	query := &storage.Query{Prefix: srcFolder, Delimiter: "/"}

	objs, err := storage.ListObjects(cloud.ctx, cloud.bucket, query)
	if err != nil {
		log.Errorf(cloud.c, "Packing failed to list bucket %q: %v", cloud.bucket, err)
		return
	}

	totalFiles := len(objs.Results)
	if totalFiles == 0 {
		log.Errorf(cloud.c, "Packing failed to find objects found in folder %q: %v", cloud.bucket, srcFolder)
		return
	}

	// create storage file for writing
	log.Infof(cloud.c, "Writing new zip file to %v/%v for %v files", cloud.bucket, fileName, totalFiles)
	storageWriter := storage.NewWriter(cloud.ctx, cloud.bucket, fileName)

	// add optional content type and meta data
	if len(contentType) > 0 {
		storageWriter.ContentType = contentType
	}
	if metaData != nil {
		storageWriter.Metadata = *metaData
	}

	// Create a buffer to write our archive to.
	buf := new(bytes.Buffer)

	// Create a new zip archive to memory buffer
	zipWriter := zip.NewWriter(buf)

	// go through each file in the folder
	for _, obj := range objs.Results {

		log.Infof(cloud.c, "Packing file %v of size %v to zip file", obj.Name, obj.Size)
		//d.dumpStats(obj)
		// ... (the rest of the function is cut off in the original post)
```
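To make the question concrete, here is a rough, untested sketch of the kind of streaming approach I am hoping exists. It assumes the same (now deprecated) google.golang.org/cloud/storage package used above (storage.ListObjects, storage.NewReader, storage.NewWriter) and uses a hypothetical standalone function packStreaming instead of my Cloud method. The idea is to point zip.NewWriter directly at the storage.Writer and io.Copy each object into its zip entry, so neither the whole archive nor any single file has to be held fully in memory.

```go
package cloudzip // hypothetical package name for this sketch

import (
	"archive/zip"
	"io"
	"path"

	"golang.org/x/net/context"
	"google.golang.org/cloud/storage" // deprecated package used in the question
)

// packStreaming zips every object under srcFolder into a new object fileName,
// streaming the data instead of buffering it. This is a sketch, not my code.
func packStreaming(ctx context.Context, bucket, srcFolder, fileName string) error {
	query := &storage.Query{Prefix: srcFolder + "/", Delimiter: "/"}
	objs, err := storage.ListObjects(ctx, bucket, query)
	if err != nil {
		return err
	}

	// The zip bytes go straight to the destination object; no bytes.Buffer.
	storageWriter := storage.NewWriter(ctx, bucket, fileName)
	zipWriter := zip.NewWriter(storageWriter)

	for _, obj := range objs.Results {
		// Create a zip entry named after the object (without the folder prefix).
		entry, err := zipWriter.Create(path.Base(obj.Name))
		if err != nil {
			return err
		}
		reader, err := storage.NewReader(ctx, bucket, obj.Name)
		if err != nil {
			return err
		}
		// io.Copy moves the data in small chunks rather than loading it all at once.
		_, err = io.Copy(entry, reader)
		reader.Close()
		if err != nil {
			return err
		}
	}

	// Closing the zip writer flushes the central directory; closing the storage
	// writer finalizes the object in the bucket.
	if err := zipWriter.Close(); err != nil {
		return err
	}
	return storageWriter.Close()
}
```

If chaining the readers and writers like this is valid with the Cloud Storage client, it would remove both the bytes.Buffer and the per-file in-memory loads. I would appreciate confirmation that this is the right pattern, or pointers to a faster one.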