Commit 43c292d8 authored by 周健君's avatar 周健君

upload/download

parent 99a95109
module github.com/jumpserver/koko
go 1.12
go 1.13
require (
github.com/Azure/azure-pipeline-go v0.1.9 // indirect
github.com/Azure/azure-storage-blob-go v0.6.0
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/LeeEirc/elfinder v0.0.11
github.com/aliyun/aliyun-oss-go-sdk v1.9.8
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 // indirect
github.com/aws/aws-sdk-go v1.19.46
......@@ -15,6 +14,7 @@ require (
github.com/elastic/go-elasticsearch/v6 v6.8.5
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
github.com/gliderlabs/ssh v0.2.3-0.20190711180243-866d0ddf7991
github.com/go-playground/form v3.1.4+incompatible
github.com/gorilla/mux v1.7.2
github.com/gorilla/websocket v1.4.0
github.com/jarcoal/httpmock v1.0.4
......
package elfinder
import (
"archive/zip"
"crypto/md5"
"encoding/json"
"fmt"
"io"
"log"
"mime"
"net/http"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/go-playground/form"
)
// Protocol-level constants advertised to the elFinder client on init.
const (
	// APIVERSION is the connector protocol version (elFinder API >= 2.1).
	APIVERSION = 2.1050
	// UPLOADMAXSIZE is the per-request upload size limit reported to the client.
	UPLOADMAXSIZE = "10M"
)
const (
	// defaultZipMaxSize caps the total size of a "zipdl" archive. 1G.
	defaultZipMaxSize = 1024 * 1024 * 1024 // 1G
	// defaultTmpPath is where temporary zip archives are written.
	defaultTmpPath = "/tmp"
)
// Volumes is the ordered set of volumes served by a connector; the first
// entry becomes the default volume (see NewElFinderConnector).
type Volumes []Volume
// NewElFinderConnector builds a connector serving the given volumes.
// The first volume in vs is used as the default, so vs must be non-empty.
func NewElFinderConnector(vs Volumes) *ElFinderConnector {
	vols := make(map[string]Volume, len(vs))
	for _, item := range vs {
		vols[item.ID()] = item
	}
	return &ElFinderConnector{
		Volumes:  vols,
		defaultV: vs[0],
		req:      &ELFRequest{},
		res:      &ElfResponse{},
	}
}
// NewElFinderConnectorWithOption builds a connector for the given volumes and
// applies optional settings. Recognized option keys (case-insensitive):
//   - "zipmaxsize": max total bytes allowed for zip downloads
//   - "ziptmppath": directory for temporary zip files (created if missing)
//
// The first volume in vs becomes the default volume, so vs must be non-empty.
func NewElFinderConnectorWithOption(vs Volumes, option map[string]string) *ElFinderConnector {
	var volumeMap = make(map[string]Volume)
	for _, vol := range vs {
		volumeMap[vol.ID()] = vol
	}
	var zipMaxSize int64
	var zipTmpPath string
	for k, v := range option {
		switch strings.ToLower(k) {
		case "zipmaxsize":
			if size, err := strconv.Atoi(v); err == nil && size > 0 {
				zipMaxSize = int64(size)
			}
		case "ziptmppath":
			if _, err := os.Stat(v); err != nil && os.IsNotExist(err) {
				// BUG FIX: log.Fatal used to run unconditionally, terminating
				// the process even when MkdirAll succeeded. Also use 0700 —
				// a directory needs the execute bit to be usable.
				if err = os.MkdirAll(v, 0700); err != nil {
					log.Fatal(err)
				}
			}
			zipTmpPath = v
		}
	}
	if zipMaxSize == 0 {
		zipMaxSize = int64(defaultZipMaxSize)
	}
	if zipTmpPath == "" {
		zipTmpPath = defaultTmpPath
	}
	return &ElFinderConnector{Volumes: volumeMap, defaultV: vs[0], req: &ELFRequest{}, res: &ElfResponse{},
		zipTmpPath: zipTmpPath, zipMaxSize: zipMaxSize}
}
// ElFinderConnector serves the elFinder client-server API over HTTP,
// dispatching commands across one or more registered volumes.
type ElFinderConnector struct {
	Volumes    map[string]Volume // volumes keyed by their ID()
	defaultV   Volume            // fallback volume (first one registered)
	req        *ELFRequest       // decoded parameters of the current request
	res        *ElfResponse      // response being built for the current request
	zipMaxSize int64             // max total bytes allowed for zip downloads
	zipTmpPath string            // directory for temporary zip files
}
// ServeHTTP implements http.Handler: it parses the request parameters
// (query string for GET, multipart form for POST), decodes them into
// elf.req, and dispatches the elFinder command.
func (elf *ElFinderConnector) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
	var err error
	decoder := form.NewDecoder()
	switch req.Method {
	case "GET":
		if err := req.ParseForm(); err != nil {
			http.Error(rw, err.Error(), http.StatusInternalServerError)
			return
		}
	case "POST":
		// Up to 32 MB of multipart data is kept in memory; the rest spills
		// to temporary files.
		err = req.ParseMultipartForm(32 << 20) // ToDo check 8Mb
		if err != nil {
			http.Error(rw, err.Error(), http.StatusBadRequest)
			return
		}
	default:
		http.Error(rw, "Method Not Allowed", http.StatusMethodNotAllowed)
		return
	}
	// Decode failures are only logged; dispatch runs with whatever fields
	// were successfully decoded.
	err = decoder.Decode(elf.req, req.Form)
	if err != nil {
		log.Println(err)
	}
	elf.dispatch(rw, req)
}
// open responds to the "open" command: report the current working directory
// and its listing. On init requests it also includes the API version and
// default options; with tree=true it adds volume roots and parent dirs.
func (elf *ElFinderConnector) open() {
	// client: reload, back, forward, home , open
	// open dir
	var ret ElfResponse
	var path string
	var v Volume
	var err error
	IDAndTarget := strings.Split(elf.req.Target, "_")
	if len(IDAndTarget) == 1 {
		// No volume prefix in the target hash: treat it as the root.
		path = "/"
	} else {
		path, err = elf.parseTarget(strings.Join(IDAndTarget[1:], "_"))
		if err != nil {
			elf.res.Error = []string{"errFolderNotFound"}
			return
		}
	}
	if path == "" || path == "/" {
		// Root of the default volume.
		v = elf.defaultV
		ret.Cwd = v.RootFileDir()
		ret.Files = v.List(path)
	} else {
		v = elf.getVolume(IDAndTarget[0])
		ret.Cwd, err = v.Info(path)
		if err != nil {
			elf.res.Error = []string{"errFolderNotFound"}
			return
		}
		ret.Files = v.List(path)
	}
	// The elFinder API expects cwd to be repeated inside the files array.
	ret.Files = append(ret.Files, ret.Cwd)
	if elf.req.Init {
		ret.Api = APIVERSION
		ret.UplMaxSize = UPLOADMAXSIZE
		ret.Options = defaultOptions
	}
	if elf.req.Tree {
		// NOTE(review): ret.Tree is allocated but never appended to — tree
		// nodes go into ret.Files instead. The elFinder 2.1 client reads
		// tree nodes from "files", so this looks intentional; confirm
		// against the client before "fixing".
		ret.Tree = make([]FileDir, 0, len(elf.Volumes))
		for _, item := range elf.Volumes {
			ret.Files = append(ret.Files, item.RootFileDir())
		}
		for _, item := range v.Parents(path, 0) {
			ret.Files = append(ret.Files, item)
		}
	}
	elf.res = &ret
}
// file resolves the request target to a volume path and opens it for
// reading. It returns the reader, the base filename and any error.
func (elf *ElFinderConnector) file() (read io.ReadCloser, filename string, err error) {
	parts := strings.Split(elf.req.Target, "_")
	vol := elf.getVolume(parts[0])
	var path string
	path, err = elf.parseTarget(strings.Join(parts[1:], "_"))
	if err != nil {
		return
	}
	filename = filepath.Base(path)
	read, err = vol.GetFile(path)
	return
}
// ls responds to the "ls" command: list `"hash";"name"` pairs of entries in
// the target directory, optionally restricted to names in intersect[].
func (elf *ElFinderConnector) ls() {
	elf.res.List = make([]string, 0)
	parts := strings.Split(elf.req.Target, "_")
	vol := elf.getVolume(parts[0])
	path := "/"
	if len(parts) > 1 {
		path, _ = elf.parseTarget(strings.Join(parts[1:], "_"))
	}
	entries := vol.List(path)
	names := make([]string, 0, len(entries))
	for _, entry := range entries {
		if elf.req.Intersect == nil {
			names = append(names, fmt.Sprintf(`"%s";"%s"`, entry.Hash, entry.Name))
			continue
		}
		for _, want := range elf.req.Intersect {
			if entry.Name == want {
				names = append(names, fmt.Sprintf(`"%s";"%s"`, entry.Hash, entry.Name))
			}
		}
	}
	elf.res.List = names
}
// parents responds to the "parents" command: return the chain of directories
// above the target (as provided by the volume's Parents).
func (elf *ElFinderConnector) parents() {
	parts := strings.Split(elf.req.Target, "_")
	vol := elf.getVolume(parts[0])
	path, err := elf.parseTarget(strings.Join(parts[1:], "_"))
	if err != nil {
		elf.res.Error = err
		return
	}
	elf.res.Tree = vol.Parents(path, 0)
}
// mkDir responds to the "mkdir" command: create elf.req.Name and/or every
// entry of elf.req.Dirs inside the target directory. Created entries are
// reported in Added; names of created dirs[] entries are echoed in Hashes.
func (elf *ElFinderConnector) mkDir() {
	added := make([]FileDir, 0)
	hashs := make(map[string]string)
	IDAndTarget := strings.Split(elf.req.Target, "_")
	v := elf.getVolume(IDAndTarget[0])
	path, err := elf.parseTarget(strings.Join(IDAndTarget[1:], "_"))
	if err != nil {
		elf.res.Error = []string{errMkdir, elf.req.Name}
		return
	}
	if elf.req.Name != "" {
		fileDir, err := v.MakeDir(path, elf.req.Name)
		if err != nil {
			elf.res.Error = []string{errMkdir, elf.req.Name}
			return
		}
		added = append(added, fileDir)
	}
	// Ranging over a nil/empty slice is a no-op, so no length guard needed.
	for _, name := range elf.req.Dirs {
		fileDir, err := v.MakeDir(path, name)
		if err != nil {
			// BUG FIX: report the directory name that actually failed, not
			// elf.req.Name (which is unrelated for dirs[] requests).
			elf.res.Error = []string{errMkdir, name}
			break
		}
		added = append(added, fileDir)
		hashs[name] = name
	}
	elf.res.Added = added
	elf.res.Hashes = hashs
}
// mkFile responds to the "mkfile" command: create an empty file named
// elf.req.Name inside the target directory.
func (elf *ElFinderConnector) mkFile() {
	parts := strings.Split(elf.req.Target, "_")
	vol := elf.getVolume(parts[0])
	dir, err := elf.parseTarget(strings.Join(parts[1:], "_"))
	if err == nil {
		var created FileDir
		if created, err = vol.MakeFile(dir, elf.req.Name); err == nil {
			elf.res.Added = []FileDir{created}
			return
		}
	}
	elf.res.Error = []string{"errMkfile", elf.req.Name}
}
// paste responds to the "paste" command: copy (or move, when cut=1) each
// target into the destination directory. New entries are reported in Added,
// moved-away source hashes in Removed. Per-target lookup failures are
// skipped; copy failures abort the remaining targets.
func (elf *ElFinderConnector) paste() {
	//cut, copy, paste
	added := make([]FileDir, 0, len(elf.req.Targets))
	removed := make([]string, 0, len(elf.req.Targets))
	dstIDAndTarget := strings.Split(elf.req.Dst, "_")
	dstPath, err := elf.parseTarget(strings.Join(dstIDAndTarget[1:], "_"))
	if err != nil {
		elf.res.Error = errNotFound
		return
	}
	dstVol := elf.getVolume(dstIDAndTarget[0])
	for i, target := range elf.req.Targets {
		srcIDAndTarget := strings.Split(target, "_")
		srcVol := elf.getVolume(srcIDAndTarget[0])
		srcPath, err := elf.parseTarget(strings.Join(srcIDAndTarget[1:], "_"))
		if err != nil {
			log.Println("parse path err: ", err)
			continue
		}
		srcFileDir, err := srcVol.Info(srcPath)
		if err != nil {
			log.Println("Get File err: ", err)
			continue
		}
		if srcFileDir.Dirs == 1 {
			// Directory: create the destination dir (suffixed on a name
			// clash) and copy its contents recursively.
			newDirName := srcFileDir.Name
			dstFolderFiles := dstVol.List(dstPath)
			for _, item := range dstFolderFiles {
				if item.Dirs == 1 && item.Name == srcFileDir.Name {
					newDirName = newDirName + elf.req.Suffix
				}
			}
			newDstDirFile, err := dstVol.MakeDir(dstPath, newDirName)
			if err != nil {
				log.Printf("Make Dir err: %s", err.Error())
				elf.res.Error = []string{errMsg, err.Error()}
				break
			}
			added = append(added, newDstDirFile)
			newAddFiles := elf.copyFolder(filepath.Join(dstPath, newDstDirFile.Name), srcPath, dstVol, srcVol)
			added = append(added, newAddFiles...)
		} else {
			// Regular file: stream it into the destination volume.
			srcFd, err := srcVol.GetFile(srcPath)
			if err != nil {
				log.Println("Get File err: ", err.Error())
				elf.res.Error = []string{errMsg, err.Error()}
				break
			}
			newFileDir, err := dstVol.Paste(dstPath, srcFileDir.Name, elf.req.Suffix, srcFd)
			if err != nil {
				log.Println("parse path err: ", err)
				elf.res.Error = []string{errMsg, err.Error()}
				break
			}
			added = append(added, newFileDir)
		}
		if elf.req.Cut {
			// Move semantics: remove the source after a successful copy.
			err = srcVol.Remove(srcPath)
			if err == nil {
				removed = append(removed, elf.req.Targets[i])
			} else {
				log.Println("cut file failed")
				elf.res.Error = []string{errMsg, err.Error()}
			}
		}
	}
	elf.res.Added = added
	elf.res.Removed = removed
}
// copyFolder recursively copies the contents of srcDir (on srcVol) into
// dstPath (on dstVol) and returns the FileDir records of everything created.
// Per-file failures are logged and skipped; a MakeDir failure stops the
// current directory level.
func (elf *ElFinderConnector) copyFolder(dstPath, srcDir string, dstVol, srcVol Volume) (added []FileDir) {
	srcFiles := srcVol.List(srcDir)
	added = make([]FileDir, 0, len(srcFiles))
	for i := 0; i < len(srcFiles); i++ {
		srcPath := filepath.Join(srcDir, srcFiles[i].Name)
		if srcFiles[i].Dirs == 1 {
			// Sub-directory: create it on the destination, then recurse.
			subDirFile, err := dstVol.MakeDir(dstPath, srcFiles[i].Name)
			if err != nil {
				log.Printf("Make dir err: %s", err.Error())
				break
			}
			added = append(added, subDirFile)
			newDstPath := filepath.Join(dstPath, subDirFile.Name)
			subAdded := elf.copyFolder(newDstPath, srcPath, dstVol, srcVol)
			added = append(added, subAdded...)
		} else {
			srcFd, err := srcVol.GetFile(srcPath)
			if err != nil {
				log.Println("Get File err: ", err)
				continue
			}
			newFileDir, err := dstVol.Paste(dstPath, srcFiles[i].Name, elf.req.Suffix, srcFd)
			if err != nil {
				log.Println("parse path err: ", err)
				continue
			}
			added = append(added, newFileDir)
		}
	}
	return
}
// ping is a no-op placeholder for the elFinder "ping" command.
func (elf *ElFinderConnector) ping() {
}
// rename responds to the "rename" command: rename the target entry to
// elf.req.Name. On success the new entry is reported in Added and the old
// target hash in Removed.
func (elf *ElFinderConnector) rename() {
	parts := strings.Split(elf.req.Target, "_")
	vol := elf.getVolume(parts[0])
	oldPath, err := elf.parseTarget(strings.Join(parts[1:], "_"))
	if err != nil {
		elf.res.Error = []string{"errRename", elf.req.Name}
		return
	}
	renamed, err := vol.Rename(oldPath, elf.req.Name)
	if err != nil {
		elf.res.Error = []string{"errRename", elf.req.Name}
		return
	}
	elf.res.Added = []FileDir{renamed}
	elf.res.Removed = []string{elf.req.Target}
}
// resize is a no-op placeholder for the elFinder "resize" command
// (image resizing is not implemented by this connector).
func (elf *ElFinderConnector) resize() {
}
// rm responds to the "rm" command: delete every target and report the hashes
// that were actually removed. Failures are logged and skipped.
func (elf *ElFinderConnector) rm() {
	removed := make([]string, 0, len(elf.req.Targets))
	for _, target := range elf.req.Targets {
		parts := strings.Split(target, "_")
		vol := elf.getVolume(parts[0])
		path, err := elf.parseTarget(strings.Join(parts[1:], "_"))
		if err == nil {
			err = vol.Remove(path)
		}
		if err != nil {
			log.Println(err)
			continue
		}
		removed = append(removed, target)
	}
	elf.res.Removed = removed
}
// search responds to the "search" command: ask the target volume for entries
// matching the query string (and optional mime filters).
func (elf *ElFinderConnector) search() {
	ret := ElfResponse{Files: []FileDir{}}
	parts := strings.Split(elf.req.Target, "_")
	vol := elf.getVolume(parts[0])
	path, _ := elf.parseTarget(strings.Join(parts[1:], "_"))
	files, err := vol.Search(path, elf.req.QueryKey, elf.req.Mimes...)
	if err != nil {
		ret.Error = err
	}
	ret.Files = files
	if ret.Files == nil {
		// Always serialize "files" as an array, never null.
		ret.Files = make([]FileDir, 0)
	}
	elf.res = &ret
}
// size responds to the "size" command: sum the byte size of every target,
// recursing into directories. Unresolvable targets are logged and skipped.
func (elf *ElFinderConnector) size() {
	var total int64
	for _, target := range elf.req.Targets {
		parts := strings.Split(target, "_")
		vol := elf.getVolume(parts[0])
		path, err := elf.parseTarget(strings.Join(parts[1:], "_"))
		if err != nil {
			log.Println(err)
			continue
		}
		info, err := vol.Info(path)
		if err != nil {
			log.Println(err)
			continue
		}
		if info.Dirs == 1 {
			total += calculateFolderSize(vol, path)
		} else {
			total += info.Size
		}
	}
	elf.res.Size = total
}
// tree responds to the "tree" command: return only the sub-directories of
// the target directory.
func (elf *ElFinderConnector) tree() {
	var ret = ElfResponse{Tree: []FileDir{}}
	IDAndTarget := strings.Split(elf.req.Target, "_")
	v := elf.getVolume(IDAndTarget[0])
	path, err := elf.parseTarget(strings.Join(IDAndTarget[1:], "_"))
	if err != nil {
		elf.res.Error = err
		return
	}
	// List the volume once and reuse the result (the original called
	// v.List(path) twice, doubling the backend work).
	dirs := v.List(path)
	for i := range dirs {
		if dirs[i].Dirs == 1 {
			ret.Tree = append(ret.Tree, dirs[i])
		}
	}
	elf.res = &ret
}
// upload resolves the request target to the destination volume and directory
// used by the "upload" command in dispatch.
func (elf *ElFinderConnector) upload() (Volume, string) {
	parts := strings.Split(elf.req.Target, "_")
	vol := elf.getVolume(parts[0])
	dir, _ := elf.parseTarget(strings.Join(parts[1:], "_"))
	return vol, dir
}
// dispatch routes the decoded elFinder command to its handler and finally
// writes elf.res as JSON. The "file" download, zipdl download phase and
// "abort" commands write to rw directly and return early instead.
func (elf *ElFinderConnector) dispatch(rw http.ResponseWriter, req *http.Request) {
	switch elf.req.Cmd {
	case "open":
		elf.open()
	case "tree":
		elf.tree()
	case "file":
		// Stream the requested file; "download" in the form selects
		// attachment vs inline disposition.
		readFile, filename, err := elf.file()
		if err != nil {
			elf.res.Error = err.Error()
		} else {
			mimeType := mime.TypeByExtension(filepath.Ext(filename))
			rw.Header().Set("Content-Type", mimeType)
			if req.Form["download"] != nil {
				rw.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, filename))
			} else {
				// BUG FIX: was `inline; filename=="%s"` (doubled '='), which
				// emits a malformed Content-Disposition header.
				rw.Header().Set("Content-Disposition", fmt.Sprintf(`inline; filename="%s"`, filename))
			}
			if req.Form.Get("cpath") != "" {
				// Cookie lets the client JS detect that the download started.
				http.SetCookie(rw, &http.Cookie{Path: req.Form.Get("cpath"), Name: "elfdl" + req.Form.Get("reqid"), Value: "1"})
			}
			_, err := io.Copy(rw, readFile)
			if err == nil {
				_ = readFile.Close()
				log.Printf("download file %s successful", filename)
				return
			} else {
				elf.res.Error = err.Error()
				log.Printf("download file %s err: %s", filename, err.Error())
			}
		}
	case "ls":
		elf.ls()
	case "parents":
		elf.parents()
	case "mkdir":
		elf.mkDir()
	case "mkfile":
		elf.mkFile()
	case "paste":
		elf.paste()
	case "rename":
		elf.rename()
	case "rm":
		elf.rm()
	case "size":
		if len(elf.req.Targets) == 0 {
			// Some clients send targets[0], targets[1], ... as separate form
			// keys; collect up to 100 of them.
			targets := make([]string, 0, 5)
			for i := 0; i < 100; i++ {
				value := req.Form.Get(fmt.Sprintf("targets[%d]", i))
				if value == "" {
					break
				}
				targets = append(targets, value)
			}
			elf.req.Targets = targets
		}
		elf.size()
	case "upload":
		v, dirpath := elf.upload()
		files := req.MultipartForm.File["upload[]"]
		added := make([]FileDir, 0, len(files))
		errs := make([]string, 0, len(files))
		if elf.req.Cid != 0 && elf.req.Chunk != "" {
			// Chunked upload: chunk is "<name>.<cur>_<total>.part" and range
			// is "<offset>,<length>,<totalSize>".
			re, err := regexp.Compile(`(.*?)\.([0-9][0-9]*?_[0-9][0-9]*?)(\.part)`)
			if err != nil {
				elf.res.Error = errFolderUpload
				break
			}
			ch := re.FindStringSubmatch(elf.req.Chunk)
			if len(ch) != 4 {
				elf.res.Error = errUploadFile
				break
			}
			t := strings.Split(ch[2], "_")
			currentPart, err := strconv.Atoi(t[0])
			if err != nil {
				elf.res.Error = errUploadFile
				break
			}
			totalPart, err := strconv.Atoi(t[1])
			if err != nil {
				elf.res.Error = errUploadFile
				break
			}
			rangeData := strings.Split(elf.req.Range, ",")
			if len(rangeData) != 3 {
				errs = append(errs, "err range data")
				break
			}
			offSet, err := strconv.Atoi(rangeData[0])
			if err != nil {
				elf.res.Error = errUploadFile
				break
			}
			chunkLength, err := strconv.Atoi(rangeData[1])
			if err != nil {
				elf.res.Error = errUploadFile
				break
			}
			totalSize, err := strconv.Atoi(rangeData[2])
			if err != nil {
				elf.res.Error = errUploadFile
				break
			}
			filename := ch[1]
			for i, uploadFile := range files {
				f, err := uploadFile.Open()
				if err != nil {
					errs = append(errs, err.Error())
					continue
				}
				data := ChunkRange{Offset: int64(offSet), Length: int64(chunkLength), TotalSize: int64(totalSize)}
				uploadPath := ""
				if len(elf.req.UploadPath) == len(files) && elf.req.UploadPath[i] != elf.req.Target {
					uploadPath = elf.req.UploadPath[i]
				}
				err = v.UploadChunk(elf.req.Cid, dirpath, uploadPath, filename, data, f)
				if err != nil {
					errs = append(errs, err.Error())
				}
				_ = f.Close()
			}
			if currentPart == totalPart {
				// Last chunk received: tell the client to request the merge.
				elf.res.Chunkmerged = fmt.Sprintf("%d_%d_%s", elf.req.Cid, totalPart, filename)
				elf.res.Name = filename
			}
		} else if elf.req.Chunk != "" {
			// Chunk merge request: chunk is "<cid>_<total>_<filename>".
			re, err := regexp.Compile(`([0-9]*)_([0-9]*)_(.*)`)
			if err != nil {
				elf.res.Error = errFolderUpload
				break
			}
			ch := re.FindStringSubmatch(elf.req.Chunk)
			if len(ch) != 4 {
				elf.res.Error = errFolderUpload
				break
			}
			var uploadPath string
			if len(elf.req.UploadPath) == 1 && elf.req.UploadPath[0] != elf.req.Target {
				uploadPath = elf.req.UploadPath[0]
			}
			cid, _ := strconv.Atoi(ch[1])
			total, _ := strconv.Atoi(ch[2])
			result, err := v.MergeChunk(cid, total, dirpath, uploadPath, ch[3])
			if err != nil {
				errs = append(errs, err.Error())
				break
			}
			added = append(added, result)
		} else {
			// Plain (non-chunked) upload of one or more files.
			for i, uploadFile := range files {
				f, err := uploadFile.Open()
				if err != nil {
					// BUG FIX: the Open error used to be ignored, handing a
					// possibly-nil reader to UploadFile.
					errs = append(errs, "errUpload")
					continue
				}
				uploadPath := ""
				if len(elf.req.UploadPath) == len(files) && elf.req.UploadPath[i] != elf.req.Target {
					uploadPath = elf.req.UploadPath[i]
				}
				result, err := v.UploadFile(dirpath, uploadPath, uploadFile.Filename, f)
				// BUG FIX: close the multipart part; it was previously leaked.
				_ = f.Close()
				if err != nil {
					errs = append(errs, "errUpload")
					continue
				}
				added = append(added, result)
			}
		}
		elf.res.Warning = errs
		elf.res.Added = added
	case "zipdl":
		switch elf.req.Download {
		case "1":
			// Second phase: stream the previously built zip archive.
			var fileKey string
			var filename string
			var mimetype string
			if len(elf.req.Targets) == 4 {
				fileKey = elf.req.Targets[1]
				filename = elf.req.Targets[2]
				mimetype = elf.req.Targets[3]
			}
			var ret ElfResponse
			if zipTmpPath, ok := getTmpFilePath(fileKey); ok {
				zipFd, err := os.Open(zipTmpPath)
				if err == nil {
					rw.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, filename))
					rw.Header().Set("Content-Type", mimetype)
					if _, err = io.Copy(rw, zipFd); err == nil {
						_ = zipFd.Close()
						delTmpFilePath(fileKey)
						return
					}
					// Copy failed part-way: clear the download headers so the
					// JSON error below is not mislabeled.
					rw.Header().Del("Content-Disposition")
					rw.Header().Del("Content-Type")
					ret.Error = err
					log.Println("zip download send err: ", err.Error())
				}
				log.Println("zip download err: ", err.Error())
				ret.Error = err
			}
			elf.res = &ret
		default:
			// First phase: build the zip archive on disk.
			elf.zipdl()
		}
	case "abort":
		rw.WriteHeader(http.StatusNoContent)
		return
	case "search":
		elf.search()
	default:
		elf.res.Error = errUnknownCmd
	}
	rw.Header().Set("Content-Type", "application/json")
	data, err := json.Marshal(elf.res)
	if err != nil {
		log.Println("elf Marshal err:", err.Error())
	}
	_, err = rw.Write(data)
	if err != nil {
		log.Println("ResponseWriter Write err:", err.Error())
	}
}
// getVolume returns the volume registered under vid, falling back to the
// default volume for an empty or unknown id.
func (elf *ElFinderConnector) getVolume(vid string) Volume {
	if vid == "" {
		return elf.defaultV
	}
	vol, ok := elf.Volumes[vid]
	if !ok {
		return elf.defaultV
	}
	return vol
}
// parseTarget decodes the base64url-encoded path portion of a target hash.
// An empty target or "/" means the volume root.
func (elf *ElFinderConnector) parseTarget(target string) (string, error) {
	switch target {
	case "", "/":
		return "/", nil
	}
	return Decode64(target)
}
// zipdl handles the first phase of the "zipdl" command: resolve every target,
// enforce the total-size limit, build a temporary zip archive on disk and
// return the key/name the client uses in the download phase.
func (elf *ElFinderConnector) zipdl() {
	var ret ElfResponse
	var zipWriter *zip.Writer
	var totalZipSize int64
	zipVs := make([]Volume, 0, len(elf.req.Targets))
	zipPaths := make([]string, 0, len(elf.req.Targets))
	// Resolve targets; unresolvable ones are logged and skipped.
	for _, target := range elf.req.Targets {
		IDAndTarget := strings.Split(target, "_")
		v := elf.getVolume(IDAndTarget[0])
		path, err := elf.parseTarget(strings.Join(IDAndTarget[1:], "_"))
		if err != nil {
			log.Println(err)
			continue
		}
		zipVs = append(zipVs, v)
		zipPaths = append(zipPaths, path)
	}
	// check maxsize
	for i := 0; i < len(zipVs); i++ {
		info, err := zipVs[i].Info(zipPaths[i])
		if err != nil {
			continue
		}
		if info.Dirs == 1 {
			totalZipSize += calculateFolderSize(zipVs[i], zipPaths[i])
		} else {
			totalZipSize += info.Size
		}
	}
	if elf.zipMaxSize == 0 {
		elf.zipMaxSize = int64(defaultZipMaxSize)
	}
	if totalZipSize >= elf.zipMaxSize {
		ret.Error = errArcMaxSize
		elf.res = &ret
		return
	}
	zipRes := make(map[string]string)
	zipFileKey := GenerateTargetsMD5Key(elf.req.Targets...)
	if elf.zipTmpPath == "" {
		elf.zipTmpPath = defaultTmpPath
	}
	// Archive file name: <UTC timestamp><key>.zip inside the tmp directory.
	filename := fmt.Sprintf("%s%s.zip",
		time.Now().UTC().Format("20060102150405"), zipFileKey)
	zipTmpPath := filepath.Join(elf.zipTmpPath, filename)
	dstFd, err := os.Create(zipTmpPath)
	if err != nil {
		log.Println("create tmp zip file err: ", err)
		ret.Error = err.Error()
		elf.res = &ret
		return
	}
	zipWriter = zip.NewWriter(dstFd)
	for i := 0; i < len(zipVs); i++ {
		v := zipVs[i]
		path := zipPaths[i]
		info, err := v.Info(path)
		if err != nil {
			log.Println("Could not get info: ", path)
			ret.Error = err.Error()
			goto endErr
		}
		if info.Dirs == 0 {
			// Single file: store it at the archive root.
			fheader := zip.FileHeader{
				Name:     info.Name,
				Modified: time.Now().UTC(),
				Method:   zip.Deflate,
			}
			zipFile, err := zipWriter.CreateHeader(&fheader)
			if err != nil {
				log.Println("Create zip err: ", err.Error())
				ret.Error = err.Error()
				goto endErr
			}
			reader, err := v.GetFile(path)
			if err != nil {
				log.Println("Get file err:", err.Error())
				ret.Error = err.Error()
				goto endErr
			}
			_, err = io.Copy(zipFile, reader)
			if err != nil {
				log.Println("Get file err:", err.Error())
				ret.Error = err.Error()
				goto endErr
			}
			_ = reader.Close()
		} else {
			// Directory: archive its whole subtree relative to its parent.
			if err := zipFolder(v, filepath.Dir(path), path, zipWriter); err != nil {
				log.Println("create tmp zip file err: ", err)
				ret.Error = err.Error()
				goto endErr
			}
		}
	}
	err = zipWriter.Close()
	if err != nil {
		log.Println("Zip file finish err: ", err)
		ret.Error = err.Error()
		goto endErr
	}
	// Register the archive so the download phase can find it by key.
	setTmpFilePath(zipFileKey, zipTmpPath)
	zipRes["mime"] = "application/zip"
	zipRes["file"] = zipFileKey
	zipRes["name"] = filename
	ret.Zipdl = zipRes
endErr:
	elf.res = &ret
}
func GenerateTargetsMD5Key(targets ...string) string {
h := md5.New()
h.Write([]byte(fmt.Sprintf("%d", time.Now().Nanosecond())))
for _, target := range targets {
h.Write([]byte(target))
}
return fmt.Sprintf("%x", h.Sum(nil))
}
// zipFolder recursively writes every file under folderPath on volume v into
// zipW, naming entries relative to baseFolder. It stops and returns the
// first error encountered.
func zipFolder(v Volume, baseFolder, folderPath string, zipW *zip.Writer) error {
	if !strings.HasSuffix(baseFolder, "/") {
		baseFolder += "/"
	}
	res := v.List(folderPath)
	for i := 0; i < len(res); i++ {
		currentPath := filepath.Join(folderPath, res[i].Name)
		if res[i].Dirs == 1 {
			// Recurse into sub-directories; files are added on the way down.
			err := zipFolder(v, baseFolder, currentPath, zipW)
			if err != nil {
				return err
			}
			continue
		}
		relPath := strings.TrimPrefix(currentPath, baseFolder)
		fheader := zip.FileHeader{
			Name:     relPath,
			Modified: time.Now().UTC(),
			Method:   zip.Deflate,
		}
		zipFile, err := zipW.CreateHeader(&fheader)
		if err != nil {
			return err
		}
		reader, err := v.GetFile(currentPath)
		if err != nil {
			return err
		}
		_, err = io.Copy(zipFile, reader)
		if err != nil {
			return err
		}
		_ = reader.Close()
	}
	return nil
}
// calculateFolderSize returns the cumulative Size of every entry under
// folderPath, recursing into sub-directories (a directory contributes its
// own reported Size plus that of its contents).
func calculateFolderSize(v Volume, folderPath string) int64 {
	var total int64
	for _, entry := range v.List(folderPath) {
		if entry.Dirs == 1 {
			total += calculateFolderSize(v, filepath.Join(folderPath, entry.Name))
		}
		total += entry.Size
	}
	return total
}
package elfinder
// elFinder client error-message keys as defined by the elFinder 2.1
// client-server API. They are returned verbatim in the "error" field of a
// response and translated to user-facing text by the client UI.
const (
	errMsg               = "error"
	errUnknownMsg        = "errUnknown"
	errUnknownCmd        = "errUnknownCmd"
	errJqui              = "errJqui"
	errNode              = "errNode"
	errURL               = "errURL"
	errAccess            = "errAccess"
	errConnect           = "errConnect"
	errAbort             = "errAbort"
	errTimeout           = "errTimeout"
	errNotFound          = "errNotFound"
	errResponse          = "errResponse"
	errConf              = "errConf"
	errJSON              = "errJSON"
	errNoVolumes         = "errNoVolumes"
	errCmdParams         = "errCmdParams"
	errDataNotJSON       = "errDataNotJSON"
	errDataEmpty         = "errDataEmpty"
	errCmdReq            = "errCmdReq"
	errOpen              = "errOpen"
	errNotFolder         = "errNotFolder"
	errNotFile           = "errNotFile"
	errRead              = "errRead"
	errWrite             = "errWrite"
	errPerm              = "errPerm"
	errLocked            = "errLocked"
	errExists            = "errExists"
	errInvName           = "errInvName"
	errFolderNotFound    = "errFolderNotFound"
	errFileNotFound      = "errFileNotFound"
	errTrgFolderNotFound = "errTrgFolderNotFound"
	errPopup             = "errPopup"
	errMkdir             = "errMkdir"
	errMkfile            = "errMkfile"
	errRename            = "errRename"
	errCopyFrom          = "errCopyFrom"
	errCopyTo            = "errCopyTo"
	errMkOutLink         = "errMkOutLink"
	errUpload            = "errUpload"
	errUploadFile        = "errUploadFile"
	errUploadNoFiles     = "errUploadNoFiles"
	errUploadTotalSize   = "errUploadTotalSize"
	errUploadFileSize    = "errUploadFileSize"
	errUploadMime        = "errUploadMime"
	errUploadTransfer    = "errUploadTransfer"
	errUploadTemp        = "errUploadTemp"
	errNotReplace        = "errNotReplace"
	errReplace           = "errReplace"
	errSave              = "errSave"
	errCopy              = "errCopy"
	errMove              = "errMove"
	errCopyInItself      = "errCopyInItself"
	errRm                = "errRm"
	errRmSrc             = "errRmSrc"
	errExtract           = "errExtract"
	errArchive           = "errArchive"
	errArcType           = "errArcType"
	errNoArchive         = "errNoArchive"
	errCmdNoSupport      = "errCmdNoSupport"
	errReplByChild       = "errReplByChild"
	errArcSymlinks       = "errArcSymlinks"
	errArcMaxSize        = "errArcMaxSize"
	errResize            = "errResize"
	errResizeDegree      = "errResizeDegree"
	errResizeRotate      = "errResizeRotate"
	errResizeSize        = "errResizeSize"
	errResizeNoChange    = "errResizeNoChange"
	errUsupportType      = "errUsupportType"
	errNotUTF8Content    = "errNotUTF8Content"
	errNetMount          = "errNetMount"
	errNetMountNoDriver  = "errNetMountNoDriver"
	errNetMountFailed    = "errNetMountFailed"
	errNetMountHostReq   = "errNetMountHostReq"
	errSessionExpires    = "errSessionExpires"
	errCreatingTempDir   = "errCreatingTempDir"
	errFtpDownloadFile   = "errFtpDownloadFile"
	errFtpUploadFile     = "errFtpUploadFile"
	errFtpMkdir          = "errFtpMkdir"
	errArchiveExec       = "errArchiveExec"
	errExtractExec       = "errExtractExec"
	errNetUnMount        = "errNetUnMount"
	errConvUTF8          = "errConvUTF8"
	errFolderUpload      = "errFolderUpload"
	errSearchTimeout     = "errSearchTimeout"
	errReauthRequire     = "errReauthRequire"
	errMaxTargets        = "errMaxTargets"
)
package elfinder
// ELFRequest holds the decoded query/form parameters of one elFinder client
// request; the form tags match the protocol's parameter names.
type ELFRequest struct {
	Cmd        string   `form:"cmd"`    // command name: "open", "ls", "upload", ...
	Init       bool     `form:"init"`   // true on the client's first "open" request
	Tree       bool     `form:"tree"`   // include directory tree in "open" response
	Name       string   `form:"name"`   // new name for mkdir/mkfile/rename
	Target     string   `form:"target"` // target hash: "<volumeID>_<base64(path)>"
	Targets    []string `form:"targets[]"`
	Dirs       []string `form:"dirs[]"`
	Mode       string   `form:"mode"`
	Bg         string   `form:"bg"`
	Width      int      `form:"width"`
	Height     int      `form:"height"`
	X          int      `form:"x"`
	Y          int      `form:"y"`
	Degree     int      `form:"degree"`
	Quality    int      `form:"quality"`
	Renames    []string `form:"renames[]"`
	Suffix     string   `form:"suffix"` // appended to names on paste collisions
	Intersect  []string `form:"intersect[]"`
	Chunk      string   `form:"chunk"` // chunked-upload descriptor (see dispatch)
	UploadPath []string `form:"upload_path[]"`
	Cid        int      `form:"cid"` // chunked-upload session id
	Content    string   `form:"content"`
	Dst        string   `form:"dst"` // destination target hash for paste
	Src        string   `form:"src"`
	Cut        bool     `form:"cut"` // paste moves instead of copies
	Type       string   `form:"type"`
	MakeDir    bool     `form:"makedir"`
	Range      string   `form:"range"` // "<offset>,<length>,<totalSize>" for chunks
	Download   string   `form:"download"`
	QueryKey   string   `form:"q"` // search query
	Mimes      []string `form:"mimes[]"`
}
// ChunkRange describes one chunk of a chunked upload: the byte offset and
// length of this chunk, and the final total size of the assembled file.
type ChunkRange struct {
	Offset    int64
	Length    int64
	TotalSize int64
}
package elfinder
// source code from https://github.com/Supme/goElFinder/blob/master/types.go
// defaultOptions is the per-volume option set reported to the client on
// init requests (see ElFinderConnector.open).
var defaultOptions = options{
	Separator: "/",
	Archivers: archivers{Create: []string{}, Extract: []string{}},
	CopyOverwrite: 1}
// ElfResponse is the JSON payload returned to the elFinder client; which
// fields are populated depends on the command that produced it.
type ElfResponse struct {
	Api         float64           `json:"api,omitempty"`        // protocol version (>= 2.1); sent only for init requests
	Cwd         FileDir           `json:"cwd,omitempty"`        // current working directory entry
	Files       []FileDir         `json:"files"`                // entries of cwd; cwd itself repeated, tree nodes may be added
	NetDrivers  []string          `json:"netDrivers,omitempty"` // protocols mountable via netmount (only ftp supported upstream)
	Options     options           `json:"options,omitempty"`
	UplMaxFile  string            `json:"uplMaxFile,omitempty"` // allowed number of files per upload request, e.g. 20
	UplMaxSize  string            `json:"uplMaxSize,omitempty"` // allowed upload size per request, e.g. "32M"
	Tree        []FileDir         `json:"tree"`                 // for tree
	Dim         string            `json:"dim,omitempty"`        // for images
	Added       []FileDir         `json:"added"`                // for upload, mkdir, rename
	Warning     []string          `json:"warning,omitempty"`    // for upload
	Changed     []FileDir         `json:"changed,omitempty"`    // for mkdir
	Hashes      map[string]string `json:"hashes,omitempty"`     // for mkdir
	List        []string          `json:"list,omitempty"`       // for ls
	Size        int64             `json:"size,omitempty"`       // for size
	Zipdl       map[string]string `json:"zipdl,omitempty"`      // zipdl phase-1 result (mime/file/name)
	Name        string            `json:"_name,omitempty"`
	Chunkmerged string            `json:"_chunkmerged,omitempty"`
	Removed     []string          `json:"removed,omitempty"` // for remove, rename
	Images      map[string]string `json:"images,omitempty"`  // for tmb
	Content     string            `json:"content,omitempty"` // for get
	Url         string            `json:"url,omitempty"`     // for url
	Error       interface{}       `json:"error,omitempty"`   // string, []string or error value
}
// options describes per-volume client options (the "options" object of the
// elFinder open response).
type options struct {
	Path          string    `json:"path,omitempty"`      // current folder path
	Url           string    `json:"url,omitempty"`       // current folder URL
	TmbUrl        string    `json:"tmbURL,omitempty"`    // thumbnails folder URL
	Separator     string    `json:"separator,omitempty"` // path separator for the volume
	Disabled      []string  `json:"disabled,omitempty"`  // commands disabled on this volume
	Archivers     archivers `json:"archivers,omitempty"`
	CopyOverwrite int64     `json:"copyOverwrite,omitempty"` // whether copies overwrite same-named files
	// ToDo https://github.com/Studio-42/elFinder/wiki/Client-Server-API-2.1#open
}
// archivers lists the archive formats the volume can create and extract.
type archivers struct {
	Create    []string          `json:"create,omitempty"`    // mime types of archives that can be created
	Extract   []string          `json:"extract,omitempty"`   // mime types that can be extracted / unpacked
	Createext map[string]string `json:"createext,omitempty"` // map of { MimeType: FileExtension }
}
// FileDir describes one file or directory entry in the elFinder protocol.
type FileDir struct {
	Name     string                 `json:"name,omitempty"`     // entry name. Required
	Hash     string                 `json:"hash,omitempty"`     // hash of the path; "<volumeID>_<encoded path>". Required
	Phash    string                 `json:"phash,omitempty"`    // hash of the parent directory. Required except for roots
	Mime     string                 `json:"mime,omitempty"`     // mime type. Required
	Ts       int64                  `json:"ts,omitempty"`       // modification time, unix seconds. Required
	Size     int64                  `json:"size,omitempty"`     // size in bytes
	Dirs     byte                   `json:"dirs,omitempty"`     // directories only: 1 if it has child dirs, else 0/unset
	Read     byte                   `json:"read,omitempty"`     // 1 if readable
	Write    byte                   `json:"write,omitempty"`    // 1 if writable
	Isroot   byte                   `json:"isroot,omitempty"`
	Locked   byte                   `json:"locked,omitempty"` // 1 if the entry cannot be deleted, renamed or moved
	Tmb      string                 `json:"tmb,omitempty"`    // images only: thumbnail name, or "1" if generatable
	Alias    string                 `json:"alias,omitempty"`  // symlinks only: target path
	Thash    string                 `json:"thash,omitempty"`  // symlinks only: target hash
	Dim      string                 `json:"dim,omitempty"`    // images only: dimensions
	Isowner  bool                   `json:"isowner,omitempty"`
	Cssclr   string                 `json:"cssclr,omitempty"`   // CSS class for the holder icon
	Volumeid string                 `json:"volumeid,omitempty"` // volume id; directories only
	Netkey   string                 `json:"netkey,omitempty"`   // netmount volume key; required for netmount volumes
	Options  options                `json:"options,omitempty"`  // volume roots only; same as cwd.options
	Debug    map[string]interface{} `json:"debug,omitempty"`    // volume roots only
}
package elfinder
import (
"crypto/md5"
"encoding/base64"
"encoding/hex"
"os"
)
// Decode64 decodes an unpadded URL-safe base64 string back to plain text.
func Decode64(s string) (string, error) {
	raw, err := base64.RawURLEncoding.DecodeString(s)
	if err != nil {
		return "", err
	}
	return string(raw), nil
}
// Encode64 encodes s with the unpadded URL-safe base64 alphabet.
func Encode64(s string) string {
	raw := []byte(s)
	return base64.RawURLEncoding.EncodeToString(raw)
}
// CreateHash builds an elFinder target hash: "<volumeID>_<base64(path)>".
func CreateHash(volumeId, path string) string {
	encoded := Encode64(path)
	return volumeId + "_" + encoded
}
// GenerateID derives a stable volume id from a path: hex(md5(path)).
func GenerateID(path string) string {
	sum := md5.Sum([]byte(path))
	return hex.EncodeToString(sum[:])
}
func ReadWritePem(pem os.FileMode) (readable, writable byte) {
if pem&(1<<uint(9-1-0)) != 0 {
readable = 1
}
if pem&(1<<uint(9-1-1)) != 0 {
writable = 1
}
return
}
package elfinder
import (
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
)
// rootPath is the process working directory at package load; the Getwd
// error is deliberately ignored, leaving rootPath empty on failure.
var rootPath, _ = os.Getwd()

// DefaultVolume serves the process working directory as a local volume.
var DefaultVolume = LocalFileVolume{basePath: rootPath, Id: GenerateID(rootPath)}
// Volume abstracts a storage backend (local disk, object storage, ...) that
// the connector can browse, modify and transfer files on.
type Volume interface {
	// ID returns the volume's unique identifier (the target-hash prefix).
	ID() string
	// Info returns the FileDir record for path.
	Info(path string) (FileDir, error)
	// List returns the direct children of path.
	List(path string) []FileDir
	// Parents returns the directory chain above path (dep is unused by the
	// local implementation).
	Parents(path string, dep int) []FileDir
	// GetFile opens path for reading; the caller closes the reader.
	GetFile(path string) (reader io.ReadCloser, err error)
	// UploadFile stores a complete file in dir (or uploadPath if non-empty).
	UploadFile(dir, uploadPath, filename string, reader io.ReadSeeker) (FileDir, error)
	// UploadChunk stores one chunk (rangeData) of upload session cid.
	UploadChunk(cid int, dirPath, uploadPath, filename string, rangeData ChunkRange, reader io.ReadSeeker) error
	// MergeChunk joins the total chunks of session cid into the final file.
	MergeChunk(cid, total int, dirPath, uploadPath, filename string) (FileDir, error)
	// MakeDir creates a sub-directory of dir.
	MakeDir(dir, newDirname string) (FileDir, error)
	// MakeFile creates an empty file inside dir.
	MakeFile(dir, newFilename string) (FileDir, error)
	// Rename renames the entry at oldNamePath to newname.
	Rename(oldNamePath, newname string) (FileDir, error)
	// Remove deletes the entry at path.
	Remove(path string) error
	// Paste writes reader as a new file in dir, using suffix on name clashes.
	Paste(dir, filename, suffix string, reader io.ReadCloser) (FileDir, error)
	// RootFileDir returns the record for the volume root.
	RootFileDir() FileDir
	// Search finds entries under path matching key (and optional mime types).
	Search(path, key string, mimes ...string) ([]FileDir, error)
}
// NewLocalVolume builds a LocalFileVolume rooted at path, deriving the
// volume ID from the path itself.
func NewLocalVolume(path string) *LocalFileVolume {
	v := LocalFileVolume{basePath: path, Id: GenerateID(path)}
	return &v
}
// LocalFileVolume serves a directory on the local filesystem as an
// elfinder volume.
type LocalFileVolume struct {
	// Id is the volume identifier (MD5 of basePath; see NewLocalVolume).
	Id string
	// basePath is the root directory this volume exposes.
	basePath string
}
// ID returns this volume's unique identifier.
func (f *LocalFileVolume) ID() string {
	return f.Id
}
// Info stats path and converts the result into an elfinder FileDir.
// An empty or "/" path is treated as the volume's base path.
func (f *LocalFileVolume) Info(path string) (FileDir, error) {
	if path == "" || path == "/" {
		path = f.basePath
	}
	var res FileDir
	// Every entry except the volume root carries its parent's hash.
	if path != f.basePath {
		res.Phash = f.hash(filepath.Dir(path))
	}
	info, err := os.Stat(path)
	if err != nil {
		return res, err
	}
	res.Name = info.Name()
	res.Hash = f.hash(path)
	res.Ts = info.ModTime().Unix()
	res.Size = info.Size()
	res.Read, res.Write = ReadWritePem(info.Mode())
	res.Mime = "file"
	res.Dirs = 0
	if info.IsDir() {
		res.Mime = "directory"
		res.Dirs = 1
	}
	return res, nil
}
// List returns FileDir entries for the direct children of path. An
// unreadable directory yields an empty slice; unstattable children are
// silently skipped.
func (f *LocalFileVolume) List(path string) []FileDir {
	if path == "" || path == "/" {
		path = f.basePath
	}
	entries, err := ioutil.ReadDir(path)
	if err != nil {
		return []FileDir{}
	}
	result := make([]FileDir, 0, len(entries))
	for _, entry := range entries {
		if info, infoErr := f.Info(filepath.Join(path, entry.Name())); infoErr == nil {
			result = append(result, info)
		}
	}
	return result
}
// Parents walks from the volume root down to path, collecting every
// ancestor directory plus the directories directly inside each ancestor
// (the tree shape elfinder's "parents" command expects). dep is unused.
func (f *LocalFileVolume) Parents(path string, dep int) []FileDir {
	rel := strings.TrimPrefix(strings.TrimPrefix(path, f.basePath), "/")
	segments := strings.Split(rel, "/")
	dirs := make([]FileDir, 0, len(segments))
	for i := range segments {
		ancestor := filepath.Join(f.basePath, filepath.Join(segments[:i]...))
		info, err := f.Info(ancestor)
		if err != nil {
			continue
		}
		dirs = append(dirs, info)
		for _, child := range f.List(ancestor) {
			if child.Dirs == 1 {
				dirs = append(dirs, child)
			}
		}
	}
	return dirs
}
// GetFile opens path for reading; the caller is responsible for closing
// the returned ReadCloser.
func (f *LocalFileVolume) GetFile(path string) (reader io.ReadCloser, err error) {
	return os.Open(path)
}
// UploadFile writes reader to a file under dirPath and returns its
// FileDir metadata. uploadPath, when non-empty, is a relative path that
// may or may not already end in filename.
func (f *LocalFileVolume) UploadFile(dirPath, uploadPath, filename string, reader io.ReadSeeker) (FileDir, error) {
	var realPath string
	switch {
	case strings.Contains(uploadPath, filename):
		// uploadPath already includes the target filename.
		realPath = filepath.Join(dirPath, strings.TrimPrefix(uploadPath, "/"))
	case uploadPath != "":
		realPath = filepath.Join(dirPath, strings.TrimPrefix(uploadPath, "/"), filename)
	default:
		realPath = filepath.Join(dirPath, filename)
	}
	fwriter, err := os.OpenFile(realPath, os.O_WRONLY|os.O_CREATE, 0666)
	if err != nil {
		return FileDir{}, err
	}
	// Release the descriptor on every exit path; it was previously leaked.
	defer fwriter.Close()
	if _, err = io.Copy(fwriter, reader); err != nil {
		return FileDir{}, err
	}
	return f.Info(realPath)
}
// UploadChunk writes one chunk of an upload at its byte offset, creating
// the target file on first use.
func (f *LocalFileVolume) UploadChunk(cid int, dirPath, uploadPath, filename string, rangeData ChunkRange, reader io.ReadSeeker) error {
	var target string
	switch {
	case strings.Contains(uploadPath, filename):
		target = filepath.Join(dirPath, strings.TrimPrefix(uploadPath, "/"))
	case uploadPath != "":
		target = filepath.Join(dirPath, strings.TrimPrefix(uploadPath, "/"), filename)
	default:
		target = filepath.Join(dirPath, filename)
	}
	fd, err := os.OpenFile(target, os.O_WRONLY|os.O_CREATE, 0666)
	if err != nil {
		return err
	}
	defer fd.Close()
	// Position the write at the chunk's offset within the final file.
	if _, err = fd.Seek(rangeData.Offset, 0); err != nil {
		return err
	}
	_, err = io.Copy(fd, reader)
	return err
}
// MergeChunk resolves the final path of a chunked upload and returns its
// info. Chunks were already written in place at their offsets, so no
// data merging is needed here.
func (f *LocalFileVolume) MergeChunk(cid, total int, dirPath, uploadPath, filename string) (FileDir, error) {
	var target string
	switch {
	case strings.Contains(uploadPath, filename):
		target = filepath.Join(dirPath, uploadPath)
	case uploadPath != "":
		target = filepath.Join(dirPath, strings.TrimPrefix(uploadPath, "/"), filename)
	default:
		target = filepath.Join(dirPath, filename)
	}
	return f.Info(target)
}
// hash returns the elfinder hash of path within this volume.
func (f *LocalFileVolume) hash(path string) string {
	return CreateHash(f.Id, path)
}
// MakeDir creates a single directory named newDirname inside dir and
// returns its metadata.
func (f *LocalFileVolume) MakeDir(dir, newDirname string) (FileDir, error) {
	target := filepath.Join(dir, newDirname)
	if err := os.Mkdir(target, os.ModePerm); err != nil {
		return FileDir{}, err
	}
	return f.Info(target)
}
// MakeFile creates an empty file named newFilename inside dir and
// returns its metadata.
func (f *LocalFileVolume) MakeFile(dir, newFilename string) (FileDir, error) {
	var res FileDir
	realPath := filepath.Join(dir, newFilename)
	fd, err := os.Create(realPath)
	if err != nil {
		return res, err
	}
	// Release the descriptor on every exit path; it was previously leaked.
	defer fd.Close()
	fdInfo, err := fd.Stat()
	if err != nil {
		return res, err
	}
	res.Name = fdInfo.Name()
	res.Hash = f.hash(realPath)
	res.Phash = f.hash(dir)
	res.Ts = fdInfo.ModTime().Unix()
	res.Size = fdInfo.Size()
	res.Mime = "file"
	res.Dirs = 0
	res.Read, res.Write = ReadWritePem(fdInfo.Mode())
	return res, nil
}
// Rename moves oldNamePath to newName within the same directory and
// returns the renamed entry's metadata.
func (f *LocalFileVolume) Rename(oldNamePath, newName string) (FileDir, error) {
	target := filepath.Join(filepath.Dir(oldNamePath), newName)
	if err := os.Rename(oldNamePath, target); err != nil {
		return FileDir{}, err
	}
	return f.Info(target)
}
// Remove deletes path and everything beneath it.
func (f *LocalFileVolume) Remove(path string) error {
	return os.RemoveAll(path)
}
// Paste writes reader into dir/filename, appending suffix to the name if
// an entry already exists there, and returns the new entry's metadata.
// reader is always closed.
func (f *LocalFileVolume) Paste(dir, filename, suffix string, reader io.ReadCloser) (FileDir, error) {
	defer reader.Close()
	res := FileDir{}
	realpath := filepath.Join(dir, filename)
	// A successful Info means the destination exists: disambiguate.
	if _, err := f.Info(realpath); err == nil {
		realpath += suffix
	}
	dstFd, err := os.Create(realpath)
	if err != nil {
		return res, err
	}
	// Release the descriptor on every exit path; it was previously leaked.
	defer dstFd.Close()
	if _, err = io.Copy(dstFd, reader); err != nil {
		return res, err
	}
	return f.Info(realpath)
}
// RootFileDir builds the FileDir entry describing the volume root: a
// locked directory carrying the volume ID.
// NOTE(review): the os.Stat error is discarded; if basePath does not
// exist, info is nil and the accesses below panic — confirm basePath is
// always validated before a volume is constructed.
func (f *LocalFileVolume) RootFileDir() FileDir {
	var resFDir = FileDir{}
	info, _ := os.Stat(f.basePath)
	resFDir.Name = info.Name()
	resFDir.Hash = f.hash(f.basePath)
	resFDir.Mime = "directory"
	resFDir.Volumeid = f.Id
	resFDir.Dirs = 1
	resFDir.Read, resFDir.Write = ReadWritePem(info.Mode())
	resFDir.Size = info.Size()
	// Locked=1 prevents the client from renaming/removing the root.
	resFDir.Locked = 1
	return resFDir
}
// Search walks the tree rooted at path and returns entries whose names
// contain key. The mimes filter is accepted but not applied.
func (f *LocalFileVolume) Search(path, key string, mimes ...string) (files []FileDir, err error) {
	err = filepath.Walk(path, func(dirPath string, info os.FileInfo, walkErr error) error {
		// Propagate walk errors instead of dereferencing a nil FileInfo,
		// which the previous version did when a subtree was unreadable.
		if walkErr != nil {
			return walkErr
		}
		if !strings.Contains(info.Name(), key) {
			return nil
		}
		res := FileDir{}
		res.Name = info.Name()
		res.Hash = f.hash(dirPath)
		res.Volumeid = f.Id
		if info.IsDir() {
			res.Mime = "directory"
			res.Dirs = 1
		} else {
			res.Mime = "file"
			res.Dirs = 0
		}
		res.Read, res.Write = ReadWritePem(info.Mode())
		res.Size = info.Size()
		// NOTE(review): Ts is nanoseconds here but Unix seconds in
		// Info/MakeFile — confirm which unit the client expects.
		res.Ts = info.ModTime().UnixNano()
		files = append(files, res)
		return nil
	})
	return
}
package elfinder
import (
"os"
"sync"
)
var (
	// zipTmpFiles maps a download key to the temporary zip file created
	// for it on disk; all access is guarded by zipLocker.
	zipTmpFiles = make(map[string]string)
	zipLocker = new(sync.Mutex)
)
// getTmpFilePath looks up the temporary zip path registered under key,
// reporting whether such an entry exists.
func getTmpFilePath(key string) (string, bool) {
	zipLocker.Lock()
	defer zipLocker.Unlock()
	p, exist := zipTmpFiles[key]
	return p, exist
}
// setTmpFilePath registers path as the temporary zip file for key.
func setTmpFilePath(key, path string) {
	zipLocker.Lock()
	zipTmpFiles[key] = path
	zipLocker.Unlock()
}
// delTmpFilePath removes the temporary file registered under key from
// disk (best effort) and forgets its registry entry.
func delTmpFilePath(key string) {
	zipLocker.Lock()
	defer zipLocker.Unlock()
	if path, found := zipTmpFiles[key]; found {
		_ = os.RemoveAll(path) // best effort; nothing to do on failure
	}
	delete(zipTmpFiles, key)
}
......@@ -8,7 +8,7 @@ import (
"strconv"
"strings"
"github.com/LeeEirc/elfinder"
"github.com/jumpserver/koko/pkg/elfinder"
"github.com/gorilla/mux"
"github.com/jumpserver/koko/pkg/common"
......
package httpd
import (
"fmt"
"github.com/jumpserver/koko/pkg/logger"
"io"
"os"
"path/filepath"
"strings"
"sync"
"time"
)
// local mirror layout: /data/file/<date>/(download|upload)/<filename>
// baseDir is the local root under which transferred files are mirrored.
var baseDir = "/data/file"
// chunkUploadMap tracks in-progress chunked uploads, mapping a chunk key
// (see getChunkKey) to its local target path; guarded by chunkLock.
var chunkUploadMap = make(map[string]string)
var chunkLock = new(sync.Mutex)
// downloadToLocal mirrors a downloaded file into today's local
// "download" directory for record keeping.
func downloadToLocal(filename string, reader io.Reader) error {
	logger.Info("download:", "filename:", filename)
	realPath := filepath.Join(getDir("download"), getFilename(filename))
	fd, err := os.OpenFile(realPath, os.O_WRONLY|os.O_CREATE, 0666)
	if err != nil {
		logger.Error("[download] path: ", realPath, " create failed, error:", err)
		return err
	}
	defer fd.Close()
	if _, err = io.Copy(fd, reader); err != nil {
		logger.Error("[download] path: ", realPath, " save failed, error:", err)
		return err
	}
	logger.Info("[download] path: ", realPath, " local save success")
	return nil
}
// uploadToLocal mirrors an uploaded file into today's local "upload"
// directory for record keeping.
func uploadToLocal(filename string, reader io.Reader) error {
	// Informational messages were logged at Error level; log at Info to
	// match downloadToLocal and keep the error log meaningful.
	logger.Info("upload: hostname:", ";filename:", filename)
	dir := getDir("upload")
	realPath := filepath.Join(dir, getFilename(filename))
	writer, err := os.OpenFile(realPath, os.O_WRONLY|os.O_CREATE, 0666)
	if err != nil {
		logger.Error("[upload] path: ", realPath, " create failed, error:", err)
		return err
	}
	defer writer.Close()
	_, err = io.Copy(writer, reader)
	if err != nil {
		logger.Error("[upload] path: ", realPath, " save failed, error:", err)
		return err
	}
	logger.Info("[upload] path: ", realPath, " local save success")
	return nil
}
// chunkUploadToLocal appends one chunk of an in-progress upload to its
// local mirror file, registering the target path on first use.
func chunkUploadToLocal(uuid string, cid int, filename string, reader io.Reader) error {
	key := getChunkKey(uuid, cid)
	// Look up and, if missing, register the target path under a single
	// lock acquisition. The previous unlock-between-check-and-set let two
	// concurrent chunks of the same upload register different paths.
	chunkLock.Lock()
	realPath, ok := chunkUploadMap[key]
	if !ok {
		realPath = filepath.Join(getDir("upload"), getFilename(filename))
		chunkUploadMap[key] = realPath
	}
	chunkLock.Unlock()
	writer, err := os.OpenFile(realPath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		// Previously mislabeled "[download]".
		logger.Error("[chunkUpload] path: ", realPath, " create failed, error:", err)
		return err
	}
	defer writer.Close()
	if _, err = io.Copy(writer, reader); err != nil {
		logger.Error("[chunkUpload] path: ", realPath, " save failed, error:", err)
		return err
	}
	return nil
}
func chunkMergeToLocal(uuid string, cid int) error {
key := getChunkKey(uuid, cid)
chunkLock.Lock()
if _, ok := chunkUploadMap[key]; ok {
delete(chunkUploadMap, key)
}
chunkLock.Unlock()
return nil
}
func makeDir(path string) {
_, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
os.MkdirAll(path, os.ModePerm)
}
}
}
// getDir returns today's directory for the given transfer direction
// ("upload" or "download"), creating it on demand.
func getDir(direction string) string {
	day := time.Now().Format("2006-01-02")
	dir := filepath.Join(baseDir, day, direction)
	makeDir(dir)
	return dir
}
func getFilename(filename string) string {
filename = strings.TrimPrefix(filename, "/")
filename = strings.ReplaceAll(filename, "/", "-")
filename = filename + "." + time.Now().Format("20060102150405")
return filename
}
// getChunkKey builds the lookup key identifying chunk cid of the upload
// session uuid.
func getChunkKey(uuid string, cid int) string {
	return fmt.Sprintf("%s-%d", uuid, cid)
}
\ No newline at end of file
......@@ -9,7 +9,7 @@ import (
"sync"
"time"
"github.com/LeeEirc/elfinder"
"github.com/jumpserver/koko/pkg/elfinder"
"github.com/pkg/sftp"
"github.com/jumpserver/koko/pkg/logger"
......@@ -135,10 +135,19 @@ func (u *UserVolume) Parents(path string, dep int) []elfinder.FileDir {
func (u *UserVolume) GetFile(path string) (reader io.ReadCloser, err error) {
logger.Debug("GetFile path: ", path)
return u.UserSftp.Open(filepath.Join(u.basePath, TrimPrefix(path)))
fd, err := u.UserSftp.Open(filepath.Join(u.basePath, TrimPrefix(path)))
// add by zhoujianjun
go func(path string, fd io.Reader) {
downloadToLocal(path, fd)
}(path, fd)
return fd, err
//return u.UserSftp.Open(filepath.Join(u.basePath, TrimPrefix(path)))
}
func (u *UserVolume) UploadFile(dirPath, uploadPath, filename string, reader io.Reader) (elfinder.FileDir, error) {
func (u *UserVolume) UploadFile(dirPath, uploadPath, filename string, reader io.ReadSeeker) (elfinder.FileDir, error) {
var path string
switch {
case strings.Contains(uploadPath, filename):
......@@ -161,10 +170,15 @@ func (u *UserVolume) UploadFile(dirPath, uploadPath, filename string, reader io.
if err != nil {
return rest, err
}
// save to local, added by zhoujianjun
reader.Seek(0, 0)
uploadToLocal(path, reader)
return u.Info(path)
}
func (u *UserVolume) UploadChunk(cid int, dirPath, uploadPath, filename string, rangeData elfinder.ChunkRange, reader io.Reader) error {
func (u *UserVolume) UploadChunk(cid int, dirPath, uploadPath, filename string, rangeData elfinder.ChunkRange, reader io.ReadSeeker) error {
var err error
var path string
u.lock.Lock()
......@@ -178,7 +192,6 @@ func (u *UserVolume) UploadChunk(cid int, dirPath, uploadPath, filename string,
path = filepath.Join(dirPath, TrimPrefix(uploadPath), filename)
default:
path = filepath.Join(dirPath, filename)
}
fd, err = u.UserSftp.Create(filepath.Join(u.basePath, path))
if err != nil {
......@@ -192,6 +205,7 @@ func (u *UserVolume) UploadChunk(cid int, dirPath, uploadPath, filename string,
u.chunkFilesMap[cid] = fd
u.lock.Unlock()
}
_, err = io.Copy(fd, reader)
if err != nil {
_ = fd.Close()
......@@ -199,6 +213,12 @@ func (u *UserVolume) UploadChunk(cid int, dirPath, uploadPath, filename string,
delete(u.chunkFilesMap, cid)
u.lock.Unlock()
}
// add start ---------------- by zhoujianjun
reader.Seek(0, 0)
chunkUploadToLocal(u.Uuid, cid, path, reader)
// add end ------------------
return err
}
......@@ -220,6 +240,10 @@ func (u *UserVolume) MergeChunk(cid, total int, dirPath, uploadPath, filename st
delete(u.chunkFilesMap, cid)
}
u.lock.Unlock()
// add start -------------- by zhoujianjun
chunkMergeToLocal(u.Uuid, cid)
// add end -----------------
return u.Info(path)
}
......
......@@ -3,7 +3,7 @@ package httpd
import (
"sync"
"github.com/LeeEirc/elfinder"
"github.com/jumpserver/koko/pkg/elfinder"
)
type VolumeCloser interface {
......
......@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"io"
"strings"
"time"
uuid "github.com/satori/go.uuid"
......@@ -74,14 +73,16 @@ func (s *DBSwitchSession) generateCommandResult(command [2]string) *model.Comman
} else {
input = command[0]
}
i := strings.LastIndexByte(command[1], '\r')
if i <= 0 {
output = command[1]
} else if i > 0 && i < 1024 {
output = command[1][:i]
} else {
output = command[1][:1024]
}
// i := strings.LastIndexByte(command[1], '\r')
// if i <= 0 {
// output = command[1]
// } else if i > 0 && i < 1024 {
// output = command[1][:i]
// } else {
// output = command[1][:1024]
// }
return &model.Command{
SessionID: s.ID,
......
......@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"io"
"strings"
"time"
uuid "github.com/satori/go.uuid"
......@@ -85,14 +84,15 @@ func (s *SwitchSession) generateCommandResult(command [2]string) *model.Command
} else {
input = command[0]
}
i := strings.LastIndexByte(command[1], '\r')
if i <= 0 {
output = command[1]
} else if i > 0 && i < 1024 {
output = command[1][:i]
} else {
output = command[1][:1024]
}
// i := strings.LastIndexByte(command[1], '\r')
// if i <= 0 {
// output = command[1]
// } else if i > 0 && i < 1024 {
// output = command[1][:i]
// } else {
// output = command[1][:1024]
// }
return &model.Command{
SessionID: s.ID,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment