Commit c7faba63 (unverified), authored 5 years ago by 老广 and committed by GitHub 5 years ago
Merge pull request #63 from jumpserver/dev

Dev

Parents: fa2c8e32, da1ee19e
Showing 7 changed files with 121 additions and 201 deletions
Dockerfile               +1    -1
go.mod                   +1    -1
go.sum                   +2    -0
pkg/httpd/server.go      +10   -4
pkg/httpd/sftpvolume.go  +104  -93
pkg/httpd/upgrader.go    +0    -102
pkg/srvconn/sftpconn.go  +3    -0
Dockerfile
@@ -4,8 +4,8 @@ WORKDIR /opt/coco
 ARG GOPROXY
 ENV GOPROXY=$GOPROXY
 ENV GO111MODULE=on
-COPY go.mod go.sum ./
 RUN apk update && apk add git
+COPY go.mod go.sum ./
 RUN go mod download
 COPY . .
 RUN cd cmd && go build koko.go

go.mod
@@ -6,7 +6,7 @@ require (
     github.com/Azure/azure-pipeline-go v0.1.9 // indirect
     github.com/Azure/azure-storage-blob-go v0.6.0
     github.com/BurntSushi/toml v0.3.1 // indirect
-    github.com/LeeEirc/elfinder v0.0.0-20190718023636-5679c8bdb7bf
+    github.com/LeeEirc/elfinder v0.0.1
     github.com/aliyun/aliyun-oss-go-sdk v1.9.8
     github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 // indirect
     github.com/aws/aws-sdk-go v1.19.46

go.sum
@@ -7,6 +7,8 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/LeeEirc/elfinder v0.0.0-20190718023636-5679c8bdb7bf h1:dZipr1cwienSKNTXsveMmyd7VFY3v/eMHNl/vueN10s=
 github.com/LeeEirc/elfinder v0.0.0-20190718023636-5679c8bdb7bf/go.mod h1:ApL/XFs34Gvqinex9Z1sZdsp3Jeu26nNuEsf1wQFx8s=
+github.com/LeeEirc/elfinder v0.0.1 h1:fFVy2xddwB2qQxLxJOGl+1Lj686pnRFnySsjPr7luZ0=
+github.com/LeeEirc/elfinder v0.0.1/go.mod h1:VSfmUhE4Fvv+4Dfyo7Wmi44bdyDuIQgJtyi5EDcDSxE=
 github.com/aliyun/aliyun-oss-go-sdk v1.9.8 h1:BOflvK0Zs/zGmoabyFIzTg5c3kguktWTXEwewwbuba0=
 github.com/aliyun/aliyun-oss-go-sdk v1.9.8/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
 github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=

pkg/httpd/server.go
@@ -7,11 +7,11 @@ import (
     "time"

     "github.com/gorilla/mux"
-    "github.com/kataras/neffos"
-    "github.com/kataras/neffos/gorilla"
-
+    gorillaws "github.com/gorilla/websocket"
     "github.com/jumpserver/koko/pkg/config"
     "github.com/jumpserver/koko/pkg/logger"
+    "github.com/kataras/neffos"
+    "github.com/kataras/neffos/gorilla"
 )

 var (
@@ -19,6 +19,12 @@ var (
     Timeout = time.Duration(60)
 )

+var upgrader = gorilla.Upgrader(gorillaws.Upgrader{
+    CheckOrigin: func(r *http.Request) bool {
+        return true
+    },
+})
+
 var wsEvents = neffos.WithTimeout{
     ReadTimeout:  Timeout * time.Second,
     WriteTimeout: Timeout * time.Second,
@@ -50,7 +56,7 @@ var wsEvents = neffos.WithTimeout{
 func StartHTTPServer() {
     conf := config.GetConf()
-    sshWs := neffos.New(gorilla.DefaultUpgrader, wsEvents)
+    sshWs := neffos.New(upgrader, wsEvents)
     sshWs.IDGenerator = func(w http.ResponseWriter, r *http.Request) string {
         return neffos.DefaultIDGenerator(w, r)
     }

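The change above swaps neffos' stock gorilla.DefaultUpgrader for an upgrader whose CheckOrigin callback always returns true, so the browser client can complete the websocket handshake even when it is served from a different origin. Below is a minimal, self-contained sketch of the same gorilla/websocket pattern; the /ws route, listen address and echo loop are illustrative only and not part of koko.

package main

import (
    "log"
    "net/http"

    gorillaws "github.com/gorilla/websocket"
)

// Same idea as the upgrader added in pkg/httpd/server.go: accept the
// upgrade regardless of the request's Origin header.
var upgrader = gorillaws.Upgrader{
    CheckOrigin: func(r *http.Request) bool { return true },
}

func wsHandler(w http.ResponseWriter, r *http.Request) {
    conn, err := upgrader.Upgrade(w, r, nil)
    if err != nil {
        log.Println("upgrade failed:", err)
        return
    }
    defer conn.Close()
    // Echo frames back so the handler is runnable on its own.
    for {
        msgType, data, err := conn.ReadMessage()
        if err != nil {
            return
        }
        if err := conn.WriteMessage(msgType, data); err != nil {
            return
        }
    }
}

func main() {
    http.HandleFunc("/ws", wsHandler) // illustrative path, not koko's route
    log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}

Without the CheckOrigin override, gorilla/websocket rejects cross-origin upgrade requests by default, which is the behaviour the deleted pkg/httpd/upgrader.go had worked around with an Access-Control-Allow-Origin header.
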
pkg/httpd/sftpvolume.go
@@ -5,24 +5,25 @@ import (
     "io"
     "os"
     "path/filepath"
+    "sync"

     "github.com/LeeEirc/elfinder"
+    "github.com/pkg/sftp"

-    "github.com/jumpserver/koko/pkg/common"
-    "github.com/jumpserver/koko/pkg/config"
     "github.com/jumpserver/koko/pkg/logger"
     "github.com/jumpserver/koko/pkg/model"
     "github.com/jumpserver/koko/pkg/service"
     "github.com/jumpserver/koko/pkg/srvconn"
 )

 func NewUserVolume(user *model.User, addr, hostId string) *UserVolume {
     var assets []model.Asset
     homename := "Home"
+    basePath := "/"
     switch hostId {
     case "":
         assets = service.GetUserAssets(user.ID, "1", "")
     default:
         assets = service.GetUserAssets(user.ID, "1", hostId)
         if len(assets) == 1 {
@@ -30,16 +31,17 @@ func NewUserVolume(user *model.User, addr, hostId string) *UserVolume {
             if assets[0].OrgID != "" {
                 homename = fmt.Sprintf("%s.%s", assets[0].Hostname, assets[0].OrgName)
             }
+            basePath = filepath.Join("/", homename)
         }
     }
-    conf := config.GetConf()
     rawID := fmt.Sprintf("%s@%s", user.Username, addr)
     uVolume := &UserVolume{
         Uuid:     elfinder.GenerateID(rawID),
         UserSftp: srvconn.NewUserSFTP(user, addr, assets...),
         Homename: homename,
-        basePath:     filepath.Join("/", homename),
-        localTmpPath: filepath.Join(conf.RootPath, "data", "tmp"),
+        basePath:      basePath,
+        chunkFilesMap: make(map[int]*sftp.File),
+        lock:          new(sync.Mutex),
     }
     return uVolume
 }
@@ -47,9 +49,11 @@ func NewUserVolume(user *model.User, addr, hostId string) *UserVolume {
 type UserVolume struct {
     Uuid string
     *srvconn.UserSftp
-    localTmpPath string
     Homename string
     basePath string
+
+    chunkFilesMap map[int]*sftp.File
+    lock          *sync.Mutex
 }

 func (u *UserVolume) ID() string {
@@ -58,13 +62,11 @@ func (u *UserVolume) ID() string {
 func (u *UserVolume) Info(path string) (elfinder.FileDir, error) {
     logger.Debug("volume Info: ", path)
-    var rest elfinder.FileDir
     if path == "/" {
         return u.RootFileDir(), nil
     }
-    originFileInfo, err := u.Stat(filepath.Join(u.basePath, path))
+    var rest elfinder.FileDir
+    originFileInfo, err := u.Stat(path)
     if err != nil {
         return rest, err
     }
@@ -73,12 +75,13 @@ func (u *UserVolume) Info(path string) (elfinder.FileDir, error) {
     rest.Read, rest.Write = elfinder.ReadWritePem(originFileInfo.Mode())
     if filename != originFileInfo.Name() {
         rest.Read, rest.Write = 1, 1
+        logger.Debug("info filename no eque ")
     }
     if filename == "." {
         filename = originFileInfo.Name()
     }
     rest.Name = filename
-    rest.Hash = hashPath(u.Uuid, filepath.Join(dirPath, filename))
+    rest.Hash = hashPath(u.Uuid, path)
     rest.Phash = hashPath(u.Uuid, dirPath)
     if rest.Hash == rest.Phash {
         rest.Phash = ""
@@ -98,12 +101,7 @@ func (u *UserVolume) Info(path string) (elfinder.FileDir, error) {
 func (u *UserVolume) List(path string) []elfinder.FileDir {
     dirs := make([]elfinder.FileDir, 0)
     logger.Debug("volume List: ", path)
-    dirInfo, err := u.Info(path)
-    if err != nil {
-        return dirs
-    }
-    dirs = append(dirs, dirInfo)
-    originFileInfolist, err := u.UserSftp.ReadDir(path)
+    originFileInfolist, err := u.UserSftp.ReadDir(filepath.Join(u.basePath, path))
     if err != nil {
         return dirs
     }
@@ -118,8 +116,15 @@ func (u *UserVolume) Parents(path string, dep int) []elfinder.FileDir {
     dirs := make([]elfinder.FileDir, 0)
     dirPath := path
     for {
-        tmps := u.List(dirPath)
-        dirs = append(dirs, tmps...)
+        tmps, err := u.UserSftp.ReadDir(filepath.Join(u.basePath, dirPath))
+        if err != nil {
+            return dirs
+        }
+        for i := 0; i < len(tmps); i++ {
+            dirs = append(dirs, NewElfinderFileInfo(u.Uuid, dirPath, tmps[i]))
+        }
         if dirPath == "/" {
             break
         }
@@ -129,14 +134,22 @@ func (u *UserVolume) Parents(path string, dep int) []elfinder.FileDir {
 }

 func (u *UserVolume) GetFile(path string) (reader io.ReadCloser, err error) {
-    return u.UserSftp.Open(path)
+    logger.Debug("GetFile path: ", path)
+    return u.UserSftp.Open(filepath.Join(u.basePath, path))
 }

-func (u *UserVolume) UploadFile(dir, filename string, reader io.Reader) (elfinder.FileDir, error) {
-    path := filepath.Join(dir, filename)
+func (u *UserVolume) UploadFile(dirPath, uploadPath, filename string, reader io.Reader) (elfinder.FileDir, error) {
+    var path string
+    switch uploadPath {
+    case "":
+        path = filepath.Join(dirPath, filename)
+    default:
+        path = filepath.Join(dirPath, uploadPath)
+    }
     logger.Debug("Volume upload file path: ", path)
     var rest elfinder.FileDir
-    fd, err := u.UserSftp.Create(path)
+    fd, err := u.UserSftp.Create(filepath.Join(u.basePath, path))
     if err != nil {
         return rest, err
     }
@@ -149,77 +162,65 @@ func (u *UserVolume) UploadFile(dir, filename string, reader io.Reader) (elfinde
     return u.Info(path)
 }

-func (u *UserVolume) UploadChunk(cid int, dirPath, chunkName string, reader io.Reader) error {
-    //chunkName format "filename.[NUMBER]_[TOTAL].part"
-    var err error
-    tmpDir := filepath.Join(u.localTmpPath, dirPath)
-    err = common.EnsureDirExist(tmpDir)
-    if err != nil {
-        return err
-    }
-    chunkRealPath := fmt.Sprintf("%s_%d",
-        filepath.Join(tmpDir, chunkName), cid)
-    fd, err := os.Create(chunkRealPath)
-    defer fd.Close()
-    if err != nil {
-        return err
-    }
-    _, err = io.Copy(fd, reader)
-    return err
-}
-
-func (u *UserVolume) MergeChunk(cid, total int, dirPath, filename string) (elfinder.FileDir, error) {
-    path := filepath.Join(dirPath, filename)
-    logger.Debug("merge chunk path: ", path)
-    var rest elfinder.FileDir
-    fd, err := u.UserSftp.Create(path)
-    if err != nil {
-        for i := 0; i <= total; i++ {
-            partPath := fmt.Sprintf("%s.%d_%d.part_%d",
-                filepath.Join(u.localTmpPath, dirPath, filename), i, total, cid)
-            _ = os.Remove(partPath)
-        }
-        return rest, err
-    }
-    defer fd.Close()
-    for i := 0; i <= total; i++ {
-        partPath := fmt.Sprintf("%s.%d_%d.part_%d",
-            filepath.Join(u.localTmpPath, dirPath, filename), i, total, cid)
-        partFD, err := os.Open(partPath)
-        if err != nil {
-            logger.Debug(err)
-            _ = os.Remove(partPath)
-            continue
-        }
-        _, err = io.Copy(fd, partFD)
-        if err != nil {
-            return rest, os.ErrNotExist
-        }
-        _ = partFD.Close()
-        _ = os.Remove(partPath)
-    }
-    return u.Info(path)
-}
-
-func (u *UserVolume) CompleteChunk(cid, total int, dirPath, filename string) bool {
-    for i := 0; i <= total; i++ {
-        partPath := fmt.Sprintf("%s.%d_%d.part_%d",
-            filepath.Join(u.localTmpPath, dirPath, filename), i, total, cid)
-        _, err := os.Stat(partPath)
-        if err != nil {
-            return false
-        }
-    }
-    return true
+func (u *UserVolume) UploadChunk(cid int, dirPath, uploadPath, filename string, rangeData elfinder.ChunkRange, reader io.Reader) error {
+    var err error
+    var path string
+    u.lock.Lock()
+    fd, ok := u.chunkFilesMap[cid]
+    u.lock.Unlock()
+    if !ok {
+        switch uploadPath {
+        case "":
+            path = filepath.Join(dirPath, filename)
+        default:
+            path = filepath.Join(dirPath, uploadPath)
+        }
+        fd, err = u.UserSftp.Create(filepath.Join(u.basePath, path))
+        if err != nil {
+            return err
+        }
+        _, err = fd.Seek(rangeData.Offset, 0)
+        if err != nil {
+            return err
+        }
+        u.lock.Lock()
+        u.chunkFilesMap[cid] = fd
+        u.lock.Unlock()
+    }
+    _, err = io.Copy(fd, reader)
+    if err != nil {
+        _ = fd.Close()
+        u.lock.Lock()
+        delete(u.chunkFilesMap, cid)
+        u.lock.Unlock()
+    }
+    return err
+}
+
+func (u *UserVolume) MergeChunk(cid, total int, dirPath, uploadPath, filename string) (elfinder.FileDir, error) {
+    var path string
+    switch uploadPath {
+    case "":
+        path = filepath.Join(dirPath, filename)
+    default:
+        path = filepath.Join(dirPath, uploadPath)
+    }
+    logger.Debug("merge chunk path: ", path)
+    u.lock.Lock()
+    if fd, ok := u.chunkFilesMap[cid]; ok {
+        _ = fd.Close()
+        delete(u.chunkFilesMap, cid)
+    }
+    u.lock.Unlock()
+    return u.Info(path)
 }

 func (u *UserVolume) MakeDir(dir, newDirname string) (elfinder.FileDir, error) {
+    logger.Debug("volume Make Dir: ", newDirname)
     path := filepath.Join(dir, newDirname)
     var rest elfinder.FileDir
-    err := u.UserSftp.MkdirAll(path)
+    err := u.UserSftp.MkdirAll(filepath.Join(u.basePath, path))
     if err != nil {
         return rest, err
     }
@@ -227,20 +228,26 @@ func (u *UserVolume) MakeDir(dir, newDirname string) (elfinder.FileDir, error) {
 }

 func (u *UserVolume) MakeFile(dir, newFilename string) (elfinder.FileDir, error) {
+    logger.Debug("volume MakeFile")
     path := filepath.Join(dir, newFilename)
     var rest elfinder.FileDir
-    fd, err := u.UserSftp.Create(path)
+    fd, err := u.UserSftp.Create(filepath.Join(u.basePath, path))
     if err != nil {
         return rest, err
     }
-    defer fd.Close()
-    return u.Info(path)
+    _ = fd.Close()
+    res, err := u.UserSftp.Stat(filepath.Join(u.basePath, path))
+    return NewElfinderFileInfo(u.Uuid, dir, res), err
 }

 func (u *UserVolume) Rename(oldNamePath, newName string) (elfinder.FileDir, error) {
+    logger.Debug("volume Rename")
     var rest elfinder.FileDir
     newNamePath := filepath.Join(filepath.Dir(oldNamePath), newName)
-    err := u.UserSftp.Rename(oldNamePath, newNamePath)
+    err := u.UserSftp.Rename(filepath.Join(u.basePath, oldNamePath), filepath.Join(u.basePath, newNamePath))
     if err != nil {
         return rest, err
     }
@@ -248,26 +255,30 @@ func (u *UserVolume) Rename(oldNamePath, newName string) (elfinder.FileDir, erro
 }

 func (u *UserVolume) Remove(path string) error {
+    logger.Debug("volume remove", path)
     var res os.FileInfo
     var err error
-    res, err = u.UserSftp.Stat(path)
+    res, err = u.UserSftp.Stat(filepath.Join(u.basePath, path))
     if err != nil {
         return err
     }
     if res.IsDir() {
-        return u.UserSftp.RemoveDirectory(path)
+        return u.UserSftp.RemoveDirectory(filepath.Join(u.basePath, path))
     }
-    return u.UserSftp.Remove(path)
+    return u.UserSftp.Remove(filepath.Join(u.basePath, path))
 }

 func (u *UserVolume) Paste(dir, filename, suffix string, reader io.ReadCloser) (elfinder.FileDir, error) {
+    defer reader.Close()
     var rest elfinder.FileDir
     path := filepath.Join(dir, filename)
-    rest, err := u.Info(path)
-    if err != nil {
+    _, err := u.UserSftp.Stat(filepath.Join(u.basePath, path))
+    if err == nil {
         path += suffix
     }
-    fd, err := u.UserSftp.Create(path)
+    fd, err := u.UserSftp.Create(filepath.Join(u.basePath, path))
+    logger.Debug("volume paste: ", path, err)
     if err != nil {
         return rest, err
     }
@@ -281,7 +292,7 @@ func (u *UserVolume) Paste(dir, filename, suffix string, reader io.ReadCloser) (
 func (u *UserVolume) RootFileDir() elfinder.FileDir {
     logger.Debug("Root File Dir")
-    fInfo, _ := u.UserSftp.Info()
+    fInfo, _ := u.UserSftp.Stat(u.basePath)
     var rest elfinder.FileDir
     rest.Name = u.Homename
     rest.Hash = hashPath(u.Uuid, "/")

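The rewritten UploadChunk/MergeChunk above stop staging upload chunks under a local tmp directory: each upload id (cid) keeps one open *sftp.File in chunkFilesMap, every chunk is Seek'ed to rangeData.Offset and copied in place, and MergeChunk only closes the handle and drops the id. Below is a minimal local sketch of that bookkeeping pattern using os.File instead of an SFTP session; the chunkWriter type, the demo.bin path and the main function are illustrative assumptions, not koko code.

package main

import (
    "fmt"
    "io"
    "os"
    "strings"
    "sync"
)

// chunkWriter mirrors the chunkFilesMap/lock bookkeeping of the new
// UploadChunk/MergeChunk, but against local files so the sketch runs
// without an SFTP connection.
type chunkWriter struct {
    lock  sync.Mutex
    files map[int]*os.File // upload id -> open target file
}

func newChunkWriter() *chunkWriter {
    return &chunkWriter{files: make(map[int]*os.File)}
}

// UploadChunk writes one chunk at its byte offset, opening the target on first use.
func (c *chunkWriter) UploadChunk(cid int, path string, offset int64, r io.Reader) error {
    c.lock.Lock()
    fd, ok := c.files[cid]
    c.lock.Unlock()
    if !ok {
        var err error
        fd, err = os.Create(path)
        if err != nil {
            return err
        }
        c.lock.Lock()
        c.files[cid] = fd
        c.lock.Unlock()
    }
    if _, err := fd.Seek(offset, io.SeekStart); err != nil {
        return err
    }
    _, err := io.Copy(fd, r)
    return err
}

// MergeChunk closes the file and forgets the upload id once all chunks arrived.
func (c *chunkWriter) MergeChunk(cid int) error {
    c.lock.Lock()
    defer c.lock.Unlock()
    if fd, ok := c.files[cid]; ok {
        delete(c.files, cid)
        return fd.Close()
    }
    return nil
}

func main() {
    cw := newChunkWriter()
    // Two 5-byte chunks of a 10-byte file, written out of order.
    _ = cw.UploadChunk(1, "demo.bin", 5, strings.NewReader("WORLD"))
    _ = cw.UploadChunk(1, "demo.bin", 0, strings.NewReader("HELLO"))
    _ = cw.MergeChunk(1)
    data, _ := os.ReadFile("demo.bin")
    fmt.Println(string(data)) // prints "HELLOWORLD"
}

Writing each chunk at its offset avoids buffering whole files on the koko host and lets chunks arrive out of order, at the cost of keeping one remote file handle open per in-flight upload.
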
pkg/httpd/upgrader.go  deleted (100644 → 0)

package httpd

import (
    "net"
    "net/http"
    "sync"
    "time"

    "github.com/kataras/neffos"

    gorilla "github.com/gorilla/websocket"
)

// DefaultUpgrader is a gorilla/websocket Upgrader with all fields set to the default values.
var DefaultUpgrader = Upgrader(gorilla.Upgrader{})

// Upgrader is a `neffos.Upgrader` type for the gorilla/websocket subprotocol implementation.
// Should be used on `New` to construct the neffos server.
func Upgrader(upgrader gorilla.Upgrader) neffos.Upgrader {
    return func(w http.ResponseWriter, r *http.Request) (neffos.Socket, error) {
        header := w.Header()
        header.Set("Access-Control-Allow-Origin", "*")
        underline, err := upgrader.Upgrade(w, r, header)
        if err != nil {
            return nil, err
        }

        return newSocket(underline, r, false), nil
    }
}

// Socket completes the `neffos.Socket` interface,
// it describes the underline websocket connection.
type Socket struct {
    UnderlyingConn *gorilla.Conn
    request        *http.Request

    client bool

    mu sync.Mutex
}

func newSocket(underline *gorilla.Conn, request *http.Request, client bool) *Socket {
    return &Socket{
        UnderlyingConn: underline,
        request:        request,
        client:         client,
    }
}

// NetConn returns the underline net connection.
func (s *Socket) NetConn() net.Conn {
    return s.UnderlyingConn.UnderlyingConn()
}

// Request returns the http request value.
func (s *Socket) Request() *http.Request {
    return s.request
}

// ReadData reads binary or text messages from the remote connection.
func (s *Socket) ReadData(timeout time.Duration) ([]byte, error) {
    for {
        if timeout > 0 {
            s.UnderlyingConn.SetReadDeadline(time.Now().Add(timeout))
        }

        opCode, data, err := s.UnderlyingConn.ReadMessage()
        if err != nil {
            return nil, err
        }

        if opCode != gorilla.BinaryMessage && opCode != gorilla.TextMessage {
            // if gorilla.IsUnexpectedCloseError(err, gorilla.CloseGoingAway) ...
            continue
        }

        return data, err
    }
}

// WriteBinary sends a binary message to the remote connection.
func (s *Socket) WriteBinary(body []byte, timeout time.Duration) error {
    return s.write(body, gorilla.BinaryMessage, timeout)
}

// WriteText sends a text message to the remote connection.
func (s *Socket) WriteText(body []byte, timeout time.Duration) error {
    return s.write(body, gorilla.TextMessage, timeout)
}

func (s *Socket) write(body []byte, opCode int, timeout time.Duration) error {
    if timeout > 0 {
        s.UnderlyingConn.SetWriteDeadline(time.Now().Add(timeout))
    }

    s.mu.Lock()
    err := s.UnderlyingConn.WriteMessage(opCode, body)
    s.mu.Unlock()

    return err
}

pkg/srvconn/sftpconn.go
@@ -245,6 +245,7 @@ func (u *UserSftp) Remove(path string) error {
     if conn == nil {
         return sftp.ErrSshFxPermissionDenied
     }
+    logger.Debug("remove file path", realPath)
     err := conn.client.Remove(realPath)
     filename := realPath
     isSuccess := false
@@ -391,6 +392,7 @@ func (u *UserSftp) Create(path string) (*sftp.File, error) {
     if conn == nil {
         return nil, sftp.ErrSshFxPermissionDenied
     }
+    logger.Debug("create path:", realPath)
     sf, err := conn.client.Create(realPath)
     filename := realPath
     isSuccess := false
@@ -425,6 +427,7 @@ func (u *UserSftp) Open(path string) (*sftp.File, error) {
     if conn == nil {
         return nil, sftp.ErrSshFxPermissionDenied
     }
+    logger.Debug("Open path:", realPath)
     sf, err := conn.client.Open(realPath)
     filename := realPath
     isSuccess := false