Commit 503f6ff

Fix(migrate): support migrate chunkserver successed
Signed-off-by: caoxianfei1 <[email protected]>
Parent: 9d83a9c

14 files changed (+625, -57 lines)

cli/command/deploy.go

Lines changed: 1 addition & 3 deletions
@@ -231,7 +231,6 @@ func genDeployPlaybook(curveadm *cli.CurveAdm,
         Name: options.poolset,
         Type: options.poolsetDiskType,
     }
-    diskType := options.poolsetDiskType

     pb := playbook.NewPlaybook(curveadm)
     for _, step := range steps {
@@ -255,8 +254,7 @@ func genDeployPlaybook(curveadm *cli.CurveAdm,
             options[comm.KEY_NUMBER_OF_CHUNKSERVER] = calcNumOfChunkserver(curveadm, dcs)
         } else if step == CREATE_LOGICAL_POOL {
             options[comm.KEY_CREATE_POOL_TYPE] = comm.POOL_TYPE_LOGICAL
-            options[comm.POOLSET] = poolset
-            options[comm.POOLSET_DISK_TYPE] = diskType
+            options[comm.KEY_POOLSET] = poolset
             options[comm.KEY_NUMBER_OF_CHUNKSERVER] = calcNumOfChunkserver(curveadm, dcs)
         }

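With comm.KEY_POOLSET the deploy path now passes the poolset name and disk type as a single value instead of two string options (comm.POOLSET and comm.POOLSET_DISK_TYPE). A minimal standalone sketch of that shape, assuming configure.Poolset is a plain struct with Name and Type fields, as the literal in genDeployPlaybook suggests:

package main

import "fmt"

// Poolset mirrors the two fields set in genDeployPlaybook; the real type
// lives in internal/configure and may carry more fields.
type Poolset struct {
    Name string
    Type string
}

func main() {
    options := map[string]interface{}{}
    // before: two keys, POOLSET = "default" and POOLSET_DISK_TYPE = "ssd"
    options["POOLSET"] = Poolset{Name: "default", Type: "ssd"}

    ps := options["POOLSET"].(Poolset)
    fmt.Printf("create logical pool in poolset %q (disk type %q)\n", ps.Name, ps.Type)
}
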
cli/command/migrate.go

Lines changed: 93 additions & 31 deletions
@@ -30,7 +30,10 @@ import (
     "github.com/opencurve/curveadm/internal/configure/topology"
     "github.com/opencurve/curveadm/internal/errno"
     "github.com/opencurve/curveadm/internal/playbook"
-    tui "github.com/opencurve/curveadm/internal/tui/common"
+    "github.com/opencurve/curveadm/internal/task/task/common"
+    tui "github.com/opencurve/curveadm/internal/tui"
+    tuicomm "github.com/opencurve/curveadm/internal/tui/common"
+
     cliutil "github.com/opencurve/curveadm/internal/utils"
     "github.com/spf13/cobra"
 )
@@ -71,14 +74,12 @@ var (
     // chunkserevr (curvebs)
     MIGRATE_CHUNKSERVER_STEPS = []int{
         playbook.BACKUP_ETCD_DATA,
-        playbook.STOP_SERVICE,
-        playbook.CLEAN_SERVICE, // only container
+        playbook.CREATE_PHYSICAL_POOL, // add machine that migrate to
         playbook.PULL_IMAGE,
         playbook.CREATE_CONTAINER,
         playbook.SYNC_CONFIG,
-        playbook.CREATE_PHYSICAL_POOL,
         playbook.START_CHUNKSERVER,
-        playbook.CREATE_LOGICAL_POOL,
+        playbook.MARK_SERVER_PENGDDING,
     }

     // metaserver (curvefs)
@@ -100,12 +101,25 @@ var (
         topology.ROLE_SNAPSHOTCLONE: MIGRATE_SNAPSHOTCLONE_STEPS,
         topology.ROLE_METASERVER:    MIGRATE_METASERVER_STEPS,
     }
+
+    MIGRATE_POST_CLEAN_STEPS = []int{
+        playbook.STOP_SERVICE,
+        playbook.CLEAN_SERVICE,        // only container
+        playbook.CREATE_PHYSICAL_POOL, // remove machine that migrate from, only for chunkserver or metaserver
+        playbook.UPDATE_TOPOLOGY,
+    }
+
+    GET_MIGRATE_STATUS = []int{
+        playbook.GET_MIGRATE_STATUS,
+    }
 )

 type migrateOptions struct {
     filename        string
     poolset         string
     poolsetDiskType string
+    showStatus      bool
+    clean           bool
 }

 func NewMigrateCommand(curveadm *cli.CurveAdm) *cobra.Command {
@@ -125,7 +139,8 @@ func NewMigrateCommand(curveadm *cli.CurveAdm) *cobra.Command {
     flags := cmd.Flags()
     flags.StringVar(&options.poolset, "poolset", "default", "Specify the poolset")
     flags.StringVar(&options.poolsetDiskType, "poolset-disktype", "ssd", "Specify the disk type of physical pool")
-
+    flags.BoolVar(&options.showStatus, "status", false, "Show copyset transferring status")
+    flags.BoolVar(&options.clean, "clean", false, "Clean migrated environment for chunkserver or metaserver")
     return cmd
 }

@@ -191,8 +206,21 @@ func genMigratePlaybook(curveadm *cli.CurveAdm,
     migrates := getMigrates(curveadm, data)
     role := migrates[0].From.GetRole()
     steps := MIGRATE_ROLE_STEPS[role]
-    poolset := options.poolset
-    poolsetDiskType := options.poolsetDiskType
+
+    // show status
+    if options.showStatus {
+        steps = GET_MIGRATE_STATUS
+    }
+
+    // post clean
+    if options.clean {
+        steps = MIGRATE_POST_CLEAN_STEPS
+    }
+
+    poolset := configure.Poolset{
+        Name: options.poolset,
+        Type: options.poolsetDiskType,
+    }

     pb := playbook.NewPlaybook(curveadm)
     for _, step := range steps {
@@ -204,36 +232,40 @@ func genMigratePlaybook(curveadm *cli.CurveAdm,
             config = dcs2del
         case playbook.BACKUP_ETCD_DATA:
             config = curveadm.FilterDeployConfigByRole(dcs, topology.ROLE_ETCD)
-        case CREATE_PHYSICAL_POOL,
-            CREATE_LOGICAL_POOL:
+        case
+            playbook.CREATE_PHYSICAL_POOL,
+            playbook.CREATE_LOGICAL_POOL,
+            playbook.MARK_SERVER_PENGDDING,
+            playbook.GET_MIGRATE_STATUS:
             config = curveadm.FilterDeployConfigByRole(dcs, topology.ROLE_MDS)[:1]
         }

         // options
-        options := map[string]interface{}{}
+        optionsKV := map[string]interface{}{}
         switch step {
         case playbook.CLEAN_SERVICE:
-            options[comm.KEY_CLEAN_ITEMS] = []string{comm.CLEAN_ITEM_CONTAINER}
-            options[comm.KEY_CLEAN_BY_RECYCLE] = true
+            optionsKV[comm.KEY_CLEAN_ITEMS] = []string{comm.CLEAN_ITEM_CONTAINER}
+            optionsKV[comm.KEY_CLEAN_BY_RECYCLE] = true
+            optionsKV[comm.KEY_REMOVE_MIGRATED_SERVER] = true
         case playbook.CREATE_PHYSICAL_POOL:
-            options[comm.KEY_CREATE_POOL_TYPE] = comm.POOL_TYPE_PHYSICAL
-            options[comm.KEY_MIGRATE_SERVERS] = migrates
-            options[comm.POOLSET] = poolset
-            options[comm.POOLSET_DISK_TYPE] = poolsetDiskType
+            optionsKV[comm.KEY_CREATE_POOL_TYPE] = comm.POOL_TYPE_PHYSICAL
+            optionsKV[comm.KEY_MIGRATE_SERVERS] = migrates
+            optionsKV[comm.KEY_POOLSET] = poolset
         case playbook.CREATE_LOGICAL_POOL:
-            options[comm.KEY_CREATE_POOL_TYPE] = comm.POOL_TYPE_LOGICAL
-            options[comm.KEY_MIGRATE_SERVERS] = migrates
-            options[comm.KEY_NEW_TOPOLOGY_DATA] = data
-            options[comm.POOLSET] = poolset
-            options[comm.POOLSET_DISK_TYPE] = poolsetDiskType
+            optionsKV[comm.KEY_CREATE_POOL_TYPE] = comm.POOL_TYPE_LOGICAL
+            optionsKV[comm.KEY_MIGRATE_SERVERS] = migrates
+            optionsKV[comm.KEY_NEW_TOPOLOGY_DATA] = data
+            optionsKV[comm.KEY_POOLSET] = poolset
         case playbook.UPDATE_TOPOLOGY:
-            options[comm.KEY_NEW_TOPOLOGY_DATA] = data
+            optionsKV[comm.KEY_NEW_TOPOLOGY_DATA] = data
+        case playbook.GET_MIGRATE_STATUS:
+            optionsKV[comm.KEY_MIGRATE_SERVERS] = migrates
         }

         pb.AddStep(&playbook.PlaybookStep{
             Type:    step,
             Configs: config,
-            Options: options,
+            Options: optionsKV,
             ExecOptions: playbook.ExecOptions{
                 SilentSubBar: step == playbook.UPDATE_TOPOLOGY,
             },
@@ -252,6 +284,23 @@ func displayMigrateTitle(curveadm *cli.CurveAdm, data string) {
     curveadm.WriteOutln(color.YellowString(" - Migrate host: from %s to %s", from.GetHost(), to.GetHost()))
 }

+func displayMigrateStatus(curveadm *cli.CurveAdm) {
+    var output string
+    statuses := []common.MigrateStatus{}
+    v := curveadm.MemStorage().Get(comm.KEY_MIGRATE_STATUS)
+    if v != nil {
+        m := v.(map[string]common.MigrateStatus)
+        for _, status := range m {
+            statuses = append(statuses, status)
+        }
+    }
+
+    output = tui.FormatMigrateStatus(statuses)
+
+    curveadm.WriteOutln("")
+    curveadm.WriteOut("%s", output)
+}
+
 func runMigrate(curveadm *cli.CurveAdm, options migrateOptions) error {
     // TODO(P0): added prechek for target host
     // 1) parse cluster topology
@@ -261,7 +310,11 @@ func runMigrate(curveadm *cli.CurveAdm, options migrateOptions) error {
     }

     // 2) read topology from file
-    data, err := readTopology(curveadm, options.filename)
+    data, err := readTopology(curveadm,
+        options.filename,
+        options.showStatus,
+        options.clean,
+    )
     if err != nil {
         return err
     }
@@ -272,13 +325,15 @@ func runMigrate(curveadm *cli.CurveAdm, options migrateOptions) error {
         return err
     }

-    // 4) display title
-    displayMigrateTitle(curveadm, data)
+    if !options.showStatus && !options.clean {
+        // 4) display title
+        displayMigrateTitle(curveadm, data)

-    // 5) confirm by user
-    if pass := tui.ConfirmYes(tui.DEFAULT_CONFIRM_PROMPT); !pass {
-        curveadm.WriteOutln(tui.PromptCancelOpetation("migrate service"))
-        return errno.ERR_CANCEL_OPERATION
+        // 5) confirm by user
+        if pass := tuicomm.ConfirmYes(tuicomm.DEFAULT_CONFIRM_PROMPT); !pass {
+            curveadm.WriteOutln(tuicomm.PromptCancelOpetation("migrate service"))
+            return errno.ERR_CANCEL_OPERATION
+        }
     }

     // 6) generate migrate playbook
@@ -294,6 +349,13 @@ func runMigrate(curveadm *cli.CurveAdm, options migrateOptions) error {
     }

     // 9) print success prompt
+    if options.showStatus {
+        displayMigrateStatus(curveadm)
+        return nil
+    }
+    if options.clean {
+        return nil
+    }
     curveadm.WriteOutln("")
     curveadm.WriteOutln(color.GreenString("Services successfully migrateed ^_^."))
     // TODO(P1): warning iff there is changed configs

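The migrate command now has three modes: a plain run builds the playbook that brings up the chunkserver on the target host and marks the source chunkserver pending, --status only queries copyset transferring progress, and --clean runs the post-migration teardown once the copysets have moved. A self-contained sketch of that step selection, using illustrative stand-in constants rather than the real playbook step IDs:

package main

import "fmt"

// Stand-in step IDs; the real ones are iota constants in internal/playbook.
const (
    BACKUP_ETCD_DATA = iota
    CREATE_PHYSICAL_POOL
    PULL_IMAGE
    CREATE_CONTAINER
    SYNC_CONFIG
    START_CHUNKSERVER
    MARK_SERVER_PENGDDING
    STOP_SERVICE
    CLEAN_SERVICE
    UPDATE_TOPOLOGY
    GET_MIGRATE_STATUS
)

// selectSteps mirrors the branching added to genMigratePlaybook for chunkservers.
func selectSteps(showStatus, clean bool) []int {
    steps := []int{
        BACKUP_ETCD_DATA,
        CREATE_PHYSICAL_POOL, // add the machine being migrated to
        PULL_IMAGE,
        CREATE_CONTAINER,
        SYNC_CONFIG,
        START_CHUNKSERVER,
        MARK_SERVER_PENGDDING, // mark the source chunkserver pending so its copysets transfer away
    }
    if showStatus { // --status
        steps = []int{GET_MIGRATE_STATUS}
    }
    if clean { // --clean
        steps = []int{STOP_SERVICE, CLEAN_SERVICE, CREATE_PHYSICAL_POOL, UPDATE_TOPOLOGY}
    }
    return steps
}

func main() {
    fmt.Println(selectSteps(false, false)) // default migration run
    fmt.Println(selectSteps(true, false))  // watch copyset transferring status
    fmt.Println(selectSteps(false, true))  // clean up the migrated-from server
}
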
cli/command/scale_out.go

Lines changed: 5 additions & 3 deletions
@@ -144,7 +144,7 @@ func NewScaleOutCommand(curveadm *cli.CurveAdm) *cobra.Command {
     return cmd
 }

-func readTopology(curveadm *cli.CurveAdm, filename string) (string, error) {
+func readTopology(curveadm *cli.CurveAdm, filename string, showStatus bool, clean bool) (string, error) {
     if !utils.PathExist(filename) {
         return "", errno.ERR_TOPOLOGY_FILE_NOT_FOUND.
             F("%s: no such file", utils.AbsPath(filename))
@@ -156,7 +156,9 @@ func readTopology(curveadm *cli.CurveAdm, filename string) (string, error) {
     }

     oldData := curveadm.ClusterTopologyData()
-    curveadm.WriteOut("%s", utils.Diff(oldData, data))
+    if !showStatus && !clean {
+        curveadm.WriteOut("%s", utils.Diff(oldData, data))
+    }
     return data, nil
 }

@@ -384,7 +386,7 @@ func runScaleOut(curveadm *cli.CurveAdm, options scaleOutOptions) error {
     }

     // 2) read topology from file
-    data, err := readTopology(curveadm, options.filename)
+    data, err := readTopology(curveadm, options.filename, false, false)
     if err != nil {
         return err
     }

internal/common/common.go

Lines changed: 11 additions & 6 deletions
@@ -53,6 +53,10 @@ const (
     // format
     KEY_ALL_FORMAT_STATUS = "ALL_FORMAT_STATUS"

+    // migrate
+    KEY_MIGRATE_STATUS        = "MIGRATE_STATUS"
+    KEY_MIGRATE_COMMON_STATUS = "MIGRATE_COMMON_STATUS"
+
     // check
     KEY_CHECK_WITH_WEAK          = "CHECK_WITH_WEAK"
     KEY_CHECK_KERNEL_MODULE_NAME = "CHECK_KERNEL_MODULE_NAME"
@@ -71,12 +75,13 @@ const (
     SERVICE_STATUS_UNKNOWN = "Unknown"

     // clean
-    KEY_CLEAN_ITEMS      = "CLEAN_ITEMS"
-    KEY_CLEAN_BY_RECYCLE = "CLEAN_BY_RECYCLE"
-    CLEAN_ITEM_LOG       = "log"
-    CLEAN_ITEM_DATA      = "data"
-    CLEAN_ITEM_CONTAINER = "container"
-    CLEANED_CONTAINER_ID = "-"
+    KEY_CLEAN_ITEMS            = "CLEAN_ITEMS"
+    KEY_CLEAN_BY_RECYCLE       = "CLEAN_BY_RECYCLE"
+    CLEAN_ITEM_LOG             = "log"
+    CLEAN_ITEM_DATA            = "data"
+    CLEAN_ITEM_CONTAINER       = "container"
+    CLEANED_CONTAINER_ID       = "-"
+    KEY_REMOVE_MIGRATED_SERVER = "REMOVE_MIGRATED_SERVER"

     // client
     KEY_CLIENT_HOST = "CLIENT_HOST"

internal/configure/pool.go

Lines changed: 25 additions & 12 deletions
@@ -263,26 +263,39 @@ func ScaleOutClusterPool(old *CurveClusterTopo, dcs []*topology.DeployConfig, po
     old.NPools = old.NPools + 1
 }

-func MigrateClusterServer(old *CurveClusterTopo, migrates []*MigrateServer) {
+func MigrateClusterServer(old *CurveClusterTopo, migrates []*MigrateServer, removeMigratedServer bool) {
     m := map[string]*topology.DeployConfig{} // key: from.Name, value: to.DeployConfig
     for _, migrate := range migrates {
         m[formatName(migrate.From)] = migrate.To
     }

-    for i, server := range old.Servers {
-        dc, ok := m[server.Name]
-        if !ok {
-            continue
+    // add server that will migrate to
+    for fromName, toDc := range m {
+        server := Server{}
+        server.InternalIp = toDc.GetListenIp()
+        server.ExternalIp = toDc.GetListenExternalIp()
+        server.InternalPort = toDc.GetListenPort()
+        server.ExternalPort = toDc.GetListenExternalPort()
+        server.Name = formatName(toDc)
+
+        for _, oldServer := range old.Servers {
+            if oldServer.Name == fromName {
+                server.PhysicalPool = oldServer.PhysicalPool
+                server.Poolset = oldServer.Poolset
+                server.Zone = oldServer.Zone
+            }
         }
+        old.Servers = append(old.Servers, server)
+    }

-        server.InternalIp = dc.GetListenIp()
-        server.ExternalIp = dc.GetListenExternalIp()
-        server.Name = formatName(dc)
-        if server.InternalPort != 0 && server.ExternalPort != 0 {
-            server.InternalPort = dc.GetListenPort()
-            server.ExternalPort = dc.GetListenExternalPort()
+    // remove server that has migrated
+    if removeMigratedServer {
+        for i := 0; i < len(old.Servers); i++ {
+            _, ok := m[old.Servers[i].Name]
+            if ok {
+                old.Servers = append(old.Servers[:i], old.Servers[i+1:]...)
+            }
         }
-        old.Servers[i] = server
     }
 }

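MigrateClusterServer now grows the topology first: each target host gets a new server entry that inherits the physical pool, poolset and zone of the server it replaces. The migrated-from entries are only dropped when removeMigratedServer is set, which the --clean phase appears to drive via the new KEY_REMOVE_MIGRATED_SERVER option. One hedged note on the removal loop: deleting from old.Servers with append inside a forward index loop can skip the element that shifts into the freed slot when two migrated-from servers are adjacent. A filter-style sketch (illustrative Server type, not the project's) that keeps the same semantics without that pitfall:

package main

import "fmt"

// Server is a stripped-down stand-in for the topology server entry.
type Server struct {
    Name string
}

// removeMigrated keeps only the servers whose names are not in the migrated set.
func removeMigrated(servers []Server, migrated map[string]bool) []Server {
    kept := servers[:0]
    for _, s := range servers {
        if !migrated[s.Name] {
            kept = append(kept, s)
        }
    }
    return kept
}

func main() {
    servers := []Server{{"server1"}, {"server2"}, {"server3"}}
    migrated := map[string]bool{"server1": true, "server2": true} // adjacent entries
    fmt.Println(removeMigrated(servers, migrated))                // [{server3}]
}
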
internal/errno/errno.go

Lines changed: 5 additions & 1 deletion
@@ -398,7 +398,11 @@ var (
     ERR_ENCRYPT_FILE_FAILED     = EC(410021, "encrypt file failed")
     ERR_CLIENT_ID_NOT_FOUND     = EC(410022, "client id not found")
     ERR_ENABLE_ETCD_AUTH_FAILED = EC(410023, "enable etcd auth failed")
-
+    ERR_MARK_CHUNKSERVER_PENDDING = EC(410024, "mark chunkserver pendding status failed when migrate")
+    RRR_GET_CLUSTER_MDSADDR       = EC(410025, "failed to get cluster mds addr")
+    ERR_GET_CHUNKSERVER_COPYSET   = EC(410026, "failed to get chunkserver copyset")
+    ERR_GET_MIGRATE_COPYSET       = EC(410027, "migrate chunkserver copyset info must be 2")
+    ERR_CONTAINER_NOT_REMOVED     = EC(410027, "container not removed")
     // 420: common (curvebs client)
     ERR_VOLUME_ALREADY_MAPPED  = EC(420000, "volume already mapped")
     ERR_VOLUME_CONTAINER_LOSED = EC(420001, "volume container is losed")

internal/playbook/factory.go

Lines changed: 6 additions & 0 deletions
@@ -83,6 +83,7 @@ const (
     GET_CLIENT_STATUS
     INSTALL_CLIENT
     UNINSTALL_CLIENT
+    GET_MIGRATE_STATUS

     // bs
     FORMAT_CHUNKFILE_POOL
@@ -93,6 +94,7 @@ const (
     CREATE_VOLUME
     MAP_IMAGE
     UNMAP_IMAGE
+    MARK_SERVER_PENGDDING

     // monitor
     PULL_MONITOR_IMAGE
@@ -247,6 +249,8 @@ func (p *Playbook) createTasks(step *PlaybookStep) (*tasks.Tasks, error) {
         t, err = comm.NewInstallClientTask(curveadm, config.GetCC(i))
     case UNINSTALL_CLIENT:
         t, err = comm.NewUninstallClientTask(curveadm, nil)
+    case GET_MIGRATE_STATUS:
+        t, err = comm.NewGetMigrateStatusTask(curveadm, config.GetDC(i))
     // bs
     case FORMAT_CHUNKFILE_POOL:
         t, err = bs.NewFormatChunkfilePoolTask(curveadm, config.GetFC(i))
@@ -275,6 +279,8 @@ func (p *Playbook) createTasks(step *PlaybookStep) (*tasks.Tasks, error) {
         t, err = bs.NewDeleteTargetTask(curveadm, nil)
     case LIST_TARGETS:
         t, err = bs.NewListTargetsTask(curveadm, nil)
+    case MARK_SERVER_PENGDDING:
+        t, err = bs.NewMarkServerPendding(curveadm, config.GetDC(i))
     // fs
     case CHECK_CLIENT_S3:
         t, err = checker.NewClientS3ConfigureTask(curveadm, config.GetCC(i))

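factory.go shows the two-part pattern this commit uses for each new playbook step: append a constant to the iota block, then map it to a task constructor in createTasks. Appending inside the block shifts the numeric values of the later constants, which should be fine as long as the step IDs are only compared in-process, as they appear to be. A reduced sketch of the pattern, with hypothetical task names standing in for NewGetMigrateStatusTask and NewMarkServerPendding:

package main

import "fmt"

// A cut-down model of the step registry: iota-based IDs plus a dispatch switch.
const (
    UNINSTALL_CLIENT   = iota
    GET_MIGRATE_STATUS // new step slots in here
    FORMAT_CHUNKFILE_POOL
)

type Task struct{ Name string }

// newTask stands in for Playbook.createTasks; the real constructors live in
// internal/task/task/common and internal/task/task/bs.
func newTask(step int) (*Task, error) {
    switch step {
    case GET_MIGRATE_STATUS:
        return &Task{Name: "get migrate status"}, nil
    case FORMAT_CHUNKFILE_POOL:
        return &Task{Name: "format chunkfile pool"}, nil
    }
    return nil, fmt.Errorf("unknown step %d", step)
}

func main() {
    t, err := newTask(GET_MIGRATE_STATUS)
    if err != nil {
        panic(err)
    }
    fmt.Println(t.Name)
}
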