package cmd

import (
	"fmt"
	"path/filepath"

	"github.com/spf13/cobra"

	"github.com/onflow/flow-go/cmd"
	"github.com/onflow/flow-go/cmd/bootstrap/run"
	"github.com/onflow/flow-go/cmd/util/cmd/common"
	hotstuff "github.com/onflow/flow-go/consensus/hotstuff/model"
	model "github.com/onflow/flow-go/model/bootstrap"
	"github.com/onflow/flow-go/model/flow"
	clusterstate "github.com/onflow/flow-go/state/cluster"
| 16 | + "github.com/onflow/flow-go/state/protocol/prg" |
| 17 | +) |
| 18 | + |
| 19 | +var ( |
| 20 | + flagClusteringRandomSeed []byte |
| 21 | +) |
| 22 | + |
| 23 | +// clusterAssignmentCmd represents the clusterAssignment command |
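//
// A rough example invocation (the binary name, file paths, and seed are illustrative only;
// the output directory is supplied via the bootstrap utility's shared output flag, which is
// not registered in this file):
//
//	bootstrap cluster-assignment \
//	    --config ./node-config.json \
//	    --internal-priv-dir ./keys \
//	    --partner-dir ./partner-infos \
//	    --partner-weights ./partner-weights.json \
//	    --collection-clusters 2 \
//	    --epoch-counter 1 \
//	    --clustering-random-seed <hex-encoded-seed>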
var clusterAssignmentCmd = &cobra.Command{
	Use:   "cluster-assignment",
	Short: "Generate cluster assignment",
	Long:  `Generate the cluster assignment for collection nodes from partner and internal node info and weights, and serialize it to a file together with the epoch counter.`,
	Run:   clusterAssignment,
}

func init() {
	rootCmd.AddCommand(clusterAssignmentCmd)
	addClusterAssignmentCmdFlags()
}

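// addClusterAssignmentCmdFlags registers the command-line flags for the cluster-assignment
// command and marks the required ones.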
func addClusterAssignmentCmdFlags() {
	// required parameters for network configuration and generation of root node identities
	clusterAssignmentCmd.Flags().StringVar(&flagConfig, "config", "",
		"path to a JSON file containing multiple node configurations (fields Role, Address, Weight)")
	clusterAssignmentCmd.Flags().StringVar(&flagInternalNodePrivInfoDir, "internal-priv-dir", "", "path to directory "+
		"containing the output from the `keygen` command for internal nodes")
	clusterAssignmentCmd.Flags().StringVar(&flagPartnerNodeInfoDir, "partner-dir", "", "path to directory "+
		"containing one JSON file starting with node-info.pub.<NODE_ID>.json for every partner node (fields "+
		"in the JSON file: Role, Address, NodeID, NetworkPubKey, StakingPubKey)")
	clusterAssignmentCmd.Flags().StringVar(&flagPartnerWeights, "partner-weights", "", "path to a JSON file containing "+
		"a map from each partner node's NodeID to its weight")

	cmd.MarkFlagRequired(clusterAssignmentCmd, "config")
	cmd.MarkFlagRequired(clusterAssignmentCmd, "internal-priv-dir")
	cmd.MarkFlagRequired(clusterAssignmentCmd, "partner-dir")
	cmd.MarkFlagRequired(clusterAssignmentCmd, "partner-weights")

	// optional parameters for cluster assignment
	clusterAssignmentCmd.Flags().UintVar(&flagCollectionClusters, "collection-clusters", 2, "number of collection clusters")

	// required parameters for generation of cluster root blocks
	clusterAssignmentCmd.Flags().Uint64Var(&flagEpochCounter, "epoch-counter", 0, "epoch counter for the epoch beginning with the root block")
	cmd.MarkFlagRequired(clusterAssignmentCmd, "epoch-counter")

	clusterAssignmentCmd.Flags().BytesHexVar(&flagClusteringRandomSeed, "clustering-random-seed", nil, "hex-encoded random seed used to generate the cluster assignment")
	cmd.MarkFlagRequired(clusterAssignmentCmd, "clustering-random-seed")
}

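// clusterAssignment is the Run handler for the cluster-assignment command. It reads partner and
// internal node info, deterministically assigns collection nodes to clusters using the supplied
// random seed, writes the resulting clustering data (with the epoch counter) to the output
// directory, and writes cluster root block votes for the internal collection nodes.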
func clusterAssignment(cmd *cobra.Command, args []string) {
	// Read the partner nodes' and internal nodes' information.
	// By "internal nodes" we mean nodes whose private keys we hold; for "partner nodes" we
	// generally do not have the keys. However, we allow some overlap, in that we tolerate a
	// configuration where information about an "internal node" is also duplicated in the
	// list of "partner nodes".
	log.Info().Msg("collecting partner network and staking keys")
	rawPartnerNodes, err := common.ReadFullPartnerNodeInfos(log, flagPartnerWeights, flagPartnerNodeInfoDir)
	if err != nil {
		log.Fatal().Err(err).Msg("failed to read full partner node infos")
	}
	log.Info().Msg("")

	log.Info().Msg("collecting internal private networking and staking keys")
	internalNodes, err := common.ReadFullInternalNodeInfos(log, flagInternalNodePrivInfoDir, flagConfig)
	if err != nil {
		log.Fatal().Err(err).Msg("failed to read full internal node infos")
	}
	log.Info().Msg("")

	// We now convert to the strict meaning of "internal nodes" vs "partner nodes":
	// • "internal nodes" are the nodes we have the private keys for
	// • "partner nodes" are the nodes we don't have the keys for
	// • both sets are disjoint (no common nodes)
	log.Info().Msg("removing internal partner nodes")
	partnerNodes := common.FilterInternalPartners(rawPartnerNodes, internalNodes)
	log.Info().Msgf("removed %d internal partner nodes", len(rawPartnerNodes)-len(partnerNodes))

	log.Info().Msg("checking constraints on consensus nodes")
	checkConstraints(partnerNodes, internalNodes)
	log.Info().Msg("")

	log.Info().Msg("assembling network and staking keys")
	stakingNodes, err := mergeNodeInfos(internalNodes, partnerNodes)
	if err != nil {
		log.Fatal().Err(err).Msg("failed to merge node infos")
	}
	publicInfo, err := model.ToPublicNodeInfoList(stakingNodes)
	if err != nil {
		log.Fatal().Err(err).Msg("failed to convert to public node info list")
	}
	err = common.WriteJSON(model.PathNodeInfosPub, flagOutdir, publicInfo)
	if err != nil {
		log.Fatal().Err(err).Msg("failed to write json")
	}
	log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathNodeInfosPub)
	log.Info().Msg("")

	// convert to IdentityLists for the cluster assignment
	partnerList := model.ToIdentityList(partnerNodes)
	internalList := model.ToIdentityList(internalNodes)

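	// Instantiate a PRG from the operator-supplied seed, domain-separated for bootstrap cluster
	// assignment, so that re-running the command with the same seed and node set reproduces the
	// same clustering.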
	clusteringPrg, err := prg.New(flagClusteringRandomSeed, prg.BootstrapClusterAssignment, nil)
	if err != nil {
		log.Fatal().Err(err).Msg("failed to initialize pseudorandom generator")
	}

	log.Info().Msg("computing collection node clusters")
	assignments, clusters, canConstructQCs, err := common.ConstructClusterAssignment(log, partnerList, internalList, int(flagCollectionClusters), clusteringPrg)
	if err != nil {
		log.Fatal().Err(err).Msg("unable to generate cluster assignment")
	}
	log.Info().Msg("")

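	// IntermediaryClusteringData is defined elsewhere in this package. From its use here, it
	// presumably bundles the epoch counter with the computed clustering, roughly as sketched
	// below (the field types are assumptions, not confirmed by this file):
	//
	//	type IntermediaryClusteringData struct {
	//		EpochCounter uint64              // epoch the assignment was generated for
	//		Assignments  flow.AssignmentList // per-cluster lists of collector node IDs
	//		Clusters     flow.ClusterList    // per-cluster participant identities
	//	}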
	// serialize the assignment and clusters together with the epoch counter
	output := IntermediaryClusteringData{
		EpochCounter: flagEpochCounter,
		Assignments:  assignments,
		Clusters:     clusters,
	}
	err = common.WriteJSON(model.PathClusteringData, flagOutdir, output)
	if err != nil {
		log.Fatal().Err(err).Msg("failed to write json")
	}
	log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathClusteringData)
	log.Info().Msg("")

	log.Info().Msg("constructing and writing cluster block votes for internal nodes")
	constructClusterRootVotes(
		output,
		model.FilterByRole(internalNodes, flow.RoleCollection),
	)
	log.Info().Msg("")

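	// canConstructQCs reports whether the internal collection nodes alone provide enough votes in
	// every cluster to later construct the cluster root QCs; otherwise partner votes must be
	// gathered before root block creation can proceed.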
	if canConstructQCs {
		log.Info().Msg("enough votes for collection clusters are present - bootstrapping can continue with root block creation")
	} else {
		log.Info().Msg("not enough internal votes to generate cluster QCs - partner votes are needed before root block creation")
	}
}

// constructClusterRootVotes generates and writes root block vote files for the internal collector
// nodes, i.e. those for which private keys are available.
func constructClusterRootVotes(data IntermediaryClusteringData, internalCollectors []model.NodeInfo) {
	for i := range data.Clusters {
		clusterRootBlock, err := clusterstate.CanonicalRootBlock(data.EpochCounter, data.Clusters[i])
		if err != nil {
			log.Fatal().Err(err).Msg("could not construct cluster root block")
		}
		block := hotstuff.GenesisBlockFromFlow(clusterRootBlock.ToHeader())
		// collate private NodeInfos for internal nodes in this cluster
		signers := make([]model.NodeInfo, 0)
		for _, nodeID := range data.Assignments[i] {
			for _, node := range internalCollectors {
				if node.NodeID == nodeID {
					signers = append(signers, node)
				}
			}
		}
		votes, err := run.CreateClusterRootBlockVotes(signers, block)
		if err != nil {
			log.Fatal().Err(err).Msg("could not create cluster root block votes")
		}
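		// write one vote file per internal signer into the root block votes directory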
		for _, vote := range votes {
			path := filepath.Join(model.DirnameRootBlockVotes, fmt.Sprintf(model.FilenameRootClusterBlockVote, vote.SignerID))
			err = common.WriteJSON(path, flagOutdir, vote)
			if err != nil {
				log.Fatal().Err(err).Msg("failed to write json")
			}
			log.Info().Msgf("wrote file %s/%s", flagOutdir, path)
		}
	}
}