diff --git a/actors.md b/actors.md index de39d3ce6..88b382a73 100644 --- a/actors.md +++ b/actors.md @@ -455,6 +455,13 @@ type StorageMinerActorState struct { ## when a PoSt is submitted (not as each new sector commitment is added). provingSet &SectorSet + ## Faulty sectors reported since last SubmitPost, up to the current proving period's challenge time. + currentFaultSet BitField + + ## Faults submitted after the current proving period's challenge time, but before the PoSt for that period + ## is submitted. These become the currentFaultSet when a PoSt is submitted. + nextFaultSet BitField + ## Sectors reported during the last PoSt submission as being 'done'. The collateral ## for them is still being held until the next PoSt submission in case early sector ## removal penalization is needed. @@ -497,7 +504,6 @@ type MinerInfo struct { sectorSize BytesAmount } -``` #### Methods @@ -521,6 +527,7 @@ type MinerInfo struct { | `IsLate` | 16 | | `PaymentVerifyInclusion` | 17 | | `PaymentVerifySector` | 18 | +| `AddFaults` | 19 | #### `Constructor` @@ -590,11 +597,6 @@ func CommitSector(sectorID SectorID, commD, commR, commRStar []byte, proof SealP Fatal("not enough collateral") } - // ensure that the miner cannot commit more sectors than can be proved with a single PoSt - if miner.Sectors.Size() >= POST_SECTORS_COUNT { - Fatal("too many sectors") - } - // Note: There must exist a unique index in the miner's sector set for each // sector ID. The `faults`, `recovered`, and `done` parameters of the // SubmitPoSt method express indices into this sector set. @@ -632,43 +634,29 @@ func CollateralForPower(power BytesAmount) TokenAmount { ```sh type SubmitPost struct { proofs PoStProof - faults [FaultSet] - recovered Bitfield - done Bitfield + doneSet Bitfield } representation tuple ``` **Algorithm** -{{% notice todo %}} -TODO: GenerationAttackTime -{{% /notice %}} - ```go -func SubmitPost(proofs PoStProof, faults []FaultSet, recovered Bitfield, done Bitfield) { - if msg.From != miner.Worker { +func SubmitPost(proofs PoStProof, doneSet Bitfield) { + if msg.From != self.Worker { Fatal("not authorized to submit post for miner") } - // ensure recovered is a subset of the combined fault sets, and that done - // does not intersect with either, and that all sets only reference sectors - // that currently exist - allFaults = AggregateBitfields(faults) - if !miner.ValidateFaultSets(faults, recovered, done) { Fatal("fault sets invalid") - } - feesRequired := 0 + nextProvingPeriodEnd := self.ProvingPeriodEnd + ProvingPeriodDuration(self.SectorSize) - if chain.Now() > self.ProvingPeriodEnd+GenerationAttackTime(self.SectorSize) { - // slashing ourselves - SlashStorageFault(self) - return + // TODO: rework fault handling, for now anything later than 2 proving periods is invalid + if chain.Now() > nextProvingPeriodEnd { + Fatal("PoSt submitted too late") } else if chain.Now() > self.ProvingPeriodEnd { - feesRequired += ComputeLateFee(self.power, chain.Now()-self.provingPeriodEnd) + feesRequired += ComputeLateFee(self.power, chain.Now() - self.provingPeriodEnd) } - feesRequired += ComputeTemporarySectorFailureFee(self.sectorSize, recovered) + feesRequired += ComputeTemporarySectorFailureFee(self.sectorSize, self.currentFaultSet) if msg.Value < feesRequired { Fatal("not enough funds to pay post submission fees") @@ -679,68 +667,44 @@ func SubmitPost(proofs PoStProof, faults []FaultSet, recovered Bitfield, done Bi TransferFunds(msg.From, msg.Value-feesRequired) } - if !CheckPostProof(miner.SectorSize, proof, faults) {
+ var seed []byte + if chain.Now() < self.ProvingPeriodEnd { + // good case, submitted in time + seed = GetRandFromBlock(self.ProvingPeriodEnd - POST_CHALLENGE_TIME) + } else { + // bad case, submitted late, need to take new proving period end as reference + seed = GetRandFromBlock(nextProvingPeriodEnd - POST_CHALLENGE_TIME) + } + + faultSet := self.currentFaultSet + + if !VerifyPoSt(self.SectorSize, self.ProvingSet, seed, proofs, faultSet) { Fatal("proof invalid") } - // combine all the fault set bitfields, and subtract out the recovered - // ones to get the set of sectors permanently lost - permLostSet = allFaults.Subtract(recovered) + // The next fault set becomes the current one + self.currentFaultSet = self.nextFaultSet + self.nextFaultSet = EmptySectorSet() - // burn funds for fees and collateral penalization - self.BurnFunds(CollateralForSize(self.SectorSize*permLostSet.Size()) + feesRequired) + // TODO: penalize for faults - // update sector sets and proving set - miner.Sectors.Subtract(done) - miner.Sectors.Subtract(permLostSet) + // Remove doneSet from the current sectors + self.Sectors.Subtract(doneSet) - // update miner power to the amount of data actually proved during - // the last proving period. - oldPower := miner.Power + // Update miner power to the amount of data actually proved during the last proving period. + oldPower := self.Power - miner.Power = (miner.ProvingSet.Size() - allFaults.Count()) * miner.SectorSize - StorageMarket.UpdateStorage(miner.Power - oldPower) + self.Power = (self.ProvingSet.Size() - faultSet.Count()) * self.SectorSize + StorageMarket.UpdateStorage(self.Power - oldPower) - miner.ProvingSet = miner.Sectors + self.ProvingSet = self.Sectors // Updating proving period given a fixed schedule, independent of late submissions. - miner.ProvingPeriodEnd = miner.ProvingPeriodEnd + ProvingPeriodDuration(miner.SectorSize) + self.ProvingPeriodEnd = nextProvingPeriodEnd // update next done set - miner.NextDoneSet = done - miner.ArbitratedDeals.Clear() -} - -func ValidateFaultSets(faults []FaultSet, recovered, done BitField) bool { - var aggregate BitField - for _, fs := range faults { - aggregate = aggregate.Union(fs.BitField) - } - - // all sectors marked recovered must have actually failed - if !recovered.IsSubsetOf(aggregate) { - return false - } - - // the done set cannot intersect with the aggregated faults - // you can't mark a fault as 'done' - if aggregate.Intersects(done) { - return false - } - - for _, bit := range aggregate.Bits() { - if !miner.HasSectorByID(bit) { - return false - } - } - - for _, bit := range done.Bits() { - if !miner.HasSectorByID(bit) { - return false - } - } - - return true + self.NextDoneSet = doneSet + self.ArbitratedDeals.Clear() } func ProvingPeriodDuration(sectorSize uint64) Integer { @@ -768,34 +732,6 @@ type SlashStorageFault struct { **Algorithm** - -## late submission and resubmission - -- If After provingPeriodEnd - - post submission now requires paying pay late submission fee - - (fixed cost, dependent on the total storage size) - - [implicit] loose all power, can be explicitly slashed for - - post submission returns your power immediately -- If After `GenerationAttackTimeout` (<< 1 proving period) - - ....
nothing changes atm -- If After `PoStTimeout` (< 1 proving period) - - [explicit - slashStorageFault] loose all storage collateral - - clients can arbitrate deals with the miner now - - post submission now requires paying both late fee + lost storage collateral - - the valid post submission returns your power immediately -- If After `SectorFailureTimeout` (> 1 proving period) - - [explicit - slashStorageFault] loose all sectors - - [implicit] resets proving period, as they need to resubmit all sectors after this - - there is now no way to reintroduce the sectors, unless they are resubmitted, etc - - power can not be recovered anymore - -- If [miner] does not call postsubmit - - -Notes: -- Should post submission only return your power, after the following proving period, when after the generation attack timeout -- Is one proving period enough time for abitration of faulty deals for a client? - ```go func SlashStorageFault() { // You can only be slashed once for missing your PoSt. @@ -1154,7 +1090,7 @@ func PaymentVerifyInclusion(extra PieceInclusionVoucherData, proof InclusionProo if !has { Fatal("miner does not have required sector") } - + return ValidatePIP(self.SectorSize, extra.PieceSize, extra.CommP, commD, proof.Proof) } ``` @@ -1186,11 +1122,37 @@ func PaymentVerifyInclusion(extra BigInt, proof Bytes) { if len(proof) > 0 { Fatal("unexpected proof bytes") } - + return self.HasSector(extra) } ``` +#### `AddFaults` + +**Parameters** + +```sh +type AddFaults struct { + faults FaultSet +} representation tuple +``` + +**Algorithm** + +```go +func AddFaults(faults FaultSet) { + challengeBlockHeight := self.ProvingPeriodEnd - POST_CHALLENGE_TIME + + if VM.CurrentBlockHeight() < challengeBlockHeight { + // Up to the challenge time new faults can be added. + self.currentFaultSet = Merge(self.currentFaultSet, faults) + } else { + // After that they are only accounted for in the next proving period + self.nextFaultSet = Merge(self.nextFaultSet, faults) + } +} +``` + ### Payment Channel Actor - **Code Cid:** `<"paych">` diff --git a/content/docs/proof-of-spacetime.md b/content/docs/proof-of-spacetime.md new file mode 120000 index 000000000..d7a0a1d73 --- /dev/null +++ b/content/docs/proof-of-spacetime.md @@ -0,0 +1 @@ +../../proof-of-spacetime.md \ No newline at end of file diff --git a/content/menu/index.md b/content/menu/index.md index d4872238f..54e851a29 100644 --- a/content/menu/index.md +++ b/content/menu/index.md @@ -30,6 +30,7 @@ headless: true * [Proof-of-Replication]({{< relref "/docs/zigzag-porep.md" >}}) * [Circuit]({{< relref "/docs/zigzag-circuit.md" >}}) +* [Proof-of-Spacetime]({{< relref "/docs/proof-of-spacetime.md" >}}) [**Glossary**]({{< relref "/docs/definitions.md" >}}) diff --git a/data-structures.md b/data-structures.md index b75ec1445..d6029be8d 100644 --- a/data-structures.md +++ b/data-structures.md @@ -298,8 +298,14 @@ type Bitfield Bytes ### SectorSet +A sector set stores a mapping of sector IDs to the respective `commR`s. 
+ +```sh +type SectorSet map{SectorID:Bytes} +``` + {{% notice todo %}} -Define me +Improve on this, see https://github.com/filecoin-project/specs/issues/116 {{% /notice %}} diff --git a/drafts/proof-of-spacetime.md b/drafts/proof-of-spacetime.md deleted file mode 100644 index cd557ebee..000000000 --- a/drafts/proof-of-spacetime.md +++ /dev/null @@ -1,198 +0,0 @@ -## Proof-of-Spacetime - -This document descibes - -- VDF-PoSt: a Proof-of-Spacetime using VDFs -- An Extension to PoSt to support multiple sectors -- An Extension to PoSt to support challenges taken from a Random Beacon - -## Syntax - -- **PoSt Epoch**: The total time passing between Online PoReps in the PoSt Computation (in VDF, this interval is the time it takes to run a VDF and an Online PoRep prove step). We define the number of epochs as `POST_EPOCHS_COUNT` -- **PoSt Period**: The total time it takes to run a single PoSt. If a PoSt is repeated multiple times, we define the number of periods as `POST_PERIODS_COUNT`. This can be reasoned as: `PoSt Epoch * POST_EPOCHS_COUNT`. We assume that the best Post Period time is `MIN_POST_PERIOD_TIME` -- **Total proving time**: The time it takes to run a PoSt. Note that a PoSt could be a composition of multiple PoSt. This can be reasoned as: `PoSt Period * POST_PERIODS_COUNT` TODO Check with papers' syntax - -## VDF-PoSt: Proof-of-Spacetime based on VDFs - -VDF-PoSt is a Proof-of-Spacetime that hashes the input and the output of the VDFs it uses `H(Vdf(H(x)))`, hence `VDF`. - -### Parameters - -- **Setup Parameters** - - `CHALLENGE_COUNT`: number of challenges to be asked at each iteration - - `SECTOR_SIZE`: size of the sealed sector in bytes - - `POST_EPOCHS`: number of times we repeat an online Proof-of-Replication in one single PoSt - - `vdf_params`: vdf public parameters - - `sectors_count`: number of sectors over which the proof is performed -- **Public Parameters** - - `CHALLENGE_COUNT`: number of challenges to be asked at each iteration - - `SECTOR_SIZE`: size of the sealed sector in bytes - - `POST_EPOCHS`: number of times we repeat an online Proof-of-Replication in one single PoSt. - - `vdf_params`: vdf public parameters - - `sectors_count`: number of sectors over which the proof is performed - - `challenge_bits`: number of bits in one challenge (length of a merkle path) - - `seed_bits`: number of bits in one challenge -- **Public Inputs** - - `commR: Hash`: Root hash of the Merkle Tree of the sealed sec+tor - - `challenge_seed` : [32]byte: initial randomness (in Filecoin taken from the chain) from which challenges will be generated. 
-- **Private Inputs** - - `replica: SealedSector`: sealed sector -- **Proof** - - `ys: [POST_EPOCHS-1]Value` - - `vdf_proofs: [POST_EPOCHS-1]VDFProof` - - `porep_proofs: [POST_EPOCHS]PorepProof` - -### Methods - -#### `Prove(Public Parameters, Public Inputs, Private Inputs) -> Proof` - -- *Step 1*: Generate `POST_EPOCHS` proofs: - - `mix = challenge_seed` - - `challenge_stream = NewChallengeStream(PublicParams)` - - Repeat `POST_EPOCHS` times: - - `(challenges, challenged_sectors) = challenge_stream(mix)` - - Generate proof: `porep_proof = OnlinePoRep.prove(challenges, challenged_sectors, commR, replica)` - - Note: you can have the tree cached in memory - - append `porep_proof` to `porep_proofs[]` - - Add `porep_proof` to `porep_proofs` - - Slow challenge generation from previous proof `porep_proof`: - - Run VDF and generate a proof - - `x = ExtractVDFInput(porep_proof))` - - `y, vdf_proof = VDF.eval(x)` - - Add `vdf_proof` to `vdf_proofs` - - Add `y` to `ys` - - `mix = y` -- Step 3: Output `porep_proofs`, `vdf_proofs`, `ys` - -#### `Verify(Public Parameters, Public Inputs, Proof) -> bool` - -- *VDF Output Verification* - - For `i` in `0..POST_EPOCHS-1` - - assert: `VDF.verify(pp_vdf, ExtractVDFInput(porep_proofs[i]), ys[i], vdf_proofs[i])` -- *Sequential Online PoRep Verification* - - assert: `OnlinePoRep.verify(commR, challenges_0, porep_proofs[0])` - - for `i` in `1..POST_EPOCHS` - - Generate challenges `for j in 0..CHALLENGE_COUNT: challenges[j] = H(H(ys[i-1])|j)` ` - - assert: `OnlinePoRep.verify(commR, challenges, porep_proofs[i])` - -## EVDF-PoSt: Extending a single CommR PoSt to multiple CommRs - -**Problem**: A PoSt proves space on a single sector (whose Merkle root hash is `CommR`). In order to prove space over multiple sectors, we can either: - -- Run an PoSt for each sector (PoRep guarantees): this means running `Prove` `SECTORS_COUNT` times and have the proof size to be `SECTORS_COUNT`*`PROOF_SIZE` -- Extend a single PoSt to run sectors (PoS guarantees): this means security is not defined per sector, but across sector. For example: Assume PoSt guarantees 99% of the data being stored. A miner has 100 sectors and runs a single PoSt per sector. The worst that can happen is that the miner loses 1% of each sector. If the miner runs a single PoSt across all the sectors, then, the worst it can happen is that the miner loses 1% of all the sectors. - -**Filecoin note**: In Filecoin, we use the second strategy in order to have shorter proofs. It is worth mentioning that misbehaving miners have an economic incentive not to misbehave in Filecoin. This section documents how to extend VDF-PoSt over a single sector, into a VDF-PoSt over multiple sectors. - -### Difference between standard VDF-PoSt and the extension - -- **Public Parameters** & **Setup Parameters** - - `SECTORS_COUNT` which is the number of sectors over which we are running PoSt -- **Public Inputs** - - `CommRs : [SECTORS_COUNT]Hash` instead of `CommR : Hash`. CommRs must have a specific order (e.g. lexographical order, order of timestamps on the blockchain) - - `challenge_seed` : [32]byte: initial randomness (in Filecoin taken from the chain) from which challenges will be generated. -- **Private Inputs** - - `replicas: [SECTORS_COUNT]replica` instead of `replica`. 
(same order as the `CommRs`) -- **Prove** & **Verify** Computation - - A challenge in `challenges` points to a leaf in one of the sectors - - Sector is chosen by `challenge % SECTORS_COUNT` (TODO check if this is fine) - - Leaf is chosen in the same way as in Online Porep (`challenge % SECTORS_SIZE/32`) - -### Methods - -#### `Prove(Public Parameters, Public Inputs, Private Inputs) -> Proof` - -- *Step 1:* Generate first proof - - Generate proof `pos_proof = OnlinePoS.prove(commRs, challenges, replicas)` - - Add `porep_proof` to `pos_proofs` -- *Step 2:* Generate `POST_EPOCHS - 1` remaining proofs: - - Repeat `POST_EPOCHS - 1` times: - - Slow challenge generation from previous proof pos_proof`: - - Run VDF and generate a proof - - `x = ExtractVDFInput(pos_proof))` - - `y, vdf_proof = VDF.eval(x)` - - Add `vdf_proof` to `vdf_proofs` - - Add `y` to `ys` - - `r = H(y)` - - Generate challenges `for i in 0..CHALLENGE_COUNT: challenges[i] = H(r|i)` - - Generate a proof as done in Step 1 -- Step 3: Output `pos_proofs`, `vdf_proofs`, `ys` - -#### `Verify(Public Parameters, Public Inputs, Proof) -> bool` - -- *VDF Output Verification* - - For `i` in `0..POST_EPOCHS-1` - - assert: `VDF.verify(pp_vdf, ExtractVDFInput(pos_proofs[i]), ys[i], vdf_proofs[i])` -- *Sequential Online PoRep Verification* - - assert: `OnlinePoS.verify(commR, challenges_0, pos_proofs[0])` - - for `i` in `1..POST_EPOCHS` - - Generate challenges `for j in 0..CHALLENGE_COUNT: challenges[j] = H(H(ys[i-1])|j)` ` - - assert: `OnlinePoS.verify(commR, challenges, pos_proofs[i])` - -### Security note - -- **Avoiding grinding**: If the prover can choose arbitrary `SECTORS_COUNT`, after receiving a challenge, they can try different sector sizes to have more favourable challenged leaves. In order to avoid this, the prover commit to the `SECTORS_COUNT`, and the `CommR`s before receiving the challenges. In Filecoin, we get this for free, since all the sectors to be proven are committed on chain and the `SECTORS_COUNT` can't be altered. -- **Storage security**: An VDF-PoSt with a single CommR inherits the Online PoRep security guarentees, while this extension does not. In VDF-PoSt, the prover answer `CHALLENGES_COUNT` challenges on a single sector, in this extension, the prover answers `CHALLENGES_COUNT` across multiple sectors. - -## BeaconPost: Taking challenges over time via a Random Beacon - -**Problem with large `POST_EPOCH_COUNTS`**: Different VDF hardware run at different speed. A small percentage of gain in a `PoSt Epoch` would result in a large time difference in `Total Proving Time` between the fastest and the slowest prover. We call the difference between fastest and average prover `VDF speedup gap`. We define a VDF Speedup gap as a percentage (0-1) and we assume a concrete gap for a PoSt Period between the assumed fastest and the best known prover. We define this gap as `VDF_SPEEDUP_GAP`. - -**Mitigating VDF Speedups**: We break up a PoSt into multiple PoSt Periods. Each period must take challenges from a Random Beacon which outputs randomness every interval `MIN_POST_PERIOD_TIME` . In this way, the faster prover can be `VDF_SPEEDUP_GAP` faster in each PoSt Period, but cannot be `VDF_SPEEDUP_GAP` faster over the Total Proving Period. In other words, the fastest prover cannot accumulate the gains at each PoSt period because, they have to wait for the new challenges from the Random Beacon. In the case of Filecoin, the blockchain acts as a Random Beacon). - -- **Setup Parameters** - - Same as Public parameters. 
-- **Public Parameters** - - `POST_PUBLIC_PARAMS`: Public Parameters as defined for VDF PoSt. - - `POST_PERIODS_COUNT: uint` -- **Public Inputs** - - ` CommRs : [SECTORS_COUNT]Hash` instead of `CommR : Hash`. CommRs must have a specific order (e.g. lexographical order, order of timestamps on the blockchain) -- **Private Inputs** - - `replicas: [SECTORS_COUNT]SealedSector`: sealed sectors -- **Proof** - - `post_proofs [POST_PERIODS_COUNT]VDFProof` - -### Methods - -#### `Prove(Public Parameters, Public Inputs, Private Inputs) -> Proof` - -Prove is a process that has access to a Random Beacon functionality that outputs new randomness every `MIN_POST_PERIOD_TIME`: - -- `t = 0`: -- For `t = 0..POST_PERIODS_COUNT`: - - Query Random Beacon: `challenge_seed = RandomBeacon(t)` - - Compute a `post_proofs[t] = PoSt.Prove(CommRs, challenge_seed, replicas)` -- Outputs `post_proofs` - -#### `Verify(Public Parameters, Public Inputs, Proof) -> bool` - -- `t = 0`: - - Query Random Beacon: `r = RandomBeacon(t)` - - Generate challenges: `for i=0..CHALLENGES_COUNT: challenges[i] = H( r | t | i)` - - Assert: `PoSt.Verify(CommRs, challenges, post_proofs[t])` -- For `t = 1..POST_PERIODS_COUNT`: - - Query Random Beacon: `r = RandomBeacon(t)` - - Generate challenges: `for i=0..CHALLENGES_COUNT: challenges[i] = H(ExtractPoStInput(post_proofs[t-1]) | r | t | i )` - - Assert: `PoSt.Verify(CommRs, challenges, post_proofs[t])` TODO check - -### Random Beacon functionality - -A Random Beacon outputs a single randomness every `MIN_POST_PERIOD_TIME`. - -## Other Functions used - -### ExtractVDFInput - -##### Inputs - -- `porep_proof PoRep.Proof` - -##### Computation - -- Hash the concatenation of the leaves of each tree in `OnlinePoRep.Proof` - -### VDF - -- `VDF.setup() -> VDFPublicParams` -- `VDF.eval(pp: VDFPublicParams, x: Value) -> (Value, VDFProof)` -- `VDF.verify(pp: VDFPublicParams, x: Value, y: Value, proof: VDFProof) -> bool ` diff --git a/mining.md b/mining.md index 7012085e3..58ac84fc2 100644 --- a/mining.md +++ b/mining.md @@ -42,24 +42,18 @@ TODO: sectors need to be globally unique. This can be done either by having the #### Step 2: Proving Storage (PoSt creation) -At the beginning of their proving period, miners collect the proving set (the set of all live sealed sectors on the chain at this point), and then call `ProveStorage`. This process will take the entire proving period to complete. - ```go -func ProveStorage(sectorSize BytesAmount, sectors []commR, startTime BlockHeight) (PoSTProof, []FaultSet) { - var proofs []Proofs - var seeds []Seed - var faults []FaultSet - for t := 0; t < ProvingPeriod; t += ReseedPeriod { - seeds = append(seeds, GetSeedFromBlock(startTime+t)) - proof, faultset := GenPost(sectors, seeds[t], vdfParams) - proofs = append(proofs, proof) - faults = append(faults, faultset) - } - return GenPostSnark(sectorSize, sectors, seeds, proofs), faults +func ProveStorage(sectorSize BytesAmount, sectors []commR) PoStProof { + challengeBlockHeight := miner.ProvingPeriodEnd - POST_CHALLENGE_TIME + + // Faults to be used are the currentFaultSet for the miner. + faults := miner.currentFaultSet + seed := GetRandFromBlock(challengeBlockHeight) + return GeneratePoSt(sectorSize, sectors, seed, faults) } ``` -Note: See ['Proof of Space Time'](proofs.md#proof-of-space-time) for more details. +Note: See ['Proof of Space Time'](proof-of-spacetime.md) for more details. The proving set remains consistent during the proving period. 
Any sectors added in the meantime will be included in the next proving set, at the beginning of the next proving period. @@ -122,7 +116,7 @@ func VerifyBlock(blk Block) { } // 2. Verify Timestamp - // first check that it is not in the future + // first check that it is not in the future // allowing for some small grace period to deal with small asynchrony // a potential default for ALLOWABLE_CLOCK_DRIFT could be 2/3*blockTime if blk.GetTime() > networkTime() + ALLOWABLE_CLOCK_DRIFT { @@ -204,6 +198,12 @@ func (state StateTree) LookupPublicKey(a Address) PubKey { } Fatal("can only look up public keys for BLS controlled accounts") } + +// Get the canonical randomness from a block. +func GetRandFromBlock(blk Block) []byte { + ticket := minTicket(blk) + return blake2b(ticket) +} ``` If all of this lines up, the block is valid. The miner repeats this for all blocks in a TipSet, and for all TipSets formed from incoming blocks. @@ -256,7 +256,7 @@ Notice that there is an implicit check that all tickets in the `Tickets` array a For election proof generation, see [checking election results](expected-consensus.md#checking-election-results). -In order to determine that the mined block was generated by an elegible miner, one must check its `ElectionProof`. +In order to determine that the mined block was generated by an eligible miner, one must check its `ElectionProof`. Succinctly, the process of verifying a block's election proof at round N, is as follows. @@ -323,7 +323,7 @@ This process is repeated until either a winning ticket is found (and block publi Let's illustrate this with an example. Miner M is mining at Height H. -Heaviest tipset at H-1 is {B0} +Heaviest tipset at H-1 is {B0} - New Round: - M produces a ticket at H, from B0's ticket (the min ticket at H-1) - M draws the ticket from height H-K to generate an ElectionProof @@ -371,7 +371,7 @@ To create a block, the eligible miner must compute a few fields: - Strip the signatures off of the messages, and insert all the bare `Message`s for them into a sharray. - Aggregate all of the bls signatures into a single signature and use this to fill out the `BLSAggregate` field - For the secpk messages: - - Insert each of the secpk `SignedMessage`s into a sharray + - Insert each of the secpk `SignedMessage`s into a sharray - Create a `TxMeta` object and fill each of its fields as follows: - `blsMessages`: the root cid of the bls messages sharray - `secpkMessages`: the root cid of the secp messages sharray @@ -449,11 +449,11 @@ TODO: Ensure that if a miner earns a block reward while undercollateralized, the ### Notes on Block Reward Application -As mentioned above, every round, a miner checks to see if they have been selected as the leader for that particular round (see [Secret Leader Election](expected-consensus.md#secret-leader-election) in the Expected Consensus spec for more detail). Thus, it is possible that multiple miners may be selected as winners in a given round, and thus, that there will be multiple blocks with the same parents that are produced at the same block height (forming a TipSet). Each of the winning miners will apply the block reward directly to their actor's state in their state tree. +As mentioned above, every round, a miner checks to see if they have been selected as the leader for that particular round (see [Secret Leader Election](expected-consensus.md#secret-leader-election) in the Expected Consensus spec for more detail).
Thus, it is possible that multiple miners may be selected as winners in a given round, and thus, that there will be multiple blocks with the same parents that are produced at the same block height (forming a TipSet). Each of the winning miners will apply the block reward directly to their actor's state in their state tree. Other nodes will receive these blocks and form a TipSet out of the eligible blocks (those that have the same parents and are at the same block height). These nodes will then validate the TipSet. The full procedure for how to verify a TipSet can be found above in [Block Validation](#block-validation). To validate TipSet state, the validating node will, for each block in the TipSet, first apply the block reward value directly to the mining node's account and then apply the messages contained in the block. -Thus, each of the miners who produced a block in the TipSet will receive a block reward. There will be no lockup. These rewards can be spent immediately. +Thus, each of the miners who produced a block in the TipSet will receive a block reward. There will be no lockup. These rewards can be spent immediately. Messages in Filecoin also have an associated transaction fee (based on the gas costs of executing the message). In the case where multiple winning miners included the same message in their blocks, only the first miner will be paid this transaction fee. The first miner is the miner with the lowest ticket value (sorted lexicographically). More details on message execution can be found in the [State Machine spec](state-machine.md#execution-calling-a-method-on-an-actor). diff --git a/proof-of-spacetime.md b/proof-of-spacetime.md new file mode 100644 index 000000000..d0eca9796 --- /dev/null +++ b/proof-of-spacetime.md @@ -0,0 +1,211 @@ +## Proof-of-Spacetime + +This document describes Rational-PoSt, the Proof-of-Spacetime used in Filecoin. + +## Rational PoSt + + + +### Definitions + +| Name | Value | Description | +|------|-------|------------| +| `POST_PROVING_PERIOD` | `2880` blocks (~24h) | The time interval in which a PoSt has to be submitted. | +| `POST_CHALLENGE_TIME` | `240` blocks (~2h) | The offset before the end of the `Proving Period` at which the PoSt challenge is drawn; the actual work of generating the PoSt **cannot** be started any earlier than this point. It is therefore less than a single `Proving Period`. | + +{{% notice todo %}} +**TODO**: The above values are tentative and need both backing from research as well as detailed reasoning why we picked them. +{{% /notice %}} + +### High Level API + +#### Fault Detection + +Fault detection happens over the course of the lifetime of a sector. If a sector becomes unavailable for any reason, the miner is responsible for submitting the known `faults` before the PoSt challenge begins (using the `AddFaults` message to the chain). +Only faults which have been reported by challenge time will be accounted for. If any other faults have occurred, the miner cannot submit a valid PoSt for this proving period. + +The PoSt generation then takes the latest available `faults` of the miner to generate a PoSt matching the committed sectors and faults. + +When a PoSt is successfully submitted, all faults are reset and assumed to be recovered. A miner must either (1) resolve a faulty sector and accept challenges against it in the next proof submission, (2) report a sector faulty again if it persists but is eventually recoverable, or (3) report a sector faulty *and done* if the fault cannot be recovered.
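+
+As an illustration only, the miner-side flow described above might look roughly as follows. This is a minimal sketch: `Miner`, `DetectFaultySectors`, and `SendMessage` are hypothetical placeholders, while `AddFaults`, `GetRandFromBlock`, `GeneratePoSt`, and `POST_CHALLENGE_TIME` refer to the definitions in this spec.
+
+```go
+// Sketch: report known faults before the challenge is drawn, then prove.
+func reportFaultsAndProve(miner *Miner) PoStProof {
+	challengeBlockHeight := miner.ProvingPeriodEnd - POST_CHALLENGE_TIME
+
+	// Hypothetical helper that scans local storage for unreadable sectors.
+	faults := DetectFaultySectors(miner.Sectors)
+	if !faults.IsEmpty() && chain.Now() < challengeBlockHeight {
+		// Report the faults on chain so they are known before challenge generation.
+		SendMessage(miner.Actor, "AddFaults", faults)
+	}
+
+	// Once the challenge time is reached, draw the seed and prove against the
+	// fault set that was reported on chain.
+	seed := GetRandFromBlock(challengeBlockHeight)
+	return GeneratePoSt(miner.SectorSize, miner.Sectors, seed, miner.CurrentFaultSet)
+}
+```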
+ +If the miner knows that the sectors are permanently lost, they can submit them as part of the `doneSet`, to ensure they are removed from the proving set. + +{{% notice note %}} +**Note**: It is important that all faults are known (i.e. submitted to the chain) prior to challenge generation, because otherwise it would be possible to know the challenge set before the actual challenge time. This would allow a miner to report only faults on challenged sectors, with a guarantee that other faulty sectors would not be detected. +{{% /notice %}} + + +{{% notice todo %}} +**TODO**: The penalization for faults is not clear yet. +{{% /notice %}} + +#### Fault Penalization + +Each reported fault carries a penalty with it. + +{{% notice todo %}} +**TODO**: Define the exact penalty structure for this. +{{% /notice %}} + +#### Generation + +`GeneratePoSt` generates a __*Proof of Spacetime*__ over all __*sealed sectors*__ of a single miner, identified by their `commR` commitments. This is accomplished by performing a series of merkle inclusion proofs (__*Proofs of Retrievability*__). Each proof is of a challenged node in a challenged sector. The challenges are generated pseudo-randomly, based on the provided `seed`. At each time step, a number of __*Proofs of Retrievability*__ are performed. + +```go +// Generate a new PoSt. +func GeneratePoSt(sectorSize BytesAmount, sectors SectorSet, seed Seed, faults FaultSet) PoStProof { + // Generate the Merkle Inclusion Proofs + Faults + + inclusionProofs := [] + sectorsSorted := [] + challenges := DerivePoStChallenges(seed, faults, sectorSize, len(sectors)) + + for i := 0; i < len(challenges); i++ { + challenge := challenges[i] + + // Leaf index of the selected sector + inclusionProof, isFault := GenerateMerkleInclusionProof(challenge.Sector, challenge.Leaf) + if isFault { + // faulty sector, need to post a fault to the chain and try to recover from it + return Fatal("Detected late fault") + } + + inclusionProofs[i] = inclusionProof + sectorsSorted[i] = sectors[challenge.Sector] + } + + // Generate the snark + snark_proof := GeneratePoStSnark(sectorSize, challenges, sectorsSorted, inclusionProofs) + + return snark_proof +} +``` + +#### Verification + +`VerifyPoSt` is the functional counterpart to `GeneratePoSt`. It takes all of `GeneratePoSt`'s output, along with those of `GeneratePoSt`'s inputs required to identify the claimed proof. All inputs are required because verification requires sufficient context to determine not only that a proof is valid but also that the proof indeed corresponds to what it purports to prove. + +```go +// Verify a PoSt. +func VerifyPoSt(sectorSize BytesAmount, sectors SectorSet, seed Seed, proof PoStProof, faults FaultSet) bool { + challenges := DerivePoStChallenges(seed, faults, sectorSize, len(sectors)) + sectorsSorted := [] + + // Match up commitments with challenges + for i := 0; i < len(challenges); i++ { + challenge := challenges[i] + sectorsSorted[i] = sectors[challenge.Sector] + } + + // Verify snark + return VerifyPoStSnark(sectorSize, challenges, sectorsSorted, proof) +} +``` + + +#### Types + +```go +// The random challenge seed, provided by the chain. +type Seed [32]byte +``` + +```go +type Challenge struct { + Sector Uint + Leaf Uint +} +``` + +#### Challenge Derivation + +```go +// Derive the full set of challenges for PoSt.
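+// Challenges are derived from the seed; a challenge that lands on a sector in
+// the fault set is re-drawn with an incremented attempt counter, so that only
+// non-faulty sectors end up being challenged. If every sector turns out to be
+// faulty, derivation aborts.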
+func DerivePoStChallenges(seed Seed, faults FaultSet, sectorSize Uint, sectorCount Uint) [POST_CHALLENGES_COUNT]Challenge { + challenges := [] + + for n := 0; n < POST_CHALLENGES_COUNT; n++ { + attemptedSectors := {SectorID:bool} + attempt := 0 + while challenges[n] == nil { + challenge := DerivePoStChallenge(seed, n, attempt, sectorSize, sectorCount) + attempt += 1 + + // check if we landed in a faulty sector + if !faults.Contains(challenge.Sector) { + // Valid challenge + challenges[n] = challenge + } else { + // Invalid challenge, regenerate with the next attempt + attemptedSectors[challenge.Sector] = true + + if len(attemptedSectors) >= sectorCount { + Fatal("All sectors are faulty") + } + } + } + } + + return challenges +} + +// Derive a single challenge for PoSt. +func DerivePoStChallenge(seed Seed, n Uint, attempt Uint, sectorSize Uint, sectorCount Uint) Challenge { + n_bytes := WriteUintToLittleEndian(n) + data := concat(seed, n_bytes, WriteUintToLittleEndian(attempt)) + challenge_bytes := blake2b(data) + + sector_challenge := ReadUintLittleEndian(challenge_bytes[0..8]) + leaf_challenge := ReadUintLittleEndian(challenge_bytes[8..16]) + + return Challenge { + Sector: sector_challenge % sectorCount, + Leaf: leaf_challenge % (sectorSize / NODE_SIZE), + } +} +``` + + +### PoSt Circuit + +#### Public Parameters + +*Parameters that are embedded in the circuits or used to generate the circuit* + +- `POST_CHALLENGES_COUNT: UInt`: Number of challenges. +- `POST_TREE_DEPTH: UInt`: Depth of the Merkle tree. Note: this is `(log_2(Size of original data in bytes/32 bytes per leaf))`. +- `SECTOR_SIZE: UInt`: The size of a single sector in bytes. + +#### Public Inputs + +*Inputs that the prover uses to generate a SNARK proof and that the verifier uses to verify it* + +- `CommRs: [POST_CHALLENGES_COUNT]Fr`: The Merkle tree root hashes of all replicas, ordered to match the inclusion paths and challenge order. +- `InclusionPaths: [POST_CHALLENGES_COUNT]Fr`: Inclusion paths for the replica leaves, ordered to match the `CommRs` and challenge order. (Binary packed bools) + +#### Private Inputs + +*Inputs that the prover uses to generate a SNARK proof; these are not needed by the verifier to verify the proof* + +- `InclusionProofs: [POST_CHALLENGES_COUNT][TREE_DEPTH]Fr`: Merkle tree inclusion proofs, ordered to match the challenge order. +- `InclusionValues: [POST_CHALLENGES_COUNT]Fr`: Value of the encoded leaves for each challenge, ordered to match challenge order. + + +#### Circuit + +##### High Level + +At a high level, we perform one check: + +1. **Inclusion Proofs Checks**: Check the inclusion proofs + +##### Details + +```go +for c in range POST_CHALLENGES_COUNT { + // Inclusion Proofs Checks + assert(MerkleTreeVerify(CommRs[c], InclusionPath[c], InclusionProof[c], InclusionValue[c])) +} +``` + +#### Verification of PoSt proof + +- SNARK proof check: **Check** that given the SNARK proof and the public inputs, the SNARK verification outputs true diff --git a/proofs.md b/proofs.md index edd039a80..073d10ba2 100644 --- a/proofs.md +++ b/proofs.md @@ -37,7 +37,6 @@ Throughout this document, the following definitions are used: - __*commitment:*__ an opaque block of data to which a prover 'commits', enabling subsequent proofs which cannot be validly constructed unless the __*commitment*__ itself was validly constructed. For example: the output of a suitable pseudorandom collision-resistant hash function may serve as a __*commitment*__ to the data which is the preimage of that hash.
Publication of the __*commitment*__ proves that the creator was in possession of the preimage at the time the __*commitment*__ was generated. - __*prover:*__ the party who generates a proof, in Filecoin it's always the Storage Miner. - __*verifier:*__ the party who verifies a proof generated by a __*prover*__, in Filecoin it's a full node. -- __*sectors count:*__ the number of sectors over which a proof-of-spacetime is performed (`POST_SECTORS_COUNT`, `16 PiB / 64 GiB = 262144`). The `SectorID` defined by the chain is a`u64`, which gets encoded as a 31-byte array for the purpose of proofs. This transform consists of encoding the number to its little-endian byte representation and then zero-pad to 31-bytes. @@ -133,51 +132,8 @@ The Filecoin node verifies that the correct `proverId` and `sectorId` is used wh ------ -## Proof-of-Spacetime algorithms - -__NOTE:__ __*Proof of Spacetime*__ is in transition. Current implementations are mocked, and the final design has not been implemented. Consumers may refer to the below for reference, but nothing should be implemented until the spec is updated and synchronized with what will be the canonical construction. - -### GeneratePost - -`GeneratePoSt` generates a __*Proof of Spacetime*__ over `POST_SECTORS_COUNT` __*sealed sectors*__ — identified by their `commR` commitments. This is accomplished by performing a series of merkle inclusion proofs (__*Proofs of Retrievability*__). Each proof is of a challenged node in a challenged sector. The challenges are generated pseudo-randomly, based on the provided `challengeSeed`. At each time step, a number of __*Proofs of Retrievability*__ are performed. The result of each such set of __*Proofs of Retrievability*__ is used to seed challenge generation for another iteration. Repeated and necessarily sequential generation of these __*Proofs of Retrievability*__ proves that the claimed __*sealed sectors*__ existed during the time required to generate them. - -Since many __*sealed sectors*__ may be proved at once, it may be the case that one or more __*sealed sectors*__ has been lost, damaged, or otherwise become impossible to validly prove. In this case, a fault is recorded and returned in an array of faults. This allows provers to selectively default on individual __*sealed sector*__ proofs while still providing a verifiable proof of their aggregate __*Proof of Spacetime*__ claims. - -``` -GeneratePoSt - ( - // request represents a request to generate a proof-of-spacetime. - commRs [POST_SECTORS_COUNT][32]byte, // the commR commitments corresponding to the sealed sectors to prove - challengeSeed [32]byte, // a pseudo-random value to be used in challenge generation -) err Error | ( - // response contains PoST proof and any faults that may have occurred. - faults []uint64, // faults encountered while proving (by index of associated commR in the input) - proof []byte -) -``` - -### VerifyPoSt - -`VerifyPoSt` is the functional counterpart to `GeneratePoSt`. It takes all of `GeneratePoSt`'s output, along with those of `GeneratePost`'s inputs required to identify the claimed proof. All inputs are required because verification requires sufficient context to determine not only that a proof is valid but also that the proof indeed corresponds to what it purports to prove. - -``` -VerifyPoSt - ( - // request represents a request to generate verify a proof-of-spacetime. 
- commRs [POST_SECTORS_COUNT][32]byte, // the commRs provided to GeneratePoSt - challengeSeed [32]byte, - faults []uint64 - proof []byte, // Multi-SNARK proof returned by GeneratePoSt - ) err Error | - isValid bool // true iff the provided Proof of Spacetime is valid -``` - ------- - ## Piece Inclusion Proof -### PieceInclusionProof - A `PieceInclusionProof` contains a potentially complex Merkle inclusion proof that all leaves included in `commP` (the piece commitment) are also included in `commD` (the sector data commitment). ``` @@ -218,7 +174,7 @@ The structure of a `PieceInclusionProof` is determined by the start position and The form of a `PieceInclusionProof` is as follows: - The piece's position within the tree must be specified. This, combined with the length provided during verification completely determines the shape of the proof. - The remainder of the proof is a series of `ProofElements`, whose order is interpreted by the proof algorithm and is not (yet: TODO) specified here. The significance of the provided `ProofElements` is described by their role in the verification algorithm below. - + `VerifyPieceInclusionProof` takes a sector data commitment (`commD`), piece commitment (`commP`), sector size, and piece size. Iff it returns true, then `PieceInclusionProof` indeed proves that all of piece's bytes were included in the merkle tree corresponding @@ -249,13 +205,12 @@ Proof verification is as follows: - The proof proceeds in two stages: 1. Zero or more __*candidate `ProofElements`*__ are proved to hash to `commP` through subsequent applications of `RepCompress`. Only after `commP` has been constructed from a set of __*candidate `ProofElements`*__ do those `ProofElements` become __*eligible*__ for use in the second phase of the proof. (Because `RepCompress` takes height as an input, only `ProofElements` from the same height in either the piece or data tree's can be combined. The output of `RepCompress` is always a `ProofElement` with height one greater than that of its two inputs.) - `commP` itself is by definition always __*eligible*__ for use in the second proof phase. - - An __*aligned `PieceInclusionProof`*__ is one whose `start` index is a power of 2, and for which *either* its piece's length is a power of 2 *or* the piece was zero-padded with **Piece Padding** when packed in a sector. In these cases, `commP` exists as a node in the data tree, and a minimal-size `PieceInclusionProof` can be generated. + - An __*aligned `PieceInclusionProof`*__ is one whose `start` index is a power of 2, and for which *either* its piece's length is a power of 2 *or* the piece was zero-padded with **Piece Padding** when packed in a sector. In these cases, `commP` exists as a node in the data tree, and a minimal-size `PieceInclusionProof` can be generated. - In the case of an __*aligned `PieceInclusionProof`*__, zero candidate `ProofElements` are required. (This means that `commP` is the *only* __*eligible `ProofElement`*__.) 2. Provided `ProofElements` are added to the set of __*eligible `ProofElements`*__ by successive application of `RepCompress`, as in the first phase. - When `commD` is produced by application of an __*eligible `ProofElement`*__ and a `ProofElement` provided by the proof, the proof is considered complete. - If all __*eligible `ProofElement`s*__ have been used as inputs to `RepCompress` and are dependencies of the final construction of `commD`, then the proof is considered to be valid. 
- + NOTE: in the case of an __*aligned `PieceInclusionProof`*__ the `ProofElements` take the form of a standard Merkle inclusion proof proving that `commP` is contained in a sub-tree of the data tree whose root is `commD`. Because `commP`'s position within the data tree is fully specified by the tree's height and the piece's start position and length, the verifier can deterministically combine each successive `ProofElement` with the result of the previous `RepCompress` operation as either a right or left input to the next `RepCompress` operation. In this sense, an __*aligned `PieceInclusionProof`*__ is simply a Merkle inclusion proof that `commP` is a constituent of `commD`'s merkle tree *and* that it is located at the appropriate height for it to also be the root of a (piece) tree with `length` leaves. -In the non-aligned case, the principle is similar. However, in this case, `commP` does *not* occur as a node in `commD`'s Merkle tree. This is because the original piece has been packed out-of-order to minimize alignment padding in the sector (at the cost of a larger `PieceInclusionProof`). Because `commP` does not exist as the root of a data sub-tree, it is necessary first to prove that the root of every sub-tree into which the original piece has been decomposed (when reordering) *is* indeed present in the data tree. Once each __*candidate `ProofElement`*__ has been proved to be an actual constituent of `commP`, it must also be shown that this __*eligible `ProofElement`*__ is *also* a constituent of `commD`. - \ No newline at end of file +In the non-aligned case, the principle is similar. However, in this case, `commP` does *not* occur as a node in `commD`'s Merkle tree. This is because the original piece has been packed out-of-order to minimize alignment padding in the sector (at the cost of a larger `PieceInclusionProof`). Because `commP` does not exist as the root of a data sub-tree, it is necessary first to prove that the root of every sub-tree into which the original piece has been decomposed (when reordering) *is* indeed present in the data tree. Once each __*candidate `ProofElement`*__ has been proved to be an actual constituent of `commP`, it must also be shown that this __*eligible `ProofElement`*__ is *also* a constituent of `commD`.
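+
+As a non-normative illustration of the aligned case described in the NOTE above, verification reduces to a standard Merkle inclusion check of `commP` inside `commD`'s tree. The sketch below assumes byte-encoded `ProofElement`s, a power-of-two piece length, and a hypothetical `log2` helper; `RepCompress` is the compression function referenced above.
+
+```go
+// Sketch: verify an aligned PieceInclusionProof, i.e. a Merkle inclusion proof
+// that commP is the root of a subtree of the data tree rooted at commD.
+func verifyAlignedPieceInclusion(commD, commP []byte, start, pieceLeaves uint64, elements [][]byte) bool {
+	cur := commP
+	// Height of commP within the data tree (pieceLeaves is assumed to be a power of two).
+	height := log2(pieceLeaves)
+	// Index of the piece's subtree among all subtrees of that size.
+	pos := start / pieceLeaves
+
+	for _, sibling := range elements {
+		if pos%2 == 0 {
+			// The running hash is the left input at this level.
+			cur = RepCompress(height, cur, sibling)
+		} else {
+			cur = RepCompress(height, sibling, cur)
+		}
+		pos /= 2
+		height++
+	}
+
+	// The proof is valid iff the final compression reproduces commD.
+	return bytes.Equal(cur, commD)
+}
+```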