1 module dcrypt.crypto.random.fortuna.accumulator;
2 
3 
4 import dcrypt.crypto.digests.sha2;
5 import dcrypt.crypto.digest;
6 import dcrypt.bitmanip;
7 
private enum minPoolSize = 64;	/// return empty entropy if pool0's size is < minPoolSize (NOTE(review): not referenced within this file chunk — presumably used by the Fortuna core; verify against callers)
private enum bufferSize = 32;	/// size of the output buffer and of each pool's internal digest state, in bytes
10 
11 // Test shared and non-shared Accumulator
// Verify that the shared and the non-shared Accumulator produce identical
// output streams when fed identical input, and diverge once their inputs differ.
unittest {
	auto plain = new Accumulator;
	auto syncAcc = new shared Accumulator;

	ubyte[32] outA;
	ubyte[32] outB;

	foreach(round; 0..32) {

		plain.extractEntropy(outA);
		syncAcc.extractEntropy(outB);

		assert(outA == outB, "Accumulator does not behave deterministically!");

		plain.addEntropy(0, round % Accumulator.pools, outA);
		syncAcc.addEntropy(0, round % Accumulator.pools, outB);
	}

	// feed extra entropy into only one of the two accumulators
	plain.addEntropy(0, 0, outA);

	plain.extractEntropy(outA);
	syncAcc.extractEntropy(outB);

	assert(outA != outB, "Outputs should be different!");
}
38 
39 
40 
41 /**
42  * This class is a core component of the Fortuna algorithm and is responsible for collecting
43  * and accumulating entropy from various sources.
44  */
@safe
package class Accumulator
{

	public enum pools = 32;	/// Number of entropy pools. TODO 32 might be overkill.

	alias SHA256 Digest; /// use SHA256 as digest

	nothrow @nogc:

	/// Returns: Amount of new seed bytes in pool0 since its last extraction.
	@property
	uint freshEntropyLength() {
		return entropyPools[0].freshEntropy;
	}


	/// Multithreading aware version of `extractEntropy()`.
	///
	/// Params:
	/// buf = Output buffer for the seed. Length must be `Digest.digestLength`.
	@safe
	synchronized void extractEntropy(ubyte[] buf)
	in {
		assert(buf.length == Digest.digestLength, "buffer size does not match digest size");
	}
	body {
		transaction(0, 0, null, buf);
	}

	/// Multithreading aware version of `addEntropy()`.
	///
	/// Params:
	/// sourceID = A number assigned to the entropy source.
	/// pool = The pool to add the entropy to. 0 <= pool < Accumulator.pools
	/// data = Entropy data.
	@safe
	synchronized void addEntropy(in ubyte sourceID, in size_t pool, in ubyte[] data) {
		transaction(sourceID, pool, data, null);
	}

	/**
	 * Extract a seed from the pools into `buf`.
	 *
	 * Pool i contributes to the seed only if the internal extraction counter
	 * is divisible by 2^i, so higher-numbered pools are drained exponentially
	 * less often (the Fortuna reseed schedule).
	 *
	 * Params:
	 * buf = Write the seed into this buffer. Length must be `Digest.digestLength`.
	 */
	void extractEntropy(ubyte[] buf)
	in {
		assert(buf.length == Digest.digestLength, "buffer size does not match digest size");
	}
	body {

		scope(exit) {
			counter++;
		}

		ubyte[Digest.digestLength] iBuf;

		// Iterate by `ref`: EntropyPool is a struct, so iterating by value
		// would call extractEntropy() on a copy and the pool's fresh-entropy
		// counter would never actually be reset.
		foreach(i, ref pool; entropyPools) {
			if(counter % (1u << i) == 0) { // counter divisible by 2^i ? (unsigned shift: 1<<31 would overflow int)
				pool.extractEntropy(iBuf);
				masterPool.addEntropy(iBuf);
			} else {
				// won't be divisible by 2^(i+1) either
				break;
			}
		}

		masterPool.extractEntropy(buf);

	}

	/// Accumulate an entropy event.
	/// 
	/// Params:
	/// sourceID = A number assigned to the source.
	/// pool = The pool to add the entropy. 0 <= pool < Accumulator.pools
	/// data = Entropy data.
	@safe
	void addEntropy(in ubyte sourceID, in size_t pool, in ubyte[] data...)
	in {
		assert(pool < pools, "Pool ID out of range.");
	}
	body {
		ubyte[5] iBuf; // contains sourceID and length of event data

		// pack sourceID and data.length in buffer, so events are unambiguously framed
		iBuf[0] = sourceID;
		toLittleEndian(cast(uint)data.length, iBuf[1..5]);

		entropyPools[pool].addEntropy(iBuf);
		entropyPools[pool].addEntropy(data);
	}

	/// Provides synchronized access to the accumulator.
	/// Used to add entropy or to extract entropy or both at the same time.
	/// 
	/// Params:
	/// sourceID = the ID of the entropy source.
	/// pool = The pool to add the entropy.
	/// data = Entropy data. Can be `null`.
	/// buf = 32 bytes buffer for random data. Can also be `null`.
	@trusted
	private synchronized void transaction(in ubyte sourceID, in size_t pool, in ubyte[] data, ubyte[] buf = null) {
		// Casting away `shared` is safe here because this method is
		// `synchronized`: only one thread can be inside at a time.
		if(data !is null) {
			(cast(Accumulator) this).addEntropy(sourceID, pool, data);
		}
		if(buf !is null) {
			(cast(Accumulator) this).extractEntropy(buf);
		}
	}

	private {
		EntropyPool!Digest[pools] entropyPools;	// pool i is drained every 2^i-th extraction
		EntropyPool!Digest masterPool;	// collects the drained pools' output; produces the final seed
		uint counter = 0; // count how many times extractEntropy() has been called
	}


}
157 
@safe
private struct EntropyPool(Digest) 
if(isDigest!Digest && Digest.digestLength == bufferSize) {

	private Digest accumulator;	// hashes all entropy that flows into this pool
	private uint freshEntropyBytes = 0;	// bytes added since the last extraction

	nothrow @nogc:

	/// Extract a block of entropy bits out of this pool.
	/// The internal state is not leaked: the digest is copied before
	/// finalization and restored afterwards, so the pool keeps accumulating
	/// over its entire history.
	/// 
	/// Params:
	/// oBuf = Output buffer. Length must be at least `Digest.digestLength`.
	/// 
	/// Returns: Slice of `oBuf` pointing to the extracted data.
	ubyte[] extractEntropy(ubyte[] oBuf)
	in {
		assert(oBuf.length >= accumulator.digestLength, "output buffer too small");
	}
	body {
		Digest temp = accumulator;	// save state; finalization would destroy it

		// append a fixed marker before finalizing, so extraction output differs
		// from a plain hash of the pool contents
		accumulator.put(0x01, 0x02, 0x03, 0x04);

		ubyte[] slice = accumulator.finishTo(oBuf); // write to output buffer

		freshEntropyBytes = 0; // out of fresh entropy

		accumulator = temp;	// reset to previous state

		return slice;
	}
	
	/// Accumulate some bytes in the entropy pool.
	/// Params:
	/// b = the entropy to add
	void addEntropy(in ubyte[] b...) {
		accumulator.put(b);
		freshEntropyBytes += b.length;
	}

	/// Returns: the number of bytes that have flown into this pool since the last call of extractEntropy().
	@property
	uint freshEntropy() {
		return freshEntropyBytes;
	}
}
207 
208 
// Verify that assigning one SHA256 digest to another clones its full internal state.
private unittest {

	SHA256 a;
	SHA256 b;

	ubyte[a.digestLength] outA;
	ubyte[b.digestLength] outB;

	a.put(0x01);
	b.put(0x02);
	b = a;             // b now carries a's state (input 0x01)
	a.finishTo(outA);  // finalize a once; b must be unaffected

	a = b;             // restore a from the clone

	a.finishTo(outA);
	b.finishTo(outB);

	assert(outA == outB, "Cloning digests does not work properly.");

}