
Commit cc9fdc0

Version 4.3.0 (#103)
1 parent ab12839 · commit cc9fdc0

File tree

- README.md
- src/deepquantum/__init__.py
- src/deepquantum/communication.py

3 files changed: 70 additions & 10 deletions

3 files changed

+70
-10
lines changed

README.md

Lines changed: 57 additions & 0 deletions
@@ -177,6 +177,63 @@ print(pattern().full_state)
 print(cir() / pattern().full_state)
 ```

+- Distributed simulation of quantum circuit
+
+```python
+import torch
+# OMP_NUM_THREADS=2 torchrun --nproc_per_node=4 main.py
+backend = 'gloo' # for CPU
+# torchrun --nproc_per_node=4 main.py
+backend = 'nccl' # for GPU
+rank, world_size, local_rank = dq.setup_distributed(backend)
+data = torch.arange(4, dtype=torch.float, requires_grad=True)
+cir = dq.DistributedQubitCircuit(4)
+cir.rylayer(encode=True)
+cir.cnot_ring()
+cir.observable(0)
+cir.observable(1, 'x')
+if backend == 'nccl':
+    data = data.to(f'cuda:{local_rank}')
+    cir.to(f'cuda:{local_rank}')
+state = cir(data).amps
+result = cir.measure(with_prob=True)
+exp = cir.expectation().sum()
+exp.backward()
+if rank == 0:
+    print(state)
+    print(result)
+    print(exp)
+    print(data.grad)
+dq.cleanup_distributed()
+```
+
+- Distributed simulation of photonic quantum circuit
+
+```python
+# OMP_NUM_THREADS=2 torchrun --nproc_per_node=4 main.py
+backend = 'gloo' # for CPU
+# torchrun --nproc_per_node=4 main.py
+backend = 'nccl' # for GPU
+rank, world_size, local_rank = dq.setup_distributed(backend)
+nmode = 4
+cutoff = 4
+data = torch.arange(14, dtype=torch.float) / 10
+cir = dq.DistributedQumodeCircuit(nmode, [0] * nmode, cutoff)
+for i in range(nmode):
+    cir.s(i, encode=True)
+for i in range(nmode - 1):
+    cir.bs([i, i + 1], encode=True)
+if backend == 'nccl':
+    data = data.to(f'cuda:{local_rank}')
+    cir.to(f'cuda:{local_rank}')
+state = cir(data).amps
+result = cir.measure(with_prob=True)
+if rank == 0:
+    print(state)
+    print(result)
+dq.cleanup_distributed()
+```
+
 # License

 DeepQuantum is open source, released under the Apache License, Version 2.0.
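A note on reading the two new README snippets: the back-to-back `backend = 'gloo'` / `backend = 'nccl'` assignments are alternatives, not a sequence — keep the line (and the matching torchrun command in the comment above it) for your hardware and drop the other. The sketch below shows one way to fold that choice into a single self-contained preamble; it is illustrative only, and the `import deepquantum as dq` alias is assumed from the earlier README examples rather than taken from this commit.

```python
# Illustrative preamble for the examples above (not part of this commit).
# Launch with: OMP_NUM_THREADS=2 torchrun --nproc_per_node=4 main.py  (CPU)
# or:          torchrun --nproc_per_node=4 main.py                    (GPU)
import torch

import deepquantum as dq  # assumed alias, as in the earlier README examples

# Pick the gloo backend on CPU-only machines and nccl when CUDA is available
backend = 'nccl' if torch.cuda.is_available() else 'gloo'
rank, world_size, local_rank = dq.setup_distributed(backend)

cir = dq.DistributedQubitCircuit(4)
if backend == 'nccl':
    # Each process works on its own GPU, indexed by the local rank
    cir.to(f'cuda:{local_rank}')

# ... build and run the circuit exactly as in the snippets above ...

dq.cleanup_distributed()
```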

src/deepquantum/__init__.py

Lines changed: 2 additions & 1 deletion
@@ -3,7 +3,7 @@
 DeepQuantum can be directly imported.
 """

-__version__ = '4.2.0'
+__version__ = '4.3.0'


 from . import adjoint
@@ -42,6 +42,7 @@
 from .channel import BitFlip, PhaseFlip, Depolarizing, Pauli, AmplitudeDamping, PhaseDamping
 from .channel import GeneralizedAmplitudeDamping
 from .circuit import QubitCircuit, DistributedQubitCircuit
+from .communication import setup_distributed, cleanup_distributed
 from .gate import U3Gate, PhaseShift, Identity, PauliX, PauliY, PauliZ, Hadamard
 from .gate import SGate, SDaggerGate, TGate, TDaggerGate
 from .gate import Rx, Ry, Rz, ProjectionJ, CombinedSingleGate
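Since the second hunk re-exports the distributed helpers at package level, user code no longer needs to reach into the `communication` submodule. A quick illustration, assuming nothing beyond what the import lines above imply:

```python
import deepquantum as dq

# New top-level re-exports added by this commit
from deepquantum import setup_distributed, cleanup_distributed
# The original location keeps working
from deepquantum.communication import setup_distributed as _setup_from_submodule

assert setup_distributed is _setup_from_submodule
print(dq.__version__)  # expected: '4.3.0'
```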

src/deepquantum/communication.py

Lines changed: 11 additions & 9 deletions
@@ -9,7 +9,7 @@
 import torch.distributed as dist


-def setup_distributed(port = '29500', backend = 'nccl') -> Tuple[int, int, int]:
+def setup_distributed(backend: str = 'nccl', port: str = '29500') -> Tuple[int, int, int]:
     """Initialize torch.distributed."""
     try:
         # These should be set by the launch script (e.g., torchrun)
@@ -24,16 +24,18 @@ def setup_distributed(port = '29500', backend = 'nccl') -> Tuple[int, int, int]:
         local_rank = 0
         os.environ['MASTER_ADDR'] = 'localhost'
         os.environ['MASTER_PORT'] = port
-
-    print(f'Initializing distributed setup: Rank {rank}/{world_size}, Local Rank (GPU): {local_rank}')
-
+    if backend == 'nccl':
+        print(f'Initializing distributed setup: Rank {rank}/{world_size}, Local Rank (GPU): {local_rank}')
+    elif backend == 'gloo':
+        print(f'Initializing distributed setup: Rank {rank}/{world_size}, Local Rank (CPU): {local_rank}')
     # Initialize the process group
     dist.init_process_group(backend, world_size=world_size, rank=rank)
-
-    # Pin the current process to a specific GPU
-    torch.cuda.set_device(local_rank)
-
-    print(f'Rank {rank} initialized, using GPU {local_rank}.')
+    if backend == 'nccl':
+        # Pin the current process to a specific GPU
+        torch.cuda.set_device(local_rank)
+        print(f'Rank {rank} initialized, using GPU {local_rank}.')
+    elif backend == 'gloo':
+        print(f'Rank {rank} initialized.')
     return rank, world_size, local_rank

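One behavioural note on the first hunk: the parameter order of `setup_distributed` changes (backend now comes first, port second), so a pre-4.3.0 caller that passed the port positionally would now hand that string to `backend`. A minimal keyword-argument sketch of the new call style, using only the defaults visible in the diff:

```python
import deepquantum as dq

# 4.2.0 signature: setup_distributed(port='29500', backend='nccl')
# 4.3.0 signature: setup_distributed(backend='nccl', port='29500')
# A legacy positional call such as setup_distributed('29500') would now be
# interpreted as backend='29500', so pass both arguments by keyword:
rank, world_size, local_rank = dq.setup_distributed(backend='gloo', port='29500')

# ... run a DistributedQubitCircuit or DistributedQumodeCircuit here ...

dq.cleanup_distributed()
```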
