import dataclasses
from typing import Dict, List, Optional, Sequence, Tuple, Union, cast

import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dist_cp
from torch._utils import _get_device_module
from torch.distributed._shard.sharded_tensor.api import ShardedTensor
from torch.distributed._shard.sharded_tensor.metadata import TensorProperties
from torch.distributed._shard.sharded_tensor.shard import Shard
from torch.distributed._shard.sharding_spec.chunk_sharding_spec import (
    ChunkShardingSpec,
)
from torch.distributed._tensor import DTensor
from torch.distributed.checkpoint._nested_dict import unflatten_state_dict
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner
from torch.distributed.checkpoint.metadata import (
    BytesStorageMetadata,
    ChunkStorageMetadata,
    Metadata,
    MetadataIndex,
    STATE_DICT_TYPE,
    TensorStorageMetadata,
)
from torch.distributed.checkpoint.planner import LoadPlan, LoadPlanner
from torch.distributed.checkpoint.planner_helpers import (
    _create_read_items,
    create_read_items_for_chunk_list,
)
from torch.distributed.checkpoint.utils import (
    _element_wise_add,
    _element_wise_sub,
    _normalize_device_info,
)
from torch.distributed.distributed_c10d import _get_default_group
from torch.distributed.fsdp._shard_utils import _create_chunk_sharded_tensor
from torch.distributed.remote_device import _remote_device

# Maps each state_dict key to (offset, size) of this rank's TP slice;
# offset is None when the value is not a nested/sharded tensor.
STATE_DICT_2D_LAYOUT = Dict[str, Tuple[Optional[Sequence[int]], Sequence[int]]]


__all__ = ["load_sharded_optimizer_state_dict"]


def _gen_rank_device(global_rank: int, device_type: str = "cuda") -> str:
    if device_type == "cpu":
        return "cpu"
    device_module = _get_device_module(device_type)
    if device_module.is_available():
        return _normalize_device_info(
            device_type, global_rank % device_module.device_count()
        )
    return "cpu"


def _create_colwise_spec(
    pg: Optional[dist.ProcessGroup] = None,
) -> ChunkShardingSpec:
    pg_device_type = dist.distributed_c10d._get_pg_default_device(pg).type
    if pg is None:
        placements = [
            f"rank:{idx}/{_gen_rank_device(idx, pg_device_type)}"
            for idx in range(dist.get_world_size())
        ]
    else:
        placements = [
            f"rank:{idx}/{_gen_rank_device(dist.get_global_rank(pg, idx), pg_device_type)}"
            for idx in range(pg.size())
        ]
    return ChunkShardingSpec(
        dim=0,
        placements=cast(List[Union[_remote_device, str]], placements),
    )
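
# Illustrative sketch (not part of the original module): on a hypothetical
# 2-rank job with one CUDA device per rank, _create_colwise_spec(None) builds
# a spec equivalent to
#     ChunkShardingSpec(dim=0, placements=["rank:0/cuda:0", "rank:1/cuda:1"])
# i.e. each tensor is chunked along dim 0 across the ranks of the group.
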
def _is_nested_tensor(val: torch.Tensor) -> bool:
    if type(val) is ShardedTensor:
        if len(val.local_shards()) == 0:
            return False
        if type(val.local_shards()[0].tensor) is ShardedTensor:
            return True
        if type(val.local_shards()[0].tensor) is DTensor:
            raise ValueError("Cannot handle DTensor nested inside ShardedTensor")
    elif type(val) is DTensor and (
        type(val._local_tensor) is DTensor or type(val._local_tensor) is ShardedTensor
    ):
        raise ValueError("Cannot handle nested DTensor")
    return False


def _alloc_tensor(
    props: TensorProperties, size: Sequence[int], device_type: str = "cuda"
) -> torch.Tensor:
    # Allocate an uninitialized tensor with the checkpointed properties on
    # this rank's current device of the requested type.
    return torch.empty(
        size=size,
        dtype=props.dtype,
        layout=props.layout,
        requires_grad=props.requires_grad,
        pin_memory=props.pin_memory,
        device=cast(torch.device, _get_device_module(device_type).current_device()),
    )
def _get_state_dict_2d_layout(
    state_dict: STATE_DICT_TYPE,
) -> Tuple[STATE_DICT_2D_LAYOUT, Optional[dist.ProcessGroup]]:
    """
    We have to load the right TP slice of the optimizer state.
    This is not easy since the per-tensor slicing can't be inferred from checkpoint metadata.
    We take advantage of the model state_dict producing a sliced ST to figure out what we need to load.
    This is pretty fragile and it might be easier for FSDP to compute this info for us.
    Returns a dictionary whose keys are the same as those of ``state_dict`` and
    whose values are a tuple of (offset, size) for the current rank's TP slice.
    N.B. The state_dict *MUST* come from FSDP.sharded_state_dict.
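    For illustration (hypothetical shapes, not from the original docstring):
    if a parameter "net.weight" of global shape [16, 8] is 2-way TP-sharded by
    rows, the rank holding rows 8..15 would see
        specs["net.weight"] == ([8, 0], [8, 8])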
    """
    specs: STATE_DICT_2D_LAYOUT = {}
    dp_pg: Optional[dist.ProcessGroup] = None
    for key, value in state_dict.items():
        specs[key] = (None, value.size())
        if _is_nested_tensor(value):
            assert (
                len(value.local_shards()) == 1
            ), "Cannot handle ST with multiple shards"
            assert isinstance(
                value, ShardedTensor
            ), "Can only handle nested ShardedTensor"
            shard = value.local_shards()[0]
            specs[key] = (
                shard.metadata.shard_offsets,
                shard.metadata.shard_sizes,
            )
            dp_pg = shard.tensor._process_group

    return (specs, dp_pg)


class _ReaderWithOffset(DefaultLoadPlanner):
    translation: Dict[MetadataIndex, MetadataIndex]
    state_dict: STATE_DICT_TYPE
    metadata: Metadata

    def __init__(self, fqn_to_offset: Dict[str, Sequence[int]]) -> None:
        super().__init__()
        self.fqn_to_offset = fqn_to_offset
        self.metadata = Metadata({})
        self.state_dict = {}
        self.translation = {}

    def create_local_plan(self) -> LoadPlan:
        requests = []
        self.translation = {}
        for fqn, obj in self.state_dict.items():
            md = self.metadata.state_dict_metadata[fqn]
            if not isinstance(obj, ShardedTensor):
                requests += _create_read_items(fqn, md, obj)
                continue

            if fqn not in self.fqn_to_offset:
                requests += _create_read_items(fqn, md, obj)
                continue

            offset = self.fqn_to_offset[fqn]

            assert len(obj.local_shards()) == 1
            original_shard = obj.local_shards()[0]
            # Shift the local shard into the checkpoint's (global) coordinate
            # space before computing what to read.
            local_chunks = [
                ChunkStorageMetadata(
                    offsets=torch.Size(
                        _element_wise_add(original_shard.metadata.shard_offsets, offset)
                    ),
                    sizes=torch.Size(original_shard.metadata.shard_sizes),
                )
            ]

            reqs = create_read_items_for_chunk_list(
                fqn, cast(TensorStorageMetadata, md), local_chunks
            )
            # The read items above index into the shifted coordinate space;
            # remember how to translate each one back to the local shard.
            for ri in reqs:
                assert ri.dest_index.offset is not None
                original_offset = _element_wise_sub(ri.dest_index.offset, offset)
                original_index = dataclasses.replace(
                    ri.dest_index, offset=torch.Size(original_offset)
                )
                self.translation[ri.dest_index] = original_index

            requests += reqs
        return LoadPlan(requests)

    def lookup_tensor(self, index: MetadataIndex) -> torch.Tensor:
        return super().lookup_tensor(self.translation.get(index, index))


def load_sharded_optimizer_state_dict(
    model_state_dict: STATE_DICT_TYPE,
    optimizer_key: str,
    storage_reader: dist_cp.StorageReader,
    planner: Optional[LoadPlanner] = None,
) -> STATE_DICT_TYPE:
    """
    Loads a state_dict in conjunction with FSDP sharded optimizer state.
    This is the current recommended way to checkpoint FSDP.
    >>> # xdoctest: +SKIP
    >>> import torch.distributed.checkpoint as dist_cp
    >>> model: torch.nn.Module
    >>> optim_params = model.parameters()
    >>> optim = torch.optim.SGD(optim_params, lr=0.01)
    >>> # Save
    >>> with FSDP.state_dict_type(model, StateDictType.SHARDED_STATE_DICT):
    >>>     state_dict = {
    >>>         "optimizer": FSDP.optim_state_dict(model, optim),
    >>>         "model": model.state_dict()
    >>>     }
    >>>     dist_cp.save_state_dict(
    >>>         state_dict=state_dict,
    >>>         storage_writer=dist_cp.FileSystemWriter("checkpoint"),
    >>>         planner=dist_cp.DefaultSavePlanner(),
    >>>     )
    >>>
    >>> # Load
    >>> with FSDP.state_dict_type(model_tp, StateDictType.SHARDED_STATE_DICT):
    >>>     model_state_dict = model_tp.state_dict()
    >>>     checkpoint = {
    >>>         "model": model_state_dict
    >>>     }
    >>>     dist_cp.load_state_dict(
    >>>         state_dict=checkpoint,
    >>>         storage_reader=dist_cp.FileSystemReader("checkpoint"),
    >>>         planner=dist_cp.DefaultLoadPlanner(),
    >>>     )
    >>>     model.load_state_dict(checkpoint["model"])
    >>>
    >>>     optim_state = dist_cp.load_sharded_optimizer_state_dict(
    >>>         model_state_dict,
    >>>         optimizer_key="optimizer",
    >>>         storage_reader=dist_cp.FileSystemReader("checkpoint"),
    >>>     )
    >>>
    >>>     flattened_osd = FSDP.optim_state_dict_to_load(
    >>>        model, optim, optim_state["optimizer"]
    >>>     )
    >>>
    >>>     optim.load_state_dict(flattened_osd)
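
    The returned dictionary is unflattened with the same key structure that
    was saved, so the optimizer state is found under ``optimizer_key``.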
    """
    metadata = storage_reader.read_metadata()

    layout_specs, dp_pg = _get_state_dict_2d_layout(model_state_dict)
    dp_pg_device_type = dist.distributed_c10d._get_pg_default_device(dp_pg).type
    device_module = _get_device_module(dp_pg_device_type)

    if dp_pg is None:
        placements = []
        for i in range(dist.get_world_size()):
            device_info = _normalize_device_info(
                dp_pg_device_type, i % device_module.device_count()
            )
            placements.append(f"rank:{i}/{device_info}")
        sharding_spec = ChunkShardingSpec(dim=0, placements=placements)
    else:
        sharding_spec = _create_colwise_spec(dp_pg)

    # Create a state_dict for the optimizer state
    state_dict: STATE_DICT_TYPE = {}

    fqn_to_offset: Dict[str, Sequence[int]] = {}
    for key, value in metadata.state_dict_metadata.items():
        key_path = metadata.planner_data[key]
        if key_path[0] != optimizer_key:
            continue

        if isinstance(value, BytesStorageMetadata):
            state_dict[key] = "<bytes_io>"
            continue

        # value: TensorStorageMetadata
        if value.size.numel() == 1:
            # Scalar state (e.g. "step") is replicated rather than sharded.
            state_dict[key] = _alloc_tensor(
                value.properties, value.size, dp_pg_device_type
            )
        elif dp_pg is None:
            state_dict[key] = _create_chunk_sharded_tensor(
                _alloc_tensor(value.properties, value.size, dp_pg_device_type),
                rank=dist.get_rank(),
                world_size=dist.get_world_size(),
                num_devices_per_node=device_module.device_count(),
                pg=_get_default_group(),
            )
        else:
            spec_key = key_path[2]
            alloc_size = layout_specs.get(spec_key, (None, value.size))[1]

            st_md = sharding_spec.build_metadata(
                torch.Size(alloc_size), value.properties
            )
            local_shards = []
            current_rank = dist.get_rank(dp_pg)
            for shard_md in st_md.shards_metadata:
                if cast(_remote_device, shard_md.placement).rank() != current_rank:
                    continue
                local_shards.append(
                    Shard(
                        tensor=_alloc_tensor(
                            value.properties, shard_md.shard_sizes, dp_pg_device_type
                        ),
                        metadata=shard_md,
                    )
                )

            st = ShardedTensor._init_from_local_shards_and_global_metadata(
                local_shards, st_md, process_group=dp_pg
            )

            if spec_key in layout_specs and layout_specs[spec_key][0] is not None:
                fqn_to_offset[key] = cast(Sequence[int], layout_specs[spec_key][0])

            state_dict[key] = st

    # Whether we unflatten before or after doesn't matter
    dist_cp.load_state_dict(
        state_dict=state_dict,
        storage_reader=storage_reader,
        planner=_ReaderWithOffset(fqn_to_offset) if dp_pg is not None else planner,
    )

    state_dict = unflatten_state_dict(state_dict, metadata.planner_data)

    return state_dict
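
# Illustrative end-to-end sketch (not part of the original module), assuming
# an FSDP-wrapped `model` and its `optim` whose sharded checkpoint was saved
# under "checkpoint" as in the docstring above:
#
#     with FSDP.state_dict_type(model, StateDictType.SHARDED_STATE_DICT):
#         optim_state = load_sharded_optimizer_state_dict(
#             model.state_dict(),
#             optimizer_key="optimizer",
#             storage_reader=dist_cp.FileSystemReader("checkpoint"),
#         )
#         flattened_osd = FSDP.optim_state_dict_to_load(
#             model, optim, optim_state["optimizer"]
#         )
#         optim.load_state_dict(flattened_osd)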