Mirror of https://github.com/k4yt3x/video2x.git (synced 2025-10-31 12:50:59 +01:00)
fixed an NVIDIA-SMI problem submitted by 찬우 전

commit a963e407e0
parent 7f3a377ea8
@@ -108,9 +108,12 @@ def check_memory():
             Avalon.warning('Nvidia-smi not available, skipping available memory check')
             Avalon.warning('If you experience error \"cudaSuccess  out of memory\", try reducing number of threads you\'re using')
         else:
-            # "0" is GPU ID. Both waifu2x drivers use the first GPU available, therefore only 0 makes sense
-            gpu_memory_available = (GPUtil.getGPUs()[0].memoryTotal - GPUtil.getGPUs()[0].memoryUsed) / 1024
-            memory_status.append(('GPU', gpu_memory_available))
+            try:
+                # "0" is GPU ID. Both waifu2x drivers use the first GPU available, therefore only 0 makes sense
+                gpu_memory_available = (GPUtil.getGPUs()[0].memoryTotal - GPUtil.getGPUs()[0].memoryUsed) / 1024
+                memory_status.append(('GPU', gpu_memory_available))
+            except ValueError:
+                pass
 
     # Go though each checkable memory type and check availability
     for memory_type, memory_available in memory_status:
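For context, here is a minimal standalone sketch of the patched logic: GPUtil parses nvidia-smi output to report per-GPU memory in MiB, and can raise ValueError when that output is missing or malformed, which is the failure this commit tolerates. The helper name available_gpu_memory_gb is illustrative and not part of the repository, and catching IndexError for the no-GPU case is an added safeguard beyond what this commit does.

import GPUtil

def available_gpu_memory_gb():
    """Return free memory on the first GPU in GiB, or None if the
    nvidia-smi query fails or reports no usable GPU."""
    try:
        # Both waifu2x drivers use the first available GPU, so only ID 0 matters
        gpu = GPUtil.getGPUs()[0]
        # memoryTotal and memoryUsed are reported in MiB; divide to get GiB
        return (gpu.memoryTotal - gpu.memoryUsed) / 1024
    except ValueError:
        # GPUtil raises ValueError when nvidia-smi output cannot be parsed
        return None
    except IndexError:
        # No GPUs reported at all (assumption: treat like a failed query)
        return None

A caller can then skip the GPU memory check when the helper returns None instead of crashing, which mirrors the commit's intent of degrading gracefully on machines where nvidia-smi misbehaves.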