ovedrive committed
Commit ce402ac · 1 Parent(s): 018b3ab

fix: seed to generator

Files changed (1): inference.py +5 -5
inference.py CHANGED
@@ -43,16 +43,16 @@ class DiffusionInference:
         if seed is not None:
             try:
                 # Convert to integer and add to params
-                params["seed"] = int(seed)
+                generator = torch.Generator(device="cuda").manual_seed(seed)
             except (ValueError, TypeError):
                 # Use random seed if conversion fails
                 random_seed = random.randint(0, 3999999999)  # Max 32-bit integer
-                params["seed"] = random_seed
+                generator = torch.Generator(device="cuda").manual_seed(random_seed)
                 print(f"Warning: Invalid seed value: {seed}, using random seed {random_seed} instead")
         else:
             # Generate random seed when none is provided
             random_seed = random.randint(0, 3999999999)  # Max 32-bit integer
-            params["seed"] = random_seed
+            generator = torch.Generator(device="cuda").manual_seed(random_seed)
             print(f"Using random seed: {random_seed}")

         # Add negative prompt if provided
@@ -66,7 +66,7 @@ class DiffusionInference:

         try:
             # Call the API with all parameters as kwargs
-            image = self.run_text_to_image_pipeline(model, **params)
+            image = self.run_text_to_image_pipeline(model, generator, **params)
             return image
         except Exception as e:
             print(f"Error generating image: {e}")
@@ -164,6 +164,6 @@ class DiffusionInference:

     @spaces.GPU
     def run_text_to_image_pipeline(self, model_name, **kwargs):
-        pipeline = AutoPipelineForText2Image.from_pretrained(model_name, torch_dtype=torch.float16).to("cuda")
+        pipeline = AutoPipelineForText2Image.from_pretrained(model_name, generator=generator, torch_dtype=torch.float16).to("cuda")
         image = pipeline(**kwargs).images[0]
         return image
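
For context, a minimal sketch of how a seed is typically turned into a torch.Generator and handed to a diffusers text-to-image pipeline. This is not the repository's code; the helper name generate_image and its defaults are assumptions. In diffusers, the generator is an argument of the pipeline call itself, not of from_pretrained, so it has to reach the function that actually runs the pipeline (as an explicit parameter or inside the kwargs).

import random

import torch
from diffusers import AutoPipelineForText2Image


def generate_image(model_name, prompt, seed=None, **params):
    # Hypothetical helper, not the code from this commit.
    # Resolve the seed: accept anything int()-convertible, otherwise pick one at random.
    try:
        seed = int(seed)
    except (ValueError, TypeError):
        seed = random.randint(0, 3999999999)  # stay within 32-bit range
        print(f"Using random seed: {seed}")

    # The seed lives in a torch.Generator; diffusers pipelines accept it
    # as `generator=` on the call, not on from_pretrained().
    generator = torch.Generator(device="cuda").manual_seed(seed)

    pipeline = AutoPipelineForText2Image.from_pretrained(
        model_name, torch_dtype=torch.float16
    ).to("cuda")
    return pipeline(prompt, generator=generator, **params).images[0]

With this shape, the same model_name, prompt, and seed should reproduce the same image on the same hardware, and loading the pipeline stays independent of the per-request seed.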