Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -506,11 +506,11 @@ partial void ProcessTrainDatasetModelResponseContent(
/// Example: 1000
/// </param>
/// <param name="loraRank">
/// LoRA rank for model training. Must be one of 64, 128, or 256. Defaults to 128. Note: lora_rank &gt; 128 requires num_chips &gt;= 32.<br/>
/// LoRA rank for model training. Must be one of 64, 128, or 256. Defaults to 128. Note: a lora_rank of 256 requires num_chips &gt;= 64.<br/>
/// Example: 64
/// </param>
/// <param name="numChips">
/// Number of TPU chips to use. Allowed values depend on the base_model_version: V_3_1 supports 16 or 32, V_4_0 supports 128.<br/>
/// Number of TPU chips to use. Allowed values depend on the base_model_version: V_3_1 supports 16, 32, or 64; V_4_0 supports 128.<br/>
/// Example: 32
/// </param>
/// <param name="baseModelVersion">
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,11 +33,11 @@ public partial interface IDatasetsClient
/// Example: 1000
/// </param>
/// <param name="loraRank">
/// LoRA rank for model training. Must be one of 64, 128, or 256. Defaults to 128. Note: lora_rank &gt; 128 requires num_chips &gt;= 32.<br/>
/// LoRA rank for model training. Must be one of 64, 128, or 256. Defaults to 128. Note: a lora_rank of 256 requires num_chips &gt;= 64.<br/>
/// Example: 64
/// </param>
/// <param name="numChips">
/// Number of TPU chips to use. Allowed values depend on the base_model_version: V_3_1 supports 16 or 32, V_4_0 supports 128.<br/>
/// Number of TPU chips to use. Allowed values depend on the base_model_version: V_3_1 supports 16, 32, or 64; V_4_0 supports 128.<br/>
/// Example: 32
/// </param>
/// <param name="baseModelVersion">
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,15 +26,15 @@ public sealed partial class TrainDatasetModelRequest
public int? TrainingSteps { get; set; }

/// <summary>
/// LoRA rank for model training. Must be one of 64, 128, or 256. Defaults to 128. Note: lora_rank &gt; 128 requires num_chips &gt;= 32.<br/>
/// LoRA rank for model training. Must be one of 64, 128, or 256. Defaults to 128. Note: a lora_rank of 256 requires num_chips &gt;= 64.<br/>
/// Example: 64
/// </summary>
/// <example>64</example>
[global::System.Text.Json.Serialization.JsonPropertyName("lora_rank")]
public int? LoraRank { get; set; }

/// <summary>
/// Number of TPU chips to use. Allowed values depend on the base_model_version: V_3_1 supports 16 or 32, V_4_0 supports 128.<br/>
/// Number of TPU chips to use. Allowed values depend on the base_model_version: V_3_1 supports 16, 32, or 64; V_4_0 supports 128.<br/>
/// Example: 32
/// </summary>
/// <example>32</example>
Expand Down Expand Up @@ -85,11 +85,11 @@ public sealed partial class TrainDatasetModelRequest
/// Example: 1000
/// </param>
/// <param name="loraRank">
/// LoRA rank for model training. Must be one of 64, 128, or 256. Defaults to 128. Note: lora_rank &gt; 128 requires num_chips &gt;= 32.<br/>
/// LoRA rank for model training. Must be one of 64, 128, or 256. Defaults to 128. Note: a lora_rank of 256 requires num_chips &gt;= 64.<br/>
/// Example: 64
/// </param>
/// <param name="numChips">
/// Number of TPU chips to use. Allowed values depend on the base_model_version: V_3_1 supports 16 or 32, V_4_0 supports 128.<br/>
/// Number of TPU chips to use. Allowed values depend on the base_model_version: V_3_1 supports 16, 32, or 64; V_4_0 supports 128.<br/>
/// Example: 32
/// </param>
/// <param name="baseModelVersion">
Expand Down
4 changes: 2 additions & 2 deletions src/libs/Ideogram/openapi.json
Original file line number Diff line number Diff line change
Expand Up @@ -8466,13 +8466,13 @@
"type": "integer"
},
"lora_rank": {
"description": "LoRA rank for model training. Must be one of 64, 128, or 256. Defaults to 128. Note: lora_rank > 128 requires num_chips >= 32.\n",
"description": "LoRA rank for model training. Must be one of 64, 128, or 256. Defaults to 128. Note: a lora_rank of 256 requires num_chips >= 64.\n",
"example": 64,
"title": "lora_rank",
"type": "integer"
},
"num_chips": {
"description": "Number of TPU chips to use. Allowed values depend on the base_model_version: V_3_1 supports 16 or 32, V_4_0 supports 128.\n",
"description": "Number of TPU chips to use. Allowed values depend on the base_model_version: V_3_1 supports 16, 32, or 64; V_4_0 supports 128.\n",
"example": 32,
"title": "num_chips",
"type": "integer"
Expand Down