
Commit c28bb43

AssemblyAI he-james authored and committed
Project import generated by Copybara.
GitOrigin-RevId: 17245ee149afe164a2e1cfe9001f4e5df587f264
1 parent b27fba2 commit c28bb43

File tree

2 files changed: +181 -2 lines changed

package.json

Lines changed: 1 addition & 1 deletion

@@ -1,6 +1,6 @@
 {
   "name": "assemblyai",
-  "version": "4.18.1",
+  "version": "4.18.2",
   "description": "The AssemblyAI JavaScript SDK provides an easy-to-use interface for interacting with the AssemblyAI API, which supports async and real-time transcription, as well as the latest LeMUR models.",
   "engines": {
     "node": ">=18"

src/types/openapi.generated.ts

Lines changed: 180 additions & 1 deletion

@@ -1037,7 +1037,7 @@ export type ListTranscriptParams = {
   /**
    * Only get throttled transcripts, overrides the status filter
    * @defaultValue false
-   * @deprecated
+   * @deprecated This parameter is no longer supported and will be removed in a future version.
    */
   throttled_only?: boolean;
 };
@@ -1481,6 +1481,167 @@ export type SeverityScoreSummary = {
   medium: number;
 };
 
+/**
+ * Speaker identification type for speech understanding
+ */
+export type SpeakerType = "role" | "name";
+
+/**
+ * Speaker identification configuration for speech understanding
+ */
+export type SpeakerIdentificationRequest = {
+  /**
+   * The type of speaker identification to perform
+   */
+  speaker_type: SpeakerType;
+  /**
+   * Known speaker values (required when speaker_type is 'role')
+   */
+  known_values?: string[];
+};
+
+/**
+ * Translation configuration for speech understanding
+ */
+export type TranslationRequest = {
+  /**
+   * List of target language codes to translate the transcript into
+   */
+  target_languages: string[];
+  /**
+   * Whether to use formal language in translations (default: false)
+   */
+  formal?: boolean;
+  /**
+   * Whether to match the original utterance structure in translations (default: false)
+   */
+  match_original_utterance?: boolean;
+};
+
+/**
+ * Custom formatting configuration for speech understanding
+ */
+export type CustomFormattingRequest = {
+  /**
+   * Custom date format pattern (e.g., 'mm/dd/yyyy')
+   */
+  date?: string;
+  /**
+   * Custom phone number format pattern (e.g., '(xxx)xxx-xxxx')
+   */
+  phone_number?: string;
+  /**
+   * Custom email format pattern (e.g., 'username\@domain.com')
+   */
+  email?: string;
+};
+
+/**
+ * Speech understanding feature requests grouped together
+ */
+export type SpeechUnderstandingFeatureRequests = {
+  /**
+   * Speaker identification configuration
+   */
+  speaker_identification?: SpeakerIdentificationRequest;
+  /**
+   * Translation configuration
+   */
+  translation?: TranslationRequest;
+  /**
+   * Custom formatting configuration
+   */
+  custom_formatting?: CustomFormattingRequest;
+};
+
+/**
+ * Speech understanding request configuration for LLM Gateway features
+ */
+export type SpeechUnderstandingRequest = {
+  /**
+   * The speech understanding feature requests
+   */
+  request?: SpeechUnderstandingFeatureRequests;
+};
+
+/**
+ * Speaker identification response containing status and mapping
+ */
+export type SpeakerIdentificationResponse = {
+  /**
+   * Status of the speaker identification feature (e.g., 'success')
+   */
+  status: string;
+  /**
+   * Mapping of original speaker labels to identified speaker labels
+   */
+  mapping?: Record<string, string>;
+};
+
+/**
+ * Translation response containing status
+ */
+export type TranslationResponse = {
+  /**
+   * Status of the translation feature
+   */
+  status: string;
+};
+
+/**
+ * Custom formatting response containing mapping and formatted texts
+ */
+export type CustomFormattingResponse = {
+  /**
+   * Mapping of original entities to formatted entities
+   */
+  mapping?: Record<string, string>;
+  /**
+   * Full transcript text with formatted entities
+   */
+  formatted_text?: string;
+  /**
+   * List of utterances with formatted text
+   */
+  formatted_utterances?: Record<string, unknown>[];
+  /**
+   * Status of the custom formatting feature
+   */
+  status: string;
+};
+
+/**
+ * Speech understanding feature responses grouped together
+ */
+export type SpeechUnderstandingFeatureResponses = {
+  /**
+   * Speaker identification results including status and mapping
+   */
+  speaker_identification?: SpeakerIdentificationResponse;
+  /**
+   * Translation results
+   */
+  translation?: TranslationResponse;
+  /**
+   * Custom formatting results
+   */
+  custom_formatting?: CustomFormattingResponse;
+};
+
+/**
+ * Speech understanding response containing both request and response
+ */
+export type SpeechUnderstandingResponse = {
+  /**
+   * The original speech understanding request
+   */
+  request?: SpeechUnderstandingRequest;
+  /**
+   * The speech understanding feature responses
+   */
+  response?: SpeechUnderstandingFeatureResponses;
+};
+
 /**
  * Advanced options for controlling speaker diarization parameters
  */
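
Taken together, the new request types nest into a single object. The sketch below is illustrative only: the type shapes come from this diff, but the import path (assuming the generated types are re-exported from the package root, as the SDK's other generated types are) and every concrete value (speaker roles, language codes, format patterns) are assumptions.

import type {
  CustomFormattingRequest,
  SpeakerIdentificationRequest,
  SpeechUnderstandingRequest,
  TranslationRequest,
} from "assemblyai";

// Illustrative values only; the field names and shapes come from the types above.
const speakerIdentification: SpeakerIdentificationRequest = {
  speaker_type: "role",
  // known_values is required when speaker_type is "role"
  known_values: ["Agent", "Customer"],
};

const translation: TranslationRequest = {
  target_languages: ["es", "de"],
  formal: true,
};

const customFormatting: CustomFormattingRequest = {
  date: "mm/dd/yyyy",
  phone_number: "(xxx)xxx-xxxx",
};

// The individual feature requests are grouped under a single `request` field.
const speechUnderstanding: SpeechUnderstandingRequest = {
  request: {
    speaker_identification: speakerIdentification,
    translation,
    custom_formatting: customFormatting,
  },
};
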
@@ -2709,6 +2870,14 @@ export type Transcript = {
    * See {@link https://www.assemblyai.com/docs/models/speech-recognition | Speech recognition } for more information.
    */
   words?: TranscriptWord[] | null;
+  /**
+   * Speech understanding response when enabled
+   */
+  speech_understanding?: SpeechUnderstandingResponse;
+  /**
+   * Translations of the full transcript text when translation is enabled
+   */
+  translated_texts?: Record<string, string>;
 };
 
 /**
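
Reading the new fields off a finished transcript might look like the following sketch; `Transcript` is the SDK's existing transcript type, while the helper function and logging are assumptions for illustration.

import type { Transcript } from "assemblyai";

// Sketch: inspect speech understanding results on a completed transcript.
function logSpeechUnderstanding(transcript: Transcript): void {
  // Speaker identification mapping, present when the feature was requested.
  const speakerMapping =
    transcript.speech_understanding?.response?.speaker_identification?.mapping;
  if (speakerMapping) {
    console.log("Speaker mapping:", speakerMapping);
  }

  // Full-transcript translations keyed by language code, when translation ran.
  for (const [language, text] of Object.entries(
    transcript.translated_texts ?? {},
  )) {
    console.log(`Translation (${language}):`, text);
  }
}
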
@@ -3214,6 +3383,12 @@ export type TranscriptOptionalParams = {
    * The list of custom vocabulary to boost transcription probability for
    */
   word_boost?: string[];
+  /**
+   * Speech understanding configuration/response for LLM Gateway features
+   */
+  speech_understanding?:
+    | SpeechUnderstandingRequest
+    | SpeechUnderstandingResponse;
 };
 
 /**
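
Since `TranscriptOptionalParams` now accepts `speech_understanding`, a request could pass it through the SDK's existing `client.transcripts.transcribe` method alongside the audio. In the sketch below, the API key handling, audio URL, and chosen target language are placeholder assumptions.

import { AssemblyAI } from "assemblyai";

const client = new AssemblyAI({ apiKey: process.env.ASSEMBLYAI_API_KEY! });

// Sketch: request a French translation via the new optional parameter.
const transcript = await client.transcripts.transcribe({
  audio: "https://example.com/meeting.mp3",
  speech_understanding: {
    request: {
      translation: { target_languages: ["fr"] },
    },
  },
});

// The transcript-level translations added in this diff, keyed by language code.
console.log(transcript.translated_texts?.["fr"]);
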
@@ -3592,6 +3767,10 @@ export type TranscriptUtterance = {
    * The words in the utterance.
    */
   words: TranscriptWord[];
+  /**
+   * Translations of the utterance text when translation is enabled
+   */
+  translated_texts?: Record<string, string>;
 };
 
 /**
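
Per-utterance translations can be read the same way. This sketch assumes utterances were requested (for example via speaker labels) and that translation into "de" was enabled through `speech_understanding`; the helper function is illustrative only.

import type { Transcript } from "assemblyai";

// Sketch: print each utterance together with its German translation, if present.
function printUtteranceTranslations(transcript: Transcript): void {
  for (const utterance of transcript.utterances ?? []) {
    console.log(`${utterance.speaker}: ${utterance.text}`);
    const translated = utterance.translated_texts?.["de"];
    if (translated) {
      console.log(`  de: ${translated}`);
    }
  }
}
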
