translation.json
{
"Transcribe and extract data from audio using AssemblyAI's Speech AI.": "Transcribe and extract data from audio using AssemblyAI's Speech AI.",
"You can retrieve your AssemblyAI API key within your AssemblyAI [Account Settings](https://www.assemblyai.com/app/account?utm_source=activepieces).": "You can retrieve your AssemblyAI API key within your AssemblyAI [Account Settings](https://www.assemblyai.com/app/account?utm_source=activepieces).",
"Upload File": "Upload File",
"Transcribe": "Transcribe",
"Get Transcript": "Get Transcript",
"Get Transcript Sentences": "Get Transcript Sentences",
"Get Transcript Paragraphs": "Get Transcript Paragraphs",
"Get Transcript Subtitles": "Get Transcript Subtitles",
"Get Transcript Redacted Audio": "Get Transcript Redacted Audio",
"Search words in transcript": "Search words in transcript",
"List transcripts": "List transcripts",
"Delete transcript": "Delete transcript",
"Run a Task using LeMUR": "Run a Task using LeMUR",
"Retrieve LeMUR response": "Retrieve LeMUR response",
"Purge LeMUR request data": "Purge LeMUR request data",
"Custom API Call": "Custom API Call",
"Upload a media file to AssemblyAI's servers.": "Upload a media file to AssemblyAI's servers.",
"Transcribe an audio or video file using AssemblyAI.": "Transcribe an audio or video file using AssemblyAI.",
"Retrieves a transcript by its ID.": "Retrieves a transcript by its ID.",
"Retrieve the sentences of the transcript by its ID.": "Retrieve the sentences of the transcript by its ID.",
"Retrieve the paragraphs of the transcript by its ID.": "Retrieve the paragraphs of the transcript by its ID.",
"Export the transcript as SRT or VTT subtitles.": "Export the transcript as SRT or VTT subtitles.",
"Get the result of the redacted audio model.": "Get the result of the redacted audio model.",
"Search through the transcript for keywords. You can search for individual words, numbers, or phrases containing up to five words or numbers.": "Search through the transcript for keywords. You can search for individual words, numbers, or phrases containing up to five words or numbers.",
"Retrieve a list of transcripts you created.\nTranscripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts.": "Retrieve a list of transcripts you created.\nTranscripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts.",
"Remove the data from the transcript and mark it as deleted.": "Remove the data from the transcript and mark it as deleted.",
"Use the LeMUR task endpoint to input your own LLM prompt.": "Use the LeMUR task endpoint to input your own LLM prompt.",
"Retrieve a LeMUR response that was previously generated.": "Retrieve a LeMUR response that was previously generated.",
"Delete the data for a previously submitted LeMUR request.\nThe LLM response data, as well as any context provided in the original request will be removed.": "Delete the data for a previously submitted LeMUR request.\nThe LLM response data, as well as any context provided in the original request will be removed.",
"Make a custom API call to a specific endpoint": "Make a custom API call to a specific endpoint",
"Audio File": "Audio File",
"Audio URL": "Audio URL",
"Language Code": "Language Code",
"Language Detection": "Language Detection",
"Language Confidence Threshold": "Language Confidence Threshold",
"Speech Model": "Speech Model",
"Punctuate": "Punctuate",
"Format Text": "Format Text",
"Disfluencies": "Disfluencies",
"Dual Channel": "Dual Channel",
"Webhook URL": "Webhook URL",
"Webhook Auth Header Name": "Webhook Auth Header Name",
"Webhook Auth Header Value": "Webhook Auth Header Value",
"Key Phrases": "Key Phrases",
"Audio Start From": "Audio Start From",
"Audio End At": "Audio End At",
"Word Boost": "Word Boost",
"Word Boost Level": "Word Boost Level",
"Filter Profanity": "Filter Profanity",
"Redact PII": "Redact PII",
"Redact PII Audio": "Redact PII Audio",
"Redact PII Audio Quality": "Redact PII Audio Quality",
"Redact PII Policies": "Redact PII Policies",
"Redact PII Substitution": "Redact PII Substitution",
"Speaker Labels": "Speaker Labels",
"Speakers Expected": "Speakers Expected",
"Content Moderation": "Content Moderation",
"Content Moderation Confidence": "Content Moderation Confidence",
"Topic Detection": "Topic Detection",
"Custom Spellings": "Custom Spellings",
"Sentiment Analysis": "Sentiment Analysis",
"Auto Chapters": "Auto Chapters",
"Entity Detection": "Entity Detection",
"Speech Threshold": "Speech Threshold",
"Enable Summarization": "Enable Summarization",
"Summary Model": "Summary Model",
"Summary Type": "Summary Type",
"Enable Custom Topics": "Enable Custom Topics",
"Custom Topics": "Custom Topics",
"Wait until transcript is ready": "Wait until transcript is ready",
"Throw if transcript status is error": "Throw if transcript status is error",
"Transcript ID": "Transcript ID",
"Subtitles Format": "Subtitles Format",
"Number of Characters per Caption": "Number of Characters per Caption",
"Download file?": "Download file?",
"Download File Name": "Download File Name",
"Words": "Words",
"Limit": "Limit",
"Status": "Status",
"Created On": "Created On",
"Before ID": "Before ID",
"After ID": "After ID",
"Throttled Only": "Throttled Only",
"Prompt": "Prompt",
"Transcript IDs": "Transcript IDs",
"Input Text": "Input Text",
"Context": "Context",
"Final Model": "Final Model",
"Maximum Output Size": "Maximum Output Size",
"Temperature": "Temperature",
"LeMUR request ID": "LeMUR request ID",
"Method": "Method",
"Headers": "Headers",
"Query Parameters": "Query Parameters",
"Body": "Body",
"Response is Binary ?": "Response is Binary ?",
"No Error on Failure": "No Error on Failure",
"Timeout (in seconds)": "Timeout (in seconds)",
"The File or URL of the audio or video file.": "The File or URL of the audio or video file.",
"The URL of the audio or video file to transcribe.": "The URL of the audio or video file to transcribe.",
"The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n": "The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n",
"Enable [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), either true or false.": "Enable [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), either true or false.",
"The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nDefaults to 0.\n": "The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nDefaults to 0.\n",
"The speech model to use for the transcription. When `null`, the \"best\" model is used.": "The speech model to use for the transcription. When `null`, the \"best\" model is used.",
"Enable Automatic Punctuation, can be true or false": "Enable Automatic Punctuation, can be true or false",
"Enable Text Formatting, can be true or false": "Enable Text Formatting, can be true or false",
"Transcribe Filler Words, like \"umm\", in your media file; can be true or false": "Transcribe Filler Words, like \"umm\", in your media file; can be true or false",
"Enable [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcription, can be true or false.": "Enable [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcription, can be true or false.",
"The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n": "The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n",
"The header name to be sent with the transcript completed or failed webhook requests": "The header name to be sent with the transcript completed or failed webhook requests",
"The header value to send back with the transcript completed or failed webhook requests for added security": "The header value to send back with the transcript completed or failed webhook requests for added security",
"Enable Key Phrases, either true or false": "Enable Key Phrases, either true or false",
"The point in time, in milliseconds, to begin transcribing in your media file": "The point in time, in milliseconds, to begin transcribing in your media file",
"The point in time, in milliseconds, to stop transcribing in your media file": "The point in time, in milliseconds, to stop transcribing in your media file",
"The list of custom vocabulary to boost transcription probability for": "The list of custom vocabulary to boost transcription probability for",
"How much to boost specified words": "How much to boost specified words",
"Filter profanity from the transcribed text, can be true or false": "Filter profanity from the transcribed text, can be true or false",
"Redact PII from the transcribed text using the Redact PII model, can be true or false": "Redact PII from the transcribed text using the Redact PII model, can be true or false",
"Generate a copy of the original media file with spoken PII \"beeped\" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "Generate a copy of the original media file with spoken PII \"beeped\" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.",
"Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.",
"The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.",
"The replacement logic for detected PII, can be \"entity_type\" or \"hash\". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.": "The replacement logic for detected PII, can be \"entity_type\" or \"hash\". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.",
"Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false": "Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false",
"Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.": "Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.",
"Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false": "Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false",
"The confidence threshold for the Content Moderation model. Values must be between 25 and 100.": "The confidence threshold for the Content Moderation model. Values must be between 25 and 100.",
"Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false": "Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false",
"Customize how words are spelled and formatted using to and from values.\nUse a JSON array of objects of the following format:\n```\n[\n {\n \"from\": [\"original\", \"spelling\"],\n \"to\": \"corrected\"\n }\n]\n```\n": "Customize how words are spelled and formatted using to and from values.\nUse a JSON array of objects of the following format:\n```\n[\n {\n \"from\": [\"original\", \"spelling\"],\n \"to\": \"corrected\"\n }\n]\n```\n",
"Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false": "Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false",
"Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false": "Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false",
"Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false": "Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false",
"Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n": "Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n",
"Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false": "Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false",
"The model to summarize the transcript": "The model to summarize the transcript",
"The type of summary": "The type of summary",
"Enable custom topics, either true or false": "Enable custom topics, either true or false",
"The list of custom topics": "The list of custom topics",
"Wait until the transcript status is \"completed\" or \"error\" before moving on to the next step.": "Wait until the transcript status is \"completed\" or \"error\" before moving on to the next step.",
"If the transcript status is \"error\", throw an error.": "If the transcript status is \"error\", throw an error.",
"The maximum number of characters per caption": "The maximum number of characters per caption",
"The desired file name for storing in ActivePieces. Make sure the file extension is correct.": "The desired file name for storing in ActivePieces. Make sure the file extension is correct.",
"Keywords to search for": "Keywords to search for",
"Maximum amount of transcripts to retrieve": "Maximum amount of transcripts to retrieve",
"Filter by transcript status": "Filter by transcript status",
"Only get transcripts created on this date": "Only get transcripts created on this date",
"Get transcripts that were created before this transcript ID": "Get transcripts that were created before this transcript ID",
"Get transcripts that were created after this transcript ID": "Get transcripts that were created after this transcript ID",
"Only get throttled transcripts, overrides the status filter": "Only get throttled transcripts, overrides the status filter",
"Your text to prompt the model to produce a desired output, including any context you want to pass into the model.": "Your text to prompt the model to produce a desired output, including any context you want to pass into the model.",
"A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.\nUse either transcript_ids or input_text as input into LeMUR.\n": "A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.\nUse either transcript_ids or input_text as input into LeMUR.\n",
"Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.\nUse either transcript_ids or input_text as input into LeMUR.\n": "Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.\nUse either transcript_ids or input_text as input into LeMUR.\n",
"Context to provide the model. This can be a string or a free-form JSON value.": "Context to provide the model. This can be a string or a free-form JSON value.",
"The model that is used for the final prompt after compression is performed.\n": "The model that is used for the final prompt after compression is performed.\n",
"Max output size in tokens, up to 4000": "Max output size in tokens, up to 4000",
"The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n": "The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n",
"The ID of the LeMUR request whose data you want to delete. This would be found in the response of the original request.": "The ID of the LeMUR request whose data you want to delete. This would be found in the response of the original request.",
"Authorization headers are injected automatically from your connection.": "Authorization headers are injected automatically from your connection.",
"Enable for files like PDFs, images, etc..": "Enable for files like PDFs, images, etc..",
"English (Global)": "English (Global)",
"English (Australian)": "English (Australian)",
"English (British)": "English (British)",
"English (US)": "English (US)",
"Spanish": "Spanish",
"French": "French",
"German": "German",
"Italian": "Italian",
"Portuguese": "Portuguese",
"Dutch": "Dutch",
"Afrikaans": "Afrikaans",
"Albanian": "Albanian",
"Amharic": "Amharic",
"Arabic": "Arabic",
"Armenian": "Armenian",
"Assamese": "Assamese",
"Azerbaijani": "Azerbaijani",
"Bashkir": "Bashkir",
"Basque": "Basque",
"Belarusian": "Belarusian",
"Bengali": "Bengali",
"Bosnian": "Bosnian",
"Breton": "Breton",
"Bulgarian": "Bulgarian",
"Burmese": "Burmese",
"Catalan": "Catalan",
"Chinese": "Chinese",
"Croatian": "Croatian",
"Czech": "Czech",
"Danish": "Danish",
"Estonian": "Estonian",
"Faroese": "Faroese",
"Finnish": "Finnish",
"Galician": "Galician",
"Georgian": "Georgian",
"Greek": "Greek",
"Gujarati": "Gujarati",
"Haitian": "Haitian",
"Hausa": "Hausa",
"Hawaiian": "Hawaiian",
"Hebrew": "Hebrew",
"Hindi": "Hindi",
"Hungarian": "Hungarian",
"Icelandic": "Icelandic",
"Indonesian": "Indonesian",
"Japanese": "Japanese",
"Javanese": "Javanese",
"Kannada": "Kannada",
"Kazakh": "Kazakh",
"Khmer": "Khmer",
"Korean": "Korean",
"Lao": "Lao",
"Latin": "Latin",
"Latvian": "Latvian",
"Lingala": "Lingala",
"Lithuanian": "Lithuanian",
"Luxembourgish": "Luxembourgish",
"Macedonian": "Macedonian",
"Malagasy": "Malagasy",
"Malay": "Malay",
"Malayalam": "Malayalam",
"Maltese": "Maltese",
"Maori": "Maori",
"Marathi": "Marathi",
"Mongolian": "Mongolian",
"Nepali": "Nepali",
"Norwegian": "Norwegian",
"Norwegian Nynorsk": "Norwegian Nynorsk",
"Occitan": "Occitan",
"Panjabi": "Panjabi",
"Pashto": "Pashto",
"Persian": "Persian",
"Polish": "Polish",
"Romanian": "Romanian",
"Russian": "Russian",
"Sanskrit": "Sanskrit",
"Serbian": "Serbian",
"Shona": "Shona",
"Sindhi": "Sindhi",
"Sinhala": "Sinhala",
"Slovak": "Slovak",
"Slovenian": "Slovenian",
"Somali": "Somali",
"Sundanese": "Sundanese",
"Swahili": "Swahili",
"Swedish": "Swedish",
"Tagalog": "Tagalog",
"Tajik": "Tajik",
"Tamil": "Tamil",
"Tatar": "Tatar",
"Telugu": "Telugu",
"Thai": "Thai",
"Tibetan": "Tibetan",
"Turkish": "Turkish",
"Turkmen": "Turkmen",
"Ukrainian": "Ukrainian",
"Urdu": "Urdu",
"Uzbek": "Uzbek",
"Vietnamese": "Vietnamese",
"Welsh": "Welsh",
"Yiddish": "Yiddish",
"Yoruba": "Yoruba",
"Best": "Best",
"Nano": "Nano",
"Low": "Low",
"Default": "Default",
"High": "High",
"MP3": "MP3",
"WAV": "WAV",
"Account Number": "Account Number",
"Banking Information": "Banking Information",
"Blood Type": "Blood Type",
"Credit Card CVV": "Credit Card CVV",
"Credit Card Expiration": "Credit Card Expiration",
"Credit Card Number": "Credit Card Number",
"Date": "Date",
"Date Interval": "Date Interval",
"Date of Birth": "Date of Birth",
"Driver's License": "Driver's License",
"Drug": "Drug",
"Duration": "Duration",
"Email Address": "Email Address",
"Event": "Event",
"Filename": "Filename",
"Gender Sexuality": "Gender Sexuality",
"Healthcare Number": "Healthcare Number",
"Injury": "Injury",
"IP Address": "IP Address",
"Language": "Language",
"Location": "Location",
"Marital Status": "Marital Status",
"Medical Condition": "Medical Condition",
"Medical Process": "Medical Process",
"Money Amount": "Money Amount",
"Nationality": "Nationality",
"Number Sequence": "Number Sequence",
"Occupation": "Occupation",
"Organization": "Organization",
"Passport Number": "Passport Number",
"Password": "Password",
"Person Age": "Person Age",
"Person Name": "Person Name",
"Phone Number": "Phone Number",
"Physical Attribute": "Physical Attribute",
"Political Affiliation": "Political Affiliation",
"Religion": "Religion",
"Statistics": "Statistics",
"Time": "Time",
"URL": "URL",
"US Social Security Number": "US Social Security Number",
"Username": "Username",
"Vehicle ID": "Vehicle ID",
"Zodiac Sign": "Zodiac Sign",
"Entity Name": "Entity Name",
"Hash": "Hash",
"Informative": "Informative",
"Conversational": "Conversational",
"Catchy": "Catchy",
"Bullets": "Bullets",
"Bullets Verbose": "Bullets Verbose",
"Gist": "Gist",
"Headline": "Headline",
"Paragraph": "Paragraph",
"SRT": "SRT",
"VTT": "VTT",
"Queued": "Queued",
"Processing": "Processing",
"Completed": "Completed",
"Error": "Error",
"Claude 3.5 Sonnet (on Anthropic)": "Claude 3.5 Sonnet (on Anthropic)",
"Claude 3 Opus (on Anthropic)": "Claude 3 Opus (on Anthropic)",
"Claude 3 Haiku (on Anthropic)": "Claude 3 Haiku (on Anthropic)",
"Claude 3 Sonnet (on Anthropic)": "Claude 3 Sonnet (on Anthropic)",
"Claude 2.1 (on Anthropic)": "Claude 2.1 (on Anthropic)",
"Claude 2 (on Anthropic)": "Claude 2 (on Anthropic)",
"Claude Instant 1.2 (on Anthropic)": "Claude Instant 1.2 (on Anthropic)",
"Basic": "Basic",
"Mistral 7B (Hosted by AssemblyAI)": "Mistral 7B (Hosted by AssemblyAI)",
"GET": "GET",
"POST": "POST",
"PATCH": "PATCH",
"PUT": "PUT",
"DELETE": "DELETE",
"HEAD": "HEAD"
}